Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
51 changes: 50 additions & 1 deletion src/hyperlight_common/src/arch/amd64/vmem.rs
Original file line number Diff line number Diff line change
Expand Up @@ -539,7 +539,10 @@ pub unsafe fn virt_to_phys<'a, Op: TableReadOps + 'a>(
) -> impl Iterator<Item = Mapping> + 'a {
// Undo sign-extension, and mask off any sub-page bits
let vmin = (address & ((1u64 << VA_BITS) - 1)) & !(PAGE_SIZE as u64 - 1);
let vmax = core::cmp::min(vmin + len, 1u64 << VA_BITS);
let addr_canonical = address & ((1u64 << VA_BITS) - 1);
// Calculate the maximum virtual address we need to look at based on the starting
// address and length, ensuring we don't go past the end of the address space
let vmax = core::cmp::min(addr_canonical + len, 1u64 << VA_BITS);
modify_ptes::<47, 39, Op, _>(MapRequest {
table_base: op.as_ref().root_table(),
vmin,
Expand Down Expand Up @@ -920,6 +923,52 @@ mod tests {
assert_eq!(mapping.phys_base, 0x1000);
}

#[test]
fn test_virt_to_phys_unaligned_virt_and_across_pages_len() {
    let ops = MockTableOps::new();
    // A two-page mapping starting at virtual/physical 0x1000.
    let mapping = Mapping {
        phys_base: 0x1000,
        virt_base: 0x1000,
        len: 2 * PAGE_SIZE as u64, // 2 pages
        kind: MappingKind::Basic(BasicMapping {
            readable: true,
            writable: true,
            executable: false,
        }),
    };

    unsafe { map(&ops, mapping) };

    // Read 0x300 bytes starting at the unaligned address 0x1F00: the range
    // crosses the page boundary at 0x2000, so the walk must yield one mapping
    // for each of the two pages touched.
    let mappings = unsafe { virt_to_phys(&ops, 0x1F00, 0x300).collect::<Vec<_>>() };
    assert_eq!(mappings.len(), 2, "Should return 2 mappings for 2 pages");
    assert_eq!(mappings[0].phys_base, 0x1000);
    assert_eq!(mappings[1].phys_base, 0x2000);
}

#[test]
fn test_virt_to_phys_unaligned_virt_and_multiple_page_len() {
    let table_ops = MockTableOps::new();
    // Map a region spanning two full pages plus 512 bytes.
    let initial_mapping = Mapping {
        phys_base: 0x1000,
        virt_base: 0x1000,
        len: PAGE_SIZE as u64 * 2 + 0x200, // 2 page + 512 bytes
        kind: MappingKind::Basic(BasicMapping {
            readable: true,
            writable: true,
            executable: false,
        }),
    };

    unsafe { map(&table_ops, initial_mapping) };

    // An unaligned start plus a length of just over two pages touches
    // three distinct pages in total.
    let read_len = PAGE_SIZE as u64 * 2 + 0x10;
    let resolved = unsafe { virt_to_phys(&table_ops, 0x1234, read_len).collect::<Vec<_>>() };

    assert_eq!(resolved.len(), 3, "Should return 3 mappings for 3 pages");
    for (idx, expected_base) in [0x1000u64, 0x2000, 0x3000].iter().enumerate() {
        assert_eq!(resolved[idx].phys_base, *expected_base);
    }
}

#[test]
fn test_virt_to_phys_perms() {
let test = |kind| {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ pub enum FunctionCallType {
}

/// `Functioncall` represents a call to a function in the guest or host.
#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct FunctionCall {
/// The function name
pub function_name: String,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,15 @@ use crate::flatbuffers::hyperlight::generated::{
OpenSpanTypeArgs as FbOpenSpanTypeArgs,
};

/// TODO: Change this constant to be configurable at runtime by the guest.
/// Maybe use a weak symbol that the guest can override at link time?
///
/// Pre-calculated capacity for the encoder buffer.
/// This is to avoid reallocations in the guest.
/// If the next event would exceed this size, the encoder will flush the current buffer to the host
/// before encoding the new event.
pub const MAX_TRACE_DATA_SIZE: usize = 4096;

/// Key-Value pair structure used in tracing spans/events
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct EventKeyValue {
Expand Down
4 changes: 2 additions & 2 deletions src/hyperlight_common/src/vmem.rs
Original file line number Diff line number Diff line change
Expand Up @@ -222,8 +222,8 @@ pub struct Mapping {
/// are being remapped, TLB invalidation may need to be performed
/// afterwards.
pub use arch::map;
/// This function is not presently used for anything, but is useful
/// for debugging
/// This function is presently used for reading the tracing data; it is
/// also useful for debugging
///
/// # Safety
/// This function traverses page table data structures, and should not
Expand Down
6 changes: 3 additions & 3 deletions src/hyperlight_guest/src/exit.rs
Original file line number Diff line number Diff line change
Expand Up @@ -103,9 +103,9 @@ pub(crate) unsafe fn out32(port: u16, val: u32) {
asm!("out dx, eax",
in("dx") port,
in("eax") val,
in("r8") OutBAction::TraceBatch as u64,
in("r9") ptr,
in("r10") len,
in("r12") OutBAction::TraceBatch as u64,
in("r13") ptr,
in("r14") len,
options(preserves_flags, nomem, nostack)
)
};
Expand Down
32 changes: 23 additions & 9 deletions src/hyperlight_guest_bin/src/guest_function/call.rs
Original file line number Diff line number Diff line change
Expand Up @@ -75,23 +75,25 @@ pub(crate) fn call_guest_function(function_call: FunctionCall) -> Result<Vec<u8>
pub(crate) fn internal_dispatch_function() {
// Read the current TSC to report it to the host with the spans/events
// This helps calculating the timestamps relative to the guest call
#[cfg(feature = "trace_guest")]
{
#[cfg(all(feature = "trace_guest", target_arch = "x86_64"))]
let _entered = {
let guest_start_tsc = hyperlight_guest_tracing::invariant_tsc::read_tsc();
// Reset the trace state for the new guest function call with the new start TSC
// This clears any existing spans/events from previous calls ensuring a clean state
hyperlight_guest_tracing::new_call(guest_start_tsc);
}

let handle = unsafe { GUEST_HANDLE };
tracing::span!(tracing::Level::INFO, "internal_dispatch_function").entered()
};

#[cfg(debug_assertions)]
log::trace!("internal_dispatch_function");
let handle = unsafe { GUEST_HANDLE };

let function_call = handle
.try_pop_shared_input_data_into::<FunctionCall>()
.expect("Function call deserialization failed");

#[cfg(debug_assertions)]
tracing::trace!("{:?}", function_call);

let res = call_guest_function(function_call);

match res {
Expand All @@ -111,8 +113,20 @@ pub(crate) fn internal_dispatch_function() {
}
}

// Ensure that any tracing output during the call is flushed to
// the host, if necessary.
// All this tracing logic shall be done right before the call to `hlt` which is done after this
// function returns
#[cfg(all(feature = "trace_guest", target_arch = "x86_64"))]
hyperlight_guest_tracing::flush();
{
// This span captures the internal dispatch function only, without tracing internals.
// Close the span before flushing to ensure that the `flush` call is not included in the span
// NOTE: This is necessary to avoid closing the span twice. Flush closes all the open
// spans when preparing to close a guest function call context.
// It is not strictly mandatory, but it avoids a warning on the host about a span
// that has not been opened but is being closed.
_entered.exit();

// Ensure that any tracing output during the call is flushed to
// the host, if necessary.
hyperlight_guest_tracing::flush();
}
}
22 changes: 19 additions & 3 deletions src/hyperlight_guest_bin/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -226,6 +226,12 @@ pub(crate) extern "C" fn generic_init(
hyperlight_guest_tracing::init_guest_tracing(guest_start_tsc);
}

// Open a span to partly capture the initialization of the guest.
// This is done here because the tracing subscriber is initialized and the guest is in a
// well-known state
#[cfg(all(feature = "trace_guest", target_arch = "x86_64"))]
let _entered = tracing::span!(tracing::Level::INFO, "generic_init").entered();

#[cfg(feature = "macros")]
for registration in __private::GUEST_FUNCTION_INIT {
registration();
Expand All @@ -235,10 +241,20 @@ pub(crate) extern "C" fn generic_init(
hyperlight_main();
}

// Ensure that any tracing output from the initialisation phase is
// flushed to the host, if necessary.
// All this tracing logic shall be done right before the call to `hlt` which is done after this
// function returns
#[cfg(all(feature = "trace_guest", target_arch = "x86_64"))]
hyperlight_guest_tracing::flush();
{
// NOTE: This is necessary to avoid closing the span twice. Flush closes all the open
// spans when preparing to close a guest function call context.
// It is not strictly mandatory, but it avoids a warning on the host about a span
// that has not been opened but is being closed.
_entered.exit();

// Ensure that any tracing output from the initialisation phase is
// flushed to the host, if necessary.
hyperlight_guest_tracing::flush();
}

dispatch_function as usize as u64
}
Expand Down
16 changes: 5 additions & 11 deletions src/hyperlight_guest_tracing/src/state.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ use alloc::vec::Vec;
use core::sync::atomic::{AtomicU64, Ordering};

use hyperlight_common::flatbuffer_wrappers::guest_trace_data::{
EventsBatchEncoder, EventsEncoder, GuestEvent,
EventsBatchEncoder, EventsEncoder, GuestEvent, MAX_TRACE_DATA_SIZE,
};
use hyperlight_common::outb::OutBAction;
use tracing_core::Event;
Expand All @@ -43,12 +43,6 @@ pub(crate) struct GuestState {
stack: Vec<u64>,
}

/// TODO: Change these constant to be configurable at runtime by the guest
/// Maybe use a weak symbol that the guest can override at link time?
///
/// Pre-calculated capacity for the encoder buffer
/// This is to avoid reallocations in the guest
const ENCODER_CAPACITY: usize = 4096;
/// Start with a stack capacity for active spans
const ACTIVE_SPANS_CAPACITY: usize = 64;

Expand All @@ -60,16 +54,16 @@ fn send_to_host(data: &[u8]) {
in("dx") OutBAction::TraceBatch as u16,
in("al") 0u8,
// Additional magic number to identify the action
in("r8") OutBAction::TraceBatch as u64,
in("r9") data.as_ptr() as u64,
in("r10") data.len() as u64,
in("r12") OutBAction::TraceBatch as u64,
in("r13") data.as_ptr() as u64,
in("r14") data.len() as u64,
);
}
}

impl GuestState {
pub(crate) fn new(guest_start_tsc: u64) -> Self {
let mut encoder = EventsBatchEncoder::new(ENCODER_CAPACITY, send_to_host);
let mut encoder = EventsBatchEncoder::new(MAX_TRACE_DATA_SIZE, send_to_host);
encoder.encode(&GuestEvent::GuestStart {
tsc: guest_start_tsc,
});
Expand Down
18 changes: 14 additions & 4 deletions src/hyperlight_host/src/hypervisor/hyperlight_vm.rs
Original file line number Diff line number Diff line change
Expand Up @@ -191,6 +191,8 @@ pub enum RunVmError {
DebugHandler(#[from] HandleDebugError),
#[error("Execution was cancelled by the host")]
ExecutionCancelledByHost,
#[error("Failed to access page: {0}")]
PageTableAccess(AccessPageTableError),
#[cfg(feature = "trace_guest")]
#[error("Failed to get registers: {0}")]
GetRegs(RegisterError),
Expand Down Expand Up @@ -754,10 +756,18 @@ impl HyperlightVm {
tc.end_host_trace();
// Handle the guest trace data if any
let regs = self.vm.regs().map_err(RunVmError::GetRegs)?;
if let Err(e) = tc.handle_trace(&regs, mem_mgr) {
// If no trace data is available, we just log a message and continue
// Is this the right thing to do?
log::debug!("Error handling guest trace: {:?}", e);

// Only parse the trace if it has reported
if tc.has_trace_data(&regs) {
let root_pt = self.get_root_pt().map_err(RunVmError::PageTableAccess)?;

// If something goes wrong with parsing the trace data, we log the error and
// continue execution instead of returning an error since this is not critical
// to correct execution of the guest
tc.handle_trace(&regs, mem_mgr, root_pt)
.unwrap_or_else(|e| {
tracing::error!("Cannot handle trace data: {}", e);
});
}
}
result
Expand Down
111 changes: 111 additions & 0 deletions src/hyperlight_host/src/mem/mgr.rs
Original file line number Diff line number Diff line change
Expand Up @@ -411,6 +411,117 @@ impl SandboxMemoryManager<HostSharedMemory> {

Ok(())
}

/// Read guest memory at a Guest Virtual Address (GVA) by walking the
/// page tables to translate GVA → GPA, then reading from the correct
/// backing memory (shared_mem or scratch_mem).
///
/// This is necessary because with Copy-on-Write (CoW) the guest's
/// virtual pages are backed by physical pages in the scratch
/// region rather than being identity-mapped.
///
/// # Arguments
/// * `gva` - The Guest Virtual Address to read from
/// * `len` - The number of bytes to read
/// * `root_pt` - The root page table physical address (CR3)
///
/// # Errors
/// Returns an error if no mapping covers the range, if a GPA cannot be
/// resolved to host memory, or if the collected mappings do not cover
/// the full `len` bytes requested.
#[cfg(feature = "trace_guest")]
pub(crate) fn read_guest_memory_by_gva(
    &mut self,
    gva: u64,
    len: usize,
    root_pt: u64,
) -> Result<Vec<u8>> {
    use hyperlight_common::vmem::PAGE_SIZE;

    use crate::sandbox::snapshot::{SharedMemoryPageTableBuffer, access_gpa};

    let scratch_size = self.scratch_mem.mem_size();

    // Take exclusive access to both backing regions for the duration of the
    // walk + copy; the double `??` at the end unwraps the two nested
    // `with_exclusivity` Results plus the inner read Result.
    self.shared_mem.with_exclusivity(|snap| {
        self.scratch_mem.with_exclusivity(|scratch| {
            let pt_buf = SharedMemoryPageTableBuffer::new(snap, scratch, scratch_size, root_pt);

            // Walk page tables to get all mappings that cover the GVA range.
            // NOTE(review): the copy loop below assumes the walker yields one
            // mapping per page (page_offset/available_in_page are computed
            // against PAGE_SIZE) — confirm virt_to_phys never coalesces
            // multiple pages into a single larger mapping.
            let mappings: Vec<_> = unsafe {
                hyperlight_common::vmem::virt_to_phys(&pt_buf, gva, len as u64)
            }
            .collect();

            if mappings.is_empty() {
                return Err(new_error!(
                    "No page table mappings found for GVA {:#x} (len {})",
                    gva,
                    len,
                ));
            }

            // Resulting vector of bytes to return
            let mut result = Vec::with_capacity(len);
            let mut current_gva = gva;

            for mapping in &mappings {
                // The page table walker should only return valid mappings
                // that cover our current read position.
                if mapping.virt_base > current_gva {
                    return Err(new_error!(
                        "Page table walker returned mapping with virt_base {:#x} > current read position {:#x}",
                        mapping.virt_base,
                        current_gva,
                    ));
                }

                // Calculate the offset within this page where to start copying
                let page_offset = (current_gva - mapping.virt_base) as usize;

                // Copy either the rest of the request or the rest of this
                // page, whichever is smaller.
                let bytes_remaining = len - result.len();
                let available_in_page = PAGE_SIZE - page_offset;
                let bytes_to_copy = bytes_remaining.min(available_in_page);

                // Translate the GPA to host memory (shared or scratch region)
                let gpa = mapping.phys_base + page_offset as u64;
                let (mem, offset) = access_gpa(snap, scratch, scratch_size, gpa)
                    .ok_or_else(|| {
                        new_error!(
                            "Failed to resolve GPA {:#x} to host memory (GVA {:#x})",
                            gpa,
                            gva
                        )
                    })?;

                // Bounds-checked view into the host backing memory; `get`
                // avoids a panic path on a bad offset.
                let slice = mem
                    .as_slice()
                    .get(offset..offset + bytes_to_copy)
                    .ok_or_else(|| {
                        new_error!(
                            "GPA {:#x} resolved to out-of-bounds host offset {} (need {} bytes)",
                            gpa,
                            offset,
                            bytes_to_copy
                        )
                    })?;

                result.extend_from_slice(slice);
                current_gva += bytes_to_copy as u64;
            }

            // A short read means the walker skipped part of the range (e.g.
            // an unmapped hole); treat that as an error rather than return
            // truncated data.
            if result.len() != len {
                tracing::error!(
                    "Page table walker returned mappings that don't cover the full requested length: got {}, expected {}",
                    result.len(),
                    len,
                );
                return Err(new_error!(
                    "Could not read full GVA range: got {} of {} bytes {:?}",
                    result.len(),
                    len,
                    mappings
                ));
            }

            Ok(result)
        })
    })??
}
}

#[cfg(test)]
Expand Down
Loading