Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions crates/wasmtime/src/runtime/func.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2124,6 +2124,14 @@ impl<T> Caller<'_, T> {
self.store.gc(why)
}

/// Returns the current capacity of the GC heap in bytes, or 0 if the GC
/// heap has not been initialized yet.
///
/// Same as [`Store::gc_heap_capacity`](crate::Store::gc_heap_capacity).
#[cfg(feature = "gc")]
pub fn gc_heap_capacity(&self) -> usize {
    // Delegate to the underlying store's implementation so `Caller` and
    // `Store` report identical values.
    self.store.0.gc_heap_capacity()
}

/// Perform garbage collection asynchronously.
///
/// Same as [`Store::gc_async`](crate::Store::gc_async).
Expand Down
17 changes: 17 additions & 0 deletions crates/wasmtime/src/runtime/store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1016,6 +1016,13 @@ impl<T> Store<T> {
StoreContextMut(&mut self.inner).gc(why)
}

/// Returns the current capacity of the GC heap in bytes, or 0 if the GC
/// heap has not been initialized yet.
///
/// The capacity is the total size of the heap's backing storage, which is
/// distinct from the number of bytes currently allocated within it.
#[cfg(feature = "gc")]
pub fn gc_heap_capacity(&self) -> usize {
    self.inner.gc_heap_capacity()
}

/// Returns the amount of fuel in this [`Store`]. When fuel is enabled, it must
/// be configured via [`Store::set_fuel`].
///
Expand Down Expand Up @@ -2032,6 +2039,16 @@ impl StoreOpaque {
}
}

/// Returns the current capacity of the GC heap in bytes, or 0 if the GC
/// heap has not been initialized yet.
///
/// The GC store is created lazily, so a store that has never allocated a
/// GC object reports a capacity of zero.
#[cfg(feature = "gc")]
pub(crate) fn gc_heap_capacity(&self) -> usize {
    self.gc_store
        .as_ref()
        .map_or(0, |gc_store| gc_store.gc_heap_capacity())
}

/// Helper to assert that a GC store was previously allocated and is
/// present.
///
Expand Down
77 changes: 61 additions & 16 deletions crates/wasmtime/src/runtime/store/gc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,7 @@ use super::*;
use crate::runtime::vm::VMGcRef;

impl StoreOpaque {
/// Attempt to grow the GC heap by `bytes_needed` or, if that fails, perform
/// a garbage collection.
/// Perform any growth or GC needed to allocate `bytes_needed` bytes.
///
/// Note that even when this function returns it is not guaranteed
/// that a GC allocation of size `bytes_needed` will succeed. Growing the GC
Expand All @@ -28,7 +27,7 @@ impl StoreOpaque {
let root = root.map(|r| scope.gc_roots_mut().push_lifo_root(store_id, r));

scope
.grow_or_collect_gc_heap(limiter, bytes_needed, asyncness)
.collect_and_maybe_grow_gc_heap(limiter, bytes_needed, asyncness)
.await;

root.map(|r| {
Expand All @@ -50,23 +49,27 @@ impl StoreOpaque {
}
}

async fn grow_or_collect_gc_heap(
/// Helper invoked as part of `gc`, whose purpose is to GC and
/// maybe grow for a pending allocation of a given size.
async fn collect_and_maybe_grow_gc_heap(
&mut self,
limiter: Option<&mut StoreResourceLimiter<'_>>,
bytes_needed: Option<u64>,
asyncness: Asyncness,
) {
self.do_gc(asyncness).await;
if let Some(n) = bytes_needed
// The gc_zeal's allocation counter will pass `bytes_needed == 0` to
// signify that we shouldn't grow the GC heap, just do a collection.
// The gc_zeal's allocation counter will pass `bytes_needed == 0` to
// signify that we shouldn't grow the GC heap, just do a collection.
&& n > 0
&& n > u64::try_from(self.gc_heap_capacity())
.unwrap()
.saturating_sub(self.gc_store.as_ref().map_or(0, |gc| {
u64::try_from(gc.last_post_gc_allocated_bytes.unwrap_or(0)).unwrap()
}))
{
if self.grow_gc_heap(limiter, n).await.is_ok() {
return;
}
let _ = self.grow_gc_heap(limiter, n).await;
}

self.do_gc(asyncness).await;
}

/// Attempt to grow the GC heap by `bytes_needed` bytes.
Expand Down Expand Up @@ -170,8 +173,14 @@ impl StoreOpaque {
}
}

/// Attempt an allocation, if it fails due to GC OOM, then do a GC and
/// retry.
/// Attempt an allocation, if it fails due to GC OOM, apply the
/// grow-or-collect heuristic and retry.
///
/// The heuristic is:
/// - If the last post-collection heap usage is less than half the current
/// capacity, collect first, then retry. If that still fails, grow and
/// retry one final time.
/// - Otherwise, grow first and retry.
pub(crate) async fn retry_after_gc_async<T, U>(
&mut self,
mut limiter: Option<&mut StoreResourceLimiter<'_>>,
Expand All @@ -188,9 +197,45 @@ impl StoreOpaque {
Err(e) => match e.downcast::<crate::GcHeapOutOfMemory<T>>() {
Ok(oom) => {
let (value, oom) = oom.take_inner();
self.gc(limiter, None, Some(oom.bytes_needed()), asyncness)
.await;
alloc_func(self, value)
let bytes_needed = oom.bytes_needed();

// Determine whether to collect or grow first.
let should_collect_first = self.gc_store.as_ref().map_or(false, |gc_store| {
let capacity = gc_store.gc_heap_capacity();
let last_usage = gc_store.last_post_gc_allocated_bytes.unwrap_or(0);
last_usage < capacity / 2
});

if should_collect_first {
// Collect first, then retry.
self.gc(limiter.as_deref_mut(), None, None, asyncness).await;

match alloc_func(self, value) {
Ok(x) => Ok(x),
Err(e) => match e.downcast::<crate::GcHeapOutOfMemory<T>>() {
Ok(oom2) => {
// Collection wasn't enough; grow and try
// one final time.
let (value, _) = oom2.take_inner();
// Ignore error; we'll get one
// from `alloc_func` below if
// growth failed and failure to
// grow was fatal.
let _ = self.grow_gc_heap(limiter, bytes_needed).await;
alloc_func(self, value)
}
Err(e) => Err(e),
},
}
} else {
// Grow first and retry.
//
// Ignore error; we'll get one from
// `alloc_func` below if growth failed and
// failure to grow was fatal.
let _ = self.grow_gc_heap(limiter, bytes_needed).await;
alloc_func(self, value)
}
}
Err(e) => Err(e),
},
Expand Down
12 changes: 12 additions & 0 deletions crates/wasmtime/src/runtime/vm/gc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,11 @@ pub struct GcStore {
/// The function-references table for this GC heap.
pub func_ref_table: FuncRefTable,

/// The total allocated bytes recorded after the last GC collection.
/// `None` if no collection has been performed yet. Used by the
/// grow-or-collect heuristic.
pub last_post_gc_allocated_bytes: Option<usize>,

/// An allocation counter that triggers GC when it reaches zero.
///
/// Initialized from the `WASMTIME_GC_ZEAL_ALLOC_COUNTER` environment
Expand Down Expand Up @@ -86,6 +91,7 @@ impl GcStore {
gc_heap,
host_data_table,
func_ref_table,
last_post_gc_allocated_bytes: None,
#[cfg(all(gc_zeal, feature = "std"))]
gc_zeal_alloc_counter: gc_zeal_alloc_counter_init,
#[cfg(all(gc_zeal, feature = "std"))]
Expand All @@ -98,10 +104,16 @@ impl GcStore {
self.gc_heap.vmmemory()
}

/// Get the current capacity (in bytes) of this GC heap.
///
/// This is the length of the heap's backing byte slice — the total space
/// available — not the number of bytes currently allocated within it.
pub fn gc_heap_capacity(&self) -> usize {
    self.gc_heap.heap_slice().len()
}

/// Asynchronously perform garbage collection within this heap.
pub async fn gc(&mut self, asyncness: Asyncness, roots: GcRootsIter<'_>) {
    let collection = self.gc_heap.gc(roots, &mut self.host_data_table);
    collect_async(collection, asyncness).await;
    // Record how many bytes survived this collection. The grow-or-collect
    // heuristic reads this to estimate whether a future collection is
    // likely to free enough space to satisfy a pending allocation.
    self.last_post_gc_allocated_bytes = Some(self.gc_heap.allocated_bytes());
}

/// Get the kind of the given GC reference.
Expand Down
12 changes: 12 additions & 0 deletions crates/wasmtime/src/runtime/vm/gc/enabled/drc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -152,6 +152,9 @@ struct DrcHeap {
/// behind an empty vec instead of `None`) but we keep it because it will
/// help us catch unexpected re-entry, similar to how a `RefCell` would.
dec_ref_stack: Option<Vec<VMGcRef>>,

/// Running total of bytes currently allocated (live objects) in this heap.
allocated_bytes: usize,
}

impl DrcHeap {
Expand All @@ -167,6 +170,7 @@ impl DrcHeap {
vmmemory: None,
free_list: None,
dec_ref_stack: Some(Vec::with_capacity(1)),
allocated_bytes: 0,
})
}

Expand All @@ -186,6 +190,7 @@ impl DrcHeap {
self.heap_slice_mut()[index..][..layout.size()].fill(POISON);
}

self.allocated_bytes -= layout.size();
self.free_list.as_mut().unwrap().dealloc(index, layout);
}

Expand Down Expand Up @@ -794,6 +799,7 @@ unsafe impl GcHeap for DrcHeap {
dec_ref_stack,
memory,
vmmemory,
allocated_bytes,

// NB: we will only ever be reused with the same engine, so no need
// to clear out our tracing info just to fill it back in with the
Expand All @@ -805,6 +811,7 @@ unsafe impl GcHeap for DrcHeap {
**over_approximated_stack_roots = None;
*free_list = None;
*vmmemory = None;
*allocated_bytes = 0;
debug_assert!(dec_ref_stack.as_ref().is_some_and(|s| s.is_empty()));

memory.take().unwrap()
Expand Down Expand Up @@ -959,6 +966,7 @@ unsafe impl GcHeap for DrcHeap {
next_over_approximated_stack_root: None,
object_size,
};
self.allocated_bytes += layout.size();
log::trace!("new object: increment {gc_ref:#p} ref count -> 1");
Ok(Ok(gc_ref))
}
Expand Down Expand Up @@ -1016,6 +1024,10 @@ unsafe impl GcHeap for DrcHeap {
.length
}

fn allocated_bytes(&self) -> usize {
    // Maintained incrementally: incremented in the allocation path and
    // decremented in `dealloc`, so this reflects currently-live bytes.
    self.allocated_bytes
}

fn gc<'a>(
&'a mut self,
roots: GcRootsIter<'a>,
Expand Down
7 changes: 7 additions & 0 deletions crates/wasmtime/src/runtime/vm/gc/enabled/null.rs
Original file line number Diff line number Diff line change
Expand Up @@ -327,6 +327,13 @@ unsafe impl GcHeap for NullHeap {
self.index(arrayref).length
}

fn allocated_bytes(&self) -> usize {
    // The null collector never frees, so everything from the start of
    // the heap up to the bump pointer is allocated.
    //
    // SAFETY: `self.next` is this heap's bump pointer; we hold `&self` and
    // presumably no concurrent mutation of the pointer can occur here.
    // NOTE(review): confirm this invariant against `NullHeap`'s other uses
    // of `self.next`.
    let next = unsafe { *self.next.get() };
    usize::try_from(next.get()).unwrap()
}

fn gc<'a>(
&'a mut self,
_roots: GcRootsIter<'a>,
Expand Down
6 changes: 6 additions & 0 deletions crates/wasmtime/src/runtime/vm/gc/gc_runtime.rs
Original file line number Diff line number Diff line change
Expand Up @@ -342,6 +342,12 @@ pub unsafe trait GcHeap: 'static + Send + Sync {
////////////////////////////////////////////////////////////////////////////
// Garbage Collection Methods

/// Get the total number of bytes currently allocated (live or
/// dead-but-not-collected) in this heap.
///
/// This is distinct from the heap capacity: capacity is the total size of
/// the heap's backing storage, while this is how much of it is in use.
fn allocated_bytes(&self) -> usize;

/// Start a new garbage collection process.
///
/// The given `roots` are GC roots and should not be collected (nor anything
Expand Down
54 changes: 54 additions & 0 deletions tests/all/gc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1877,3 +1877,57 @@ fn select_gc_ref_stack_map() -> Result<()> {

Ok(())
}

#[test]
#[cfg_attr(miri, ignore)]
fn gc_heap_does_not_grow_unboundedly() -> Result<()> {
    let _ = env_logger::try_init();

    // GC requires the function-references proposal; use the
    // deferred-reference-counting collector for this test.
    let mut config = Config::new();
    config.wasm_function_references(true);
    config.wasm_gc(true);
    config.collector(Collector::DeferredReferenceCounting);
    let engine = Engine::new(&config)?;

    // A loop that allocates a short-lived struct on every iteration and
    // calls back into the host so we can observe the heap's capacity.
    let wat = r#"
        (module
            (type $small (struct (field i32)))
            (import "" "check" (func $check))

            (func (export "run") (param i32)
                (local $i i32)
                (local $tmp (ref null $small))
                (loop $loop
                    (local.set $tmp (struct.new $small (i32.const 42)))

                    ;; Call the host to check heap size.
                    (call $check)

                    ;; Loop counter.
                    (local.set $i (i32.add (local.get $i) (i32.const 1)))
                    (br_if $loop (i32.lt_u (local.get $i) (local.get 0)))
                )
            )
        )
    "#;
    let module = Module::new(&engine, wat)?;

    let mut store = Store::new(&engine, ());

    // Host callback invoked every iteration: despite the constant
    // allocation above, collections should keep the heap within 64 KiB.
    let check = Func::wrap(&mut store, |caller: Caller<'_, _>| {
        let heap_size = caller.gc_heap_capacity();
        assert!(
            heap_size <= 65536,
            "GC heap grew too large: {heap_size} bytes (limit: 64KiB)"
        );
    });

    let imports = [check.into()];
    let instance = Instance::new(&mut store, &module, &imports)?;
    instance
        .get_typed_func::<(i32,), ()>(&mut store, "run")?
        .call(&mut store, (100_000,))?;

    Ok(())
}
Loading