wingo pushed a commit to branch wip-whippet
in repository guile.

commit ba65e32b00c1df50a739c2e745920ad5e58dedb7
Author: Andy Wingo <wi...@igalia.com>
AuthorDate: Mon Jan 13 17:22:43 2025 +0100

    pcc / copy-space: Allow allocations to fail
    
    This fixes an issue in which minor collection of a nursery full of live
    data can fail because of fragmentation, whereas really it should just
    fall back to promotion.
---
 src/copy-space.h | 59 +++++++++++++++++++++---------------
 src/pcc.c        | 92 ++++++++++++++++++++++++++++++++++++++++++--------------
 2 files changed, 104 insertions(+), 47 deletions(-)
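
For readers skimming the diff: the shape of the change is that copy_space_allocate now reports failure by returning a null ref instead of calling back for more empty blocks, and callers such as do_minor_trace fall back to promoting the object into oldspace when the nursery copy fails. Below is a minimal standalone sketch of that control flow; the toy_* names, types, and sizes are hypothetical stand-ins for illustration, not whippet's actual API (the real enum also has an "already forwarded, edge updated" case).

  /* Toy model: a bump-pointer space whose allocation can fail, and a
   * forward operation whose caller falls back to a second space. */
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  struct toy_space {
    uint8_t *hp, *limit;            /* bump-pointer allocation region */
  };

  enum toy_forward_result {
    TOY_FORWARD_EVACUATED,          /* copied into the target space */
    TOY_FORWARD_FAILED,             /* target space is out of room */
  };

  /* Like the patched copy_space_allocate: return NULL on failure
   * instead of blocking until more blocks appear. */
  static void *toy_allocate(struct toy_space *space, size_t size) {
    if ((size_t)(space->limit - space->hp) < size)
      return NULL;
    void *ret = space->hp;
    space->hp += size;
    return ret;
  }

  static enum toy_forward_result
  toy_forward(struct toy_space *dst, void *obj, size_t size, void **new_loc) {
    void *copy = toy_allocate(dst, size);
    if (!copy)
      return TOY_FORWARD_FAILED;
    memcpy(copy, obj, size);
    *new_loc = copy;
    return TOY_FORWARD_EVACUATED;
  }

  int main(void) {
    static uint8_t new_mem[32], old_mem[256];
    struct toy_space newspace = { new_mem, new_mem + sizeof new_mem };
    struct toy_space oldspace = { old_mem, old_mem + sizeof old_mem };
    char obj[48] = "some live object";
    void *loc;
    /* Try to keep the object in newspace; on failure, promote it. */
    if (toy_forward(&newspace, obj, sizeof obj, &loc) == TOY_FORWARD_FAILED) {
      if (toy_forward(&oldspace, obj, sizeof obj, &loc) == TOY_FORWARD_EVACUATED)
        puts("newspace full: promoted to oldspace");
    }
    return 0;
  }
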

diff --git a/src/copy-space.h b/src/copy-space.h
index b866d0ff6..d32de0298 100644
--- a/src/copy-space.h
+++ b/src/copy-space.h
@@ -149,6 +149,17 @@ struct copy_space {
   size_t nslabs;
 };
 
+enum copy_space_forward_result {
+  // We went to forward an edge, but the target was already forwarded, so we
+  // just updated the edge.
+  COPY_SPACE_FORWARD_UPDATED,
+  // We went to forward an edge and evacuated the referent to a new location.
+  COPY_SPACE_FORWARD_EVACUATED,
+  // We went to forward an edge but failed to acquire memory for its new
+  // location.
+  COPY_SPACE_FORWARD_FAILED,
+};
+
 struct copy_space_allocator {
   uintptr_t hp;
   uintptr_t limit;
@@ -473,9 +484,7 @@ copy_space_allocator_release_partly_full_block(struct copy_space_allocator *allo
 static inline struct gc_ref
 copy_space_allocate(struct copy_space_allocator *alloc,
                     struct copy_space *space,
-                    size_t size,
-                    void (*get_more_empty_blocks)(void *data),
-                    void *data) {
+                    size_t size) {
   GC_ASSERT(size > 0);
   GC_ASSERT(size <= gc_allocator_large_threshold());
   size = align_up(size, gc_allocator_small_granule_size());
@@ -490,8 +499,8 @@ copy_space_allocate(struct copy_space_allocator *alloc,
       goto done;
     copy_space_allocator_release_full_block(alloc, space);
   }
-  while (!copy_space_allocator_acquire_empty_block(alloc, space))
-    get_more_empty_blocks(data);
+  if (!copy_space_allocator_acquire_empty_block(alloc, space))
+    return gc_ref_null();
   // The newly acquired block is empty and is therefore large enough for
   // a small allocation.
 
@@ -588,12 +597,13 @@ copy_space_gc_during_evacuation(void *data) {
   GC_CRASH();
 }
 
-static inline int
+static inline enum copy_space_forward_result
 copy_space_forward_atomic(struct copy_space *space, struct gc_edge edge,
                           struct gc_ref old_ref,
                           struct copy_space_allocator *alloc) {
   struct gc_atomic_forward fwd = gc_atomic_forward_begin(old_ref);
 
+retry:
   if (fwd.state == GC_FORWARDING_STATE_NOT_FORWARDED)
     gc_atomic_forward_acquire(&fwd);
 
@@ -605,33 +615,34 @@ copy_space_forward_atomic(struct copy_space *space, struct gc_edge edge,
   case GC_FORWARDING_STATE_ACQUIRED: {
     // We claimed the object successfully; evacuating is up to us.
     size_t bytes = gc_atomic_forward_object_size(&fwd);
-    struct gc_ref new_ref =
-      copy_space_allocate(alloc, space, bytes,
-                          copy_space_gc_during_evacuation, NULL);
+    struct gc_ref new_ref = copy_space_allocate(alloc, space, bytes);
+    if (gc_ref_is_null(new_ref)) {
+      gc_atomic_forward_abort(&fwd);
+      return COPY_SPACE_FORWARD_FAILED;
+    }
     // Copy object contents before committing, as we don't know what
     // part of the object (if any) will be overwritten by the
     // commit.
     memcpy(gc_ref_heap_object(new_ref), gc_ref_heap_object(old_ref), bytes);
     gc_atomic_forward_commit(&fwd, new_ref);
     gc_edge_update(edge, new_ref);
-    return 1;
+    return COPY_SPACE_FORWARD_EVACUATED;
   }
   case GC_FORWARDING_STATE_BUSY:
     // Someone else claimed this object first.  Spin until new address
     // known, or evacuation aborts.
     for (size_t spin_count = 0;; spin_count++) {
       if (gc_atomic_forward_retry_busy(&fwd))
-        break;
+        goto retry;
       yield_for_spin(spin_count);
     }
-    GC_ASSERT(fwd.state == GC_FORWARDING_STATE_FORWARDED);
-    // Fall through.
+    GC_CRASH(); // Unreachable.
   case GC_FORWARDING_STATE_FORWARDED:
     // The object has been evacuated already.  Update the edge;
     // whoever forwarded the object will make sure it's eventually
     // traced.
     gc_edge_update(edge, gc_ref(gc_atomic_forward_address(&fwd)));
-    return 0;
+    return COPY_SPACE_FORWARD_UPDATED;
   }
 }
 
@@ -640,6 +651,7 @@ copy_space_forward_if_traced_atomic(struct copy_space *space,
                                     struct gc_edge edge,
                                     struct gc_ref old_ref) {
   struct gc_atomic_forward fwd = gc_atomic_forward_begin(old_ref);
+retry:
   switch (fwd.state) {
   case GC_FORWARDING_STATE_NOT_FORWARDED:
     return 0;
@@ -648,11 +660,10 @@ copy_space_forward_if_traced_atomic(struct copy_space *space,
     // known.
     for (size_t spin_count = 0;; spin_count++) {
       if (gc_atomic_forward_retry_busy(&fwd))
-        break;
+        goto retry;
       yield_for_spin(spin_count);
     }
-    GC_ASSERT(fwd.state == GC_FORWARDING_STATE_FORWARDED);
-    // Fall through.
+    GC_CRASH(); // Unreachable.
   case GC_FORWARDING_STATE_FORWARDED:
     gc_edge_update(edge, gc_ref(gc_atomic_forward_address(&fwd)));
     return 1;
@@ -661,24 +672,24 @@ copy_space_forward_if_traced_atomic(struct copy_space *space,
   }
 }
 
-static inline int
+static inline enum copy_space_forward_result
 copy_space_forward_nonatomic(struct copy_space *space, struct gc_edge edge,
                              struct gc_ref old_ref,
                              struct copy_space_allocator *alloc) {
   uintptr_t forwarded = gc_object_forwarded_nonatomic(old_ref);
   if (forwarded) {
     gc_edge_update(edge, gc_ref(forwarded));
-    return 0;
+    return COPY_SPACE_FORWARD_UPDATED;
   } else {
     size_t size;
     gc_trace_object(old_ref, NULL, NULL, NULL, &size);
-    struct gc_ref new_ref =
-      copy_space_allocate(alloc, space, size,
-                          copy_space_gc_during_evacuation, NULL);
+    struct gc_ref new_ref = copy_space_allocate(alloc, space, size);
+    if (gc_ref_is_null(new_ref))
+      return COPY_SPACE_FORWARD_FAILED;
     memcpy(gc_ref_heap_object(new_ref), gc_ref_heap_object(old_ref), size);
     gc_object_forward_nonatomic(old_ref, new_ref);
     gc_edge_update(edge, new_ref);
-    return 1;
+    return COPY_SPACE_FORWARD_EVACUATED;
   }
 }
 
@@ -694,7 +705,7 @@ copy_space_forward_if_traced_nonatomic(struct copy_space *space,
   return 0;
 }
 
-static inline int
+static inline enum copy_space_forward_result
 copy_space_forward(struct copy_space *src_space, struct copy_space *dst_space,
                    struct gc_edge edge,
                    struct gc_ref old_ref,
diff --git a/src/pcc.c b/src/pcc.c
index ff10375ef..422276afd 100644
--- a/src/pcc.c
+++ b/src/pcc.c
@@ -287,6 +287,31 @@ static inline int edge_is_from_survivor(struct gc_heap *heap,
   return copy_space_contains_edge_aligned(heap_new_space(heap), edge);
 }
 
+static inline int forward(struct copy_space *src_space,
+                          struct copy_space *dst_space,
+                          struct gc_edge edge,
+                          struct gc_ref ref,
+                          struct copy_space_allocator *dst_alloc) {
+  switch (copy_space_forward(src_space, dst_space, edge, ref, dst_alloc)) {
+  case COPY_SPACE_FORWARD_UPDATED:
+    return 0;
+  case COPY_SPACE_FORWARD_EVACUATED:
+    return 1;
+  case COPY_SPACE_FORWARD_FAILED:
+    // If space is really tight and reordering of objects during evacuation
+    // resulted in more end-of-block fragmentation and thus block use than
+    // before collection started, we can actually run out of memory while
+    // collecting.  We should probably attempt to expand the heap here, at
+    // least by a single block; it's better than the alternatives.  For now,
+    // abort.
+    fprintf(stderr, "Out of memory\n");
+    GC_CRASH();
+    break;
+  default:
+    GC_CRASH();
+  }
+}
+
 static inline int do_minor_trace(struct gc_heap *heap, struct gc_edge edge,
                                  struct gc_ref ref,
                                  struct gc_trace_worker_data *data) {
@@ -324,16 +349,32 @@ static inline int do_minor_trace(struct gc_heap *heap, struct gc_edge edge,
     // However however, it is hard to distinguish between edges from promoted
     // objects and edges from old objects, so we mostly just rely on an
     // idempotent "log if unlogged" operation instead.
-    int promote = copy_space_should_promote(new_space, ref);
-    struct copy_space *dst_space = promote ? old_space : new_space;
-    struct copy_space_allocator *alloc = promote
-      ? trace_worker_old_space_allocator(data)
-      : trace_worker_new_space_allocator(data);
-    // Update the remembered set for promoted-to-survivor edges.
-    if (!promote && !edge_is_from_survivor(heap, edge)
-        && remember_edge_to_survivor_object(heap, edge))
-      gc_field_set_writer_add_edge(trace_worker_field_logger(data), edge);
-    return copy_space_forward(new_space, dst_space, edge, ref, alloc);
+    if (!copy_space_should_promote(new_space, ref)) {
+      // Try to leave the object in newspace as a survivor.  If the edge is from
+      // a promoted object, we will need to add it to the remembered set.
+      if (!edge_is_from_survivor(heap, edge)
+          && remember_edge_to_survivor_object(heap, edge)) {
+        // Log the edge even though in rare conditions the referent could end up
+        // being promoted by us (if we run out of newspace) or a remote
+        // evacuation thread (if they run out of newspace).
+        gc_field_set_writer_add_edge(trace_worker_field_logger(data), edge);
+      }
+      switch (copy_space_forward(new_space, new_space, edge, ref,
+                                 trace_worker_new_space_allocator(data))) {
+      case COPY_SPACE_FORWARD_UPDATED:
+        return 0;
+      case COPY_SPACE_FORWARD_EVACUATED:
+        return 1;
+      case COPY_SPACE_FORWARD_FAILED:
+        // Ran out of newspace!  Fall through to promote instead.
+        break;
+      default:
+        GC_CRASH();
+      }
+    }
+    // Promote the object.
+    return forward(new_space, old_space, edge, ref,
+                   trace_worker_old_space_allocator(data));
   } else {
     // Note that although the target of the edge might not be in lospace, this
     // will do what we want and return 1 if and only if ref is was a young
@@ -354,16 +395,16 @@ static inline int do_trace(struct gc_heap *heap, struct gc_edge edge,
     struct copy_space *new_space = heap_new_space(heap);
     struct copy_space *old_space = heap_old_space(heap);
     if (new_space_contains(heap, ref))
-      return copy_space_forward(new_space, old_space, edge, ref,
-                                trace_worker_old_space_allocator(data));
+      return forward(new_space, old_space, edge, ref,
+                     trace_worker_old_space_allocator(data));
     if (old_space_contains(heap, ref))
-      return copy_space_forward(old_space, old_space, edge, ref,
-                                trace_worker_old_space_allocator(data));
+      return forward(old_space, old_space, edge, ref,
+                     trace_worker_old_space_allocator(data));
   } else {
     if (GC_LIKELY(copy_space_contains(heap_mono_space(heap), ref)))
-      return copy_space_forward(heap_mono_space(heap), heap_mono_space(heap),
-                                edge, ref,
-                                trace_worker_mono_space_allocator(data));
+      return forward(heap_mono_space(heap), heap_mono_space(heap),
+                     edge, ref,
+                     trace_worker_mono_space_allocator(data));
   }
 
   // Fall through for objects in large or extern spaces.
@@ -916,12 +957,17 @@ void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
   if (size > gc_allocator_large_threshold())
     return allocate_large(mut, size);
 
-  struct gc_ref ret =
-    copy_space_allocate(&mut->allocator,
-                        heap_allocation_space(mutator_heap(mut)),
-                        size,
-                        get_more_empty_blocks_for_mutator,
-                        mut);
+  struct gc_ref ret;
+  while (1) {
+    ret = copy_space_allocate(&mut->allocator,
+                              heap_allocation_space(mutator_heap(mut)),
+                              size);
+    if (gc_ref_is_null(ret))
+      trigger_collection(mut, GC_COLLECTION_MINOR);
+    else
+      break;
+  }
+
   gc_clear_fresh_allocation(ret, size);
   return gc_ref_heap_object(ret);
 }
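
The mutator-facing slow path in gc_allocate_slow follows the same contract: on a null ref it triggers a minor collection and retries. Here is a standalone sketch of that retry loop, again with hypothetical toy_* stand-ins rather than whippet's real types:

  /* Toy model of the mutator slow path after this change: a failed
   * allocation triggers a collection and retries, instead of the
   * allocator fetching more blocks itself. */
  #include <stdint.h>
  #include <stdio.h>

  struct toy_space { uint8_t *base, *hp, *limit; };

  static void *toy_allocate(struct toy_space *s, size_t size) {
    if ((size_t)(s->limit - s->hp) < size)
      return NULL;                  /* fail; the caller decides what to do */
    void *ret = s->hp;
    s->hp += size;
    return ret;
  }

  /* Stand-in for trigger_collection(): pretend everything in the nursery
   * is dead or promoted elsewhere, so the space can simply be reset. */
  static void toy_collect_minor(struct toy_space *nursery) {
    nursery->hp = nursery->base;
    puts("collected nursery");
  }

  static void *toy_allocate_slow(struct toy_space *nursery, size_t size) {
    for (;;) {
      void *ret = toy_allocate(nursery, size);
      if (ret)
        return ret;
      toy_collect_minor(nursery);
    }
  }

  int main(void) {
    static uint8_t mem[64];
    struct toy_space nursery = { mem, mem, mem + sizeof mem };
    for (int i = 0; i < 5; i++)     /* 5 x 32 bytes forces two collections */
      toy_allocate_slow(&nursery, 32);
    return 0;
  }
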
