wingo pushed a commit to branch wip-whippet
in repository guile.

commit 0c7bdacc5140ce0d05409015d13aa7a188011f89
Author: Andy Wingo <wi...@igalia.com>
AuthorDate: Wed Oct 18 10:28:46 2023 +0200

    Revert "Allow embedder to request a major GC"
    
    This reverts commit f39e6ee69f0aa2e14227a019a5332ba129418977.
---
 api/gc-api.h                 |   4 +-
 api/gc-basic-stats.h         |   5 +-
 api/gc-collection-kind.h     |  11 ----
 api/gc-event-listener.h      |   4 +-
 api/gc-null-event-listener.h |   3 +-
 benchmarks/ephemerons.c      |   3 +-
 src/bdw.c                    |  22 ++------
 src/semi.c                   |   6 +--
 src/whippet.c                | 116 ++++++++++++++++++++-----------------------
 9 files changed, 70 insertions(+), 104 deletions(-)

diff --git a/api/gc-api.h b/api/gc-api.h
index 4831500fe..c66959321 100644
--- a/api/gc-api.h
+++ b/api/gc-api.h
@@ -4,7 +4,6 @@
 #include "gc-config.h"
 #include "gc-assert.h"
 #include "gc-attrs.h"
-#include "gc-collection-kind.h"
 #include "gc-edge.h"
 #include "gc-event-listener.h"
 #include "gc-inline.h"
@@ -48,8 +47,7 @@ GC_API_ void gc_finish_for_thread(struct gc_mutator *mut);
 GC_API_ void* gc_call_without_gc(struct gc_mutator *mut, void* (*f)(void*),
                                  void *data) GC_NEVER_INLINE;
 
-GC_API_ void gc_collect(struct gc_mutator *mut,
-                        enum gc_collection_kind requested_kind);
+GC_API_ void gc_collect(struct gc_mutator *mut);
 
 static inline void gc_clear_fresh_allocation(struct gc_ref obj,
                                              size_t size) GC_ALWAYS_INLINE;
diff --git a/api/gc-basic-stats.h b/api/gc-basic-stats.h
index 0cf927b95..8e57e40f1 100644
--- a/api/gc-basic-stats.h
+++ b/api/gc-basic-stats.h
@@ -37,9 +37,10 @@ static inline void gc_basic_stats_init(void *data, size_t heap_size) {
 }
 
 static inline void gc_basic_stats_prepare_gc(void *data,
-                                             enum gc_collection_kind kind) {
+                                             int is_minor,
+                                             int is_compacting) {
   struct gc_basic_stats *stats = data;
-  if (kind == GC_COLLECTION_MINOR)
+  if (is_minor)
     stats->minor_collection_count++;
   else
     stats->major_collection_count++;
diff --git a/api/gc-collection-kind.h b/api/gc-collection-kind.h
deleted file mode 100644
index 11cfc276a..000000000
--- a/api/gc-collection-kind.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef GC_COLLECTION_KIND_H
-#define GC_COLLECTION_KIND_H
-
-enum gc_collection_kind {
-  GC_COLLECTION_ANY,
-  GC_COLLECTION_MINOR,
-  GC_COLLECTION_MAJOR,
-  GC_COLLECTION_COMPACTING,
-};
-
-#endif // GC_COLLECTION_KIND_H
diff --git a/api/gc-event-listener.h b/api/gc-event-listener.h
index 25558838b..57df09719 100644
--- a/api/gc-event-listener.h
+++ b/api/gc-event-listener.h
@@ -1,11 +1,9 @@
 #ifndef GC_EVENT_LISTENER_H
 #define GC_EVENT_LISTENER_H
 
-#include "gc-collection-kind.h"
-
 struct gc_event_listener {
   void (*init)(void *data, size_t heap_size);
-  void (*prepare_gc)(void *data, enum gc_collection_kind kind);
+  void (*prepare_gc)(void *data, int is_minor, int is_compacting);
   void (*requesting_stop)(void *data);
   void (*waiting_for_stop)(void *data);
   void (*mutators_stopped)(void *data);
diff --git a/api/gc-null-event-listener.h b/api/gc-null-event-listener.h
index 5ca17975e..7060bd729 100644
--- a/api/gc-null-event-listener.h
+++ b/api/gc-null-event-listener.h
@@ -5,7 +5,8 @@
 
 static inline void gc_null_event_listener_init(void *data, size_t size) {}
 static inline void gc_null_event_listener_prepare_gc(void *data,
-                                                     enum gc_collection_kind) {}
+                                               int is_minor,
+                                               int is_compacting) {}
 static inline void gc_null_event_listener_requesting_stop(void *data) {}
 static inline void gc_null_event_listener_waiting_for_stop(void *data) {}
 static inline void gc_null_event_listener_mutators_stopped(void *data) {}
diff --git a/benchmarks/ephemerons.c b/benchmarks/ephemerons.c
index 2262bd5c9..2193f1fe0 100644
--- a/benchmarks/ephemerons.c
+++ b/benchmarks/ephemerons.c
@@ -105,8 +105,7 @@ static double heap_multiplier;
 static size_t nthreads;
 
 static void cause_gc(struct gc_mutator *mut) {
-  // Doing a full collection lets us reason precisely about liveness.
-  gc_collect(mut, GC_COLLECTION_MAJOR);
+  gc_collect(mut);
 }
 
 static void make_ephemeron_chain(struct thread *t, EphemeronHandle *head,
diff --git a/src/bdw.c b/src/bdw.c
index 186b50a8c..809ad8808 100644
--- a/src/bdw.c
+++ b/src/bdw.c
@@ -125,22 +125,8 @@ void* gc_allocate_pointerless(struct gc_mutator *mut,
   return GC_malloc_atomic(size);
 }
 
-void gc_collect(struct gc_mutator *mut,
-                enum gc_collection_kind requested_kind) {
-  switch (requested_kind) {
-  case GC_COLLECTION_MINOR:
-    GC_collect_a_little();
-    break;
-  case GC_COLLECTION_ANY:
-  case GC_COLLECTION_MAJOR:
-    GC_gcollect();
-    break;
-  case GC_COLLECTION_COMPACTING:
-    GC_gcollect_and_unmap();
-    break;
-  default:
-    GC_CRASH();
-  }
+void gc_collect(struct gc_mutator *mut) {
+  GC_gcollect();
 }
 
 void gc_write_barrier_extern(struct gc_ref obj, size_t obj_size,
@@ -344,7 +330,9 @@ gc_heap_pending_ephemerons(struct gc_heap *heap) {
 static void on_collection_event(GC_EventType event) {
   switch (event) {
   case GC_EVENT_START: {
-    HEAP_EVENT(prepare_gc, GC_COLLECTION_MAJOR);
+    int is_minor = 0;
+    int is_compacting = 0;
+    HEAP_EVENT(prepare_gc, is_minor, is_compacting);
     HEAP_EVENT(requesting_stop);
     HEAP_EVENT(waiting_for_stop);
     break;
diff --git a/src/semi.c b/src/semi.c
index fdcd03792..7097b2d49 100644
--- a/src/semi.c
+++ b/src/semi.c
@@ -354,7 +354,7 @@ static void collect(struct gc_mutator *mut, size_t for_alloc) {
   struct gc_heap *heap = mutator_heap(mut);
   int is_minor = 0;
   int is_compacting = 1;
-  HEAP_EVENT(heap, prepare_gc, GC_COLLECTION_COMPACTING);
+  HEAP_EVENT(heap, prepare_gc, is_minor, is_compacting);
 
   HEAP_EVENT(heap, requesting_stop);
   HEAP_EVENT(heap, waiting_for_stop);
@@ -414,9 +414,7 @@ static void collect_for_alloc(struct gc_mutator *mut, size_t bytes) {
   GC_CRASH();
 }
 
-void gc_collect(struct gc_mutator *mut,
-                enum gc_collection_kind requested_kind) {
-  // Ignore requested kind, because we always compact.
+void gc_collect(struct gc_mutator *mut) {
   collect(mut, 0);
 }
 
diff --git a/src/whippet.c b/src/whippet.c
index 4c58db275..1ece0d3ff 100644
--- a/src/whippet.c
+++ b/src/whippet.c
@@ -288,6 +288,15 @@ struct mark_space {
   uintptr_t fragmentation_granules_since_last_collection; // atomically
 };
 
+enum gc_kind {
+  GC_KIND_FLAG_MINOR = GC_GENERATIONAL, // 0 or 1
+  GC_KIND_FLAG_EVACUATING = 0x2,
+  GC_KIND_MINOR_IN_PLACE = GC_KIND_FLAG_MINOR,
+  GC_KIND_MINOR_EVACUATING = GC_KIND_FLAG_MINOR | GC_KIND_FLAG_EVACUATING,
+  GC_KIND_MAJOR_IN_PLACE = 0,
+  GC_KIND_MAJOR_EVACUATING = GC_KIND_FLAG_EVACUATING,
+};
+
 struct gc_heap {
   struct mark_space mark_space;
   struct large_object_space large_object_space;
@@ -301,7 +310,7 @@ struct gc_heap {
   int mark_while_stopping;
   int check_pending_ephemerons;
   struct gc_pending_ephemerons *pending_ephemerons;
-  enum gc_collection_kind gc_kind;
+  enum gc_kind gc_kind;
   int multithreaded;
   size_t active_mutator_count;
   size_t mutator_count;
@@ -309,7 +318,7 @@ struct gc_heap {
   struct gc_mutator *mutator_trace_list;
   long count;
   long minor_count;
-  enum gc_collection_kind last_collection_kind;
+  uint8_t last_collection_was_minor;
   struct gc_mutator *deactivated_mutators;
   struct tracer tracer;
   double fragmentation_low_threshold;
@@ -371,8 +380,7 @@ static inline void clear_memory(uintptr_t addr, size_t size) {
   memset((char*)addr, 0, size);
 }
 
-static void collect(struct gc_mutator *mut,
-                    enum gc_collection_kind requested_kind) GC_NEVER_INLINE;
+static void collect(struct gc_mutator *mut) GC_NEVER_INLINE;
 
 static inline uint64_t load_eight_aligned_bytes(uint8_t *mark) {
   GC_ASSERT(((uintptr_t)mark & 7) == 0);
@@ -1408,7 +1416,7 @@ void gc_write_barrier_extern(struct gc_ref obj, size_t obj_size,
 
 static void trace_generational_roots(struct gc_heap *heap) {
   // TODO: Add lospace nursery.
-  if (atomic_load(&heap->gc_kind) == GC_COLLECTION_MINOR) {
+  if (atomic_load(&heap->gc_kind) & GC_KIND_FLAG_MINOR) {
     mark_space_trace_remembered_set(heap_mark_space(heap), heap);
     large_object_space_trace_remembered_set(heap_large_object_space(heap),
                                             enqueue_generational_root,
@@ -1572,12 +1580,10 @@ static double clamp_major_gc_yield_threshold(struct gc_heap *heap,
   return threshold;
 }
 
-static enum gc_collection_kind
-determine_collection_kind(struct gc_heap *heap,
-                          enum gc_collection_kind requested) {
+static enum gc_kind determine_collection_kind(struct gc_heap *heap) {
   struct mark_space *mark_space = heap_mark_space(heap);
-  enum gc_collection_kind previous_gc_kind = atomic_load(&heap->gc_kind);
-  enum gc_collection_kind gc_kind;
+  enum gc_kind previous_gc_kind = atomic_load(&heap->gc_kind);
+  enum gc_kind gc_kind;
   int mark_while_stopping = 1;
   double yield = heap_last_gc_yield(heap);
   double fragmentation = heap_fragmentation(heap);
@@ -1586,16 +1592,13 @@ determine_collection_kind(struct gc_heap *heap,
 
   if (heap->count == 0) {
     DEBUG("first collection is always major\n");
-    gc_kind = GC_COLLECTION_MAJOR;
-  } else if (requested != GC_COLLECTION_ANY) {
-    DEBUG("user specifically requested collection kind %d\n", (int)requested);
-    gc_kind = requested;
+    gc_kind = GC_KIND_MAJOR_IN_PLACE;
   } else if (pending > 0) {
     DEBUG("evacuating due to need to reclaim %zd bytes\n", pending);
     // During the last cycle, a large allocation could not find enough
     // free blocks, and we decided not to expand the heap.  Let's do an
     // evacuating major collection to maximize the free block yield.
-    gc_kind = GC_COLLECTION_COMPACTING;
+    gc_kind = GC_KIND_MAJOR_EVACUATING;
 
     // Generally speaking, we allow mutators to mark their own stacks
     // before pausing.  This is a limited form of concurrent marking, as
@@ -1607,7 +1610,7 @@ determine_collection_kind(struct gc_heap *heap,
     // marking.  Of course if the mutator has conservative roots we will
     // have pinning anyway and might as well allow ragged stops.
     mark_while_stopping = gc_has_conservative_roots();
-  } else if (previous_gc_kind == GC_COLLECTION_COMPACTING
+  } else if (previous_gc_kind == GC_KIND_MAJOR_EVACUATING
              && fragmentation >= heap->fragmentation_low_threshold) {
     DEBUG("continuing evacuation due to fragmentation %.2f%% > %.2f%%\n",
           fragmentation * 100.,
@@ -1615,50 +1618,46 @@ determine_collection_kind(struct gc_heap *heap,
     // For some reason, we already decided to compact in the past,
     // and fragmentation hasn't yet fallen below a low-water-mark.
     // Keep going.
-    gc_kind = GC_COLLECTION_COMPACTING;
+    gc_kind = GC_KIND_MAJOR_EVACUATING;
   } else if (fragmentation > heap->fragmentation_high_threshold) {
     // Switch to evacuation mode if the heap is too fragmented.
     DEBUG("triggering compaction due to fragmentation %.2f%% > %.2f%%\n",
           fragmentation * 100.,
           heap->fragmentation_high_threshold * 100.);
-    gc_kind = GC_COLLECTION_COMPACTING;
-  } else if (previous_gc_kind == GC_COLLECTION_COMPACTING) {
+    gc_kind = GC_KIND_MAJOR_EVACUATING;
+  } else if (previous_gc_kind == GC_KIND_MAJOR_EVACUATING) {
     // We were evacuating, but we're good now.  Go back to minor
     // collections.
     DEBUG("returning to in-place collection, fragmentation %.2f%% < %.2f%%\n",
           fragmentation * 100.,
           heap->fragmentation_low_threshold * 100.);
-    gc_kind = GC_GENERATIONAL ? GC_COLLECTION_MINOR : GC_COLLECTION_MAJOR;
-  } else if (!GC_GENERATIONAL) {
-    DEBUG("keeping on with major in-place GC\n");
-    GC_ASSERT(previous_gc_kind == GC_COLLECTION_MAJOR);
-    gc_kind = GC_COLLECTION_MAJOR;
-  } else if (previous_gc_kind != GC_COLLECTION_MINOR) {
+    gc_kind = GC_KIND_MINOR_IN_PLACE;
+  } else if (previous_gc_kind != GC_KIND_MINOR_IN_PLACE) {
     DEBUG("returning to minor collection after major collection\n");
     // Go back to minor collections.
-    gc_kind = GC_COLLECTION_MINOR;
+    gc_kind = GC_KIND_MINOR_IN_PLACE;
   } else if (yield < heap->major_gc_yield_threshold) {
     DEBUG("collection yield too low, triggering major collection\n");
     // Nursery is getting tight; trigger a major GC.
-    gc_kind = GC_COLLECTION_MAJOR;
+    gc_kind = GC_KIND_MAJOR_IN_PLACE;
   } else {
     DEBUG("keeping on with minor GC\n");
     // Nursery has adequate space; keep trucking with minor GCs.
-    GC_ASSERT(previous_gc_kind == GC_COLLECTION_MINOR);
-    gc_kind = GC_COLLECTION_MINOR;
+    GC_ASSERT(previous_gc_kind == GC_KIND_MINOR_IN_PLACE);
+    gc_kind = GC_KIND_MINOR_IN_PLACE;
   }
 
   if (gc_has_conservative_intraheap_edges() &&
-      gc_kind == GC_COLLECTION_COMPACTING) {
+      (gc_kind & GC_KIND_FLAG_EVACUATING)) {
     DEBUG("welp.  conservative heap scanning, no evacuation for you\n");
-    gc_kind = GC_COLLECTION_MAJOR;
+    gc_kind = GC_KIND_MAJOR_IN_PLACE;
     mark_while_stopping = 1;
   }
 
   // If this is the first in a series of minor collections, reset the
   // threshold at which we should do a major GC.
-  if (gc_kind == GC_COLLECTION_MINOR &&
-      previous_gc_kind != GC_COLLECTION_MINOR) {
+  if ((gc_kind & GC_KIND_FLAG_MINOR) &&
+      (previous_gc_kind & GC_KIND_FLAG_MINOR) != GC_KIND_FLAG_MINOR) {
     double yield = heap_last_gc_yield(heap);
     double threshold = yield * heap->minor_gc_yield_threshold;
     double clamped = clamp_major_gc_yield_threshold(heap, threshold);
@@ -1688,7 +1687,7 @@ static void release_evacuation_target_blocks(struct mark_space *space) {
 static void prepare_for_evacuation(struct gc_heap *heap) {
   struct mark_space *space = heap_mark_space(heap);
 
-  if (heap->gc_kind != GC_COLLECTION_COMPACTING) {
+  if ((heap->gc_kind & GC_KIND_FLAG_EVACUATING) == 0) {
     space->evacuating = 0;
     space->evacuation_reserve = space->evacuation_minimum_reserve;
     return;
@@ -1797,7 +1796,7 @@ static void trace_roots_after_stop(struct gc_heap *heap) {
 }
 
 static void mark_space_finish_gc(struct mark_space *space,
-                                 enum gc_collection_kind gc_kind) {
+                                 enum gc_kind gc_kind) {
   space->evacuating = 0;
   reset_sweeper(space);
   update_mark_patterns(space, 0);
@@ -1825,8 +1824,7 @@ static void sweep_ephemerons(struct gc_heap *heap) {
   return gc_sweep_pending_ephemerons(heap->pending_ephemerons, 0, 1);
 }
 
-static void collect(struct gc_mutator *mut,
-                    enum gc_collection_kind requested_kind) {
+static void collect(struct gc_mutator *mut) {
   struct gc_heap *heap = mutator_heap(mut);
   struct mark_space *space = heap_mark_space(heap);
   struct large_object_space *lospace = heap_large_object_space(heap);
@@ -1837,12 +1835,12 @@ static void collect(struct gc_mutator *mut,
   }
   MUTATOR_EVENT(mut, mutator_cause_gc);
   DEBUG("start collect #%ld:\n", heap->count);
-  enum gc_collection_kind gc_kind =
-    determine_collection_kind(heap, requested_kind);
-  HEAP_EVENT(heap, prepare_gc, gc_kind);
-  update_mark_patterns(space, gc_kind != GC_COLLECTION_MINOR);
-  large_object_space_start_gc(lospace, gc_kind == GC_COLLECTION_MINOR);
-  gc_extern_space_start_gc(exspace, gc_kind == GC_COLLECTION_MINOR);
+  enum gc_kind gc_kind = determine_collection_kind(heap);
+  HEAP_EVENT(heap, prepare_gc, gc_kind & GC_KIND_FLAG_MINOR,
+             gc_kind & GC_KIND_FLAG_EVACUATING);
+  update_mark_patterns(space, !(gc_kind & GC_KIND_FLAG_MINOR));
+  large_object_space_start_gc(lospace, gc_kind & GC_KIND_FLAG_MINOR);
+  gc_extern_space_start_gc(exspace, gc_kind & GC_KIND_FLAG_MINOR);
   resolve_ephemerons_lazily(heap);
   tracer_prepare(heap);
   HEAP_EVENT(heap, requesting_stop);
@@ -1870,11 +1868,11 @@ static void collect(struct gc_mutator *mut,
   sweep_ephemerons(heap);
   tracer_release(heap);
   mark_space_finish_gc(space, gc_kind);
-  large_object_space_finish_gc(lospace, gc_kind == GC_COLLECTION_MINOR);
-  gc_extern_space_finish_gc(exspace, gc_kind == GC_COLLECTION_MINOR);
+  large_object_space_finish_gc(lospace, gc_kind & GC_KIND_FLAG_MINOR);
+  gc_extern_space_finish_gc(exspace, gc_kind & GC_KIND_FLAG_MINOR);
   heap->count++;
-  heap->last_collection_kind = gc_kind;
-  if (gc_kind == GC_COLLECTION_MINOR)
+  heap->last_collection_was_minor = gc_kind & GC_KIND_FLAG_MINOR;
+  if (heap->last_collection_was_minor)
     heap->minor_count++;
   heap_reset_large_object_pages(heap, lospace->live_pages_at_last_collection);
   HEAP_EVENT(heap, restarting_mutators);
@@ -2081,7 +2079,7 @@ static size_t next_hole(struct gc_mutator *mut) {
           // mark bytes didn't rotate, so we have no cleanup to do; and
           // we shouldn't try to allocate into them as it's not worth
           // it.  Any wasted space is measured as fragmentation.
-          if (mutator_heap(mut)->last_collection_kind == GC_COLLECTION_MINOR)
+          if (mutator_heap(mut)->last_collection_was_minor)
             continue;
           else
             block_summary_clear_flag(summary, BLOCK_VENERABLE);
@@ -2094,7 +2092,7 @@ static size_t next_hole(struct gc_mutator *mut) {
             // mostly old data.  Sweep any garbage, commit the mark as
             // venerable, and avoid allocating into it.
             block_summary_clear_flag(summary, BLOCK_VENERABLE_AFTER_SWEEP);
-            if (mutator_heap(mut)->last_collection_kind == GC_COLLECTION_MINOR) {
+            if (mutator_heap(mut)->last_collection_was_minor) {
               finish_sweeping_in_block(mut);
               block_summary_set_flag(summary, BLOCK_VENERABLE);
               continue;
@@ -2155,22 +2153,18 @@ static void finish_sweeping(struct gc_mutator *mut) {
     finish_hole(mut);
 }
 
-static void trigger_collection(struct gc_mutator *mut,
-                               enum gc_collection_kind requested_kind) {
+static void trigger_collection(struct gc_mutator *mut) {
   struct gc_heap *heap = mutator_heap(mut);
-  enum gc_collection_kind last_collection_kind = GC_COLLECTION_ANY;
   heap_lock(heap);
-  while (mutators_are_stopping(heap)) {
+  if (mutators_are_stopping(heap))
     pause_mutator_for_collection_with_lock(mut);
-    last_collection_kind = heap->last_collection_kind;
-  }
-  if (last_collection_kind < requested_kind)
-    collect(mut, requested_kind);
+  else
+    collect(mut);
   heap_unlock(heap);
 }
 
-void gc_collect(struct gc_mutator *mut, enum gc_collection_kind kind) {
-  trigger_collection(mut, kind);
+void gc_collect(struct gc_mutator *mut) {
+  trigger_collection(mut);
 }
 
 static void* allocate_large(struct gc_mutator *mut, size_t size) {
@@ -2183,7 +2177,7 @@ static void* allocate_large(struct gc_mutator *mut, size_t size) {
                                     npages << space->page_size_log2);
 
   while (!sweep_until_memory_released(mut))
-    trigger_collection(mut, GC_COLLECTION_COMPACTING);
+    trigger_collection(mut);
   atomic_fetch_add(&heap->large_object_pages, npages);
 
   void *ret = large_object_space_alloc(space, npages);
@@ -2221,7 +2215,7 @@ void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
         break;
       }
       if (!hole)
-        trigger_collection(mut, GC_COLLECTION_ANY);
+        trigger_collection(mut);
     }
     ret = gc_ref(mut->alloc);
     mut->alloc += size;

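For reference, a minimal sketch of embedder-facing code against the post-revert API; request_gc and my_prepare_gc are hypothetical names, not functions in this tree:

    #include "gc-api.h"

    /* Hypothetical embedder callback: with the revert applied, prepare_gc
       listeners receive two ints again instead of an enum gc_collection_kind. */
    static void my_prepare_gc(void *data, int is_minor, int is_compacting) {
      /* e.g. count minor vs. major collections, as gc-basic-stats.h does */
    }

    /* gc_collect() again takes only the mutator; the collector chooses
       between minor, major, and evacuating collections on its own. */
    static void request_gc(struct gc_mutator *mut) {
      gc_collect(mut);  /* previously: gc_collect(mut, GC_COLLECTION_MAJOR) */
    }

The tradeoff is visible in src/whippet.c: the collection kind is once more an internal enum gc_kind chosen by determine_collection_kind() rather than a request passed in by the embedder.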