wingo pushed a commit to branch wip-whippet
in repository guile.

commit 92b8f1e917b966241a6e5ce39ca6d76d1950a824
Author: Andy Wingo <wi...@igalia.com>
AuthorDate: Tue Aug 16 21:35:16 2022 +0200

    Add gc_ prefix to struct heap, struct mutator
---
 bdw.c                |  38 ++++-----
 gc-api.h             |  47 +++++------
 large-object-space.h |   4 +-
 mt-gcbench.c         |  32 ++++----
 parallel-tracer.h    |  20 ++---
 quads.c              |   8 +-
 semi.c               |  58 +++++++-------
 serial-tracer.h      |  16 ++--
 simple-allocator.h   |   4 +-
 whippet.c            | 214 +++++++++++++++++++++++++--------------------------
 10 files changed, 218 insertions(+), 223 deletions(-)
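
This is a mechanical rename of the two embedder-visible GC types: struct heap
becomes struct gc_heap and struct mutator becomes struct gc_mutator, across
gc-api.h and every collector backend (bdw, semi, whippet).  The placeholder
struct gc_mutator { void *user_data; } previously declared in gc-api.h is
dropped so the name can refer to the collectors' own mutator type.  As a rough
sketch of embedder code against the renamed API (based only on signatures
visible in this patch; the heap size, parallelism and allocation size are
illustrative placeholders, not values from the patch):

    #include "gc-api.h"

    /* Hypothetical embedder snippet; option values are arbitrary examples.  */
    static int example(void) {
      struct gc_option options[] =
        { { GC_OPTION_FIXED_HEAP_SIZE, 10 * 1024 * 1024 },
          { GC_OPTION_PARALLELISM, 1 } };
      struct gc_heap *heap;      /* was: struct heap */
      struct gc_mutator *mut;    /* was: struct mutator */
      if (!gc_init(sizeof options / sizeof options[0], options, &heap, &mut))
        return 1;
      void *obj = gc_allocate(mut, 16);  /* inline fast path from gc-api.h */
      gc_print_stats(heap);
      return obj ? 0 : 1;
    }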

diff --git a/bdw.c b/bdw.c
index 7f3ec186b..9325726f5 100644
--- a/bdw.c
+++ b/bdw.c
@@ -39,14 +39,14 @@
    up to 256 bytes.  */
 #define GC_INLINE_FREELIST_COUNT (256U / GC_INLINE_GRANULE_BYTES)
 
-struct heap {
+struct gc_heap {
   pthread_mutex_t lock;
   int multithreaded;
 };
 
-struct mutator {
+struct gc_mutator {
   void *freelists[GC_INLINE_FREELIST_COUNT];
-  struct heap *heap;
+  struct gc_heap *heap;
 };
 
 static inline size_t gc_inline_bytes_to_freelist_index(size_t bytes) {
@@ -91,18 +91,18 @@ allocate_small(void **freelist, size_t idx, enum gc_inline_kind kind) {
   return head;
 }
 
-void* gc_allocate_large(struct mutator *mut, size_t size) {
+void* gc_allocate_large(struct gc_mutator *mut, size_t size) {
   return GC_malloc(size);
 }
 
-void* gc_allocate_small(struct mutator *mut, size_t size) {
+void* gc_allocate_small(struct gc_mutator *mut, size_t size) {
   GC_ASSERT(size != 0);
   GC_ASSERT(size <= gc_allocator_large_threshold());
   size_t idx = gc_inline_bytes_to_freelist_index(size);
   return allocate_small(&mut->freelists[idx], idx, GC_INLINE_KIND_NORMAL);
 }
 
-void* gc_allocate_pointerless(struct mutator *mut,
+void* gc_allocate_pointerless(struct gc_mutator *mut,
                                             size_t size) {
   // Because the BDW API requires us to implement a custom marker so
   // that the pointerless freelist gets traced, even though it's in a
@@ -110,17 +110,17 @@ void* gc_allocate_pointerless(struct mutator *mut,
   return GC_malloc_atomic(size);
 }
 
-static inline void collect(struct mutator *mut) {
+static inline void collect(struct gc_mutator *mut) {
   GC_gcollect();
 }
 
-static inline struct mutator *add_mutator(struct heap *heap) {
-  struct mutator *ret = GC_malloc(sizeof(struct mutator));
+static inline struct gc_mutator *add_mutator(struct gc_heap *heap) {
+  struct gc_mutator *ret = GC_malloc(sizeof(struct gc_mutator));
   ret->heap = heap;
   return ret;
 }
 
-static inline struct heap *mutator_heap(struct mutator *mutator) {
+static inline struct gc_heap *mutator_heap(struct gc_mutator *mutator) {
   return mutator->heap;
 }
 
@@ -188,7 +188,7 @@ static int parse_options(int argc, struct gc_option argv[],
 }
 
 int gc_init(int argc, struct gc_option argv[],
-            struct heap **heap, struct mutator **mutator) {
+            struct gc_heap **heap, struct gc_mutator **mutator) {
   GC_ASSERT_EQ(gc_allocator_small_granule_size(), GC_INLINE_GRANULE_BYTES);
   GC_ASSERT_EQ(gc_allocator_large_threshold(),
                GC_INLINE_FREELIST_COUNT * GC_INLINE_GRANULE_BYTES);
@@ -212,14 +212,14 @@ int gc_init(int argc, struct gc_option argv[],
   if (options.fixed_heap_size > current_heap_size)
     GC_expand_hp(options.fixed_heap_size - current_heap_size);
   GC_allow_register_threads();
-  *heap = GC_malloc(sizeof(struct heap));
+  *heap = GC_malloc(sizeof(struct gc_heap));
   pthread_mutex_init(&(*heap)->lock, NULL);
   *mutator = add_mutator(*heap);
   return 1;
 }
 
-struct mutator* gc_init_for_thread(uintptr_t *stack_base,
-                                   struct heap *heap) {
+struct gc_mutator* gc_init_for_thread(uintptr_t *stack_base,
+                                   struct gc_heap *heap) {
   pthread_mutex_lock(&heap->lock);
   if (!heap->multithreaded) {
     GC_allow_register_threads();
@@ -231,23 +231,23 @@ struct mutator* gc_init_for_thread(uintptr_t *stack_base,
   GC_register_my_thread(&base);
   return add_mutator(heap);
 }
-void gc_finish_for_thread(struct mutator *mut) {
+void gc_finish_for_thread(struct gc_mutator *mut) {
   GC_unregister_my_thread();
 }
 
-void* gc_call_without_gc(struct mutator *mut,
+void* gc_call_without_gc(struct gc_mutator *mut,
                          void* (*f)(void*),
                          void *data) {
   return GC_do_blocking(f, data);
 }
 
-void gc_mutator_set_roots(struct mutator *mut,
+void gc_mutator_set_roots(struct gc_mutator *mut,
                           struct gc_mutator_roots *roots) {
 }
-void gc_heap_set_roots(struct heap *heap, struct gc_heap_roots *roots) {
+void gc_heap_set_roots(struct gc_heap *heap, struct gc_heap_roots *roots) {
 }
 
-void gc_print_stats(struct heap *heap) {
+void gc_print_stats(struct gc_heap *heap) {
   printf("Completed %ld collections\n", (long)GC_get_gc_no());
   printf("Heap size is %ld\n", (long)GC_get_heap_size());
 }
diff --git a/gc-api.h b/gc-api.h
index c00881f4f..2c4a636a1 100644
--- a/gc-api.h
+++ b/gc-api.h
@@ -12,9 +12,8 @@
 #include <stdint.h>
 #include <string.h>
 
-// FIXME: prefix with gc_
-struct heap;
-struct mutator;
+struct gc_heap;
+struct gc_mutator;
 
 enum {
   GC_OPTION_FIXED_HEAP_SIZE,
@@ -26,10 +25,6 @@ struct gc_option {
   double value;
 };
 
-struct gc_mutator {
-  void *user_data;
-};
-
 // FIXME: Conflict with bdw-gc GC_API.  Switch prefix?
 #ifndef GC_API_
 #define GC_API_ __attribute__((visibility("hidden")))
@@ -37,20 +32,20 @@ struct gc_mutator {
 
 GC_API_ int gc_option_from_string(const char *str);
 GC_API_ int gc_init(int argc, struct gc_option argv[],
-                    struct heap **heap, struct mutator **mutator);
+                    struct gc_heap **heap, struct gc_mutator **mutator);
 
 struct gc_mutator_roots;
 struct gc_heap_roots;
-GC_API_ void gc_mutator_set_roots(struct mutator *mut,
+GC_API_ void gc_mutator_set_roots(struct gc_mutator *mut,
                                   struct gc_mutator_roots *roots);
-GC_API_ void gc_heap_set_roots(struct heap *heap, struct gc_heap_roots *roots);
+GC_API_ void gc_heap_set_roots(struct gc_heap *heap, struct gc_heap_roots *roots);
 
-GC_API_ struct mutator* gc_init_for_thread(uintptr_t *stack_base,
-                                           struct heap *heap);
-GC_API_ void gc_finish_for_thread(struct mutator *mut);
-GC_API_ void* gc_call_without_gc(struct mutator *mut, void* (*f)(void*),
+GC_API_ struct gc_mutator* gc_init_for_thread(uintptr_t *stack_base,
+                                           struct gc_heap *heap);
+GC_API_ void gc_finish_for_thread(struct gc_mutator *mut);
+GC_API_ void* gc_call_without_gc(struct gc_mutator *mut, void* (*f)(void*),
                                  void *data) GC_NEVER_INLINE;
-GC_API_ void gc_print_stats(struct heap *heap);
+GC_API_ void gc_print_stats(struct gc_heap *heap);
 
 static inline void gc_clear_fresh_allocation(struct gc_ref obj,
                                              size_t size) GC_ALWAYS_INLINE;
@@ -60,10 +55,10 @@ static inline void gc_clear_fresh_allocation(struct gc_ref obj,
   memset(gc_ref_heap_object(obj), 0, size);
 }
 
-static inline void gc_update_alloc_table(struct mutator *mut,
+static inline void gc_update_alloc_table(struct gc_mutator *mut,
                                          struct gc_ref obj,
                                          size_t size) GC_ALWAYS_INLINE;
-static inline void gc_update_alloc_table(struct mutator *mut,
+static inline void gc_update_alloc_table(struct gc_mutator *mut,
                                          struct gc_ref obj,
                                          size_t size) {
   size_t alignment = gc_allocator_alloc_table_alignment();
@@ -92,12 +87,12 @@ static inline void gc_update_alloc_table(struct mutator *mut,
   }
 }
 
-GC_API_ void* gc_allocate_small(struct mutator *mut, size_t bytes) GC_NEVER_INLINE;
-GC_API_ void* gc_allocate_large(struct mutator *mut, size_t bytes) GC_NEVER_INLINE;
+GC_API_ void* gc_allocate_small(struct gc_mutator *mut, size_t bytes) GC_NEVER_INLINE;
+GC_API_ void* gc_allocate_large(struct gc_mutator *mut, size_t bytes) GC_NEVER_INLINE;
 
 static inline void*
-gc_allocate_bump_pointer(struct mutator *mut, size_t size) GC_ALWAYS_INLINE;
-static inline void* gc_allocate_bump_pointer(struct mutator *mut, size_t size) {
+gc_allocate_bump_pointer(struct gc_mutator *mut, size_t size) GC_ALWAYS_INLINE;
+static inline void* gc_allocate_bump_pointer(struct gc_mutator *mut, size_t size) {
   GC_ASSERT(size <= gc_allocator_large_threshold());
 
   size_t granule_size = gc_allocator_small_granule_size();
@@ -124,9 +119,9 @@ static inline void* gc_allocate_bump_pointer(struct mutator *mut, size_t size) {
   return (void*)hp;
 }
 
-static inline void* gc_allocate_freelist(struct mutator *mut,
+static inline void* gc_allocate_freelist(struct gc_mutator *mut,
                                          size_t size) GC_ALWAYS_INLINE;
-static inline void* gc_allocate_freelist(struct mutator *mut, size_t size) {
+static inline void* gc_allocate_freelist(struct gc_mutator *mut, size_t size) {
   GC_ASSERT(size <= gc_allocator_large_threshold());
 
   size_t freelist_offset = gc_allocator_freelist_offset(size);
@@ -145,8 +140,8 @@ static inline void* gc_allocate_freelist(struct mutator *mut, size_t size) {
   return head;
 }
 
-static inline void* gc_allocate(struct mutator *mut, size_t bytes) GC_ALWAYS_INLINE;
-static inline void* gc_allocate(struct mutator *mut, size_t size) {
+static inline void* gc_allocate(struct gc_mutator *mut, size_t bytes) GC_ALWAYS_INLINE;
+static inline void* gc_allocate(struct gc_mutator *mut, size_t size) {
   GC_ASSERT(size != 0);
   if (size > gc_allocator_large_threshold())
     return gc_allocate_large(mut, size);
@@ -164,7 +159,7 @@ static inline void* gc_allocate(struct mutator *mut, size_t size) {
 }
 
 // FIXME: remove :P
-GC_API_ void* gc_allocate_pointerless(struct mutator *mut, size_t bytes);
+GC_API_ void* gc_allocate_pointerless(struct gc_mutator *mut, size_t bytes);
 
 static inline void gc_small_write_barrier(struct gc_ref obj, struct gc_edge edge,
                                           struct gc_ref new_val) GC_ALWAYS_INLINE;
diff --git a/large-object-space.h b/large-object-space.h
index 68f1cf1cf..dd223f94e 100644
--- a/large-object-space.h
+++ b/large-object-space.h
@@ -18,7 +18,7 @@
 // objects are in which space.  That way we slot into the abstraction of a
 // copying collector while not actually copying data.
 
-struct heap;
+struct gc_heap;
 struct gcobj;
 
 struct large_object_space {
@@ -39,7 +39,7 @@ struct large_object_space {
 };
 
 static int large_object_space_init(struct large_object_space *space,
-                                   struct heap *heap) {
+                                   struct gc_heap *heap) {
   pthread_mutex_init(&space->lock, NULL);
   space->page_size = getpagesize();
   space->page_size_log2 = __builtin_ctz(space->page_size);
diff --git a/mt-gcbench.c b/mt-gcbench.c
index 2d2012ae1..9c6e6b5d4 100644
--- a/mt-gcbench.c
+++ b/mt-gcbench.c
@@ -65,12 +65,12 @@ static const int max_tree_depth = 16;
 typedef HANDLE_TO(Node) NodeHandle;
 typedef HANDLE_TO(DoubleArray) DoubleArrayHandle;
 
-static Node* allocate_node(struct mutator *mut) {
+static Node* allocate_node(struct gc_mutator *mut) {
   // memset to 0 by the collector.
   return gc_allocate_with_kind(mut, ALLOC_KIND_NODE, sizeof (Node));
 }
 
-static DoubleArray* allocate_double_array(struct mutator *mut,
+static DoubleArray* allocate_double_array(struct gc_mutator *mut,
                                                  size_t size) {
   // May be uninitialized.
   size_t bytes = sizeof(DoubleArray) + sizeof (double) * size;
@@ -80,7 +80,7 @@ static DoubleArray* allocate_double_array(struct mutator *mut,
   return ret;
 }
 
-static Hole* allocate_hole(struct mutator *mut, size_t size) {
+static Hole* allocate_hole(struct gc_mutator *mut, size_t size) {
   size_t bytes = sizeof(Hole) + sizeof (uintptr_t) * size;
   Hole *ret = gc_allocate_with_kind(mut, ALLOC_KIND_HOLE, bytes);
   ret->length = size;
@@ -136,7 +136,7 @@ static size_t power_law(size_t *counter) {
 }
 
 struct thread {
-  struct mutator *mut;
+  struct gc_mutator *mut;
   struct gc_mutator_roots roots;
   size_t counter;
 };
@@ -157,7 +157,7 @@ static void set_field(Node *obj, Node **field, Node *val) {
 
 // Build tree top down, assigning to older objects.
 static void populate(struct thread *t, int depth, Node *node) {
-  struct mutator *mut = t->mut;
+  struct gc_mutator *mut = t->mut;
   if (depth <= 0)
     return;
 
@@ -185,7 +185,7 @@ static void populate(struct thread *t, int depth, Node *node) {
 
 // Build tree bottom-up
 static Node* make_tree(struct thread *t, int depth) {
-  struct mutator *mut = t->mut;
+  struct gc_mutator *mut = t->mut;
   if (depth <= 0)
     return allocate_node(mut);
 
@@ -224,7 +224,7 @@ static void validate_tree(Node *tree, int depth) {
 }
 
 static void time_construction(struct thread *t, int depth) {
-  struct mutator *mut = t->mut;
+  struct gc_mutator *mut = t->mut;
   int num_iters = compute_num_iters(depth);
   NodeHandle temp_tree = { NULL };
   PUSH_HANDLE(t, temp_tree);
@@ -270,23 +270,23 @@ static void* call_with_stack_base(void* (*f)(uintptr_t *stack_base, void *arg),
 }
 
 struct call_with_gc_data {
-  void* (*f)(struct mutator *);
-  struct heap *heap;
+  void* (*f)(struct gc_mutator *);
+  struct gc_heap *heap;
 };
 static void* call_with_gc_inner(uintptr_t *stack_base, void *arg) {
   struct call_with_gc_data *data = arg;
-  struct mutator *mut = gc_init_for_thread(stack_base, data->heap);
+  struct gc_mutator *mut = gc_init_for_thread(stack_base, data->heap);
   void *ret = data->f(mut);
   gc_finish_for_thread(mut);
   return ret;
 }
-static void* call_with_gc(void* (*f)(struct mutator *),
-                          struct heap *heap) {
+static void* call_with_gc(void* (*f)(struct gc_mutator *),
+                          struct gc_heap *heap) {
   struct call_with_gc_data data = { f, heap };
   return call_with_stack_base(call_with_gc_inner, &data);
 }
 
-static void* run_one_test(struct mutator *mut) {
+static void* run_one_test(struct gc_mutator *mut) {
   NodeHandle long_lived_tree = { NULL };
   NodeHandle temp_tree = { NULL };
   DoubleArrayHandle array = { NULL };
@@ -330,7 +330,7 @@ static void* run_one_test(struct mutator *mut) {
 }
 
 static void* run_one_test_in_thread(void *arg) {
-  struct heap *heap = arg;
+  struct gc_heap *heap = arg;
   return call_with_gc(run_one_test, heap);
 }
 
@@ -375,8 +375,8 @@ int main(int argc, char *argv[]) {
   size_t heap_size = heap_max_live * multiplier * nthreads;
   struct gc_option options[] = { { GC_OPTION_FIXED_HEAP_SIZE, heap_size },
                                  { GC_OPTION_PARALLELISM, parallelism } };
-  struct heap *heap;
-  struct mutator *mut;
+  struct gc_heap *heap;
+  struct gc_mutator *mut;
   if (!gc_init(sizeof options / sizeof options[0], options, &heap, &mut)) {
     fprintf(stderr, "Failed to initialize GC with heap size %zu bytes\n",
             heap_size);
diff --git a/parallel-tracer.h b/parallel-tracer.h
index 07561e215..56dfa39b6 100644
--- a/parallel-tracer.h
+++ b/parallel-tracer.h
@@ -291,9 +291,9 @@ enum trace_worker_state {
   TRACE_WORKER_DEAD
 };
 
-struct heap;
+struct gc_heap;
 struct trace_worker {
-  struct heap *heap;
+  struct gc_heap *heap;
   size_t id;
   size_t steal_id;
   pthread_t thread;
@@ -318,15 +318,15 @@ struct tracer {
 struct local_tracer {
   struct trace_worker *worker;
   struct trace_deque *share_deque;
-  struct heap *heap;
+  struct gc_heap *heap;
   struct local_trace_queue local;
 };
 
 struct context;
-static inline struct tracer* heap_tracer(struct heap *heap);
+static inline struct tracer* heap_tracer(struct gc_heap *heap);
 
 static int
-trace_worker_init(struct trace_worker *worker, struct heap *heap,
+trace_worker_init(struct trace_worker *worker, struct gc_heap *heap,
                  struct tracer *tracer, size_t id) {
   worker->heap = heap;
   worker->id = id;
@@ -414,7 +414,7 @@ trace_worker_request_stop(struct trace_worker *worker) {
 }  
 
 static int
-tracer_init(struct heap *heap, size_t parallelism) {
+tracer_init(struct gc_heap *heap, size_t parallelism) {
   struct tracer *tracer = heap_tracer(heap);
   atomic_init(&tracer->active_tracers, 0);
   atomic_init(&tracer->running_tracers, 0);
@@ -436,12 +436,12 @@ tracer_init(struct heap *heap, size_t parallelism) {
   return tracer->worker_count > 0;
 }
 
-static void tracer_prepare(struct heap *heap) {
+static void tracer_prepare(struct gc_heap *heap) {
   struct tracer *tracer = heap_tracer(heap);
   for (size_t i = 0; i < tracer->worker_count; i++)
     tracer->workers[i].steal_id = 0;
 }
-static void tracer_release(struct heap *heap) {
+static void tracer_release(struct gc_heap *heap) {
   struct tracer *tracer = heap_tracer(heap);
   for (size_t i = 0; i < tracer->worker_count; i++)
     trace_deque_release(&tracer->workers[i].deque);
@@ -450,7 +450,7 @@ static void tracer_release(struct heap *heap) {
 struct gcobj;
 static inline void tracer_visit(struct gc_edge edge, void *trace_data) GC_ALWAYS_INLINE;
 static inline void trace_one(struct gcobj *obj, void *trace_data) GC_ALWAYS_INLINE;
-static inline int trace_edge(struct heap *heap,
+static inline int trace_edge(struct gc_heap *heap,
                              struct gc_edge edge) GC_ALWAYS_INLINE;
 
 static inline void
@@ -601,7 +601,7 @@ tracer_enqueue_roots(struct tracer *tracer, struct gcobj **objv,
 }
 
 static inline void
-tracer_trace(struct heap *heap) {
+tracer_trace(struct gc_heap *heap) {
   struct tracer *tracer = heap_tracer(heap);
 
   pthread_mutex_lock(&tracer->lock);
diff --git a/quads.c b/quads.c
index 7620ff693..6136988b8 100644
--- a/quads.c
+++ b/quads.c
@@ -15,7 +15,7 @@
 
 typedef HANDLE_TO(Quad) QuadHandle;
 
-static Quad* allocate_quad(struct mutator *mut) {
+static Quad* allocate_quad(struct gc_mutator *mut) {
   // memset to 0 by the collector.
   return gc_allocate_with_kind(mut, ALLOC_KIND_QUAD, sizeof (Quad));
 }
@@ -30,7 +30,7 @@ static unsigned long current_time(void)
 }
 
 struct thread {
-  struct mutator *mut;
+  struct gc_mutator *mut;
   struct gc_mutator_roots roots;
   size_t counter;
 };
@@ -134,8 +134,8 @@ int main(int argc, char *argv[]) {
 
   struct gc_option options[] = { { GC_OPTION_FIXED_HEAP_SIZE, heap_size },
                                  { GC_OPTION_PARALLELISM, parallelism } };
-  struct heap *heap;
-  struct mutator *mut;
+  struct gc_heap *heap;
+  struct gc_mutator *mut;
   if (!gc_init(sizeof options / sizeof options[0], options, &heap, &mut)) {
     fprintf(stderr, "Failed to initialize GC with heap size %zu bytes\n",
             heap_size);
diff --git a/semi.c b/semi.c
index 64a9ee652..7e44527a1 100644
--- a/semi.c
+++ b/semi.c
@@ -28,13 +28,13 @@ struct semi_space {
   size_t size;
   long count;
 };
-struct heap {
+struct gc_heap {
   struct semi_space semi_space;
   struct large_object_space large_object_space;
 };
 // One mutator per space, can just store the heap in the mutator.
-struct mutator {
-  struct heap heap;
+struct gc_mutator {
+  struct gc_heap heap;
   struct gc_mutator_roots *roots;
 };
 
@@ -43,16 +43,16 @@ static inline void clear_memory(uintptr_t addr, size_t size) {
   memset((char*)addr, 0, size);
 }
 
-static inline struct heap* mutator_heap(struct mutator *mut) {
+static inline struct gc_heap* mutator_heap(struct gc_mutator *mut) {
   return &mut->heap;
 }
-static inline struct semi_space* heap_semi_space(struct heap *heap) {
+static inline struct semi_space* heap_semi_space(struct gc_heap *heap) {
   return &heap->semi_space;
 }
-static inline struct large_object_space* heap_large_object_space(struct heap *heap) {
+static inline struct large_object_space* heap_large_object_space(struct gc_heap *heap) {
   return &heap->large_object_space;
 }
-static inline struct semi_space* mutator_semi_space(struct mutator *mut) {
+static inline struct semi_space* mutator_semi_space(struct gc_mutator *mut) {
   return heap_semi_space(mutator_heap(mut));
 }
 
@@ -60,8 +60,8 @@ static uintptr_t align_up(uintptr_t addr, size_t align) {
   return (addr + align - 1) & ~(align-1);
 }
 
-static void collect(struct mutator *mut) GC_NEVER_INLINE;
-static void collect_for_alloc(struct mutator *mut, size_t bytes) GC_NEVER_INLINE;
+static void collect(struct gc_mutator *mut) GC_NEVER_INLINE;
+static void collect_for_alloc(struct gc_mutator *mut, size_t bytes) GC_NEVER_INLINE;
 
 static void visit(struct gc_edge edge, void *visit_data);
 
@@ -111,7 +111,7 @@ static void* copy(struct semi_space *space, void *obj) {
   return new_obj;
 }
 
-static uintptr_t scan(struct heap *heap, uintptr_t grey) {
+static uintptr_t scan(struct gc_heap *heap, uintptr_t grey) {
   size_t size;
   gc_trace_object((void*)grey, visit, heap, &size);
   return grey + align_up(size, GC_ALIGNMENT);
@@ -122,12 +122,12 @@ static void* forward(struct semi_space *space, void *obj) {
   return forwarded ? (void*)forwarded : copy(space, obj);
 }  
 
-static void visit_semi_space(struct heap *heap, struct semi_space *space,
+static void visit_semi_space(struct gc_heap *heap, struct semi_space *space,
                              struct gc_edge edge, void *obj) {
   gc_edge_update(edge, gc_ref_from_heap_object(forward(space, obj)));
 }
 
-static void visit_large_object_space(struct heap *heap,
+static void visit_large_object_space(struct gc_heap *heap,
                                      struct large_object_space *space,
                                      void *obj) {
   if (large_object_space_copy(space, (uintptr_t)obj))
@@ -139,7 +139,7 @@ static int semi_space_contains(struct semi_space *space, void *obj) {
 }
 
 static void visit(struct gc_edge edge, void *visit_data) {
-  struct heap *heap = visit_data;
+  struct gc_heap *heap = visit_data;
   struct gc_ref ref = gc_edge_ref(edge);
   if (!gc_ref_is_heap_object(ref))
     return;
@@ -152,8 +152,8 @@ static void visit(struct gc_edge edge, void *visit_data) {
     GC_CRASH();
 }
 
-static void collect(struct mutator *mut) {
-  struct heap *heap = mutator_heap(mut);
+static void collect(struct gc_mutator *mut) {
+  struct gc_heap *heap = mutator_heap(mut);
   struct semi_space *semi = heap_semi_space(heap);
   struct large_object_space *large = heap_large_object_space(heap);
   // fprintf(stderr, "start collect #%ld:\n", space->count);
@@ -170,7 +170,7 @@ static void collect(struct mutator *mut) {
   // fprintf(stderr, "%zd bytes copied\n", (space->size>>1)-(space->limit-space->hp));
 }
 
-static void collect_for_alloc(struct mutator *mut, size_t bytes) {
+static void collect_for_alloc(struct gc_mutator *mut, size_t bytes) {
   collect(mut);
   struct semi_space *space = mutator_semi_space(mut);
   if (space->limit - space->hp < bytes) {
@@ -179,8 +179,8 @@ static void collect_for_alloc(struct mutator *mut, size_t bytes) {
   }
 }
 
-void* gc_allocate_large(struct mutator *mut, size_t size) {
-  struct heap *heap = mutator_heap(mut);
+void* gc_allocate_large(struct gc_mutator *mut, size_t size) {
+  struct gc_heap *heap = mutator_heap(mut);
   struct large_object_space *space = heap_large_object_space(heap);
   struct semi_space *semi_space = heap_semi_space(heap);
 
@@ -205,7 +205,7 @@ void* gc_allocate_large(struct mutator *mut, size_t size) {
   return ret;
 }
 
-void* gc_allocate_small(struct mutator *mut, size_t size) {
+void* gc_allocate_small(struct gc_mutator *mut, size_t size) {
   struct semi_space *space = mutator_semi_space(mut);
   while (1) {
     uintptr_t addr = space->hp;
@@ -220,7 +220,7 @@ void* gc_allocate_small(struct mutator *mut, size_t size) {
     return (void *)addr;
   }
 }
-void* gc_allocate_pointerless(struct mutator *mut, size_t size) {
+void* gc_allocate_pointerless(struct gc_mutator *mut, size_t size) {
   return gc_allocate(mut, size);
 }
 
@@ -311,7 +311,7 @@ static int parse_options(int argc, struct gc_option argv[],
 }
 
 int gc_init(int argc, struct gc_option argv[],
-            struct heap **heap, struct mutator **mut) {
+            struct gc_heap **heap, struct gc_mutator **mut) {
   GC_ASSERT_EQ(gc_allocator_allocation_pointer_offset(),
                offsetof(struct semi_space, hp));
   GC_ASSERT_EQ(gc_allocator_allocation_limit_offset(),
@@ -321,7 +321,7 @@ int gc_init(int argc, struct gc_option argv[],
   if (!parse_options(argc, argv, &options))
     return 0;
 
-  *mut = calloc(1, sizeof(struct mutator));
+  *mut = calloc(1, sizeof(struct gc_mutator));
   if (!*mut) GC_CRASH();
   *heap = mutator_heap(*mut);
 
@@ -336,30 +336,30 @@ int gc_init(int argc, struct gc_option argv[],
   return 1;
 }
 
-void gc_mutator_set_roots(struct mutator *mut,
+void gc_mutator_set_roots(struct gc_mutator *mut,
                           struct gc_mutator_roots *roots) {
   mut->roots = roots;
 }
-void gc_heap_set_roots(struct heap *heap, struct gc_heap_roots *roots) {
+void gc_heap_set_roots(struct gc_heap *heap, struct gc_heap_roots *roots) {
   GC_CRASH();
 }
 
-struct mutator* gc_init_for_thread(uintptr_t *stack_base,
-                                   struct heap *heap) {
+struct gc_mutator* gc_init_for_thread(uintptr_t *stack_base,
+                                   struct gc_heap *heap) {
   fprintf(stderr,
           "Semispace copying collector not appropriate for multithreaded 
use.\n");
   GC_CRASH();
 }
-void gc_finish_for_thread(struct mutator *space) {
+void gc_finish_for_thread(struct gc_mutator *space) {
 }
 
-void* gc_call_without_gc(struct mutator *mut, void* (*f)(void*),
+void* gc_call_without_gc(struct gc_mutator *mut, void* (*f)(void*),
                          void *data) {
   // Can't be threads, then there won't be collection.
   return f(data);
 }
 
-void gc_print_stats(struct heap *heap) {
+void gc_print_stats(struct gc_heap *heap) {
   struct semi_space *space = heap_semi_space(heap);
   printf("Completed %ld collections\n", space->count);
   printf("Heap size is %zd\n", space->size);
diff --git a/serial-tracer.h b/serial-tracer.h
index 27ff36882..c61acf04e 100644
--- a/serial-tracer.h
+++ b/serial-tracer.h
@@ -125,22 +125,22 @@ struct tracer {
   struct trace_queue queue;
 };
 
-struct heap;
-static inline struct tracer* heap_tracer(struct heap *heap);
+struct gc_heap;
+static inline struct tracer* heap_tracer(struct gc_heap *heap);
 
 static int
-tracer_init(struct heap *heap, size_t parallelism) {
+tracer_init(struct gc_heap *heap, size_t parallelism) {
   return trace_queue_init(&heap_tracer(heap)->queue);
 }
-static void tracer_prepare(struct heap *heap) {}
-static void tracer_release(struct heap *heap) {
+static void tracer_prepare(struct gc_heap *heap) {}
+static void tracer_release(struct gc_heap *heap) {
   trace_queue_release(&heap_tracer(heap)->queue);
 }
 
 struct gcobj;
 static inline void tracer_visit(struct gc_edge edge, void *trace_data) GC_ALWAYS_INLINE;
 static inline void trace_one(struct gcobj *obj, void *trace_data) GC_ALWAYS_INLINE;
-static inline int trace_edge(struct heap *heap,
+static inline int trace_edge(struct gc_heap *heap,
                              struct gc_edge edge) GC_ALWAYS_INLINE;
 
 static inline void
@@ -154,13 +154,13 @@ tracer_enqueue_roots(struct tracer *tracer, struct gcobj **objs,
 }
 static inline void
 tracer_visit(struct gc_edge edge, void *trace_data) {
-  struct heap *heap = trace_data;
+  struct gc_heap *heap = trace_data;
   if (trace_edge(heap, edge))
     tracer_enqueue_root(heap_tracer(heap),
                         gc_ref_heap_object(gc_edge_ref(edge)));
 }
 static inline void
-tracer_trace(struct heap *heap) {
+tracer_trace(struct gc_heap *heap) {
   struct gcobj *obj;
   while ((obj = trace_queue_pop(&heap_tracer(heap)->queue)))
     trace_one(obj, heap);
diff --git a/simple-allocator.h b/simple-allocator.h
index f1f02f341..e7f5c6f15 100644
--- a/simple-allocator.h
+++ b/simple-allocator.h
@@ -5,14 +5,14 @@
 #include "gc-api.h"
 
 static inline void*
-gc_allocate_with_kind(struct mutator *mut, enum alloc_kind kind, size_t bytes) {
+gc_allocate_with_kind(struct gc_mutator *mut, enum alloc_kind kind, size_t bytes) {
   void *obj = gc_allocate(mut, bytes);
   *tag_word(obj) = tag_live(kind);
   return obj;
 }
 
 static inline void*
-gc_allocate_pointerless_with_kind(struct mutator *mut, enum alloc_kind kind, size_t bytes) {
+gc_allocate_pointerless_with_kind(struct gc_mutator *mut, enum alloc_kind kind, size_t bytes) {
   void *obj = gc_allocate_pointerless(mut, bytes);
   *tag_word(obj) = tag_live(kind);
   return obj;
diff --git a/whippet.c b/whippet.c
index 2fdafa1c7..c8f657a55 100644
--- a/whippet.c
+++ b/whippet.c
@@ -308,7 +308,7 @@ enum gc_kind {
   GC_KIND_MAJOR_EVACUATING = GC_KIND_FLAG_EVACUATING,
 };
 
-struct heap {
+struct gc_heap {
   struct mark_space mark_space;
   struct large_object_space large_object_space;
   size_t large_object_pages;
@@ -323,11 +323,11 @@ struct heap {
   size_t active_mutator_count;
   size_t mutator_count;
   struct gc_heap_roots *roots;
-  struct mutator *mutator_trace_list;
+  struct gc_mutator *mutator_trace_list;
   long count;
   long minor_count;
   uint8_t last_collection_was_minor;
-  struct mutator *deactivated_mutators;
+  struct gc_mutator *deactivated_mutators;
   struct tracer tracer;
   double fragmentation_low_threshold;
   double fragmentation_high_threshold;
@@ -336,37 +336,37 @@ struct heap {
   double minimum_major_gc_yield_threshold;
 };
 
-struct mutator_mark_buf {
+struct gc_mutator_mark_buf {
   size_t size;
   size_t capacity;
   struct gcobj **objects;
 };
 
-struct mutator {
+struct gc_mutator {
   // Bump-pointer allocation into holes.
   uintptr_t alloc;
   uintptr_t sweep;
   uintptr_t block;
-  struct heap *heap;
+  struct gc_heap *heap;
   struct gc_mutator_roots *roots;
-  struct mutator_mark_buf mark_buf;
+  struct gc_mutator_mark_buf mark_buf;
   // Three uses for this in-object linked-list pointer:
   //  - inactive (blocked in syscall) mutators
   //  - grey objects when stopping active mutators for mark-in-place
   //  - untraced mutators when stopping active mutators for evacuation
-  struct mutator *next;
+  struct gc_mutator *next;
 };
 
-static inline struct tracer* heap_tracer(struct heap *heap) {
+static inline struct tracer* heap_tracer(struct gc_heap *heap) {
   return &heap->tracer;
 }
-static inline struct mark_space* heap_mark_space(struct heap *heap) {
+static inline struct mark_space* heap_mark_space(struct gc_heap *heap) {
   return &heap->mark_space;
 }
-static inline struct large_object_space* heap_large_object_space(struct heap *heap) {
+static inline struct large_object_space* heap_large_object_space(struct gc_heap *heap) {
   return &heap->large_object_space;
 }
-static inline struct heap* mutator_heap(struct mutator *mutator) {
+static inline struct gc_heap* mutator_heap(struct gc_mutator *mutator) {
   return mutator->heap;
 }
 
@@ -374,7 +374,7 @@ static inline void clear_memory(uintptr_t addr, size_t size) {
   memset((char*)addr, 0, size);
 }
 
-static void collect(struct mutator *mut) GC_NEVER_INLINE;
+static void collect(struct gc_mutator *mut) GC_NEVER_INLINE;
 
 static int heap_object_is_large(struct gcobj *obj) {
   size_t size;
@@ -626,7 +626,7 @@ static inline int large_object_space_mark_object(struct large_object_space *spac
   return large_object_space_copy(space, (uintptr_t)obj);
 }
 
-static inline int trace_edge(struct heap *heap, struct gc_edge edge) {
+static inline int trace_edge(struct gc_heap *heap, struct gc_edge edge) {
   struct gc_ref ref = gc_edge_ref(edge);
   if (!gc_ref_is_heap_object(ref))
     return 0;
@@ -648,22 +648,22 @@ static inline void trace_one(struct gcobj *obj, void *mark_data) {
   gc_trace_object(obj, tracer_visit, mark_data, NULL);
 }
 
-static int heap_has_multiple_mutators(struct heap *heap) {
+static int heap_has_multiple_mutators(struct gc_heap *heap) {
   return atomic_load_explicit(&heap->multithreaded, memory_order_relaxed);
 }
 
-static int mutators_are_stopping(struct heap *heap) {
+static int mutators_are_stopping(struct gc_heap *heap) {
   return atomic_load_explicit(&heap->collecting, memory_order_relaxed);
 }
 
-static inline void heap_lock(struct heap *heap) {
+static inline void heap_lock(struct gc_heap *heap) {
   pthread_mutex_lock(&heap->lock);
 }
-static inline void heap_unlock(struct heap *heap) {
+static inline void heap_unlock(struct gc_heap *heap) {
   pthread_mutex_unlock(&heap->lock);
 }
 
-static void add_mutator(struct heap *heap, struct mutator *mut) {
+static void add_mutator(struct gc_heap *heap, struct gc_mutator *mut) {
   mut->heap = heap;
   heap_lock(heap);
   // We have no roots.  If there is a GC currently in progress, we have
@@ -677,7 +677,7 @@ static void add_mutator(struct heap *heap, struct mutator *mut) {
   heap_unlock(heap);
 }
 
-static void remove_mutator(struct heap *heap, struct mutator *mut) {
+static void remove_mutator(struct gc_heap *heap, struct gc_mutator *mut) {
   mut->heap = NULL;
   heap_lock(heap);
   heap->active_mutator_count--;
@@ -689,12 +689,12 @@ static void remove_mutator(struct heap *heap, struct mutator *mut) {
   heap_unlock(heap);
 }
 
-static void request_mutators_to_stop(struct heap *heap) {
+static void request_mutators_to_stop(struct gc_heap *heap) {
   GC_ASSERT(!mutators_are_stopping(heap));
   atomic_store_explicit(&heap->collecting, 1, memory_order_relaxed);
 }
 
-static void allow_mutators_to_continue(struct heap *heap) {
+static void allow_mutators_to_continue(struct gc_heap *heap) {
   GC_ASSERT(mutators_are_stopping(heap));
   GC_ASSERT(heap->active_mutator_count == 0);
   heap->active_mutator_count++;
@@ -780,9 +780,9 @@ static void mark_space_reacquire_memory(struct mark_space *space,
   }
 }
 
-static size_t next_hole(struct mutator *mut);
+static size_t next_hole(struct gc_mutator *mut);
 
-static int sweep_until_memory_released(struct mutator *mut) {
+static int sweep_until_memory_released(struct gc_mutator *mut) {
   struct mark_space *space = heap_mark_space(mutator_heap(mut));
   ssize_t pending = atomic_load_explicit(&space->pending_unavailable_bytes,
                                          memory_order_acquire);
@@ -816,7 +816,7 @@ static int sweep_until_memory_released(struct mutator *mut) {
   return pending <= 0;
 }
 
-static void heap_reset_large_object_pages(struct heap *heap, size_t npages) {
+static void heap_reset_large_object_pages(struct gc_heap *heap, size_t npages) {
   size_t previous = heap->large_object_pages;
   heap->large_object_pages = npages;
   GC_ASSERT(npages <= previous);
@@ -825,7 +825,7 @@ static void heap_reset_large_object_pages(struct heap *heap, size_t npages) {
   mark_space_reacquire_memory(heap_mark_space(heap), bytes);
 }
 
-static void mutator_mark_buf_grow(struct mutator_mark_buf *buf) {
+static void mutator_mark_buf_grow(struct gc_mutator_mark_buf *buf) {
   size_t old_capacity = buf->capacity;
   size_t old_bytes = old_capacity * sizeof(struct gcobj*);
 
@@ -846,30 +846,30 @@ static void mutator_mark_buf_grow(struct mutator_mark_buf *buf) {
   buf->capacity = new_capacity;
 }
 
-static void mutator_mark_buf_push(struct mutator_mark_buf *buf,
+static void mutator_mark_buf_push(struct gc_mutator_mark_buf *buf,
                                   struct gcobj *val) {
   if (GC_UNLIKELY(buf->size == buf->capacity))
     mutator_mark_buf_grow(buf);
   buf->objects[buf->size++] = val;
 }
 
-static void mutator_mark_buf_release(struct mutator_mark_buf *buf) {
+static void mutator_mark_buf_release(struct gc_mutator_mark_buf *buf) {
   size_t bytes = buf->size * sizeof(struct gcobj*);
   if (bytes >= getpagesize())
     madvise(buf->objects, align_up(bytes, getpagesize()), MADV_DONTNEED);
   buf->size = 0;
 }
 
-static void mutator_mark_buf_destroy(struct mutator_mark_buf *buf) {
+static void mutator_mark_buf_destroy(struct gc_mutator_mark_buf *buf) {
   size_t bytes = buf->capacity * sizeof(struct gcobj*);
   if (bytes)
     munmap(buf->objects, bytes);
 }
 
-static void enqueue_mutator_for_tracing(struct mutator *mut) {
-  struct heap *heap = mutator_heap(mut);
+static void enqueue_mutator_for_tracing(struct gc_mutator *mut) {
+  struct gc_heap *heap = mutator_heap(mut);
   GC_ASSERT(mut->next == NULL);
-  struct mutator *next =
+  struct gc_mutator *next =
     atomic_load_explicit(&heap->mutator_trace_list, memory_order_acquire);
   do {
     mut->next = next;
@@ -877,7 +877,7 @@ static void enqueue_mutator_for_tracing(struct mutator *mut) {
                                          &next, mut));
 }
 
-static int heap_should_mark_while_stopping(struct heap *heap) {
+static int heap_should_mark_while_stopping(struct gc_heap *heap) {
   if (heap->allow_pinning) {
     // The metadata byte is mostly used for marking and object extent.
     // For marking, we allow updates to race, because the state
@@ -901,27 +901,27 @@ static int heap_should_mark_while_stopping(struct heap *heap) {
   return (atomic_load(&heap->gc_kind) & GC_KIND_FLAG_EVACUATING) == 0;
 }
 
-static int mutator_should_mark_while_stopping(struct mutator *mut) {
+static int mutator_should_mark_while_stopping(struct gc_mutator *mut) {
   return heap_should_mark_while_stopping(mutator_heap(mut));
 }
 
-void gc_mutator_set_roots(struct mutator *mut,
+void gc_mutator_set_roots(struct gc_mutator *mut,
                           struct gc_mutator_roots *roots) {
   mut->roots = roots;
 }
-void gc_heap_set_roots(struct heap *heap, struct gc_heap_roots *roots) {
+void gc_heap_set_roots(struct gc_heap *heap, struct gc_heap_roots *roots) {
   heap->roots = roots;
 }
 
 static void trace_and_enqueue_locally(struct gc_edge edge, void *data) {
-  struct mutator *mut = data;
+  struct gc_mutator *mut = data;
   if (trace_edge(mutator_heap(mut), edge))
     mutator_mark_buf_push(&mut->mark_buf,
                           gc_ref_heap_object(gc_edge_ref(edge)));
 }
 
 static void trace_and_enqueue_globally(struct gc_edge edge, void *data) {
-  struct heap *heap = data;
+  struct gc_heap *heap = data;
   if (trace_edge(heap, edge))
     tracer_enqueue_root(&heap->tracer,
                         gc_ref_heap_object(gc_edge_ref(edge)));
@@ -929,43 +929,43 @@ static void trace_and_enqueue_globally(struct gc_edge edge, void *data) {
 
 // Mark the roots of a mutator that is stopping for GC.  We can't
 // enqueue them directly, so we send them to the controller in a buffer.
-static void mark_stopping_mutator_roots(struct mutator *mut) {
+static void mark_stopping_mutator_roots(struct gc_mutator *mut) {
   GC_ASSERT(mutator_should_mark_while_stopping(mut));
   gc_trace_mutator_roots(mut->roots, trace_and_enqueue_locally, mut);
 }
 
 // Precondition: the caller holds the heap lock.
-static void mark_mutator_roots_with_lock(struct mutator *mut) {
+static void mark_mutator_roots_with_lock(struct gc_mutator *mut) {
   gc_trace_mutator_roots(mut->roots, trace_and_enqueue_globally,
                          mutator_heap(mut));
 }
 
-static void trace_mutator_roots_with_lock(struct mutator *mut) {
+static void trace_mutator_roots_with_lock(struct gc_mutator *mut) {
   mark_mutator_roots_with_lock(mut);
 }
 
-static void trace_mutator_roots_with_lock_before_stop(struct mutator *mut) {
+static void trace_mutator_roots_with_lock_before_stop(struct gc_mutator *mut) {
   if (mutator_should_mark_while_stopping(mut))
     mark_mutator_roots_with_lock(mut);
   else
     enqueue_mutator_for_tracing(mut);
 }
 
-static void release_stopping_mutator_roots(struct mutator *mut) {
+static void release_stopping_mutator_roots(struct gc_mutator *mut) {
   mutator_mark_buf_release(&mut->mark_buf);
 }
 
-static void wait_for_mutators_to_stop(struct heap *heap) {
+static void wait_for_mutators_to_stop(struct gc_heap *heap) {
   heap->active_mutator_count--;
   while (heap->active_mutator_count)
     pthread_cond_wait(&heap->collector_cond, &heap->lock);
 }
 
-static void finish_sweeping(struct mutator *mut);
-static void finish_sweeping_in_block(struct mutator *mut);
+static void finish_sweeping(struct gc_mutator *mut);
+static void finish_sweeping_in_block(struct gc_mutator *mut);
 
-static void trace_mutator_roots_after_stop(struct heap *heap) {
-  struct mutator *mut = atomic_load(&heap->mutator_trace_list);
+static void trace_mutator_roots_after_stop(struct gc_heap *heap) {
+  struct gc_mutator *mut = atomic_load(&heap->mutator_trace_list);
   int active_mutators_already_marked = heap_should_mark_while_stopping(heap);
   while (mut) {
     if (active_mutators_already_marked)
@@ -973,24 +973,24 @@ static void trace_mutator_roots_after_stop(struct heap *heap) {
                            mut->mark_buf.objects, mut->mark_buf.size);
     else
       trace_mutator_roots_with_lock(mut);
-    struct mutator *next = mut->next;
+    struct gc_mutator *next = mut->next;
     mut->next = NULL;
     mut = next;
   }
   atomic_store(&heap->mutator_trace_list, NULL);
 
-  for (struct mutator *mut = heap->deactivated_mutators; mut; mut = mut->next) {
+  for (struct gc_mutator *mut = heap->deactivated_mutators; mut; mut = mut->next) {
     finish_sweeping_in_block(mut);
     trace_mutator_roots_with_lock(mut);
   }
 }
 
-static void trace_global_roots(struct heap *heap) {
+static void trace_global_roots(struct gc_heap *heap) {
   gc_trace_heap_roots(heap->roots, trace_and_enqueue_globally, heap);
 }
 
 static inline int
-heap_object_is_young(struct heap *heap, struct gcobj *obj) {
+heap_object_is_young(struct gc_heap *heap, struct gcobj *obj) {
   if (GC_UNLIKELY(!mark_space_contains(heap_mark_space(heap), obj))) {
     // No lospace nursery, for the moment.
     return 0;
@@ -1023,7 +1023,7 @@ static uint64_t broadcast_byte(uint8_t byte) {
 // byte doesn't hold any roots, if all stores were to nursery objects.
 STATIC_ASSERT_EQ(GRANULES_PER_REMSET_BYTE % 8, 0);
 static void mark_space_trace_card(struct mark_space *space,
-                                  struct heap *heap, struct slab *slab,
+                                  struct gc_heap *heap, struct slab *slab,
                                   size_t card) {
   uintptr_t first_addr_in_slab = (uintptr_t) &slab->blocks[0];
   size_t granule_base = card * GRANULES_PER_REMSET_BYTE;
@@ -1045,7 +1045,7 @@ static void mark_space_trace_card(struct mark_space *space,
 }
 
 static void mark_space_trace_remembered_set(struct mark_space *space,
-                                            struct heap *heap) {
+                                            struct gc_heap *heap) {
   GC_ASSERT(!space->evacuating);
   for (size_t s = 0; s < space->nslabs; s++) {
     struct slab *slab = &space->slabs[s];
@@ -1072,7 +1072,7 @@ static void mark_space_clear_remembered_set(struct mark_space *space) {
   }
 }
 
-static void trace_generational_roots(struct heap *heap) {
+static void trace_generational_roots(struct gc_heap *heap) {
   // TODO: Add lospace nursery.
   if (atomic_load(&heap->gc_kind) & GC_KIND_FLAG_MINOR) {
     mark_space_trace_remembered_set(heap_mark_space(heap), heap);
@@ -1081,8 +1081,8 @@ static void trace_generational_roots(struct heap *heap) {
   }
 }
 
-static void pause_mutator_for_collection(struct heap *heap) GC_NEVER_INLINE;
-static void pause_mutator_for_collection(struct heap *heap) {
+static void pause_mutator_for_collection(struct gc_heap *heap) GC_NEVER_INLINE;
+static void pause_mutator_for_collection(struct gc_heap *heap) {
   GC_ASSERT(mutators_are_stopping(heap));
   GC_ASSERT(heap->active_mutator_count);
   heap->active_mutator_count--;
@@ -1104,9 +1104,9 @@ static void pause_mutator_for_collection(struct heap *heap) {
   heap->active_mutator_count++;
 }
 
-static void pause_mutator_for_collection_with_lock(struct mutator *mut) GC_NEVER_INLINE;
-static void pause_mutator_for_collection_with_lock(struct mutator *mut) {
-  struct heap *heap = mutator_heap(mut);
+static void pause_mutator_for_collection_with_lock(struct gc_mutator *mut) GC_NEVER_INLINE;
+static void pause_mutator_for_collection_with_lock(struct gc_mutator *mut) {
+  struct gc_heap *heap = mutator_heap(mut);
   GC_ASSERT(mutators_are_stopping(heap));
   finish_sweeping_in_block(mut);
   if (mutator_should_mark_while_stopping(mut))
@@ -1117,9 +1117,9 @@ static void pause_mutator_for_collection_with_lock(struct mutator *mut) {
   pause_mutator_for_collection(heap);
 }
 
-static void pause_mutator_for_collection_without_lock(struct mutator *mut) GC_NEVER_INLINE;
-static void pause_mutator_for_collection_without_lock(struct mutator *mut) {
-  struct heap *heap = mutator_heap(mut);
+static void pause_mutator_for_collection_without_lock(struct gc_mutator *mut) GC_NEVER_INLINE;
+static void pause_mutator_for_collection_without_lock(struct gc_mutator *mut) {
+  struct gc_heap *heap = mutator_heap(mut);
   GC_ASSERT(mutators_are_stopping(heap));
   finish_sweeping(mut);
   if (mutator_should_mark_while_stopping(mut))
@@ -1131,7 +1131,7 @@ static void pause_mutator_for_collection_without_lock(struct mutator *mut) {
   release_stopping_mutator_roots(mut);
 }
 
-static inline void maybe_pause_mutator_for_collection(struct mutator *mut) {
+static inline void maybe_pause_mutator_for_collection(struct gc_mutator *mut) {
   while (mutators_are_stopping(mutator_heap(mut)))
     pause_mutator_for_collection_without_lock(mut);
 }
@@ -1155,11 +1155,11 @@ static void reset_statistics(struct mark_space *space) {
   space->fragmentation_granules_since_last_collection = 0;
 }
 
-static int maybe_grow_heap(struct heap *heap) {
+static int maybe_grow_heap(struct gc_heap *heap) {
   return 0;
 }
 
-static double heap_last_gc_yield(struct heap *heap) {
+static double heap_last_gc_yield(struct gc_heap *heap) {
   struct mark_space *mark_space = heap_mark_space(heap);
   size_t mark_space_yield = mark_space->granules_freed_by_last_collection;
   mark_space_yield <<= GRANULE_SIZE_LOG_2;
@@ -1180,7 +1180,7 @@ static double heap_last_gc_yield(struct heap *heap) {
   return yield / heap->size;
 }
 
-static double heap_fragmentation(struct heap *heap) {
+static double heap_fragmentation(struct gc_heap *heap) {
   struct mark_space *mark_space = heap_mark_space(heap);
   size_t fragmentation_granules =
     mark_space->fragmentation_granules_since_last_collection;
@@ -1189,7 +1189,7 @@ static double heap_fragmentation(struct heap *heap) {
   return ((double)fragmentation_granules) / heap_granules;
 }
 
-static void detect_out_of_memory(struct heap *heap) {
+static void detect_out_of_memory(struct gc_heap *heap) {
   struct mark_space *mark_space = heap_mark_space(heap);
   struct large_object_space *lospace = heap_large_object_space(heap);
 
@@ -1216,7 +1216,7 @@ static void detect_out_of_memory(struct heap *heap) {
   GC_CRASH();
 }
 
-static double clamp_major_gc_yield_threshold(struct heap *heap,
+static double clamp_major_gc_yield_threshold(struct gc_heap *heap,
                                              double threshold) {
   if (threshold < heap->minimum_major_gc_yield_threshold)
     threshold = heap->minimum_major_gc_yield_threshold;
@@ -1226,7 +1226,7 @@ static double clamp_major_gc_yield_threshold(struct heap *heap,
   return threshold;
 }
 
-static enum gc_kind determine_collection_kind(struct heap *heap) {
+static enum gc_kind determine_collection_kind(struct gc_heap *heap) {
   struct mark_space *mark_space = heap_mark_space(heap);
   enum gc_kind previous_gc_kind = atomic_load(&heap->gc_kind);
   enum gc_kind gc_kind;
@@ -1305,7 +1305,7 @@ static void release_evacuation_target_blocks(struct mark_space *space) {
                               reserve);
 }
 
-static void prepare_for_evacuation(struct heap *heap) {
+static void prepare_for_evacuation(struct gc_heap *heap) {
   struct mark_space *space = heap_mark_space(heap);
 
   if ((heap->gc_kind & GC_KIND_FLAG_EVACUATING) == 0) {
@@ -1397,13 +1397,13 @@ static void prepare_for_evacuation(struct heap *heap) {
   space->evacuating = 1;
 }
 
-static void trace_conservative_roots_after_stop(struct heap *heap) {
+static void trace_conservative_roots_after_stop(struct gc_heap *heap) {
   // FIXME: Visit conservative roots, if the collector is configured in
   // that way.  Mark them in place, preventing any subsequent
   // evacuation.
 }
 
-static void trace_precise_roots_after_stop(struct heap *heap) {
+static void trace_precise_roots_after_stop(struct gc_heap *heap) {
   trace_mutator_roots_after_stop(heap);
   trace_global_roots(heap);
   trace_generational_roots(heap);
@@ -1418,8 +1418,8 @@ static void mark_space_finish_gc(struct mark_space *space,
   release_evacuation_target_blocks(space);
 }
 
-static void collect(struct mutator *mut) {
-  struct heap *heap = mutator_heap(mut);
+static void collect(struct gc_mutator *mut) {
+  struct gc_heap *heap = mutator_heap(mut);
   struct mark_space *space = heap_mark_space(heap);
   struct large_object_space *lospace = heap_large_object_space(heap);
   if (maybe_grow_heap(heap)) {
@@ -1524,7 +1524,7 @@ static uintptr_t mark_space_next_block_to_sweep(struct mark_space *space) {
   return block;
 }
 
-static void finish_block(struct mutator *mut) {
+static void finish_block(struct gc_mutator *mut) {
   GC_ASSERT(mut->block);
   struct block_summary *block = block_summary_for_addr(mut->block);
   struct mark_space *space = heap_mark_space(mutator_heap(mut));
@@ -1547,7 +1547,7 @@ static void finish_block(struct mutator *mut) {
 
 // Sweep some heap to reclaim free space, resetting mut->alloc and
 // mut->sweep.  Return the size of the hole in granules.
-static size_t next_hole_in_block(struct mutator *mut) {
+static size_t next_hole_in_block(struct gc_mutator *mut) {
   uintptr_t sweep = mut->sweep;
   if (sweep == 0)
     return 0;
@@ -1596,7 +1596,7 @@ static size_t next_hole_in_block(struct mutator *mut) {
   return 0;
 }
 
-static void finish_hole(struct mutator *mut) {
+static void finish_hole(struct gc_mutator *mut) {
   size_t granules = (mut->sweep - mut->alloc) / GRANULE_SIZE;
   if (granules) {
     struct block_summary *summary = block_summary_for_addr(mut->block);
@@ -1609,7 +1609,7 @@ static void finish_hole(struct mutator *mut) {
   // FIXME: add to fragmentation
 }
 
-static int maybe_release_swept_empty_block(struct mutator *mut) {
+static int maybe_release_swept_empty_block(struct gc_mutator *mut) {
   GC_ASSERT(mut->block);
   struct mark_space *space = heap_mark_space(mutator_heap(mut));
   uintptr_t block = mut->block;
@@ -1623,7 +1623,7 @@ static int maybe_release_swept_empty_block(struct mutator *mut) {
   return 1;
 }
 
-static size_t next_hole(struct mutator *mut) {
+static size_t next_hole(struct gc_mutator *mut) {
   finish_hole(mut);
   // As we sweep if we find that a block is empty, we return it to the
   // empties list.  Empties are precious.  But if we return 10 blocks in
@@ -1740,20 +1740,20 @@ static size_t next_hole(struct mutator *mut) {
   }
 }
 
-static void finish_sweeping_in_block(struct mutator *mut) {
+static void finish_sweeping_in_block(struct gc_mutator *mut) {
   while (next_hole_in_block(mut))
     finish_hole(mut);
 }
 
 // Another thread is triggering GC.  Before we stop, finish clearing the
 // dead mark bytes for the mutator's block, and release the block.
-static void finish_sweeping(struct mutator *mut) {
+static void finish_sweeping(struct gc_mutator *mut) {
   while (next_hole(mut))
     finish_hole(mut);
 }
 
-static void trigger_collection(struct mutator *mut) {
-  struct heap *heap = mutator_heap(mut);
+static void trigger_collection(struct gc_mutator *mut) {
+  struct gc_heap *heap = mutator_heap(mut);
   heap_lock(heap);
   if (mutators_are_stopping(heap))
     pause_mutator_for_collection_with_lock(mut);
@@ -1762,8 +1762,8 @@ static void trigger_collection(struct mutator *mut) {
   heap_unlock(heap);
 }
 
-void* gc_allocate_large(struct mutator *mut, size_t size) {
-  struct heap *heap = mutator_heap(mut);
+void* gc_allocate_large(struct gc_mutator *mut, size_t size) {
+  struct gc_heap *heap = mutator_heap(mut);
   struct large_object_space *space = heap_large_object_space(heap);
 
   size_t npages = large_object_space_npages(space, size);
@@ -1787,7 +1787,7 @@ void* gc_allocate_large(struct mutator *mut, size_t size) {
   return ret;
 }
 
-void* gc_allocate_small(struct mutator *mut, size_t size) {
+void* gc_allocate_small(struct gc_mutator *mut, size_t size) {
   GC_ASSERT(size > 0); // allocating 0 bytes would be silly
   GC_ASSERT(size <= gc_allocator_large_threshold());
   size = align_up(size, GRANULE_SIZE);
@@ -1816,7 +1816,7 @@ void* gc_allocate_small(struct mutator *mut, size_t size) {
   return obj;
 }
 
-void* gc_allocate_pointerless(struct mutator *mut, size_t size) {
+void* gc_allocate_pointerless(struct gc_mutator *mut, size_t size) {
   return gc_allocate(mut, size);
 }
 
@@ -1907,7 +1907,7 @@ static struct slab* allocate_slabs(size_t nslabs) {
   return (struct slab*) aligned_base;
 }
 
-static int heap_init(struct heap *heap, struct options *options) {
+static int heap_init(struct gc_heap *heap, struct options *options) {
   // *heap is already initialized to 0.
 
   pthread_mutex_init(&heap->lock, NULL);
@@ -1928,7 +1928,7 @@ static int heap_init(struct heap *heap, struct options *options) {
   return 1;
 }
 
-static int mark_space_init(struct mark_space *space, struct heap *heap) {
+static int mark_space_init(struct mark_space *space, struct gc_heap *heap) {
   size_t size = align_up(heap->size, SLAB_SIZE);
   size_t nslabs = size / SLAB_SIZE;
   struct slab *slabs = allocate_slabs(nslabs);
@@ -1961,13 +1961,13 @@ static int mark_space_init(struct mark_space *space, struct heap *heap) {
 }
 
 int gc_init(int argc, struct gc_option argv[],
-                   struct heap **heap, struct mutator **mut) {
+            struct gc_heap **heap, struct gc_mutator **mut) {
   GC_ASSERT_EQ(gc_allocator_small_granule_size(), GRANULE_SIZE);
   GC_ASSERT_EQ(gc_allocator_large_threshold(), LARGE_OBJECT_THRESHOLD);
   GC_ASSERT_EQ(gc_allocator_allocation_pointer_offset(),
-               offsetof(struct mutator, alloc));
+               offsetof(struct gc_mutator, alloc));
   GC_ASSERT_EQ(gc_allocator_allocation_limit_offset(),
-               offsetof(struct mutator, sweep));
+               offsetof(struct gc_mutator, sweep));
   GC_ASSERT_EQ(gc_allocator_alloc_table_alignment(), SLAB_SIZE);
   GC_ASSERT_EQ(gc_allocator_alloc_table_begin_pattern(), METADATA_BYTE_YOUNG);
   GC_ASSERT_EQ(gc_allocator_alloc_table_end_pattern(), METADATA_BYTE_END);
@@ -1981,7 +1981,7 @@ int gc_init(int argc, struct gc_option argv[],
   if (!parse_options(argc, argv, &options))
     return 0;
 
-  *heap = calloc(1, sizeof(struct heap));
+  *heap = calloc(1, sizeof(struct gc_heap));
   if (!*heap) GC_CRASH();
 
   if (!heap_init(*heap, &options))
@@ -1997,28 +1997,28 @@ int gc_init(int argc, struct gc_option argv[],
   if (!large_object_space_init(heap_large_object_space(*heap), *heap))
     GC_CRASH();
 
-  *mut = calloc(1, sizeof(struct mutator));
+  *mut = calloc(1, sizeof(struct gc_mutator));
   if (!*mut) GC_CRASH();
   add_mutator(*heap, *mut);
   return 1;
 }
 
-struct mutator* gc_init_for_thread(uintptr_t *stack_base,
-                                          struct heap *heap) {
-  struct mutator *ret = calloc(1, sizeof(struct mutator));
+struct gc_mutator* gc_init_for_thread(uintptr_t *stack_base,
+                                      struct gc_heap *heap) {
+  struct gc_mutator *ret = calloc(1, sizeof(struct gc_mutator));
   if (!ret)
     GC_CRASH();
   add_mutator(heap, ret);
   return ret;
 }
 
-void gc_finish_for_thread(struct mutator *mut) {
+void gc_finish_for_thread(struct gc_mutator *mut) {
   remove_mutator(mutator_heap(mut), mut);
   mutator_mark_buf_destroy(&mut->mark_buf);
   free(mut);
 }
 
-static void deactivate_mutator(struct heap *heap, struct mutator *mut) {
+static void deactivate_mutator(struct gc_heap *heap, struct gc_mutator *mut) {
   GC_ASSERT(mut->next == NULL);
   heap_lock(heap);
   mut->next = heap->deactivated_mutators;
@@ -2029,11 +2029,11 @@ static void deactivate_mutator(struct heap *heap, struct mutator *mut) {
   heap_unlock(heap);
 }
 
-static void reactivate_mutator(struct heap *heap, struct mutator *mut) {
+static void reactivate_mutator(struct gc_heap *heap, struct gc_mutator *mut) {
   heap_lock(heap);
   while (mutators_are_stopping(heap))
     pthread_cond_wait(&heap->mutator_cond, &heap->lock);
-  struct mutator **prev = &heap->deactivated_mutators;
+  struct gc_mutator **prev = &heap->deactivated_mutators;
   while (*prev != mut)
     prev = &(*prev)->next;
   *prev = mut->next;
@@ -2042,17 +2042,17 @@ static void reactivate_mutator(struct heap *heap, struct mutator *mut) {
   heap_unlock(heap);
 }
 
-void* gc_call_without_gc(struct mutator *mut,
-                                void* (*f)(void*),
-                                void *data) {
-  struct heap *heap = mutator_heap(mut);
+void* gc_call_without_gc(struct gc_mutator *mut,
+                         void* (*f)(void*),
+                         void *data) {
+  struct gc_heap *heap = mutator_heap(mut);
   deactivate_mutator(heap, mut);
   void *ret = f(data);
   reactivate_mutator(heap, mut);
   return ret;
 }
 
-void gc_print_stats(struct heap *heap) {
+void gc_print_stats(struct gc_heap *heap) {
   printf("Completed %ld collections (%ld major)\n",
          heap->count, heap->count - heap->minor_count);
   printf("Heap size with overhead is %zd (%zu slabs)\n",
