wingo pushed a commit to branch wip-whippet
in repository guile.

commit 2199d5f48d487ce92b8d4fae7fdfa8031caa9f0a
Author: Andy Wingo <wi...@igalia.com>
AuthorDate: Tue Aug 16 23:21:16 2022 +0200

    Excise struct gcobj
---
 parallel-tracer.h | 82 ++++++++++++++++++++++++++-----------------------------
 serial-tracer.h   | 51 +++++++++++++++++-----------------
 whippet.c         | 65 ++++++++++++++++++++-----------------------
 3 files changed, 93 insertions(+), 105 deletions(-)
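
The patch replaces raw "struct gcobj *" pointers throughout the tracers
and the collector core with the "struct gc_ref" value type from
gc-ref.h.  For reference, a minimal sketch of that API as the patch
assumes it (the real header also carries assertions and further
helpers, so details may differ):

    /* A gc_ref wraps an object address in a one-word struct: same
       machine representation as a pointer, but a distinct C type. */
    struct gc_ref { uintptr_t value; };

    static inline struct gc_ref gc_ref(uintptr_t value) {
      return (struct gc_ref){ value };
    }
    static inline uintptr_t gc_ref_value(struct gc_ref ref) {
      return ref.value;
    }
    /* The null reference replaces the old NULL-pointer sentinel. */
    static inline struct gc_ref gc_ref_null(void) {
      return gc_ref(0);
    }
    static inline int gc_ref_is_heap_object(struct gc_ref ref) {
      return ref.value != 0;
    }
    /* Conversions to and from untyped heap-object pointers. */
    static inline struct gc_ref gc_ref_from_heap_object(void *obj) {
      return gc_ref((uintptr_t) obj);
    }
    static inline void* gc_ref_heap_object(struct gc_ref ref) {
      return (void*) gc_ref_value(ref);
    }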

diff --git a/parallel-tracer.h b/parallel-tracer.h
index 180f8d09b..467ad1bf4 100644
--- a/parallel-tracer.h
+++ b/parallel-tracer.h
@@ -18,12 +18,10 @@
 // for Weak Memory Models" (LĂȘ et al, PPoPP'13)
 // (http://www.di.ens.fr/%7Ezappa/readings/ppopp13.pdf).
 
-struct gcobj;
-
 struct trace_buf {
   unsigned log_size;
   size_t size;
-  struct gcobj **data;
+  uintptr_t *data;
 };
 
 // Min size: 8 kB on 64-bit systems, 4 kB on 32-bit.
@@ -35,7 +33,7 @@ static int
 trace_buf_init(struct trace_buf *buf, unsigned log_size) {
   ASSERT(log_size >= trace_buf_min_log_size);
   ASSERT(log_size <= trace_buf_max_log_size);
-  size_t size = (1 << log_size) * sizeof(struct gcobj *);
+  size_t size = (1 << log_size) * sizeof(uintptr_t);
   void *mem = mmap(NULL, size, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
   if (mem == MAP_FAILED) {
@@ -56,7 +54,7 @@ trace_buf_size(struct trace_buf *buf) {
 
 static inline size_t
 trace_buf_byte_size(struct trace_buf *buf) {
-  return trace_buf_size(buf) * sizeof(struct gcobj *);
+  return trace_buf_size(buf) * sizeof(uintptr_t);
 }
 
 static void
@@ -75,16 +73,16 @@ trace_buf_destroy(struct trace_buf *buf) {
   }
 }
 
-static inline struct gcobj *
+static inline struct gc_ref
 trace_buf_get(struct trace_buf *buf, size_t i) {
-  return atomic_load_explicit(&buf->data[i & (buf->size - 1)],
-                              memory_order_relaxed);
+  return gc_ref(atomic_load_explicit(&buf->data[i & (buf->size - 1)],
+                                     memory_order_relaxed));
 }
 
 static inline void
-trace_buf_put(struct trace_buf *buf, size_t i, struct gcobj * o) {
+trace_buf_put(struct trace_buf *buf, size_t i, struct gc_ref ref) {
   return atomic_store_explicit(&buf->data[i & (buf->size - 1)],
-                               o,
+                               gc_ref_value(ref),
                                memory_order_relaxed);
 }
 
@@ -158,7 +156,7 @@ trace_deque_grow(struct trace_deque *q, int cur, size_t b, size_t t) {
 }
 
 static void
-trace_deque_push(struct trace_deque *q, struct gcobj * x) {
+trace_deque_push(struct trace_deque *q, struct gc_ref x) {
   size_t b = LOAD_RELAXED(&q->bottom);
   size_t t = LOAD_ACQUIRE(&q->top);
   int active = LOAD_RELAXED(&q->active);
@@ -172,7 +170,7 @@ trace_deque_push(struct trace_deque *q, struct gcobj * x) {
 }
 
 static void
-trace_deque_push_many(struct trace_deque *q, struct gcobj **objv, size_t count) {
+trace_deque_push_many(struct trace_deque *q, struct gc_ref *objv, size_t count) {
   size_t b = LOAD_RELAXED(&q->bottom);
   size_t t = LOAD_ACQUIRE(&q->top);
   int active = LOAD_RELAXED(&q->active);
@@ -186,7 +184,7 @@ trace_deque_push_many(struct trace_deque *q, struct gcobj **objv, size_t count)
   STORE_RELAXED(&q->bottom, b + count);
 }
 
-static struct gcobj *
+static struct gc_ref
 trace_deque_try_pop(struct trace_deque *q) {
   size_t b = LOAD_RELAXED(&q->bottom);
   b = b - 1;
@@ -194,7 +192,7 @@ trace_deque_try_pop(struct trace_deque *q) {
   STORE_RELAXED(&q->bottom, b);
   atomic_thread_fence(memory_order_seq_cst);
   size_t t = LOAD_RELAXED(&q->top);
-  struct gcobj * x;
+  struct gc_ref x;
   if (t <= b) { // Non-empty queue.
     x = trace_buf_get(&q->bufs[active], b);
     if (t == b) { // Single last element in queue.
@@ -202,32 +200,32 @@ trace_deque_try_pop(struct trace_deque *q) {
                                                    memory_order_seq_cst,
                                                    memory_order_relaxed))
         // Failed race.
-        x = NULL;
+        x = gc_ref_null();
       STORE_RELAXED(&q->bottom, b + 1);
     }
   } else { // Empty queue.
-    x = NULL;
+    x = gc_ref_null();
     STORE_RELAXED(&q->bottom, b + 1);
   }
   return x;
 }
 
-static struct gcobj *
+static struct gc_ref
 trace_deque_steal(struct trace_deque *q) {
   while (1) {
     size_t t = LOAD_ACQUIRE(&q->top);
     atomic_thread_fence(memory_order_seq_cst);
     size_t b = LOAD_ACQUIRE(&q->bottom);
     if (t >= b)
-      return NULL;
+      return gc_ref_null();
     int active = LOAD_CONSUME(&q->active);
-    struct gcobj *x = x = trace_buf_get(&q->bufs[active], t);
+    struct gc_ref ref = trace_buf_get(&q->bufs[active], t);
     if (!atomic_compare_exchange_strong_explicit(&q->top, &t, t + 1,
                                                  memory_order_seq_cst,
                                                  memory_order_relaxed))
       // Failed race.
       continue;
-    return x;
+    return ref;
   }
 }
 
@@ -251,7 +249,7 @@ trace_deque_can_steal(struct trace_deque *q) {
 struct local_trace_queue {
   size_t read;
   size_t write;
-  struct gcobj * data[LOCAL_TRACE_QUEUE_SIZE];
+  struct gc_ref data[LOCAL_TRACE_QUEUE_SIZE];
 };
 
 static inline void
@@ -275,10 +273,10 @@ local_trace_queue_full(struct local_trace_queue *q) {
   return local_trace_queue_size(q) >= LOCAL_TRACE_QUEUE_SIZE;
 }
 static inline void
-local_trace_queue_push(struct local_trace_queue *q, struct gcobj * v) {
+local_trace_queue_push(struct local_trace_queue *q, struct gc_ref v) {
   q->data[q->write++ & LOCAL_TRACE_QUEUE_MASK] = v;
 }
-static inline struct gcobj *
+static inline struct gc_ref
 local_trace_queue_pop(struct local_trace_queue *q) {
   return q->data[q->read++ & LOCAL_TRACE_QUEUE_MASK];
 }
@@ -447,7 +445,6 @@ static void tracer_release(struct gc_heap *heap) {
     trace_deque_release(&tracer->workers[i].deque);
 }
 
-struct gcobj;
 static inline void tracer_visit(struct gc_edge edge, void *trace_data) GC_ALWAYS_INLINE;
 static inline void trace_one(struct gc_ref ref, void *trace_data) GC_ALWAYS_INLINE;
 static inline int trace_edge(struct gc_heap *heap,
@@ -466,12 +463,11 @@ tracer_visit(struct gc_edge edge, void *trace_data) {
   if (trace_edge(trace->heap, edge)) {
     if (local_trace_queue_full(&trace->local))
       tracer_share(trace);
-    local_trace_queue_push(&trace->local,
-                           gc_ref_heap_object(gc_edge_ref(edge)));
+    local_trace_queue_push(&trace->local, gc_edge_ref(edge));
   }
 }
 
-static struct gcobj *
+static struct gc_ref
 tracer_steal_from_worker(struct tracer *tracer, size_t id) {
   ASSERT(id < tracer->worker_count);
   return trace_deque_steal(&tracer->workers[id].deque);
@@ -483,21 +479,21 @@ tracer_can_steal_from_worker(struct tracer *tracer, size_t id) {
   return trace_deque_can_steal(&tracer->workers[id].deque);
 }
 
-static struct gcobj *
+static struct gc_ref
 trace_worker_steal_from_any(struct trace_worker *worker, struct tracer *tracer) {
   size_t steal_id = worker->steal_id;
   for (size_t i = 0; i < tracer->worker_count; i++) {
     steal_id = (steal_id + 1) % tracer->worker_count;
     DEBUG("tracer #%zu: stealing from #%zu\n", worker->id, steal_id);
-    struct gcobj * obj = tracer_steal_from_worker(tracer, steal_id);
-    if (obj) {
+    struct gc_ref obj = tracer_steal_from_worker(tracer, steal_id);
+    if (gc_ref_is_heap_object(obj)) {
       DEBUG("tracer #%zu: stealing got %p\n", worker->id, obj);
       worker->steal_id = steal_id;
       return obj;
     }
   }
   DEBUG("tracer #%zu: failed to steal\n", worker->id);
-  return 0;
+  return gc_ref_null();
 }
 
 static int
@@ -544,19 +540,19 @@ trace_worker_check_termination(struct trace_worker *worker,
   }
 }
 
-static struct gcobj *
+static struct gc_ref
 trace_worker_steal(struct local_tracer *trace) {
   struct tracer *tracer = heap_tracer(trace->heap);
   struct trace_worker *worker = trace->worker;
 
   while (1) {
     DEBUG("tracer #%zu: trying to steal\n", worker->id);
-    struct gcobj *obj = trace_worker_steal_from_any(worker, tracer);
-    if (obj)
+    struct gc_ref obj = trace_worker_steal_from_any(worker, tracer);
+    if (gc_ref_is_heap_object(obj))
       return obj;
 
     if (trace_worker_check_termination(worker, tracer))
-      return NULL;
+      return gc_ref_null();
   }
 }
 
@@ -571,15 +567,15 @@ trace_worker_trace(struct trace_worker *worker) {
   size_t n = 0;
   DEBUG("tracer #%zu: running trace loop\n", worker->id);
   while (1) {
-    void *obj;
+    struct gc_ref ref;
     if (!local_trace_queue_empty(&trace.local)) {
-      obj = local_trace_queue_pop(&trace.local);
+      ref = local_trace_queue_pop(&trace.local);
     } else {
-      obj = trace_worker_steal(&trace);
-      if (!obj)
+      ref = trace_worker_steal(&trace);
+      if (!gc_ref_is_heap_object(ref))
         break;
     }
-    trace_one(gc_ref_from_heap_object(obj), &trace);
+    trace_one(ref, &trace);
     n++;
   }
   DEBUG("tracer #%zu: done tracing, %zu objects traced\n", worker->id, n);
@@ -588,13 +584,13 @@ trace_worker_trace(struct trace_worker *worker) {
 }
 
 static inline void
-tracer_enqueue_root(struct tracer *tracer, struct gcobj *obj) {
+tracer_enqueue_root(struct tracer *tracer, struct gc_ref ref) {
   struct trace_deque *worker0_deque = &tracer->workers[0].deque;
-  trace_deque_push(worker0_deque, obj);
+  trace_deque_push(worker0_deque, ref);
 }
 
 static inline void
-tracer_enqueue_roots(struct tracer *tracer, struct gcobj **objv,
+tracer_enqueue_roots(struct tracer *tracer, struct gc_ref *objv,
                      size_t count) {
   struct trace_deque *worker0_deque = &tracer->workers[0].deque;
   trace_deque_push_many(worker0_deque, objv, count);
diff --git a/serial-tracer.h b/serial-tracer.h
index c2202d841..b4194c160 100644
--- a/serial-tracer.h
+++ b/serial-tracer.h
@@ -8,22 +8,20 @@
 #include "debug.h"
 #include "gc-api.h"
 
-struct gcobj;
-
 struct trace_queue {
   size_t size;
   size_t read;
   size_t write;
-  struct gcobj **buf;
+  struct gc_ref *buf;
 };
 
 static const size_t trace_queue_max_size =
-  (1ULL << (sizeof(struct gcobj *) * 8 - 1)) / sizeof(struct gcobj *);
+  (1ULL << (sizeof(struct gc_ref) * 8 - 1)) / sizeof(struct gc_ref);
 static const size_t trace_queue_release_byte_threshold = 1 * 1024 * 1024;
 
-static struct gcobj **
+static struct gc_ref *
 trace_queue_alloc(size_t size) {
-  void *mem = mmap(NULL, size * sizeof(struct gcobj *), PROT_READ|PROT_WRITE,
+  void *mem = mmap(NULL, size * sizeof(struct gc_ref), PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
   if (mem == MAP_FAILED) {
     perror("Failed to grow trace queue");
@@ -35,20 +33,20 @@ trace_queue_alloc(size_t size) {
 
 static int
 trace_queue_init(struct trace_queue *q) {
-  q->size = getpagesize() / sizeof(struct gcobj *);
+  q->size = getpagesize() / sizeof(struct gc_ref);
   q->read = 0;
   q->write = 0;
   q->buf = trace_queue_alloc(q->size);
   return !!q->buf;
 }
   
-static inline struct gcobj *
+static inline struct gc_ref
 trace_queue_get(struct trace_queue *q, size_t idx) {
   return q->buf[idx & (q->size - 1)];
 }
 
 static inline void
-trace_queue_put(struct trace_queue *q, size_t idx, struct gcobj *x) {
+trace_queue_put(struct trace_queue *q, size_t idx, struct gc_ref x) {
   q->buf[idx & (q->size - 1)] = x;
 }
 
@@ -57,14 +55,14 @@ static int trace_queue_grow(struct trace_queue *q) GC_NEVER_INLINE;
 static int
 trace_queue_grow(struct trace_queue *q) {
   size_t old_size = q->size;
-  struct gcobj **old_buf = q->buf;
+  struct gc_ref *old_buf = q->buf;
   if (old_size >= trace_queue_max_size) {
     DEBUG("trace queue already at max size of %zu bytes", old_size);
     return 0;
   }
 
   size_t new_size = old_size * 2;
-  struct gcobj **new_buf = trace_queue_alloc(new_size);
+  struct gc_ref *new_buf = trace_queue_alloc(new_size);
   if (!new_buf)
     return 0;
 
@@ -74,7 +72,7 @@ trace_queue_grow(struct trace_queue *q) {
   for (size_t i = q->read; i < q->write; i++)
     new_buf[i & new_mask] = old_buf[i & old_mask];
 
-  munmap(old_buf, old_size * sizeof(struct gcobj *));
+  munmap(old_buf, old_size * sizeof(struct gc_ref));
 
   q->size = new_size;
   q->buf = new_buf;
@@ -82,7 +80,7 @@ trace_queue_grow(struct trace_queue *q) {
 }
   
 static inline void
-trace_queue_push(struct trace_queue *q, struct gcobj *p) {
+trace_queue_push(struct trace_queue *q, struct gc_ref p) {
   if (UNLIKELY(q->write - q->read == q->size)) {
     if (!trace_queue_grow(q))
       GC_CRASH();
@@ -91,7 +89,7 @@ trace_queue_push(struct trace_queue *q, struct gcobj *p) {
 }
 
 static inline void
-trace_queue_push_many(struct trace_queue *q, struct gcobj **pv, size_t count) {
+trace_queue_push_many(struct trace_queue *q, struct gc_ref *pv, size_t count) {
   while (q->size - (q->write - q->read) < count) {
     if (!trace_queue_grow(q))
       GC_CRASH();
@@ -100,16 +98,16 @@ trace_queue_push_many(struct trace_queue *q, struct gcobj **pv, size_t count) {
     trace_queue_put(q, q->write++, pv[i]);
 }
 
-static inline struct gcobj*
+static inline struct gc_ref
 trace_queue_pop(struct trace_queue *q) {
   if (UNLIKELY(q->read == q->write))
-    return NULL;
+    return gc_ref_null();
   return trace_queue_get(q, q->read++);
 }
 
 static void
 trace_queue_release(struct trace_queue *q) {
-  size_t byte_size = q->size * sizeof(struct gcobj *);
+  size_t byte_size = q->size * sizeof(struct gc_ref);
   if (byte_size >= trace_queue_release_byte_threshold)
     madvise(q->buf, byte_size, MADV_DONTNEED);
   q->read = q->write = 0;
@@ -117,7 +115,7 @@ trace_queue_release(struct trace_queue *q) {
 
 static void
 trace_queue_destroy(struct trace_queue *q) {
-  size_t byte_size = q->size * sizeof(struct gcobj *);
+  size_t byte_size = q->size * sizeof(struct gc_ref);
   munmap(q->buf, byte_size);
 }
 
@@ -137,18 +135,17 @@ static void tracer_release(struct gc_heap *heap) {
   trace_queue_release(&heap_tracer(heap)->queue);
 }
 
-struct gcobj;
 static inline void tracer_visit(struct gc_edge edge, void *trace_data) GC_ALWAYS_INLINE;
 static inline void trace_one(struct gc_ref ref, void *trace_data) GC_ALWAYS_INLINE;
 static inline int trace_edge(struct gc_heap *heap,
                              struct gc_edge edge) GC_ALWAYS_INLINE;
 
 static inline void
-tracer_enqueue_root(struct tracer *tracer, struct gcobj *obj) {
+tracer_enqueue_root(struct tracer *tracer, struct gc_ref obj) {
   trace_queue_push(&tracer->queue, obj);
 }
 static inline void
-tracer_enqueue_roots(struct tracer *tracer, struct gcobj **objs,
+tracer_enqueue_roots(struct tracer *tracer, struct gc_ref *objs,
                      size_t count) {
   trace_queue_push_many(&tracer->queue, objs, count);
 }
@@ -156,14 +153,16 @@ static inline void
 tracer_visit(struct gc_edge edge, void *trace_data) {
   struct gc_heap *heap = trace_data;
   if (trace_edge(heap, edge))
-    tracer_enqueue_root(heap_tracer(heap),
-                        gc_ref_heap_object(gc_edge_ref(edge)));
+    tracer_enqueue_root(heap_tracer(heap), gc_edge_ref(edge));
 }
 static inline void
 tracer_trace(struct gc_heap *heap) {
-  struct gcobj *obj;
-  while ((obj = trace_queue_pop(&heap_tracer(heap)->queue)))
-    trace_one(gc_ref_from_heap_object(obj), heap);
+  do {
+    struct gc_ref obj = trace_queue_pop(&heap_tracer(heap)->queue);
+    if (!gc_ref_is_heap_object(obj))
+      break;
+    trace_one(obj, heap);
+  } while (1);
 }
 
 #endif // SERIAL_TRACER_H
diff --git a/whippet.c b/whippet.c
index 13ac6c260..ab951c306 100644
--- a/whippet.c
+++ b/whippet.c
@@ -182,13 +182,16 @@ static struct slab *object_slab(void *obj) {
   return (struct slab*) base;
 }
 
-static uint8_t *object_metadata_byte(void *obj) {
-  uintptr_t addr = (uintptr_t) obj;
+static uint8_t *metadata_byte_for_addr(uintptr_t addr) {
   uintptr_t base = addr & ~(SLAB_SIZE - 1);
   uintptr_t granule = (addr & (SLAB_SIZE - 1)) >> GRANULE_SIZE_LOG_2;
   return (uint8_t*) (base + granule);
 }
 
+static uint8_t *metadata_byte_for_object(struct gc_ref ref) {
+  return metadata_byte_for_addr(gc_ref_value(ref));
+}
+
 #define GRANULES_PER_BLOCK (BLOCK_SIZE / GRANULE_SIZE)
 #define GRANULES_PER_REMSET_BYTE (GRANULES_PER_BLOCK / REMSET_BYTES_PER_BLOCK)
 
@@ -258,8 +261,6 @@ static inline size_t size_to_granules(size_t size) {
   return (size + GRANULE_SIZE - 1) >> GRANULE_SIZE_LOG_2;
 }
 
-struct gcobj;
-
 struct evacuation_allocator {
   size_t allocated; // atomically
   size_t limit;
@@ -329,7 +330,7 @@ struct gc_heap {
 struct gc_mutator_mark_buf {
   size_t size;
   size_t capacity;
-  struct gcobj **objects;
+  struct gc_ref *objects;
 };
 
 struct gc_mutator {
@@ -366,10 +367,6 @@ static inline void clear_memory(uintptr_t addr, size_t size) {
 
 static void collect(struct gc_mutator *mut) GC_NEVER_INLINE;
 
-static inline uint8_t* mark_byte(struct mark_space *space, struct gcobj *obj) {
-  return object_metadata_byte(obj);
-}
-
 static size_t mark_space_live_object_granules(uint8_t *metadata) {
   size_t n = 0;
   while ((metadata[n] & METADATA_BYTE_END) == 0)
@@ -379,8 +376,7 @@ static size_t mark_space_live_object_granules(uint8_t *metadata) {
 
 static inline int mark_space_mark_object(struct mark_space *space,
                                          struct gc_ref ref) {
-  struct gcobj *obj = gc_ref_heap_object(ref);
-  uint8_t *loc = object_metadata_byte(obj);
+  uint8_t *loc = metadata_byte_for_object(ref);
   uint8_t byte = *loc;
   if (byte & space->marked_mask)
     return 0;
@@ -414,7 +410,7 @@ static void clear_remaining_metadata_bytes_in_block(uintptr_t block,
   uintptr_t limit = block + BLOCK_SIZE;
   uintptr_t granules = (limit - base) >> GRANULE_SIZE_LOG_2;
   GC_ASSERT(granules <= GRANULES_PER_BLOCK);
-  memset(object_metadata_byte((void*)base), 0, granules);
+  memset(metadata_byte_for_addr(base), 0, granules);
 }
 
 static void finish_evacuation_allocator_block(uintptr_t block,
@@ -524,7 +520,7 @@ static struct gc_ref evacuation_allocate(struct mark_space *space,
 static inline int mark_space_evacuate_or_mark_object(struct mark_space *space,
                                                      struct gc_edge edge,
                                                      struct gc_ref old_ref) {
-  uint8_t *metadata = object_metadata_byte(gc_ref_heap_object(old_ref));
+  uint8_t *metadata = metadata_byte_for_object(old_ref);
   uint8_t byte = *metadata;
   if (byte & space->marked_mask)
     return 0;
@@ -557,7 +553,7 @@ static inline int mark_space_evacuate_or_mark_object(struct mark_space *space,
         gc_atomic_forward_commit(&fwd, new_ref);
         // Now update extent metadata, and indicate to the caller that
         // the object's fields need to be traced.
-        uint8_t *new_metadata = object_metadata_byte(gc_ref_heap_object(new_ref));
+        uint8_t *new_metadata = metadata_byte_for_object(new_ref);
         memcpy(new_metadata + 1, metadata + 1, object_granules - 1);
         gc_edge_update(edge, new_ref);
         metadata = new_metadata;
@@ -809,10 +805,10 @@ static void heap_reset_large_object_pages(struct gc_heap *heap, size_t npages) {
 
 static void mutator_mark_buf_grow(struct gc_mutator_mark_buf *buf) {
   size_t old_capacity = buf->capacity;
-  size_t old_bytes = old_capacity * sizeof(struct gcobj*);
+  size_t old_bytes = old_capacity * sizeof(struct gc_ref);
 
   size_t new_bytes = old_bytes ? old_bytes * 2 : getpagesize();
-  size_t new_capacity = new_bytes / sizeof(struct gcobj*);
+  size_t new_capacity = new_bytes / sizeof(struct gc_ref);
 
   void *mem = mmap(NULL, new_bytes, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
@@ -829,21 +825,21 @@ static void mutator_mark_buf_grow(struct gc_mutator_mark_buf *buf) {
 }
 
 static void mutator_mark_buf_push(struct gc_mutator_mark_buf *buf,
-                                  struct gcobj *val) {
+                                  struct gc_ref ref) {
   if (GC_UNLIKELY(buf->size == buf->capacity))
     mutator_mark_buf_grow(buf);
-  buf->objects[buf->size++] = val;
+  buf->objects[buf->size++] = ref;
 }
 
 static void mutator_mark_buf_release(struct gc_mutator_mark_buf *buf) {
-  size_t bytes = buf->size * sizeof(struct gcobj*);
+  size_t bytes = buf->size * sizeof(struct gc_ref);
   if (bytes >= getpagesize())
     madvise(buf->objects, align_up(bytes, getpagesize()), MADV_DONTNEED);
   buf->size = 0;
 }
 
 static void mutator_mark_buf_destroy(struct gc_mutator_mark_buf *buf) {
-  size_t bytes = buf->capacity * sizeof(struct gcobj*);
+  size_t bytes = buf->capacity * sizeof(struct gc_ref);
   if (bytes)
     munmap(buf->objects, bytes);
 }
@@ -898,15 +894,13 @@ void gc_heap_set_roots(struct gc_heap *heap, struct gc_heap_roots *roots) {
 static void trace_and_enqueue_locally(struct gc_edge edge, void *data) {
   struct gc_mutator *mut = data;
   if (trace_edge(mutator_heap(mut), edge))
-    mutator_mark_buf_push(&mut->mark_buf,
-                          gc_ref_heap_object(gc_edge_ref(edge)));
+    mutator_mark_buf_push(&mut->mark_buf, gc_edge_ref(edge));
 }
 
 static void trace_and_enqueue_globally(struct gc_edge edge, void *data) {
   struct gc_heap *heap = data;
   if (trace_edge(heap, edge))
-    tracer_enqueue_root(&heap->tracer,
-                        gc_ref_heap_object(gc_edge_ref(edge)));
+    tracer_enqueue_root(&heap->tracer, gc_edge_ref(edge));
 }
 
 // Mark the roots of a mutator that is stopping for GC.  We can't
@@ -951,8 +945,8 @@ static void trace_mutator_roots_after_stop(struct gc_heap *heap) {
   int active_mutators_already_marked = heap_should_mark_while_stopping(heap);
   while (mut) {
     if (active_mutators_already_marked)
-      tracer_enqueue_roots(&heap->tracer,
-                           mut->mark_buf.objects, mut->mark_buf.size);
+      tracer_enqueue_roots(&heap->tracer, mut->mark_buf.objects,
+                           mut->mark_buf.size);
     else
       trace_mutator_roots_with_lock(mut);
     struct gc_mutator *next = mut->next;
@@ -1009,9 +1003,8 @@ static void mark_space_trace_card(struct mark_space *space,
       mark_bytes &= ~(((uint64_t)0xff) << (granule_offset * 8));
       size_t granule = granule_base + granule_offset;
       uintptr_t addr = first_addr_in_slab + granule * GRANULE_SIZE;
-      struct gcobj *obj = (struct gcobj*)addr;
-      GC_ASSERT(object_metadata_byte(obj) == &slab->metadata[granule]);
-      tracer_enqueue_root(&heap->tracer, obj);
+      GC_ASSERT(metadata_byte_for_addr(addr) == &slab->metadata[granule]);
+      tracer_enqueue_root(&heap->tracer, gc_ref(addr));
     }
   }
 }
@@ -1528,7 +1521,7 @@ static size_t next_hole_in_block(struct gc_mutator *mut) {
 
   while (sweep != limit) {
     GC_ASSERT((sweep & (GRANULE_SIZE - 1)) == 0);
-    uint8_t* metadata = object_metadata_byte((struct gcobj*)sweep);
+    uint8_t* metadata = metadata_byte_for_addr(sweep);
     size_t limit_granules = (limit - sweep) >> GRANULE_SIZE_LOG_2;
 
     // Except for when we first get a block, mut->sweep is positioned
@@ -1574,7 +1567,7 @@ static void finish_hole(struct gc_mutator *mut) {
     struct block_summary *summary = block_summary_for_addr(mut->block);
     summary->holes_with_fragmentation++;
     summary->fragmentation_granules += granules;
-    uint8_t *metadata = object_metadata_byte((void*)mut->alloc);
+    uint8_t *metadata = metadata_byte_for_addr(mut->alloc);
     memset(metadata, 0, granules);
     mut->alloc = mut->sweep;
   }
@@ -1766,10 +1759,10 @@ void* gc_allocate_small(struct gc_mutator *mut, size_t size) {
   uintptr_t alloc = mut->alloc;
   uintptr_t sweep = mut->sweep;
   uintptr_t new_alloc = alloc + size;
-  struct gcobj *obj;
+  struct gc_ref ret;
   if (new_alloc <= sweep) {
     mut->alloc = new_alloc;
-    obj = (struct gcobj *)alloc;
+    ret = gc_ref(alloc);
   } else {
     size_t granules = size >> GRANULE_SIZE_LOG_2;
     while (1) {
@@ -1781,11 +1774,11 @@ void* gc_allocate_small(struct gc_mutator *mut, size_t size) {
       if (!hole)
         trigger_collection(mut);
     }
-    obj = (struct gcobj*)mut->alloc;
+    ret = gc_ref(mut->alloc);
     mut->alloc += size;
   }
-  gc_update_alloc_table(mut, gc_ref_from_heap_object(obj), size);
-  return obj;
+  gc_update_alloc_table(mut, ret, size);
+  return gc_ref_heap_object(ret);
 }
 
 void* gc_allocate_pointerless(struct gc_mutator *mut, size_t size) {
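
Since a gc_ref is a single word passed by value, the change should be
essentially free at runtime on common ABIs, while making it a
compile-time error to confuse references with untyped pointers.  The
calling convention it imposes shows up all over the diff: the NULL
sentinel becomes gc_ref_null(), and emptiness checks become explicit.
Illustrative only, not part of the patch:

    /* Before: raw object pointers, NULL as the empty sentinel. */
    struct gcobj *obj = trace_deque_try_pop(&q);
    if (!obj)
      return;

    /* After: gc_ref values, gc_ref_null() as the sentinel. */
    struct gc_ref ref = trace_deque_try_pop(&q);
    if (!gc_ref_is_heap_object(ref))
      return;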
