Changeset: e1a8bc23f11c for MonetDB
URL: https://dev.monetdb.org/hg/MonetDB/rev/e1a8bc23f11c
Modified Files:
gdk/gdk_utils.c
Branch: resource_management
Log Message:
Just some renaming: use ma prefix instead of sa prefix internally.
diffs (truncated from 470 to 300 lines):
diff --git a/gdk/gdk_utils.c b/gdk/gdk_utils.c
--- a/gdk/gdk_utils.c
+++ b/gdk/gdk_utils.c
@@ -1682,12 +1682,12 @@ GDKprintinforegister(void (*func)(void))
#define DEBUG_SPACE 16
#endif
-#define SA_NUM_BLOCKS 64
-#define SA_BLOCK_SIZE (128*1024)
-#define SA_HEADER_SIZE (2*(sizeof(size_t)))
+#define MA_NUM_BLOCKS 64
+#define MA_BLOCK_SIZE (128*1024)
+#define MA_HEADER_SIZE (2*(sizeof(size_t)))
#define CANARY_VALUE ((size_t)0xDEADBEEFDEADBEEF)
#define round16(sz) ((sz+15)&~15)
-#define round_block_size(sz) ((sz + (SA_BLOCK_SIZE - 1))&~(SA_BLOCK_SIZE - 1))
+#define round_block_size(sz) ((sz + (MA_BLOCK_SIZE - 1))&~(MA_BLOCK_SIZE - 1))
#define COND_LOCK_ALLOCATOR(a) \
bool __alloc_locked = false; \
@@ -1709,7 +1709,7 @@ typedef struct freed_t {
static inline size_t
-sa_get_blk_idx(allocator *sa, void *blk, size_t offset)
+ma_get_blk_idx(allocator *sa, void *blk, size_t offset)
{
size_t i;
for(i = offset; i < sa->nr; i++) {
@@ -1728,7 +1728,7 @@ sa_get_blk_idx(allocator *sa, void *blk,
static void
-sa_free_obj(allocator *sa, void *obj, size_t sz)
+ma_free_obj(allocator *sa, void *obj, size_t sz)
{
//size_t i;
@@ -1738,7 +1738,7 @@ sa_free_obj(allocator *sa, void *obj, si
//// find the block this object belongs to
//for(i = 0; i < sa->nr; i++) {
// char * blk_start = (char *) sa->blks[i];
- // char * blk_end = blk_start + SA_BLOCK_SIZE;
+ // char * blk_end = blk_start + MA_BLOCK_SIZE;
// if ((obj_start >= blk_start) && (obj_end <= blk_end))
// break;
//}
@@ -1752,17 +1752,17 @@ sa_free_obj(allocator *sa, void *obj, si
}
/*
- * Put regular blks of size SA_BLOCK_SIZE on freelist_blks
+ * Put regular blks of size MA_BLOCK_SIZE on freelist_blks
* all others are GDKfree
*/
static void
-sa_free_blk_memory(allocator *sa, void *blk)
+ma_free_blk_memory(allocator *sa, void *blk)
{
if (!sa->pa) {
// all blks are GDKmalloc
size_t sz = GDKmallocated(blk) - (MALLOC_EXTRA_SPACE +
DEBUG_SPACE);
assert(sz > 0);
- if (sz == SA_BLOCK_SIZE) {
+ if (sz == MA_BLOCK_SIZE) {
freed_t *f = blk;
f->sz = sz;
f->n = sa->freelist_blks;
@@ -1776,14 +1776,14 @@ sa_free_blk_memory(allocator *sa, void *
static void
-sa_free_blk(allocator *sa, void *blk)
+ma_free_blk(allocator *sa, void *blk)
{
- size_t i = sa_get_blk_idx(sa, blk, 0);
+ size_t i = ma_get_blk_idx(sa, blk, 0);
if (i < sa->nr) {
if (sa->pa)
- sa_free_blk(sa->pa, blk);
+ ma_free_blk(sa->pa, blk);
else
- sa_free_blk_memory(sa, blk);
+ ma_free_blk_memory(sa, blk);
// compact
for (; i < sa->nr-1; i++)
sa->blks[i] = sa->blks[i+1];
@@ -1796,7 +1796,7 @@ sa_free_blk(allocator *sa, void *blk)
* Return first slot that will fit the size
*/
static void *
-sa_use_freed_obj(allocator *sa, size_t sz)
+ma_use_freed_obj(allocator *sa, size_t sz)
{
freed_t *prev = NULL;
int cntr = 0;
@@ -1824,20 +1824,20 @@ sa_use_freed_obj(allocator *sa, size_t s
return NULL;
}
-static int sa_double_num_blks(allocator *sa);
+static int ma_double_num_blks(allocator *sa);
/*
* Free blocks are maintained at top level
*/
static void *
-sa_use_freed_blk(allocator *sa, size_t sz)
+ma_use_freed_blk(allocator *sa, size_t sz)
{
if (sa->pa)
- return sa_use_freed_blk(sa->pa, sz);
+ return ma_use_freed_blk(sa->pa, sz);
COND_LOCK_ALLOCATOR(sa);
- if (sa->freelist_blks && (sz == SA_BLOCK_SIZE)) {
- if (sa->nr >= sa->size && sa_double_num_blks(sa) < 0) {
+ if (sa->freelist_blks && (sz == MA_BLOCK_SIZE)) {
+ if (sa->nr >= sa->size && ma_double_num_blks(sa) < 0) {
COND_UNLOCK_ALLOCATOR(sa);
if (sa->eb.enabled)
eb_error(&sa->eb, "out of memory", 1000);
@@ -1845,8 +1845,8 @@ sa_use_freed_blk(allocator *sa, size_t s
}
freed_t *f = sa->freelist_blks;
sa->freelist_blks = f->n;
- sa->blk_size = SA_BLOCK_SIZE;
- sa->used = SA_BLOCK_SIZE;
+ sa->blk_size = MA_BLOCK_SIZE;
+ sa->used = MA_BLOCK_SIZE;
sa->blks[sa->nr] = (char*)f;
sa->nr ++;
sa->free_blk_hits += 1;
@@ -1859,40 +1859,40 @@ sa_use_freed_blk(allocator *sa, size_t s
static void *
-sa_use_freed(allocator *sa, size_t sz)
+ma_use_freed(allocator *sa, size_t sz)
{
- if (sz < SA_BLOCK_SIZE) {
- return sa_use_freed_obj(sa, sz);
+ if (sz < MA_BLOCK_SIZE) {
+ return ma_use_freed_obj(sa, sz);
}
- if (sz == SA_BLOCK_SIZE) {
- return sa_use_freed_blk(sa, sz);
+ if (sz == MA_BLOCK_SIZE) {
+ return ma_use_freed_blk(sa, sz);
}
return NULL;
}
static inline bool
-sa_reallocated(allocator *sa)
+ma_reallocated(allocator *sa)
{
return sa->blks != (char **)sa->first_blk;
}
static inline bool
-sa_has_dependencies(allocator *sa)
+ma_has_dependencies(allocator *sa)
{
return (sa->refcount > 0) && !sa->pa;
}
static inline void
-_sa_free_blks(allocator *sa, size_t start_idx)
+_ma_free_blks(allocator *sa, size_t start_idx)
{
for (size_t i = start_idx; i < sa->nr; i++) {
char *blk = sa->blks[i];
if (blk) {
if (sa->pa) {
- sa_free_blk(sa->pa, blk);
+ ma_free_blk(sa->pa, blk);
} else {
- sa_free_blk_memory(sa, blk);
+ ma_free_blk_memory(sa, blk);
}
}
}
@@ -1903,23 +1903,24 @@ static inline void
/*
* Reset allocator to initial state
*/
-allocator *ma_reset(allocator *sa)
+allocator *
+ma_reset(allocator *sa)
{
COND_LOCK_ALLOCATOR(sa);
- assert(!sa_has_dependencies(sa));
- if (sa_has_dependencies(sa)) {
+ assert(!ma_has_dependencies(sa));
+ if (ma_has_dependencies(sa)) {
if (sa->eb.enabled)
eb_error(&sa->eb, "reset failed, allocator has dependencies", 1000);
return sa;
}
// 1st block is where we live, free the rest
- _sa_free_blks(sa, 1);
+ _ma_free_blks(sa, 1);
// compute start offset
- size_t offset = round16(sizeof(char*) * SA_NUM_BLOCKS) +
+ size_t offset = round16(sizeof(char*) * MA_NUM_BLOCKS) +
round16(sizeof(allocator));
// If reallocated, we need to restore original layout
- if (sa_reallocated(sa)) {
+ if (ma_reallocated(sa)) {
char **old_blks = sa->blks;
sa->blks = (char **)sa->first_blk;
if (!sa->pa) {
@@ -1928,7 +1929,7 @@ allocator *ma_reset(allocator *sa)
}
}
- sa->size = SA_NUM_BLOCKS;
+ sa->size = MA_NUM_BLOCKS;
sa->blks[0] = sa->first_blk;
sa->used = offset;
sa->frees = 0;
@@ -1936,8 +1937,8 @@ allocator *ma_reset(allocator *sa)
// reset freelist only i.e. leave freelist_blks alone as
// it may have blocks we can re-use
sa->freelist = NULL;
- sa->usedmem = SA_BLOCK_SIZE;
- sa->blk_size = SA_BLOCK_SIZE;
+ sa->usedmem = MA_BLOCK_SIZE;
+ sa->blk_size = MA_BLOCK_SIZE;
sa->objects = 0;
sa->inuse = 0;
sa->tmp_used = 0;
@@ -1945,7 +1946,7 @@ allocator *ma_reset(allocator *sa)
return sa;
}
-static void * _sa_alloc_internal(allocator* sa, size_t sz);
+static void * _ma_alloc_internal(allocator* sa, size_t sz);
#undef ma_realloc
void *
@@ -1956,35 +1957,35 @@ ma_realloc(allocator *sa, void *p, size_
if (r)
memcpy(r, p, oldsz);
if (oldsz >= sa->blk_size && !ma_tmp_active(sa)) {
- char* ptr = (char *) p - SA_HEADER_SIZE;
+ char* ptr = (char *) p - MA_HEADER_SIZE;
COND_LOCK_ALLOCATOR(sa);
- sa_free_blk(sa, ptr);
+ ma_free_blk(sa, ptr);
COND_UNLOCK_ALLOCATOR(sa);
}
return r;
}
static char *
-sa_fill_in_header(char *r, size_t sz)
+ma_fill_in_header(char *r, size_t sz)
{
if (r) {
// store size first
*((size_t *) r) = sz;
// store canary value to help us detect double free
*((size_t *) r + 1) = CANARY_VALUE;
- r += SA_HEADER_SIZE;
+ r += MA_HEADER_SIZE;
}
return r;
}
static int
-sa_double_num_blks(allocator *sa)
+ma_double_num_blks(allocator *sa)
{
char **tmp;
size_t osz = sa->size;
sa->size *=2;
if (sa->pa)
- tmp = (char**)_sa_alloc_internal(sa->pa, sizeof(char*) * sa->size);
+ tmp = (char**)_ma_alloc_internal(sa->pa, sizeof(char*) * sa->size);
else {
size_t bytes = sizeof(char*) * sa->size;
tmp = GDKmalloc(bytes);
@@ -1993,7 +1994,7 @@ sa_double_num_blks(allocator *sa)
if (tmp) {
size_t bytes = sizeof(char*) * osz;
memcpy(tmp, sa->blks, bytes);
- if (!sa->pa && sa_reallocated(sa)) {
+ if (!sa->pa && ma_reallocated(sa)) {
GDKfree(sa->blks);
sa->usedmem -= bytes;
}
@@ -2007,22 +2008,22 @@ sa_double_num_blks(allocator *sa)
static void *
-_sa_alloc_internal(allocator *sa, size_t sz)
_______________________________________________
checkin-list mailing list -- [email protected]
To unsubscribe send an email to [email protected]