Hi,
Here's the modified patch (including the styling). I've also added
some comments in the code. Please review them and let me know your suggestions.
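
To make the intended use clearer, here's a rough sketch of how I expect the
new calls to fit together (error checking omitted; "pool" stands for an
existing apr_pool_t, and the size and filename are just placeholders):

    apr_shmem_t *shm;
    apr_size_t avail;
    char *buf;

    /* pool: an existing apr_pool_t; size/filename chosen for illustration */
    apr_shm_init(&shm, 65536, "/tmp/apr.shm", pool);

    buf = apr_shm_malloc(shm, 100);       /* request is rounded up by ROUND_UP() */
    buf = apr_shm_realloc(shm, buf, 400); /* data is copied into a larger chunk */
    apr_shm_avail(shm, &avail);           /* bytes still available in the segment */
    apr_shm_free(shm, buf);               /* chunk goes back onto the free list */

    apr_shm_destroy(shm);

The bookkeeping (a memoffsets_t header followed by an array of memchunk_t
elements) lives in a separate area - a second shmget() segment in the SysV
case - and the used and free chunks are kept on two circular doubly-linked
lists (c_used / c_free) threaded through array indices.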
Thanks
-Madhu
Index: shmem.c
===================================================================
RCS file: /home/cvspublic/apr/shmem/unix/shmem.c,v
retrieving revision 1.33
diff -u -r1.33 shmem.c
--- shmem.c 2001/08/30 17:11:04 1.33
+++ shmem.c 2001/09/18 20:20:03
@@ -89,32 +89,297 @@
#include <kernel/OS.h>
#endif
+#define MIN_BLK_SIZE 256
+
+typedef apr_int32_t index_t;
+typedef struct memoffsets_t {
+ index_t l_total; /* Total number of list elements currently in use */
+ index_t c_used; /* Index to start of the used chunk list */
+ index_t c_free; /* Index to start of the freed chunk list */
+ apr_off_t shm_offset; /* Offset of the next unallocated byte of shared memory */
+ apr_size_t shm_length; /* Bytes of shared memory still available */
+} memoffsets_t;
+
+typedef struct memchunk_t {
+ apr_off_t offset; /* Offset of the memory - from m->mem */
+ apr_size_t size; /* Size of the chunk */
+ index_t next; /* Index of Next chunk in the list */
+ index_t prev; /* Index of Previous chunk in the list*/
+} memchunk_t;
+
struct shmem_t {
- void *mem;
- void *curmem;
+ apr_pool_t *p;
+ void *mem; /* Starting address of the shared memory */
+ memoffsets_t *offsets; /* Beginning of the offsets block */
+ memchunk_t *list; /* Beginning of the list elements */
apr_size_t length;
apr_lock_t *lock;
char *filename;
#if APR_USE_SHMEM_MMAP_TMP || APR_USE_SHMEM_MMAP_SHM || APR_USE_SHMEM_MMAP_ZERO
apr_file_t *file;
-#elif APR_USE_SHMEM_MMAP_ANON
- /* Nothing else. */
#elif APR_USE_SHMEM_SHMGET
apr_os_file_t file;
+ apr_os_file_t listfd;
#elif APR_USE_SHMEM_BEOS
area_id areaid;
#endif
};
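+
+/*
+ * SHM_ADDR maps a stored offset back to an address within the data segment,
+ * LIST_INDEX maps a memchunk_t pointer back to its index in the list array,
+ * MAX_LIST_ELEM is the number of list elements available for a segment, and
+ * ROUND_UP pads small requests up to MIN_BLK_SIZE and aligns larger ones to
+ * a multiple of sizeof(void *).
+ */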
+#define SHM_ADDR(m,offset) (((offset) < 0) ? NULL : \
+ (void *)((unsigned char *)(m)->mem + (offset)))
+
+#define LIST_INDEX(m,ptr) (((ptr) == NULL) ? -1 : \
+ (((unsigned char *)(ptr) - (unsigned char *)(m)->list)/sizeof(memchunk_t)))
+
+#define MAX_LIST_ELEM(m) (((m)->length / MIN_BLK_SIZE) + 1)
+
+#define ROUND_UP(size) (((size) < MIN_BLK_SIZE) ? MIN_BLK_SIZE : \
+ ((1 + (((size) - 1) / sizeof (void *))) * sizeof (void *)))
+
+/*
+ * Find the insert position in a sorted list of elements. The sorting is
+ * based on the value of the list[idx].offset.
+ * Input : Beginning index of the sorted list (first), elem to be inserted.
+ * Return : The index before which the elem is to be inserted.
+ */
+static index_t posn_in_sorted_list(apr_shmem_t *m, index_t first, index_t elem)
+{
+ index_t idx = first;
+
+ if (idx < 0) return -1;
+
+ do {
+ if (m->list[idx].offset > m->list[elem].offset)
+ break;
+ idx = m->list[idx].next;
+ } while ((idx >= 0) && (idx < MAX_LIST_ELEM(m)) && (idx != first));
+ return idx;
+}
+
+/*
+ * Adds a list element (elem) to the list pointed to by *first. If *first
+ * points to the freelist, it finds the appropriate insert position;
+ * otherwise it appends elem to the end of the list.
+ * Ex. Adds a list element to the usedlist / freelist.
+ */
+static void add_list(apr_shmem_t *m, index_t *first, index_t elem)
+{
+ index_t prev, idx;
+
+ if (*first == m->offsets->c_free)
+ idx = posn_in_sorted_list(m, *first, elem);
+ else
+ idx = *first;
+
+ if (idx == -1) {
+ idx = *first = elem;
+ prev = elem;
+ }
+ else
+ prev = m->list[idx].prev;
+
+ m->list[idx].prev = elem;
+ m->list[prev].next = elem;
+ m->list[elem].prev = prev;
+ m->list[elem].next = idx;
+
+ if (idx == m->offsets->c_free)
+ *first = elem;
+}
+
+/*
+ * Removes the elem from the list pointed to by *first.
+ * Ex. Removes a list element from the freelist so that it can be added to the usedlist.
+ */
+static void remove_list(apr_shmem_t *m, index_t *first, index_t elem)
+{
+ index_t prev, next;
+
+ next = m->list[elem].next;
+ prev = m->list[elem].prev;
+ m->list[prev].next = next;
+ m->list[next].prev = prev;
+ if (next == elem)
+ *first = -1;
+ else if (*first == elem)
+ *first = next;
+}
+
+/*
+ * Frees a list element. This is useful during garbage collection. If 2 list
+ * elements can be merged (to form a bigger chunk), the 2nd list element has
+ * to be freed. The freed list element is made available at the end of the
+ * list. (No need to maintain a separate list of available list elements)
+ */
+static void free_list(apr_shmem_t *m, index_t elem)
+{
+ index_t idx;
+ if (elem >= 0) {
+ idx = m->offsets->l_total - 1;
+ memcpy(&m->list[elem], &m->list[idx], sizeof(memchunk_t));
+ /* The element that lived at idx has moved to elem; repoint its
+ * neighbours (and, if needed, the list heads) at elem. */
+ m->list[m->list[elem].prev].next = elem;
+ m->list[m->list[elem].next].prev = elem;
+ if (m->offsets->c_free == idx)
+ m->offsets->c_free = elem;
+ if (m->offsets->c_used == idx)
+ m->offsets->c_used = elem;
+ m->offsets->l_total--;
+ }
+}
+
+/*
+ * Splits a memory chunk into two. The second mem chunk is allocated a list
+ * element, and filled with updated memory offsets / size.
+ */
+static index_t split_chunk(apr_shmem_t *m, index_t elem, apr_size_t size)
+{
+ index_t nelem = m->offsets->l_total;
+ if (nelem >= MAX_LIST_ELEM(m))
+ return -1;
+
+ m->list[nelem].size = m->list[elem].size - size;
+ m->list[elem].size = size;
+ m->list[nelem].offset = m->list[elem].offset + size;
+ add_list(m, &m->offsets->c_free, nelem);
+ m->offsets->l_total++;
+
+ return nelem;
+}
+
+/*
+ * Finds the list element for a given memory chunk.
+ */
+static index_t find_chunk_by_addr(apr_shmem_t *m, index_t first, void *addr)
+{
+ index_t idx = first;
+
+ if (idx < 0) return -1;
+
+ do {
+ if (SHM_ADDR(m, m->list[idx].offset) == addr)
+ return idx;
+ } while ((idx >= 0) && ((idx = m->list[idx].next) != first));
+
+ return -1;
+}
+
+/*
+ * Finds the list element that best satisfies the memory requirement. If the
+ * best fit is larger than needed by more than MIN_BLK_SIZE, it is split in two.
+ */
+static index_t find_chunk_by_size(apr_shmem_t *m,
+ index_t first, apr_size_t size)
+{
+ apr_size_t diff = -1; /* (apr_size_t)-1 means no candidate found yet */
+ index_t idx = first, found = -1;
+
+ if (idx < 0) return -1;
+
+ do {
+ if (m->list[idx].size == size)
+ return idx;
+ if (m->list[idx].size > size) {
+ if ((diff == -1) || ((m->list[idx].size - size) < diff)) {
+ diff = m->list[idx].size - size;
+ found = idx;
+ }
+ }
+ } while ((idx >= 0) && (idx = m->list[idx].next) != first);
+
+ /* split_chunk() links the remainder back into the free list itself */
+ if ((found != -1) && (diff > MIN_BLK_SIZE))
+ split_chunk(m, found, size);
+
+ return found;
+}
+
+/*
+ * Allocates a memory chunk. This also requires a list element to be allocated.
+ * It first checks the freelist for a suitable chunk. If none is available,
+ * it carves a new chunk out of the unused region and allocates a new list
+ * element. The new element is then added to the c_used list.
+ */
+static memchunk_t *alloc_chunk(apr_shmem_t *m, apr_size_t size)
+{
+ index_t idx;
+
+ size = ROUND_UP(size);
+
+ if (m->offsets->shm_length < size)
+ return NULL;
+
+ idx = find_chunk_by_size(m, m->offsets->c_free, size);
+ if (idx != -1)
+ remove_list(m, &m->offsets->c_free, idx);
+ else {
+ idx = m->offsets->l_total;
+ if ((idx >= MAX_LIST_ELEM(m)) ||
+ ((m->offsets->shm_offset + size) > m->length))
+ return NULL;
+
+ m->list[idx].offset = m->offsets->shm_offset;
+ m->list[idx].size = size;
+ m->offsets->shm_offset += m->list[idx].size;
+ m->offsets->l_total++;
+ }
+
+ m->offsets->shm_length -= m->list[idx].size;
+ add_list(m, &m->offsets->c_used, idx);
+
+ return (&m->list[idx]);
+}
+
+/*
+ * Frees the memory chunk pointed to by entity. It removes the corresponding
+ * list element from the c_used list and inserts it into the c_free list.
+ */
+static void free_chunk(apr_shmem_t *m, void *entity)
+{
+ index_t idx;
+
+ if (entity == NULL)
+ return;
+
+ idx = find_chunk_by_addr(m, m->offsets->c_used, entity);
+ if (idx != -1) {
+ m->offsets->shm_length += m->list[idx].size;
+ remove_list(m, &m->offsets->c_used, idx);
+ add_list(m, &m->offsets->c_free, idx);
+ }
+}
+
+/*
+ * Reallocates (resizes) the memory pointed to by entity. It is not a full
+ * replacement for realloc(): if entity is not found on the used list, no
+ * new memory of the requested size is allocated.
+ */
+static memchunk_t *realloc_chunk(apr_shmem_t *m, void *entity, apr_size_t size)
+{
+ index_t idx;
+ memchunk_t *new_b;
+
+ size = ROUND_UP(size);
+
+ idx = find_chunk_by_addr(m, m->offsets->c_used, entity);
+ if (idx != -1) {
+ if (m->list[idx].size > size)
+ split_chunk(m, idx, size);
+ else if ((m->list[idx].size < size) &&
+ (size < m->offsets->shm_length)) {
+ new_b = alloc_chunk(m, size);
+ if (new_b == NULL)
+ return NULL;
+ memcpy(SHM_ADDR(m, new_b->offset),
+ SHM_ADDR(m, m->list[idx].offset), m->list[idx].size);
+ free_chunk(m, entity);
+ idx = LIST_INDEX(m, new_b);
+ }
+ }
+ return ((idx >= 0) ? &m->list[idx] : NULL);
+}
+
APR_DECLARE(apr_status_t) apr_shm_init(apr_shmem_t **m, apr_size_t reqsize,
const char *filename, apr_pool_t *pool)
{
apr_shmem_t *new_m;
void *mem;
+ int i, listsize, listelem;
#if APR_USE_SHMEM_SHMGET
struct shmid_ds shmbuf;
apr_uid_t uid;
apr_gid_t gid;
+ void *addr;
#endif
#if APR_USE_SHMEM_MMAP_TMP || APR_USE_SHMEM_MMAP_SHM || APR_USE_SHMEM_MMAP_ZERO
apr_status_t status;
@@ -124,10 +389,13 @@
int tmpfd;
#endif
- new_m = apr_palloc(pool, sizeof(apr_shmem_t));
+ new_m = (apr_shmem_t *)apr_palloc(pool, sizeof(apr_shmem_t));
if (!new_m)
return APR_ENOMEM;
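+ /* Bookkeeping: one memoffsets_t header followed by enough memchunk_t
+ * elements to describe reqsize bytes carved into MIN_BLK_SIZE chunks. */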
+ listelem = (reqsize / MIN_BLK_SIZE) + 1;
+ listsize = listelem * sizeof(memchunk_t) + sizeof(memoffsets_t);
+
/* These implementations are very similar except for opening the file. */
#if APR_USE_SHMEM_MMAP_TMP || APR_USE_SHMEM_MMAP_SHM || APR_USE_SHMEM_MMAP_ZERO
/* FIXME: Ignore error for now. *
@@ -180,7 +448,18 @@
new_m->file = tmpfd;
mem = shmat(new_m->file, NULL, 0);
+ if (mem == (void *)-1)
+ return errno;
+
+ tmpfd = shmget(IPC_PRIVATE, listsize, (SHM_R|SHM_W|IPC_CREAT));
+ if (tmpfd == -1)
+ return errno;
+ new_m->listfd = tmpfd;
+ addr = shmat(new_m->listfd, NULL, 0);
+ if (addr == (void *)-1)
+ return errno;
+
/* FIXME: Handle errors. */
if (shmctl(new_m->file, IPC_STAT, &shmbuf) == -1)
return errno;
@@ -192,8 +471,7 @@
if (shmctl(new_m->file, IPC_SET, &shmbuf) == -1)
return errno;
- /* remove in future (once use count hits zero) */
- if (shmctl(new_m->file, IPC_RMID, NULL) == -1)
+ if (shmctl(new_m->listfd, IPC_SET, &shmbuf) == -1)
return errno;
#elif APR_USE_SHMEM_BEOS
@@ -205,8 +483,35 @@
#endif
+ if (addr) {
+ new_m->offsets = (memoffsets_t *)addr;
+ new_m->list = (memchunk_t *)((char *)addr + sizeof(memoffsets_t));
+ }
+
+ memset(new_m->offsets, 0, listsize);
+ for (i = 0; i < listelem; i++) {
+ new_m->list[i].prev = -1;
+ new_m->list[i].next = -1;
+ }
+
+ /*
+ * Initially there is a single free element (c_free = 0, l_total = 1).
+ * The size of this free element is the total size requested.
+ * The c_used list is empty (c_used = -1).
+ */
+ new_m->list[0].offset = 0;
+ new_m->list[0].size = reqsize;
+ new_m->list[0].next = 0;
+ new_m->list[0].prev = 0;
+
+ new_m->offsets->l_total = 1;
+ new_m->offsets->c_free = 0;
+ new_m->offsets->c_used = -1;
+ new_m->offsets->shm_offset = 0;
+ new_m->offsets->shm_length = reqsize;
+
+ new_m->p = pool;
new_m->mem = mem;
- new_m->curmem = mem;
new_m->length = reqsize;
apr_lock_create(&new_m->lock, APR_MUTEX, APR_CROSS_PROCESS, NULL, pool);
@@ -219,6 +524,15 @@
APR_DECLARE(apr_status_t) apr_shm_destroy(apr_shmem_t *m)
{
+#if APR_USE_SHMEM_SHMGET
+ struct shmid_ds shmbuf;
+ apr_uid_t uid;
+ apr_gid_t gid;
+#endif
+
+ if (!m)
+ return APR_SUCCESS;
+
#if APR_USE_SHMEM_MMAP_TMP || APR_USE_SHMEM_MMAP_SHM || APR_USE_SHMEM_MMAP_ZERO
munmap(m->mem, m->length);
apr_file_close(m->file);
@@ -226,41 +540,81 @@
munmap(m->mem, m->length);
#elif APR_USE_SHMEM_SHMGET
shmdt(m->mem);
+ apr_current_userid(&uid, &gid, m->p);
+ shmbuf.shm_perm.uid = uid;
+ shmbuf.shm_perm.gid = gid;
+
+ if (shmctl(m->file, IPC_RMID, &shmbuf) == -1)
+ return errno;
+
+ shmdt(m->list);
+ if (shmctl(m->listfd, IPC_RMID, &shmbuf) == -1)
+ return errno;
+
#elif APR_USE_SHMEM_BEOS
- delete_area(new_m->area_id);
+ delete_area(m->areaid);
#endif
+ m->offsets = NULL;
+ m->mem = NULL;
+
return APR_SUCCESS;
}
APR_DECLARE(void *) apr_shm_malloc(apr_shmem_t *m, apr_size_t reqsize)
{
- void *new;
- new = NULL;
+ memchunk_t *b = NULL;
- apr_lock_acquire(m->lock);
- /* Do we have enough space? */
- if (((char *)m->curmem - (char *)m->mem + reqsize) <= m->length)
+ if (m)
{
- new = m->curmem;
- m->curmem = (char *)m->curmem + reqsize;
+ apr_lock_acquire(m->lock);
+ b = alloc_chunk(m, reqsize);
+ apr_lock_release(m->lock);
}
- apr_lock_release(m->lock);
- return new;
+
+ return ((b) ? SHM_ADDR(m, b->offset) : NULL);
}
APR_DECLARE(void *) apr_shm_calloc(apr_shmem_t *m, apr_size_t reqsize)
{
- void *new = apr_shm_malloc(m, reqsize);
- if (new)
- memset(new, '\0', reqsize);
- return new;
+ memchunk_t *b = NULL;
+
+ if (m)
+ {
+ apr_lock_acquire(m->lock);
+ b = alloc_chunk(m, reqsize);
+ if (b != NULL)
+ memset(SHM_ADDR(m, b->offset), 0, reqsize);
+ apr_lock_release(m->lock);
+ }
+ return ((b) ? SHM_ADDR(m, b->offset) : NULL);
+}
+
+APR_DECLARE(void *) apr_shm_realloc(apr_shmem_t *m, void *p, apr_size_t reqsize)
+{
+ memchunk_t *b = NULL;
+
+ if (m)
+ {
+ apr_lock_acquire(m->lock);
+ if (p != NULL)
+ b = realloc_chunk(m, p, reqsize);
+ else
+ b = alloc_chunk(m, reqsize);
+ apr_lock_release(m->lock);
+ }
+
+ return ((b) ? SHM_ADDR(m, b->offset) : NULL);
}
-APR_DECLARE(apr_status_t) apr_shm_free(apr_shmem_t *shared, void *entity)
+APR_DECLARE(apr_status_t) apr_shm_free(apr_shmem_t *m, void *entity)
{
- /* Without a memory management scheme within our shared memory, it
- * is impossible to implement free. */
+ if (m)
+ {
+ apr_lock_acquire(m->lock);
+ free_chunk(m, entity);
+ apr_lock_release(m->lock);
+ }
return APR_SUCCESS;
}
@@ -315,16 +669,16 @@
APR_DECLARE(apr_status_t) apr_shm_avail(apr_shmem_t *m, apr_size_t *size)
{
- apr_status_t status;
+ apr_status_t status = APR_ENOSHMAVAIL;
- status = APR_ENOSHMAVAIL;
-
- apr_lock_acquire(m->lock);
+ if (m)
+ {
+ apr_lock_acquire(m->lock);
+ if ((*size = m->offsets->shm_length) > 0)
+ status = APR_SUCCESS;
- *size = m->length - ((char *)m->curmem - (char *)m->mem);
- if (*size)
- status = APR_SUCCESS;
+ apr_lock_release(m->lock);
+ }
- apr_lock_release(m->lock);
return status;
}