Author: mturk
Date: Wed Mar 17 15:16:13 2010
New Revision: 924329
URL: http://svn.apache.org/viewvc?rev=924329&view=rev
Log:
Add apr-util's rmm with a slightly more usable API
Added:
commons/sandbox/runtime/trunk/src/main/native/include/acr_rmm.h (with props)
commons/sandbox/runtime/trunk/src/main/native/shared/rmm.c (with props)
Modified:
commons/sandbox/runtime/trunk/src/main/native/Makefile.in
commons/sandbox/runtime/trunk/src/main/native/Makefile.msc.in
Modified: commons/sandbox/runtime/trunk/src/main/native/Makefile.in
URL: http://svn.apache.org/viewvc/commons/sandbox/runtime/trunk/src/main/native/Makefile.in?rev=924329&r1=924328&r2=924329&view=diff
==============================================================================
--- commons/sandbox/runtime/trunk/src/main/native/Makefile.in (original)
+++ commons/sandbox/runtime/trunk/src/main/native/Makefile.in Wed Mar 17 15:16:13 2010
@@ -115,6 +115,7 @@ COMMON_OBJS=\
$(SRCDIR)/shared/object.$(OBJ) \
$(SRCDIR)/shared/base64.$(OBJ) \
$(SRCDIR)/shared/md5.$(OBJ) \
+ $(SRCDIR)/shared/rmm.$(OBJ) \
$(SRCDIR)/shared/sha1.$(OBJ) \
$(SRCDIR)/shared/sha2.$(OBJ) \
$(SRCDIR)/shared/sbuf.$(OBJ) \
Modified: commons/sandbox/runtime/trunk/src/main/native/Makefile.msc.in
URL: http://svn.apache.org/viewvc/commons/sandbox/runtime/trunk/src/main/native/Makefile.msc.in?rev=924329&r1=924328&r2=924329&view=diff
==============================================================================
--- commons/sandbox/runtime/trunk/src/main/native/Makefile.msc.in (original)
+++ commons/sandbox/runtime/trunk/src/main/native/Makefile.msc.in Wed Mar 17 15:16:13 2010
@@ -99,6 +99,7 @@ COMMON_OBJS=\
$(SRCDIR)/shared/object.$(OBJ) \
$(SRCDIR)/shared/base64.$(OBJ) \
$(SRCDIR)/shared/md5.$(OBJ) \
+ $(SRCDIR)/shared/rmm.$(OBJ) \
$(SRCDIR)/shared/sha1.$(OBJ) \
$(SRCDIR)/shared/sha2.$(OBJ) \
$(SRCDIR)/shared/sbuf.$(OBJ) \
Added: commons/sandbox/runtime/trunk/src/main/native/include/acr_rmm.h
URL: http://svn.apache.org/viewvc/commons/sandbox/runtime/trunk/src/main/native/include/acr_rmm.h?rev=924329&view=auto
==============================================================================
--- commons/sandbox/runtime/trunk/src/main/native/include/acr_rmm.h (added)
+++ commons/sandbox/runtime/trunk/src/main/native/include/acr_rmm.h Wed Mar 17 15:16:13 2010
@@ -0,0 +1,114 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ACR_RMM_H
+#define ACR_RMM_H
+/**
+ * @file acr_rmm.h
+ * @brief ACR Relocatable Memory Management Routines
+ */
+/**
+ * @defgroup ACR_RMM Relocatable Memory Management Routines
+ * @{
+ */
+
+#include "acr.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/** Structure to access Relocatable, Managed Memory */
+typedef struct acr_rmm_t acr_rmm_t;
+
+/**
+ * Initialize a relocatable memory block to be managed by the acr_rmm API.
+ * @param rmm The relocatable memory block
+ * @param lock Locking flag; pass 0 if no locking is required.
+ * @param membuf The block of relocatable memory to be managed
+ * @param memsize The size of relocatable memory block to be managed
+ * @remark Both @c membuf and @c memsize must be aligned
+ * (for instance using ACR_ALIGN_DEFAULT).
+ */
+ACR_DECLARE(int) ACR_RmmInit(acr_rmm_t **rmm, int lock,
+ void *membuf, size_t memsize);
+
+/**
+ * Destroy a managed memory block.
+ * @param rmm The relocatable memory block to destroy
+ */
+ACR_DECLARE(int) ACR_RmmDestroy(acr_rmm_t *rmm);
+
+/**
+ * Attach to a relocatable memory block already managed by the acr_rmm API.
+ * @param rmm The relocatable memory block
+ * @param lock Locking flag; pass 0 if no locking is required
+ * @param membuf The block of relocatable memory already under management
+ */
+ACR_DECLARE(int) ACR_RmmAttach(acr_rmm_t **rmm, int lock,
+ void *membuf);
+
+/**
+ * Detach from the managed block of memory.
+ * @param rmm The relocatable memory block to detach from
+ */
+ACR_DECLARE(int) ACR_RmmDetach(acr_rmm_t *rmm);
+
+/**
+ * Allocate memory from the block of relocatable memory.
+ * @param rmm The relocatable memory block
+ * @param reqsize How much memory to allocate
+ */
+ACR_DECLARE(void *) ACR_RmmMalloc(acr_rmm_t *rmm,
+ size_t reqsize);
+
+/**
+ * Realloc memory from the block of relocatable memory.
+ * @param rmm The relocatable memory block
+ * @param entity The memory allocation to realloc
+ * @param reqsize The new size
+ */
+ACR_DECLARE(void *) ACR_RmmRealloc(acr_rmm_t *rmm,
+ void *entity,
+ size_t reqsize);
+
+/**
+ * Allocate memory from the block of relocatable memory and initialize it to zero.
+ * @param rmm The relocatable memory block
+ * @param reqsize How much memory to allocate
+ */
+ACR_DECLARE(void *) ACR_RmmCalloc(acr_rmm_t *rmm, size_t reqsize);
+
+/**
+ * Free an allocation returned by ACR_RmmMalloc or ACR_RmmCalloc.
+ * @param rmm The relocatable memory block
+ * @param entity The memory allocation to free
+ */
+ACR_DECLARE(int) ACR_RmmFree(acr_rmm_t *rmm, void *entity);
+
+/**
+ * Compute the required overallocation of memory needed to fit n allocs
+ * @param n The number of alloc/calloc regions desired
+ */
+ACR_DECLARE(size_t) ACR_RmmOverheadGet(int n);
+
+#ifdef __cplusplus
+}
+#endif
+/** @} */
+#endif /* ! ACR_RMM_H */
+
Propchange: commons/sandbox/runtime/trunk/src/main/native/include/acr_rmm.h
------------------------------------------------------------------------------
svn:eol-style = native
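
A minimal usage sketch of the API declared above, assuming the backing buffer is ordinary heap memory rather than a shared memory segment, that ACR_ALIGN_DEFAULT is visible via acr.h, and that passing 0 as the lock argument disables locking:

#include <stdlib.h>
#include "acr.h"
#include "acr_rmm.h"

static int rmm_example(void)
{
    /* Room for the header block plus bookkeeping for up to 16 allocations. */
    size_t size = ACR_ALIGN_DEFAULT(65536) + ACR_RmmOverheadGet(16);
    void  *buf  = malloc(size);     /* would normally be a shm segment */
    acr_rmm_t *rmm;
    char *p;

    if (buf == NULL)
        return -1;
    if (ACR_RmmInit(&rmm, 0, buf, size) != 0) {   /* 0: no locking */
        free(buf);
        return -1;
    }
    p = ACR_RmmMalloc(rmm, 128);
    if (p != NULL) {
        /* p points inside buf; a cooperating process attached to the same
         * segment would address it by its offset from the base pointer.
         */
        ACR_RmmFree(rmm, p);
    }
    ACR_RmmDestroy(rmm);
    free(buf);
    return 0;
}
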
Added: commons/sandbox/runtime/trunk/src/main/native/shared/rmm.c
URL: http://svn.apache.org/viewvc/commons/sandbox/runtime/trunk/src/main/native/shared/rmm.c?rev=924329&view=auto
==============================================================================
--- commons/sandbox/runtime/trunk/src/main/native/shared/rmm.c (added)
+++ commons/sandbox/runtime/trunk/src/main/native/shared/rmm.c Wed Mar 17 15:16:13 2010
@@ -0,0 +1,429 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "acr.h"
+#include "acr_private.h"
+#include "acr_arch.h"
+#include "acr_error.h"
+#include "acr_rmm.h"
+
+/* The RMM region is made up of two doubly-linked-list of blocks; the
+ * list of used blocks, and the list of free blocks (either list may
+ * be empty). The base pointer, rmm->base, points at the beginning of
+ * the shmem region in use. Each block is addressable by a
+ * ptrdiff_t value, which represents the offset from the base
+ * pointer. The term "address" is used here to mean such a value; an
+ * "offset from rmm->base".
+ *
+ * The RMM region contains exactly one "rmm_hdr_block_t" structure,
+ * the "header block", which is always stored at the base pointer.
+ * The firstused field in this structure is the address of the first
+ * block in the "used blocks" list; the firstfree field is the address
+ * of the first block in the "free blocks" list.
+ *
+ * Each block is prefixed by an "rmm_block_t" structure, followed by
+ * the caller-usable region represented by the block. The next and
+ * prev fields of the structure are zero if the block is at the end or
+ * beginning of the linked-list respectively, or otherwise hold the
+ * address of the next and previous blocks in the list. ("address 0",
+ * i.e. rmm->base is *not* a valid address for a block, since the
+ * header block is always stored at that address).
+ *
+ * At creation, the RMM region is initialized to hold a single block
+ * on the free list representing the entire available shm segment
+ * (minus header block); subsequent allocation and deallocation of
+ * blocks involves splitting blocks and coalescing adjacent blocks,
+ * and switching them between the free and used lists as
+ * appropriate.
+ */
+
+typedef struct rmm_block_t {
+ size_t size;
+ ptrdiff_t prev;
+ ptrdiff_t next;
+} rmm_block_t;
+
+/* Always stored at offset 0 from rmm->base:
+ */
+typedef struct rmm_hdr_block_t {
+ size_t abssize;
+ ptrdiff_t /* rmm_block_t */ firstused;
+ ptrdiff_t /* rmm_block_t */ firstfree;
+} rmm_hdr_block_t;
+
+#define RMM_HDR_BLOCK_SIZE (ACR_ALIGN_DEFAULT(sizeof(rmm_hdr_block_t)))
+#define RMM_BLOCK_SIZE (ACR_ALIGN_DEFAULT(sizeof(rmm_block_t)))
+#define RMM_ADDRESS(R, O) (void *)((char*)(R)->base + (O))
+#define RMM_OFFSET(R, A) (ptrdiff_t)((char*)(A) - (char*)(R)->base)
+
+struct acr_rmm_t {
+ rmm_hdr_block_t *base;
+ size_t size;
+ int lock;
+};
+
+static ptrdiff_t find_block_by_offset(acr_rmm_t *rmm, ptrdiff_t next,
+ ptrdiff_t find, int includes)
+{
+ ptrdiff_t prev = 0;
+
+ while (next) {
+ struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + next);
+
+ if (find == next)
+ return next;
+
+ /* Overshot? */
+ if (find < next)
+ return includes ? prev : 0;
+
+ prev = next;
+ next = blk->next;
+ }
+ return includes ? prev : 0;
+}
+
+static ptrdiff_t find_block_of_size(acr_rmm_t *rmm, size_t size)
+{
+ ptrdiff_t next = rmm->base->firstfree;
+ ptrdiff_t best = 0;
+ ptrdiff_t bestsize = 0;
+
+ while (next) {
+ struct rmm_block_t *blk = (rmm_block_t *)((char *)rmm->base + next);
+
+ if (blk->size == size)
+ return next;
+
+ if (blk->size >= size) {
+ /* XXX: sub optimal algorithm
+ * We need the most thorough best-fit logic, since we can
+ * never grow our rmm, we are SOL when we hit the wall.
+ */
+ if (!bestsize || (blk->size < bestsize)) {
+ bestsize = blk->size;
+ best = next;
+ }
+ }
+
+ next = blk->next;
+ }
+
+ if (bestsize > RMM_BLOCK_SIZE + size) {
+ struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + best);
+ struct rmm_block_t *new = (rmm_block_t*)((char*)rmm->base + best + size);
+
+ new->size = blk->size - size;
+ new->next = blk->next;
+ new->prev = best;
+
+ blk->size = size;
+ blk->next = best + size;
+
+ if (new->next) {
+ blk = (rmm_block_t*)((char*)rmm->base + new->next);
+ blk->prev = best + size;
+ }
+ }
+
+ return best;
+}
+
+static void move_block(acr_rmm_t *rmm, ptrdiff_t this, int free)
+{
+ struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + this);
+
+ /* close the gap */
+ if (blk->prev) {
+ struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
+ prev->next = blk->next;
+ }
+ else {
+ if (free) {
+ rmm->base->firstused = blk->next;
+ }
+ else {
+ rmm->base->firstfree = blk->next;
+ }
+ }
+ if (blk->next) {
+ struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
+ next->prev = blk->prev;
+ }
+
+ /* now find it in the other list, pushing it to the head if required */
+ if (free) {
+ blk->prev = find_block_by_offset(rmm, rmm->base->firstfree, this, 1);
+ if (!blk->prev) {
+ blk->next = rmm->base->firstfree;
+ rmm->base->firstfree = this;
+ }
+ }
+ else {
+ blk->prev = find_block_by_offset(rmm, rmm->base->firstused, this, 1);
+ if (!blk->prev) {
+ blk->next = rmm->base->firstused;
+ rmm->base->firstused = this;
+ }
+ }
+
+ /* and open it up */
+ if (blk->prev) {
+ struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
+ if (free && (blk->prev + prev->size == this)) {
+ /* Collapse us into our predecessor */
+ prev->size += blk->size;
+ this = blk->prev;
+ blk = prev;
+ }
+ else {
+ blk->next = prev->next;
+ prev->next = this;
+ }
+ }
+
+ if (blk->next) {
+ struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
+ if (free && (this + blk->size == blk->next)) {
+ /* Collapse us into our successor */
+ blk->size += next->size;
+ blk->next = next->next;
+ if (blk->next) {
+ next = (rmm_block_t*)((char*)rmm->base + blk->next);
+ next->prev = this;
+ }
+ }
+ else {
+ next->prev = this;
+ }
+ }
+}
+
+ACR_DECLARE(int) ACR_RmmInit(acr_rmm_t **rmm, int lock,
+ void *base, size_t size)
+{
+ rmm_block_t *blk;
+
+ if (!rmm)
+ return ACR_EINVAL;
+ (*rmm) = s_calloc(acr_rmm_t, 1);
+ if (!(*rmm))
+ return ACR_ENOMEM;
+ (*rmm)->base = base;
+ (*rmm)->size = size;
+ (*rmm)->lock = lock;
+
+ (*rmm)->base->abssize = size;
+ (*rmm)->base->firstused = 0;
+ (*rmm)->base->firstfree = RMM_HDR_BLOCK_SIZE;
+
+ blk = (rmm_block_t *)((char*)base + (*rmm)->base->firstfree);
+
+ blk->size = size - (*rmm)->base->firstfree;
+ blk->prev = 0;
+ blk->next = 0;
+
+ return 0;
+}
+
+ACR_DECLARE(int) ACR_RmmDestroy(acr_rmm_t *rmm)
+{
+ rmm_block_t *blk;
+
+ if (!rmm)
+ return ACR_EINVAL;
+ /* Blast it all --- no going back :) */
+ if (rmm->base->firstused) {
+ ptrdiff_t this = rmm->base->firstused;
+ do {
+ blk = (rmm_block_t *)((char*)rmm->base + this);
+ this = blk->next;
+ blk->next = blk->prev = 0;
+ } while (this);
+ rmm->base->firstused = 0;
+ }
+ if (rmm->base->firstfree) {
+ ptrdiff_t this = rmm->base->firstfree;
+ do {
+ blk = (rmm_block_t *)((char*)rmm->base + this);
+ this = blk->next;
+ blk->next = blk->prev = 0;
+ } while (this);
+ rmm->base->firstfree = 0;
+ }
+ rmm->base->abssize = 0;
+ rmm->size = 0;
+
+ free(rmm);
+
+ return 0;
+}
+
+ACR_DECLARE(int) ACR_RmmAttach(acr_rmm_t **rmm, int lock,
+ void *base)
+{
+
+ if (!base || !rmm)
+ return ACR_EINVAL;
+ (*rmm) = s_calloc(acr_rmm_t, 1);
+ if (!(*rmm))
+ return ACR_ENOMEM;
+ (*rmm)->base = base;
+ (*rmm)->size = (*rmm)->base->abssize;
+ (*rmm)->lock = lock;
+ return 0;
+}
+
+ACR_DECLARE(int) ACR_RmmDetach(acr_rmm_t *rmm)
+{
+ if (!rmm)
+ return ACR_EINVAL;
+ x_free(rmm);
+ return 0;
+}
+
+ACR_DECLARE(void *) ACR_RmmMalloc(acr_rmm_t *rmm, size_t reqsize)
+{
+ size_t size;
+ ptrdiff_t this;
+
+ size = ACR_ALIGN_DEFAULT(reqsize) + RMM_BLOCK_SIZE;
+ if (size < reqsize) {
+ ACR_SET_OS_ERROR(ACR_EINVAL);
+ return NULL;
+ }
+
+ this = find_block_of_size(rmm, size);
+ if (this) {
+ move_block(rmm, this, 0);
+ this += RMM_BLOCK_SIZE;
+ return RMM_ADDRESS(rmm, this);
+ }
+ else {
+ ACR_SET_OS_ERROR(ACR_ENOMEM);
+ return NULL;
+ }
+}
+
+ACR_DECLARE(void *) ACR_RmmCalloc(acr_rmm_t *rmm, size_t reqsize)
+{
+ size_t size;
+ ptrdiff_t this;
+
+ size = ACR_ALIGN_DEFAULT(reqsize) + RMM_BLOCK_SIZE;
+ if (size < reqsize) {
+ ACR_SET_OS_ERROR(ACR_EINVAL);
+ return NULL;
+ }
+
+ this = find_block_of_size(rmm, size);
+ if (this) {
+ move_block(rmm, this, 0);
+ this += RMM_BLOCK_SIZE;
+ memset((char*)rmm->base + this, 0, size - RMM_BLOCK_SIZE);
+ return RMM_ADDRESS(rmm, this);
+ }
+ else {
+ ACR_SET_OS_ERROR(ACR_ENOMEM);
+ return NULL;
+ }
+}
+
+ACR_DECLARE(void *) ACR_RmmRealloc(acr_rmm_t *rmm, void *entity,
+ size_t reqsize)
+{
+ void *this;
+ ptrdiff_t old;
+ struct rmm_block_t *blk;
+ size_t size, oldsize;
+
+ if (!entity) {
+ return ACR_RmmMalloc(rmm, reqsize);
+ }
+
+ size = ACR_ALIGN_DEFAULT(reqsize);
+ if (size < reqsize) {
+ ACR_SET_OS_ERROR(ACR_EINVAL);
+ return NULL;
+ }
+ old = RMM_OFFSET(rmm, entity);
+
+ if (!(this = ACR_RmmMalloc(rmm, size))) {
+ return NULL;
+ }
+
+ blk = (rmm_block_t *)((char *)rmm->base + old - RMM_BLOCK_SIZE);
+ oldsize = blk->size;
+
+ memcpy(this, RMM_ADDRESS(rmm, old), oldsize < size ? oldsize : size);
+
+ ACR_RmmFree(rmm, RMM_ADDRESS(rmm, old));
+ return this;
+}
+
+ACR_DECLARE(int) ACR_RmmFree(acr_rmm_t *rmm, void *entity)
+{
+ ptrdiff_t this;
+ struct rmm_block_t *blk;
+
+ if (!entity || !rmm)
+ return ACR_EINVAL;
+
+ this = RMM_OFFSET(rmm, entity);
+ /* A little sanity check is always healthy, especially here.
+ * If we really cared, we could make this compile-time
+ */
+ if (this < RMM_HDR_BLOCK_SIZE + RMM_BLOCK_SIZE) {
+ return ACR_EINVAL;
+ }
+
+ this -= RMM_BLOCK_SIZE;
+
+ blk = (rmm_block_t *)((char *)rmm->base + this);
+
+ if (blk->prev) {
+ struct rmm_block_t *prev = (rmm_block_t *)((char *)rmm->base + blk->prev);
+ if (prev->next != this) {
+ return ACR_EINVAL;
+ }
+ }
+ else {
+ if (rmm->base->firstused != this) {
+ return ACR_EINVAL;
+ }
+ }
+
+ if (blk->next) {
+ struct rmm_block_t *next = (rmm_block_t *)((char *)rmm->base + blk->next);
+ if (next->prev != this) {
+ return ACR_EINVAL;
+ }
+ }
+
+ /* Ok, it remained [apparently] sane, so unlink it
+ */
+ move_block(rmm, this, 1);
+ return 0;
+}
+
+ACR_DECLARE(size_t) ACR_RmmOverheadGet(int n)
+{
+ /* overhead per block is at most ACR_ALIGN_DEFAULT(1) wasted bytes
+ * for alignment overhead, plus the size of the rmm_block_t
+ * structure. */
+ return RMM_HDR_BLOCK_SIZE + n * (RMM_BLOCK_SIZE + ACR_ALIGN_DEFAULT(1));
+}
+
Propchange: commons/sandbox/runtime/trunk/src/main/native/shared/rmm.c
------------------------------------------------------------------------------
svn:eol-style = native
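
The relocatable design means cooperating processes exchange offsets from the segment base rather than raw pointers, since the segment may be mapped at a different address in each process. A sketch under that assumption; ACR_RmmMalloc, ACR_RmmAttach and ACR_RmmDetach come from the API above, while segment_base stands for whatever address the shared segment happens to be mapped at locally:

#include <stddef.h>
#include "acr.h"
#include "acr_rmm.h"

/* Process A: allocate inside the managed segment and publish the offset
 * of the allocation instead of a raw pointer.
 */
static ptrdiff_t publish(acr_rmm_t *rmm, void *segment_base, size_t len)
{
    void *p = ACR_RmmMalloc(rmm, len);
    /* Offset 0 is the header block and can never name an allocation. */
    return p ? (ptrdiff_t)((char *)p - (char *)segment_base) : 0;
}

/* Process B: attach to the segment it has mapped (possibly at another
 * address) and resolve the published offset against its own base.
 */
static void *resolve(void *segment_base, ptrdiff_t off)
{
    acr_rmm_t *rmm;
    void *p = NULL;

    if (off != 0 && ACR_RmmAttach(&rmm, 0, segment_base) == 0) {
        p = (char *)segment_base + off;
        ACR_RmmDetach(rmm);
    }
    return p;
}

Attaching is shown for completeness; resolving an offset is plain pointer arithmetic against the local base, and the detach only releases the local acr_rmm_t handle, not the segment itself.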