Github user jsoref commented on a diff in the pull request:
https://github.com/apache/cordova-plugin-globalization/pull/18#discussion_r15603044
--- Diff: src/blackberry10/native/public/json_batchallocator.h ---
@@ -0,0 +1,125 @@
+#ifndef JSONCPP_BATCHALLOCATOR_H_INCLUDED
+# define JSONCPP_BATCHALLOCATOR_H_INCLUDED
+
+# include <stdlib.h>
+# include <assert.h>
+
+# ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
+
+namespace Json {
+
+/* Fast memory allocator.
+ *
+ * This memory allocator allocates memory for a batch of object (specified by
+ * the page size, the number of object in each page).
+ *
+ * It does not allow the destruction of a single object. All the allocated objects
+ * can be destroyed at once. The memory can be either released or reused for future
+ * allocation.
+ *
+ * The in-place new operator must be used to construct the object using the pointer
+ * returned by allocate.
+ */
+template<typename AllocatedType
+ ,const unsigned int objectPerAllocation>
+class BatchAllocator
+{
+public:
+ typedef AllocatedType Type;
+
+ BatchAllocator( unsigned int objectsPerPage = 255 )
+ : freeHead_( 0 )
+ , objectsPerPage_( objectsPerPage )
+ {
+// printf( "Size: %d => %s\n", sizeof(AllocatedType), typeid(AllocatedType).name() );
+ assert( sizeof(AllocatedType) * objectPerAllocation >= sizeof(AllocatedType *) ); // We must be able to store a slist in the object free space.
+ assert( objectsPerPage >= 16 );
+ batches_ = allocateBatch( 0 ); // allocated a dummy page
+ currentBatch_ = batches_;
+ }
+
+ ~BatchAllocator()
+ {
+ for ( BatchInfo *batch = batches_; batch; )
+ {
+ BatchInfo *nextBatch = batch->next_;
+ free( batch );
+ batch = nextBatch;
+ }
+ }
+
+ /// allocate space for an array of objectPerAllocation object.
+ /// @warning it is the responsability of the caller to call objects constructors.
+ AllocatedType *allocate()
+ {
+ if ( freeHead_ ) // returns node from free list.
+ {
+ AllocatedType *object = freeHead_;
+ freeHead_ = *(AllocatedType **)object;
+ return object;
+ }
+ if ( currentBatch_->used_ == currentBatch_->end_ )
+ {
+ currentBatch_ = currentBatch_->next_;
+ while ( currentBatch_ && currentBatch_->used_ == currentBatch_->end_ )
+ currentBatch_ = currentBatch_->next_;
+
+ if ( !currentBatch_ ) // no free batch found, allocate a new one
+ {
+ currentBatch_ = allocateBatch( objectsPerPage_ );
+ currentBatch_->next_ = batches_; // insert at the head of the list
+ batches_ = currentBatch_;
+ }
+ }
+ AllocatedType *allocated = currentBatch_->used_;
+ currentBatch_->used_ += objectPerAllocation;
+ return allocated;
+ }
+
+ /// Release the object.
+ /// @warning it is the responsability of the caller to actually destruct the object.
+ void release( AllocatedType *object )
+ {
+ assert( object != 0 );
+ *(AllocatedType **)object = freeHead_;
+ freeHead_ = object;
+ }
+
+private:
+ struct BatchInfo
+ {
+ BatchInfo *next_;
+ AllocatedType *used_;
+ AllocatedType *end_;
+ AllocatedType buffer_[objectPerAllocation];
+ };
+
+ // disabled copy constructor and assignement operator.
+ BatchAllocator( const BatchAllocator & );
+ void operator =( const BatchAllocator &);
+
+ static BatchInfo *allocateBatch( unsigned int objectsPerPage )
+ {
+ const unsigned int mallocSize = sizeof(BatchInfo) - sizeof(AllocatedType)* objectPerAllocation
+                                 + sizeof(AllocatedType) * objectPerAllocation * objectsPerPage;
+ BatchInfo *batch = static_cast<BatchInfo*>( malloc( mallocSize ) );
+ batch->next_ = 0;
+ batch->used_ = batch->buffer_;
+ batch->end_ = batch->buffer_ + objectsPerPage;
+ return batch;
+ }
+
+ BatchInfo *batches_;
+ BatchInfo *currentBatch_;
+ /// Head of a single linked list within the allocated space of freeed object
--- End diff ---
sp: freeed
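
For anyone reading along, here is a minimal usage sketch of how this batch allocator is meant to be driven (not part of the patch; the Sample struct, the include path, and the page size of 64 are made up for illustration). allocate() hands back raw storage, so the caller constructs with placement new, runs the destructor itself, and release() only threads the slot back onto the free list:

#include <new>       // placement new
#include <string>
#include "json_batchallocator.h"   // hypothetical include path

// Hypothetical payload type, only for illustration.
struct Sample
{
   int id;
   std::string name;
};

int main()
{
   // One object per allocation, 64 objects per page (both arbitrary here).
   Json::BatchAllocator<Sample, 1> allocator( 64 );

   // allocate() returns uninitialized storage; construct in place.
   Sample *s = new ( allocator.allocate() ) Sample();
   s->id = 42;
   s->name = "example";

   // The caller must destruct explicitly; release() only puts the slot
   // back on the free list for reuse.
   s->~Sample();
   allocator.release( s );

   return 0;
   // ~BatchAllocator() frees whole pages with free(); it never runs destructors.
}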