From: Junyan He <junyan...@intel.com>

We currently use the context's lock when we add and delete cl objects.
Every cl object should use its own lock to protect itself.
We also add some helper functions to ease the adding and
removing operations.

Signed-off-by: Junyan He <junyan...@intel.com>
---
 src/cl_accelerator_intel.c |   8 +--
 src/cl_command_queue.c     |  27 ++------
 src/cl_command_queue.h     |   1 -
 src/cl_context.c           | 153 ++++++++++++++++++++++++++++++++++++++++-----
 src/cl_context.h           |  34 ++++++----
 src/cl_event.c             |  47 +++++---------
 src/cl_event.h             |   1 -
 src/cl_kernel.c            |   2 +-
 src/cl_mem.c               |  55 ++++++----------
 src/cl_mem.h               |   3 +-
 src/cl_program.c           |  20 ++----
 src/cl_sampler.c           |  21 +------
 src/cl_sampler.h           |   1 -
 13 files changed, 212 insertions(+), 161 deletions(-)

diff --git a/src/cl_accelerator_intel.c b/src/cl_accelerator_intel.c
index 545a613..ae08184 100644
--- a/src/cl_accelerator_intel.c
+++ b/src/cl_accelerator_intel.c
@@ -35,12 +35,12 @@ cl_accelerator_intel_new(cl_context ctx,
 
   /* Append the accelerator_intel in the context accelerator_intel list */
   /* does this really needed? */
-  pthread_mutex_lock(&ctx->accelerator_intel_lock);
+  CL_OBJECT_LOCK(ctx);
     accel->next = ctx->accels;
     if (ctx->accels != NULL)
       ctx->accels->prev = accel;
     ctx->accels = accel;
-  pthread_mutex_unlock(&ctx->accelerator_intel_lock);
+  CL_OBJECT_UNLOCK(ctx);
 
   accel->ctx = ctx;
   cl_context_add_ref(ctx);
@@ -70,14 +70,14 @@ cl_accelerator_intel_delete(cl_accelerator_intel accel)
     return;
 
   /* Remove the accelerator_intel in the context accelerator_intel list */
-  pthread_mutex_lock(&accel->ctx->accelerator_intel_lock);
+  CL_OBJECT_LOCK(accel->ctx);
     if (accel->prev)
       accel->prev->next = accel->next;
     if (accel->next)
       accel->next->prev = accel->prev;
     if (accel->ctx->accels == accel)
       accel->ctx->accels = accel->next;
-  pthread_mutex_unlock(&accel->ctx->accelerator_intel_lock);
+  CL_OBJECT_UNLOCK(accel->ctx);
 
   cl_context_delete(accel->ctx);
   CL_OBJECT_DESTROY_BASE(accel);
diff --git a/src/cl_command_queue.c b/src/cl_command_queue.c
index 172ea94..8d3c6b0 100644
--- a/src/cl_command_queue.c
+++ b/src/cl_command_queue.c
@@ -45,22 +45,14 @@ cl_command_queue_new(cl_context ctx)
   assert(ctx);
   TRY_ALLOC_NO_ERR (queue, CALLOC(struct _cl_command_queue));
   CL_OBJECT_INIT_BASE(queue, CL_OBJECT_COMMAND_QUEUE_MAGIC);
-  queue->ctx = ctx;
+
   queue->cmrt_event = NULL;
   if ((queue->thread_data = cl_thread_data_create()) == NULL) {
     goto error;
   }
 
   /* Append the command queue in the list */
-  pthread_mutex_lock(&ctx->queue_lock);
-    queue->next = ctx->queues;
-    if (ctx->queues != NULL)
-      ctx->queues->prev = queue;
-    ctx->queues = queue;
-  pthread_mutex_unlock(&ctx->queue_lock);
-
-  /* The queue also belongs to its context */
-  cl_context_add_ref(ctx);
+  cl_context_add_queue(ctx, queue);
 
 exit:
   return queue;
@@ -74,7 +66,8 @@ LOCAL void
 cl_command_queue_delete(cl_command_queue queue)
 {
   assert(queue);
-  if (CL_OBJECT_DEC_REF(queue) != 1) return;
+  if (CL_OBJECT_DEC_REF(queue) > 1)
+    return;
 
 #ifdef HAS_CMRT
   if (queue->cmrt_event != NULL)
@@ -84,21 +77,11 @@ cl_command_queue_delete(cl_command_queue queue)
   // If there is a list of valid events, we need to give them
   // a chance to call the call-back function.
   cl_event_update_last_events(queue,1);
-  /* Remove it from the list */
-  assert(queue->ctx);
-  pthread_mutex_lock(&queue->ctx->queue_lock);
-    if (queue->prev)
-      queue->prev->next = queue->next;
-    if (queue->next)
-      queue->next->prev = queue->prev;
-    if (queue->ctx->queues == queue)
-      queue->ctx->queues = queue->next;
-  pthread_mutex_unlock(&queue->ctx->queue_lock);
 
   cl_thread_data_destroy(queue);
   queue->thread_data = NULL;
   cl_mem_delete(queue->perf);
-  cl_context_delete(queue->ctx);
+  cl_context_remove_queue(queue->ctx, queue);
   cl_free(queue->wait_events);
   cl_free(queue->barrier_events);
   CL_OBJECT_DESTROY_BASE(queue);
diff --git a/src/cl_command_queue.h b/src/cl_command_queue.h
index 72b7c55..f0b421d 100644
--- a/src/cl_command_queue.h
+++ b/src/cl_command_queue.h
@@ -40,7 +40,6 @@ struct _cl_command_queue {
   cl_int    wait_events_num;           /* Number of Non-complete user events */
   cl_int    wait_events_size;          /* The size of array that wait_events 
point to */
   cl_command_queue_properties  props;  /* Queue properties */
-  cl_command_queue prev, next;         /* We chain the command queues together 
*/
   void *thread_data;                   /* Used to store thread context data */
   cl_mem perf;                         /* Where to put the perf counters */
 
diff --git a/src/cl_context.c b/src/cl_context.c
index c6137a3..229ab96 100644
--- a/src/cl_context.c
+++ b/src/cl_context.c
@@ -22,6 +22,8 @@
 #include "cl_context.h"
 #include "cl_command_queue.h"
 #include "cl_mem.h"
+#include "cl_sampler.h"
+#include "cl_event.h"
 #include "cl_alloc.h"
 #include "cl_utils.h"
 #include "cl_driver.h"
@@ -38,6 +40,134 @@
 #include <assert.h>
 #include <string.h>
 
+LOCAL void
+cl_context_add_queue(cl_context ctx, cl_command_queue queue) {
+  assert(queue->ctx == NULL);
+  cl_context_add_ref(ctx);
+
+  CL_OBJECT_LOCK(ctx);
+  list_add_tail(&queue->base.node, &ctx->queues);
+  ctx->queue_num++;
+  ctx->queue_cookie++;
+  CL_OBJECT_UNLOCK(ctx);
+
+  queue->ctx = ctx;
+}
+
+LOCAL void
+cl_context_remove_queue(cl_context ctx, cl_command_queue queue) {
+  assert(queue->ctx == ctx);
+  CL_OBJECT_LOCK(ctx);
+  list_del(&queue->base.node);
+  ctx->queue_num--;
+  ctx->queue_cookie++;
+  CL_OBJECT_UNLOCK(ctx);
+
+  cl_context_delete(ctx);
+  queue->ctx = NULL;
+}
+
+LOCAL void
+cl_context_add_mem(cl_context ctx, cl_mem mem) {
+  assert(mem->ctx == NULL);
+  cl_context_add_ref(ctx);
+
+  CL_OBJECT_LOCK(ctx);
+  list_add_tail(&mem->base.node, &ctx->mem_objects);
+  ctx->mem_object_num++;
+  CL_OBJECT_UNLOCK(ctx);
+
+  mem->ctx = ctx;
+}
+
+LOCAL void
+cl_context_remove_mem(cl_context ctx, cl_mem mem) {
+  assert(mem->ctx == ctx);
+  CL_OBJECT_LOCK(ctx);
+  list_del(&mem->base.node);
+  ctx->mem_object_num--;
+  CL_OBJECT_UNLOCK(ctx);
+
+  cl_context_delete(ctx);
+  mem->ctx = NULL;
+}
+
+LOCAL void
+cl_context_add_sampler(cl_context ctx, cl_sampler sampler) {
+  assert(sampler->ctx == NULL);
+  cl_context_add_ref(ctx);
+
+  CL_OBJECT_LOCK(ctx);
+  list_add_tail(&sampler->base.node, &ctx->samplers);
+  ctx->sampler_num++;
+  CL_OBJECT_UNLOCK(ctx);
+
+  sampler->ctx = ctx;
+}
+
+LOCAL void
+cl_context_remove_sampler(cl_context ctx, cl_sampler sampler) {
+  assert(sampler->ctx == ctx);
+  CL_OBJECT_LOCK(ctx);
+  list_del(&sampler->base.node);
+  ctx->sampler_num--;
+  CL_OBJECT_UNLOCK(ctx);
+
+  cl_context_delete(ctx);
+  sampler->ctx = NULL;
+}
+
+LOCAL void
+cl_context_add_event(cl_context ctx, cl_event event) {
+  assert(event->ctx == NULL);
+  cl_context_add_ref(ctx);
+
+  CL_OBJECT_LOCK(ctx);
+  list_add_tail(&event->base.node, &ctx->events);
+  ctx->event_num++;
+  CL_OBJECT_UNLOCK(ctx);
+
+  event->ctx = ctx;
+}
+
+LOCAL void
+cl_context_remove_event(cl_context ctx, cl_event event) {
+  assert(event->ctx == ctx);
+  CL_OBJECT_LOCK(ctx);
+  list_del(&event->base.node);
+  ctx->event_num--;
+  CL_OBJECT_UNLOCK(ctx);
+
+  cl_context_delete(ctx);
+  event->ctx = NULL;
+}
+
+LOCAL void
+cl_context_add_program(cl_context ctx, cl_program program) {
+  assert(program->ctx == NULL);
+  cl_context_add_ref(ctx);
+
+  CL_OBJECT_LOCK(ctx);
+  list_add_tail(&program->base.node, &ctx->programs);
+  ctx->program_num++;
+  CL_OBJECT_UNLOCK(ctx);
+
+  program->ctx = ctx;
+}
+
+LOCAL void
+cl_context_remove_program(cl_context ctx, cl_program program) {
+  assert(program->ctx == ctx);
+  CL_OBJECT_LOCK(ctx);
+  list_del(&program->base.node);
+  ctx->program_num--;
+  CL_OBJECT_UNLOCK(ctx);
+
+  cl_context_delete(ctx);
+  program->ctx = NULL;
+}
+
+
 #define CHECK(var) \
   if (var) \
     return CL_INVALID_PROPERTY; \
@@ -168,14 +298,15 @@ cl_context_new(struct _cl_context_prop *props)
 
   TRY_ALLOC_NO_ERR (ctx, CALLOC(struct _cl_context));
   CL_OBJECT_INIT_BASE(ctx, CL_OBJECT_CONTEXT_MAGIC);
+  list_init(&ctx->queues);
+  list_init(&ctx->mem_objects);
+  list_init(&ctx->samplers);
+  list_init(&ctx->events);
+  list_init(&ctx->programs);
+  ctx->queue_cookie = 1;
   TRY_ALLOC_NO_ERR (ctx->drv, cl_driver_new(props));
   ctx->props = *props;
   ctx->ver = cl_driver_get_ver(ctx->drv);
-  pthread_mutex_init(&ctx->program_lock, NULL);
-  pthread_mutex_init(&ctx->queue_lock, NULL);
-  pthread_mutex_init(&ctx->buffer_lock, NULL);
-  pthread_mutex_init(&ctx->sampler_lock, NULL);
-  pthread_mutex_init(&ctx->accelerator_intel_lock, NULL);
 
 exit:
   return ctx;
@@ -216,13 +347,6 @@ cl_context_delete(cl_context ctx)
   cl_program_delete(ctx->built_in_prgs);
   ctx->built_in_prgs = NULL;
 
-  /* All object lists should have been freed. Otherwise, the reference counter
-   * of the context cannot be 0
-   */
-  assert(ctx->queues == NULL);
-  assert(ctx->programs == NULL);
-  assert(ctx->buffers == NULL);
-  assert(ctx->drv);
   cl_free(ctx->prop_user);
   cl_driver_delete(ctx->drv);
   CL_OBJECT_DESTROY_BASE(ctx);
@@ -274,7 +398,8 @@ cl_context_get_static_kernel_from_bin(cl_context ctx, 
cl_int index,
   cl_int ret;
   cl_int binary_status = CL_SUCCESS;
   cl_kernel ker;
-  pthread_mutex_lock(&ctx->program_lock);
+
+  CL_OBJECT_TAKE_OWNERSHIP(ctx, 1);
   if (ctx->internal_prgs[index] == NULL) {
     ctx->internal_prgs[index] = cl_program_create_from_binary(ctx, 1, 
&ctx->device,
       &size, (const unsigned char **)&str_kernel, &binary_status, &ret);
@@ -324,6 +449,6 @@ cl_context_get_static_kernel_from_bin(cl_context ctx, 
cl_int index,
   ker = ctx->internal_kernels[index];
 
 unlock:
-  pthread_mutex_unlock(&ctx->program_lock);
+  CL_OBJECT_RELEASE_OWNERSHIP(ctx);
   return cl_kernel_dup(ker);
 }
diff --git a/src/cl_context.h b/src/cl_context.h
index 82444d2..b1602c8 100644
--- a/src/cl_context.h
+++ b/src/cl_context.h
@@ -102,18 +102,19 @@ struct _cl_context {
   _cl_base_object base;
   cl_driver drv;                    /* Handles HW or simulator */
   cl_device_id device;              /* All information about the GPU device */
-  cl_command_queue queues;          /* All command queues currently allocated 
*/
-  cl_program programs;              /* All programs currently allocated */
-  cl_mem buffers;                   /* All memory object currently allocated */
-  cl_sampler samplers;              /* All sampler object currently allocated 
*/
+  list_head queues;                 /* All command queues currently allocated 
*/
+  cl_uint queue_num;                /* All queue number currently allocated */
+  cl_uint queue_cookie;             /* Cookie will change every time we change 
queue list. */
+  list_head mem_objects;            /* All memory object currently allocated */
+  cl_uint mem_object_num;           /* All memory number currently allocated */
+  list_head samplers;               /* All sampler object currently allocated 
*/
+  cl_uint sampler_num;              /* All sampler number currently allocated 
*/
+  list_head events;                 /* All event object currently allocated */
+  cl_uint event_num;                /* All event number currently allocated */
+  list_head programs;               /* All programs currently allocated */
+  cl_uint program_num;              /* All program number currently allocated 
*/
+
   cl_accelerator_intel accels;      /* All accelerator_intel object currently 
allocated */
-  cl_event   events;                /* All event object currently allocated */
-  pthread_mutex_t queue_lock;       /* To allocate and deallocate queues */
-  pthread_mutex_t program_lock;     /* To allocate and deallocate programs */
-  pthread_mutex_t buffer_lock;      /* To allocate and deallocate buffers */
-  pthread_mutex_t sampler_lock;     /* To allocate and deallocate samplers */
-  pthread_mutex_t accelerator_intel_lock;     /* To allocate and deallocate 
accelerator_intel */
-  pthread_mutex_t event_lock;       /* To allocate and deallocate events */
   cl_program internal_prgs[CL_INTERNAL_KERNEL_MAX];
                                     /* All programs internal used, for example 
clEnqueuexxx api use */
   cl_kernel  internal_kernels[CL_INTERNAL_KERNEL_MAX];
@@ -133,6 +134,17 @@ struct _cl_context {
 #define CL_OBJECT_CONTEXT_MAGIC 0x20BBCADE993134AALL
 #define CL_OBJECT_IS_CONTEXT(obj) (((cl_base_object)obj)->magic == 
CL_OBJECT_CONTEXT_MAGIC)
 
+extern void cl_context_add_queue(cl_context ctx, cl_command_queue queue);
+extern void cl_context_remove_queue(cl_context ctx, cl_command_queue queue);
+extern void cl_context_add_mem(cl_context ctx, cl_mem mem);
+extern void cl_context_remove_mem(cl_context ctx, cl_mem mem);
+extern void cl_context_add_sampler(cl_context ctx, cl_sampler sampler);
+extern void cl_context_remove_sampler(cl_context ctx, cl_sampler sampler);
+extern void cl_context_add_event(cl_context ctx, cl_event sampler);
+extern void cl_context_remove_event(cl_context ctx, cl_event sampler);
+extern void cl_context_add_program(cl_context ctx, cl_program program);
+extern void cl_context_remove_program(cl_context ctx, cl_program program);
+
 /* Implement OpenCL function */
 extern cl_context cl_create_context(const cl_context_properties*,
                                     cl_uint,
diff --git a/src/cl_event.c b/src/cl_event.c
index cf3cc3a..6c7c2e0 100644
--- a/src/cl_event.c
+++ b/src/cl_event.c
@@ -102,15 +102,8 @@ cl_event cl_event_new(cl_context ctx, cl_command_queue 
queue, cl_command_type ty
   CL_OBJECT_INIT_BASE(event, CL_OBJECT_EVENT_MAGIC);
 
   /* Append the event in the context event list */
-  pthread_mutex_lock(&ctx->event_lock);
-    event->next = ctx->events;
-    if (ctx->events != NULL)
-      ctx->events->prev = event;
-    ctx->events = event;
-  pthread_mutex_unlock(&ctx->event_lock);
-  event->ctx   = ctx;
-  cl_context_add_ref(ctx);
-
+  cl_context_add_event(ctx, event);
+ 
   /* Initialize all members and create GPGPU event object */
   event->queue = queue;
   event->type  = type;
@@ -155,19 +148,7 @@ void cl_event_delete(cl_event event)
     cl_gpgpu_event_delete(event->gpgpu_event);
 
   /* Remove it from the list */
-  assert(event->ctx);
-  pthread_mutex_lock(&event->ctx->event_lock);
-
-  if (event->prev)
-    event->prev->next = event->next;
-  if (event->next)
-    event->next->prev = event->prev;
-  /* if this is the head, update head pointer ctx->events */
-  if (event->ctx->events == event)
-    event->ctx->events = event->next;
-
-  pthread_mutex_unlock(&event->ctx->event_lock);
-  cl_context_delete(event->ctx);
+  cl_context_remove_event(event->ctx, event);
 
   if (event->gpgpu) {
     fprintf(stderr, "Warning: a event is deleted with a pending enqueued 
task.\n");
@@ -206,17 +187,17 @@ cl_int cl_event_set_callback(cl_event event ,
   // It is possible that the event enqueued is already completed.
   // clEnqueueReadBuffer can be synchronous and when the callback
   // is registered after, it still needs to get executed.
-  pthread_mutex_lock(&event->ctx->event_lock); // Thread safety required: 
operations on the event->status can be made from many different threads
+  CL_OBJECT_LOCK(event); // Thread safety required: operations on the 
event->status can be made from many different threads
   if(event->status <= command_exec_callback_type) {
     /* Call user callback */
-    pthread_mutex_unlock(&event->ctx->event_lock); // pfn_notify can call 
clFunctions that use the event_lock and from here it's not required
+    CL_OBJECT_UNLOCK(event); // pfn_notify can call clFunctions that use the 
event_lock and from here it's not required
     cb->pfn_notify(event, event->status, cb->user_data);
     cl_free(cb);
   } else {
     // Enqueue to callback list
     cb->next        = event->user_cb;
     event->user_cb  = cb;
-    pthread_mutex_unlock(&event->ctx->event_lock);
+    CL_OBJECT_UNLOCK(event);
   }
 
 exit:
@@ -434,7 +415,7 @@ void cl_event_call_callback(cl_event event, cl_int status, 
cl_bool free_cb) {
   user_callback *queue_cb = NULL; // For thread safety, we create a queue that 
holds user_callback's pfn_notify contents
   user_callback *temp_cb = NULL;
   user_cb = event->user_cb;
-  pthread_mutex_lock(&event->ctx->event_lock);
+  CL_OBJECT_LOCK(event);
   while(user_cb) {
     if(user_cb->status >= status
         && user_cb->executed == CL_FALSE) { // Added check to not execute a 
callback when it was already handled
@@ -458,7 +439,7 @@ void cl_event_call_callback(cl_event event, cl_int status, 
cl_bool free_cb) {
     }
     user_cb = user_cb->next;
   }
-  pthread_mutex_unlock(&event->ctx->event_lock);
+  CL_OBJECT_UNLOCK(event);
 
   // Calling the callbacks outside of the event_lock is required because the 
callback can call cl_api functions and get deadlocked
   while(queue_cb) { // For each callback queued, actually execute the callback
@@ -474,14 +455,14 @@ void cl_event_set_status(cl_event event, cl_int status)
   cl_int ret, i;
   cl_event evt;
 
-  pthread_mutex_lock(&event->ctx->event_lock);
+  CL_OBJECT_LOCK(event);
   if(status >= event->status) {
-    pthread_mutex_unlock(&event->ctx->event_lock);
+    CL_OBJECT_UNLOCK(event);
     return;
   }
   if(event->status <= CL_COMPLETE) {
     event->status = status;    //have done enqueue before or doing in another 
thread
-    pthread_mutex_unlock(&event->ctx->event_lock);
+    CL_OBJECT_UNLOCK(event);
     return;
   }
 
@@ -501,10 +482,10 @@ void cl_event_set_status(cl_event event, cl_int status)
 
       event->status = status;  //Change the event status after enqueue and 
befor unlock
 
-      pthread_mutex_unlock(&event->ctx->event_lock);
+      CL_OBJECT_UNLOCK(event);
       for(i=0; i<event->enqueue_cb->num_events; i++)
         cl_event_delete(event->enqueue_cb->wait_list[i]);
-      pthread_mutex_lock(&event->ctx->event_lock);
+      CL_OBJECT_LOCK(event);
 
       if(event->enqueue_cb->wait_list)
         cl_free(event->enqueue_cb->wait_list);
@@ -514,7 +495,7 @@ void cl_event_set_status(cl_event event, cl_int status)
   }
   if(event->status >= status)  //maybe changed in other threads
     event->status = status;
-  pthread_mutex_unlock(&event->ctx->event_lock);
+  CL_OBJECT_UNLOCK(event);
 
   /* Call user callback */
   cl_event_call_callback(event, status, CL_FALSE);
diff --git a/src/cl_event.h b/src/cl_event.h
index 164ca47..85cd53e 100644
--- a/src/cl_event.h
+++ b/src/cl_event.h
@@ -57,7 +57,6 @@ typedef struct _user_callback {
 struct _cl_event {
   _cl_base_object    base;
   cl_context         ctx;         /* The context associated with event */
-  cl_event           prev, next;  /* We chain the memory buffers together */
   cl_command_queue   queue;       /* The command queue associated with event */
   cl_command_type    type;        /* The command type associated with event */
   cl_int             status;      /* The execution status */
diff --git a/src/cl_kernel.c b/src/cl_kernel.c
index 8fafe42..b3f1e35 100644
--- a/src/cl_kernel.c
+++ b/src/cl_kernel.c
@@ -166,7 +166,7 @@ cl_kernel_set_arg(cl_kernel k, cl_uint index, size_t sz, 
const void *value)
     if(value != NULL)
       mem = *(cl_mem*)value;
     if(value != NULL && mem) {
-      if( CL_SUCCESS != is_valid_mem(mem, ctx->buffers))
+      if(CL_SUCCESS != cl_mem_is_valid(mem, ctx))
         return CL_INVALID_MEM_OBJECT;
 
       if (UNLIKELY((arg_type == GBE_ARG_IMAGE && !IS_IMAGE(mem))
diff --git a/src/cl_mem.c b/src/cl_mem.c
index 5caf5a9..06a4d5a 100644
--- a/src/cl_mem.c
+++ b/src/cl_mem.c
@@ -362,15 +362,8 @@ cl_mem_allocate(enum cl_mem_type type,
     mem->size = sz;
   }
 
-  cl_context_add_ref(ctx);
-  mem->ctx = ctx;
-    /* Append the buffer in the context buffer list */
-  pthread_mutex_lock(&ctx->buffer_lock);
-  mem->next = ctx->buffers;
-  if (ctx->buffers != NULL)
-    ctx->buffers->prev = mem;
-  ctx->buffers = mem;
-  pthread_mutex_unlock(&ctx->buffer_lock);
+  /* Append the buffer in the context buffer list */
+  cl_context_add_mem(ctx, mem);
 
 exit:
   if (errcode)
@@ -384,17 +377,26 @@ error:
 }
 
 LOCAL cl_int
-is_valid_mem(cl_mem mem, cl_mem buffers)
+cl_mem_is_valid(cl_mem mem, cl_context ctx)
 {
-  cl_mem tmp = buffers;
-  while(tmp){
-    if(mem == tmp){
-      if (UNLIKELY(!CL_OBJECT_IS_MEM(mem)))
+  struct list_head *pos;
+  cl_base_object pbase_object;
+
+  CL_OBJECT_LOCK(ctx);
+  list_for_each (pos, (&ctx->mem_objects)) {
+    pbase_object = list_entry(pos, _cl_base_object, node);
+    if (pbase_object == (cl_base_object)mem) {
+      if (UNLIKELY(!CL_OBJECT_IS_MEM(mem))) {
+        CL_OBJECT_UNLOCK(ctx);
         return CL_INVALID_MEM_OBJECT;
+      }
+
+      CL_OBJECT_UNLOCK(ctx);
       return CL_SUCCESS;
     }
-    tmp = tmp->next;
   }
+
+  CL_OBJECT_UNLOCK(ctx);
   return CL_INVALID_MEM_OBJECT;
 }
 
@@ -581,15 +583,8 @@ cl_mem_new_sub_buffer(cl_mem buffer,
     mem->host_ptr = buffer->host_ptr;
   }
 
-  cl_context_add_ref(buffer->ctx);
-  mem->ctx = buffer->ctx;
   /* Append the buffer in the context buffer list */
-  pthread_mutex_lock(&buffer->ctx->buffer_lock);
-  mem->next = buffer->ctx->buffers;
-  if (buffer->ctx->buffers != NULL)
-    buffer->ctx->buffers->prev = mem;
-  buffer->ctx->buffers = mem;
-  pthread_mutex_unlock(&buffer->ctx->buffer_lock);
+  cl_context_add_mem(buffer->ctx, mem);
 
 exit:
   if (errcode_ret)
@@ -1203,19 +1198,7 @@ cl_mem_delete(cl_mem mem)
   }
 
   /* Remove it from the list */
-  if (mem->ctx) {
-    pthread_mutex_lock(&mem->ctx->buffer_lock);
-      if (mem->prev)
-        mem->prev->next = mem->next;
-      if (mem->next)
-        mem->next->prev = mem->prev;
-      if (mem->ctx->buffers == mem)
-        mem->ctx->buffers = mem->next;
-    pthread_mutex_unlock(&mem->ctx->buffer_lock);
-    cl_context_delete(mem->ctx);
-  } else {
-    assert((mem->prev == 0) && (mem->next == 0));
-  }
+  cl_context_remove_mem(mem->ctx, mem);
 
   /* Someone still mapped, unmap */
   if(mem->map_ref > 0) {
diff --git a/src/cl_mem.h b/src/cl_mem.h
index e6c8217..9bb5c47 100644
--- a/src/cl_mem.h
+++ b/src/cl_mem.h
@@ -82,7 +82,6 @@ enum cl_mem_type {
 
 typedef  struct _cl_mem {
   _cl_base_object base;
-  cl_mem prev, next;        /* We chain the memory buffers together */
   enum cl_mem_type type;
   cl_buffer bo;             /* Data in GPU memory */
   size_t size;              /* original request size, not alignment size, used 
in constant buffer */
@@ -195,7 +194,7 @@ extern cl_int cl_get_mem_object_info(cl_mem, cl_mem_info, 
size_t, void *, size_t
 extern cl_int cl_get_image_info(cl_mem, cl_image_info, size_t, void *, size_t 
*);
 
 /* Query whether mem is in buffers */
-extern cl_int is_valid_mem(cl_mem mem, cl_mem buffers);
+extern cl_int cl_mem_is_valid(cl_mem mem, cl_context ctx);
 
 /* Create a new memory object and initialize it with possible user data */
 extern cl_mem cl_mem_new_buffer(cl_context, cl_mem_flags, size_t, void*, 
cl_int*);
diff --git a/src/cl_program.c b/src/cl_program.c
index 543af18..6e86675 100644
--- a/src/cl_program.c
+++ b/src/cl_program.c
@@ -83,17 +83,6 @@ cl_program_delete(cl_program p)
     p->build_log = NULL;
   }
 
-  /* Remove it from the list */
-  assert(p->ctx);
-  pthread_mutex_lock(&p->ctx->program_lock);
-    if (p->prev)
-      p->prev->next = p->next;
-    if (p->next)
-      p->next->prev = p->prev;
-    if (p->ctx->programs == p)
-      p->ctx->programs = p->next;
-  pthread_mutex_unlock(&p->ctx->program_lock);
-
 #ifdef HAS_CMRT
   if (p->cmrt_program != NULL)
     cmrt_destroy_program(p);
@@ -106,9 +95,8 @@ cl_program_delete(cl_program p)
     cl_free(p->ker);
   }
 
-  /* Program belongs to their parent context */
-  cl_context_delete(p->ctx);
-
+  /* Remove it from the list */
+  cl_context_remove_program(p->ctx, p);
   /* Free the program as allocated by the compiler */
   if (p->opaque) {
     if (CompilerSupported())
@@ -133,14 +121,14 @@ cl_program_new(cl_context ctx)
   /* Allocate the structure */
   TRY_ALLOC_NO_ERR (p, CALLOC(struct _cl_program));
   CL_OBJECT_INIT_BASE(p, CL_OBJECT_PROGRAM_MAGIC);
-  p->ctx = ctx;
   p->build_status = CL_BUILD_NONE;
   p->cmrt_program = NULL;
   p->build_log = calloc(BUILD_LOG_MAX_SIZE, sizeof(char));
   if (p->build_log)
     p->build_log_max_sz = BUILD_LOG_MAX_SIZE;
+
   /* The queue also belongs to its context */
-  cl_context_add_ref(ctx);
+  cl_context_add_program(ctx, p);
 
 exit:
   return p;
diff --git a/src/cl_sampler.c b/src/cl_sampler.c
index c67a900..aad2761 100644
--- a/src/cl_sampler.c
+++ b/src/cl_sampler.c
@@ -88,14 +88,7 @@ cl_sampler_new(cl_context ctx,
   sampler->filter = filter;
 
   /* Append the sampler in the context sampler list */
-  pthread_mutex_lock(&ctx->sampler_lock);
-    sampler->next = ctx->samplers;
-    if (ctx->samplers != NULL)
-      ctx->samplers->prev = sampler;
-    ctx->samplers = sampler;
-  pthread_mutex_unlock(&ctx->sampler_lock);
-  sampler->ctx = ctx;
-  cl_context_add_ref(ctx);
+  cl_context_add_sampler(ctx, sampler);
 
   sampler->clkSamplerValue = cl_to_clk(normalized_coords, address, filter);
 
@@ -117,17 +110,7 @@ cl_sampler_delete(cl_sampler sampler)
   if (CL_OBJECT_DEC_REF(sampler) > 1)
     return;
 
-  assert(sampler->ctx);
-  pthread_mutex_lock(&sampler->ctx->sampler_lock);
-    if (sampler->prev)
-      sampler->prev->next = sampler->next;
-    if (sampler->next)
-      sampler->next->prev = sampler->prev;
-    if (sampler->ctx->samplers == sampler)
-      sampler->ctx->samplers = sampler->next;
-  pthread_mutex_unlock(&sampler->ctx->sampler_lock);
-  cl_context_delete(sampler->ctx);
-
+  cl_context_remove_sampler(sampler->ctx, sampler);
   CL_OBJECT_DESTROY_BASE(sampler);
   cl_free(sampler);
 }
diff --git a/src/cl_sampler.h b/src/cl_sampler.h
index 900f66e..08d03fd 100644
--- a/src/cl_sampler.h
+++ b/src/cl_sampler.h
@@ -28,7 +28,6 @@
 /* How to access images */
 struct _cl_sampler {
   _cl_base_object base;
-  cl_sampler prev, next;     /* We chain the samplers in the allocator */
   cl_context ctx;            /* Context it belongs to */
   cl_bool normalized_coords; /* Are coordinates normalized? */
   cl_addressing_mode address;/* CLAMP / REPEAT and so on... */
-- 
2.7.4



_______________________________________________
Beignet mailing list
Beignet@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/beignet

Reply via email to