samskalicky commented on a change in pull request #17270: Dynamic custom 
operator GPU support
URL: https://github.com/apache/incubator-mxnet/pull/17270#discussion_r369384854
 
 

 ##########
 File path: include/mxnet/lib_api.h
 ##########
 @@ -328,16 +358,31 @@ typedef void* (*xpu_malloc_t)(void*, int);
  */
 class OpResource {
  public:
-  OpResource(xpu_malloc_t cm, void* ca) : cpu_malloc(cm), cpu_alloc(ca) {}
+  OpResource(xpu_malloc_t cm, void* ca, xpu_malloc_t gm, void* ga, void* st)
+    : cpu_malloc(cm), gpu_malloc(gm), cpu_alloc(ca), gpu_alloc(ga), 
cuda_stream(st) {}
 
   /*! \brief allocate memory controlled by MXNet */
-  void* alloc(int size) {
+  void* alloc_cpu(int size) {
     return cpu_malloc(cpu_alloc, size);
   }
 
+  /*! \brief allocate memory controlled by MXNet */
+  void* alloc_gpu(int size) {
+    return gpu_malloc(gpu_alloc, size);
+  }
+
+  /*! \brief return the gpu stream object */
+  void* get_cuda_stream() {
+    return cuda_stream;
+  }
+
  private:
-  xpu_malloc_t cpu_malloc;
-  void* cpu_alloc;
+  /*! \brief wrapper to allocation lambda function */
+  xpu_malloc_t cpu_malloc, gpu_malloc;
+  /*! \brief lambda function to return allocated memory handle */
+  void *cpu_alloc, *gpu_alloc;
+  /*! \brief cuda stream passed from MXNet */
+  void *cuda_stream;
 
 Review comment:
   And again here — or, better, define this once near the top of the file and use it everywhere:
   ```
   #if defined(__NVCC__)
    typedef cudaStream_t mx_stream_t;
   #else
    typedef void* mx_stream_t;
   #endif
   
     mx_stream_t cuda_stream;
   
   ```

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

Reply via email to