I tried to create a new Op for a specific purpose.

The Op works fine when run on the CPU. However, with GPU-specific compilation
it throws an error that I'm unable to decipher.
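
For context, here is a rough sketch of how the Op is put together (this is not
my exact code, and the threshold logic is just a placeholder): a custom scalar
Op with a c_code() implementation, wrapped in Elemwise, so that on the GPU
Theano lifts it to the GpuElemwise{scalar_binarize} node seen in the error
below.

---------------------------
# Simplified sketch, not my actual Op -- just the general structure:
# a scalar Op with C code, wrapped in Elemwise.
import theano
import theano.tensor as T
from theano.scalar.basic import UnaryScalarOp, upgrade_to_float
from theano.tensor.elemwise import Elemwise


class ScalarBinarize(UnaryScalarOp):
    # Python fallback implementation (used when the C code is not compiled)
    def impl(self, x):
        return 1.0 if x > 0 else 0.0

    # C implementation; Elemwise/GpuElemwise inlines this per element
    def c_code(self, node, name, inputs, outputs, sub):
        (x,) = inputs
        (z,) = outputs
        return "%(z)s = (%(x)s > 0) ? 1 : 0;" % locals()


scalar_binarize = ScalarBinarize(upgrade_to_float, name='scalar_binarize')
binarize = Elemwise(scalar_binarize, name='binarize')

x = T.fmatrix('x')
# Fine with device=cpu; the nvcc failure below happens with device=gpu.
f = theano.function([x], binarize(x + 1.0))
---------------------------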

I'm posting the error below in case anyone can shed any light here.


Command > THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 aprun python3 ~/NeuralNets/abc.py
---------------------------

Using gpu device 0: Tesla K20

.............

mod.cu(303): error: identifier 
"callkernel_node_m8019ee0cc695e27a50022b6a1f03a249_0" is undefined

1 error detected in the compilation of 
"/tmp/tmpxft_00000fa8_00000000-9_mod.cpp1.ii".

.............

Exception: ('The following error happened while compiling the node',
GpuElemwise{scalar_binarize,no_inplace}(GpuElemwise{Add}[(0, 0)].0), '\n',
'nvcc return status', 2, 'for cmd', 'nvcc -shared -O3 --maxrregcount=32
-arch=sm_35 -m64 -Xcompiler
-fno-math-errno,-Wno-unused-label,-Wno-unused-variable,-Wno-write-strings,-DCUDA_NDARRAY_CUH=mc9bf508a7d4a85caaad53710f8c30be5,-DNPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION,-fPIC,-fvisibility=hidden
-Xlinker
-rpath,/gpfs/home/m/m/mmaity/BigRed2/.theano/compiledir_Linux-3.0-1_1.0502.8871-cray_gem_c-x86_64-with-SuSE-11-x86_64-x86_64-3.3.2-64/cuda_ndarray
-I/gpfs/home/m/m/mmaity/BigRed2/.theano/compiledir_Linux-3.0-1_1.0502.8871-cray_gem_c-x86_64-with-SuSE-11-x86_64-x86_64-3.3.2-64/cuda_ndarray
-I/opt/nvidia/cudatoolkit7.0/7.0.28-1.0502.10742.5.1/include
-I/N/soft/cle4/python/3.3.2a/lib/python3.3/site-packages/numpy/core/include
-I/N/soft/cle4/python/3.3.2a/include/python3.3m
-I/N/soft/cle4/python/3.3.2a/lib/python3.3/site-packages/theano/gof
-I/N/soft/cle4/python/3.3.2a/lib/python3.3/site-packages/theano/sandbox/cuda
-o /gpfs/home/m/m/mmaity/BigRed2/.theano/compiledir_Linux-3.0-1_1.0502.8871-cray_gem_c-x86_64-with-SuSE-11-x86_64-x86_64-3.3.2-64/tmppc14oi/m8019ee0cc695e27a50022b6a1f03a249.so
mod.cu
-L/gpfs/home/m/m/mmaity/BigRed2/.theano/compiledir_Linux-3.0-1_1.0502.8871-cray_gem_c-x86_64-with-SuSE-11-x86_64-x86_64-3.3.2-64/cuda_ndarray
-L/N/soft/cle4/python/3.3.2a/lib -lpython3.3m -lcudart -lcublas
-lcuda_ndarray',
'[GpuElemwise{scalar_binarize,no_inplace}(<CudaNdarrayType(float32, matrix)>)]')
---------------------------

I've checked the trace and it's not useful here. Here's the relevant part of
the code generated by Theano, around where the error is thrown.

---------------------------

299
300         {
301             //new block so that failure gotos don't skip over variable initialization
302             //std::cerr << "calling callkernel\n";
303             if (callkernel_node_m8019ee0cc695e27a50022b6a1f03a249_0(1, 0, dims
304
305
306                         , CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3)
307
308
309                         , CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1)
310
311
312                         ))
313             {
314                  // error
315
316
317                 Py_DECREF(V1);
318                 V1 = NULL;
319
320
321                 {
322         __failure = 5;
323         if (!PyErr_Occurred()) {
324             PyErr_SetString(PyExc_RuntimeError,
325                 "Unexpected error in an Op's C code. "
326                 "No Python exception was set.");
327             }
328         goto __label_5;};
329             }
330             else // no error
331             {
332             }
333         }
334         //std::cerr << "C_CODE scalar_binarize END\n";
---------------------------

Line 303, the call to callkernel_node_m8019ee0cc695e27a50022b6a1f03a249_0, is
where the error is thrown. That identifier seems like a generic check generated
for all Ops, but I'm not sure. Any help is really appreciated.

Regards,
Mrinmoy
