I checked online and found one person who ran into the same problem, but he 
never said how he fixed it:
https://github.com/Theano/Theano/issues/5330
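
In case it helps: before digging further, I would first check whether the CUDA 
and cuDNN shared libraries that ld complains about are still visible at all; my 
guess is the apt-get upgrade moved or removed them. Below is a rough sketch of 
the check I'd run (my own heuristic, not a Theano tool; it looks at the 
dynamic-linker path, which is not exactly what ld searches at link time, and 
the cuda-8.0 path is the one from the nvcc command in the log):

# Rough check (my own sketch, not an official Theano tool): see whether the
# libraries the linker complained about are still discoverable, both on the
# default library search path and under the CUDA 8.0 prefix used by nvcc.
import ctypes.util
import glob

for name in ("cudnn", "cudart"):
    found = ctypes.util.find_library(name)
    print("lib%s -> %s" % (name, found or "not found on the linker path"))

# The -rpath in the failing nvcc command points here:
print(glob.glob("/usr/local/cuda-8.0/lib64/libcud*"))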

On Thursday, May 18, 2017 at 3:56:20 PM UTC-8, Xu Zhang wrote:
>
> My Theano and Lasagne were working fine yesterday. Today, after I ran 
> sudo apt-get upgrade and installed some other dependencies for VLC, Theano 
> stopped working. The error below appears when I run my code, and the screen 
> keeps printing similar errors without stopping.
>
> My nvidia-smi output:
>
>
> +-----------------------------------------------------------------------------+
> | NVIDIA-SMI 361.93.02              Driver Version: 361.93.02                 |
> |-------------------------------+----------------------+----------------------+
> | GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
> | Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
> |===============================+======================+======================|
> |   0  Tesla K40m          On   | 0000:81:00.0     Off |                    0 |
> | N/A   37C    P8    20W / 235W |      0MiB / 11441MiB |      0%      Default |
> +-------------------------------+----------------------+----------------------+
>
> +-----------------------------------------------------------------------------+
> | Processes:                                                       GPU Memory |
> |  GPU       PID  Type  Process name                               Usage      |
> |=============================================================================|
> |  No running processes found                                                 |
> +-----------------------------------------------------------------------------+
> CUDA version:
>
> nvcc: NVIDIA (R) Cuda compiler driver
> Copyright (c) 2005-2016 NVIDIA Corporation
> Built on Tue_Jan_10_13:22:03_CST_2017
> Cuda compilation tools, release 8.0, V8.0.61
>
> Ubuntu version:
>
> Distributor ID:    Ubuntu
> Description:    Ubuntu 14.04.5 LTS
> Release:    14.04
> Codename:    trusty
>
>
>
> WARNING (theano.sandbox.cuda): The cuda backend is deprecated and will be 
> removed in the next release (v0.10).  Please switch to the gpuarray 
> backend. You can get more information about how to switch at this URL:
>  
> https://github.com/Theano/Theano/wiki/Converting-to-the-new-gpu-back-end%28gpuarray%29
>
> #include <Python.h>
> #include <iostream>
> #include "theano_mod_helper.h"
> #include "cudnn.h"
> //////////////////////
> ////  Support Code
> //////////////////////
>
> #if PY_MAJOR_VERSION >= 3
> #define PyInt_FromLong PyLong_FromLong
> #endif
>
>
>     namespace {
>     struct __struct_compiled_op_3442132e74e76f61361fb1c73112eef4 {
>         PyObject* __ERROR;
>
>         PyObject* storage_V1;
>
>
>         __struct_compiled_op_3442132e74e76f61361fb1c73112eef4() {
>             // This is only somewhat safe because we:
>             //  1) Are not a virtual class
>             //  2) Do not use any virtual classes in the members
>             //  3) Deal with mostly POD and pointers
>
>             // If this changes, we would have to revise this, but for
>             // now I am tired of chasing segfaults because
>             // initialization code had an error and some pointer has
>             // a junk value.
>             memset(this, 0, sizeof(*this));
>         }
>         ~__struct_compiled_op_3442132e74e76f61361fb1c73112eef4(void) {
>             cleanup();
>         }
>
>         int init(PyObject* __ERROR, PyObject* storage_V1) {
>             Py_XINCREF(storage_V1);
>             this->storage_V1 = storage_V1;
>
>
>             this->__ERROR = __ERROR;
>             return 0;
>         }
>         void cleanup(void) {
>             __label_1:
>
> double __DUMMY_1;
> __label_4:
>
> double __DUMMY_4;
>
>             Py_XDECREF(this->storage_V1);
>         }
>         int run(void) {
>             int __failure = 0;
>
>     PyObject* py_V1;
>
>         PyObject* V1;
>
> {
>
>     py_V1 = Py_None;
>     {Py_XINCREF(py_V1);}
>
>         V1 = NULL;
>
> {
> // Op class DnnVersion
>
> #if defined(CUDNN_VERSION)
> V1 = PyTuple_Pack(2, PyInt_FromLong(CUDNN_VERSION), PyInt_FromLong(cudnnGetVersion()));
> #else
> V1 = PyInt_FromLong(-1);
> #endif
> __label_3:
>
> double __DUMMY_3;
>
> }
> __label_2:
>
>     if (!__failure) {
>
>         assert(py_V1->ob_refcnt > 1);
>         Py_DECREF(py_V1);
>         py_V1 = V1 ? V1 : Py_None;
>         Py_INCREF(py_V1);
>
>       PyObject* old = PyList_GET_ITEM(storage_V1, 0);
>       {Py_XINCREF(py_V1);}
>       PyList_SET_ITEM(storage_V1, 0, py_V1);
>       {Py_XDECREF(old);}
>     }
>
>         Py_XDECREF(V1);
>
>     {Py_XDECREF(py_V1);}
>
> double __DUMMY_2;
>
> }
>
>
>         if (__failure) {
>             // When there is a failure, this code puts the exception
>             // in __ERROR.
>             PyObject* err_type = NULL;
>             PyObject* err_msg = NULL;
>             PyObject* err_traceback = NULL;
>             PyErr_Fetch(&err_type, &err_msg, &err_traceback);
>             if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);}
>             if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);}
>             if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);}
>             PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0);
>             PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1);
>             PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2);
>             PyList_SET_ITEM(__ERROR, 0, err_type);
>             PyList_SET_ITEM(__ERROR, 1, err_msg);
>             PyList_SET_ITEM(__ERROR, 2, err_traceback);
>             {Py_XDECREF(old_err_type);}
>             {Py_XDECREF(old_err_msg);}
>             {Py_XDECREF(old_err_traceback);}
>         }
>         // The failure code is returned to index what code block failed.
>         return __failure;
>
>         }
>     };
>     }
>
>
>         static int __struct_compiled_op_3442132e74e76f61361fb1c73112eef4_executor(__struct_compiled_op_3442132e74e76f61361fb1c73112eef4* self) {
>             return self->run();
>         }
>
>         static void __struct_compiled_op_3442132e74e76f61361fb1c73112eef4_destructor(void* executor, void* self) {
>             delete ((__struct_compiled_op_3442132e74e76f61361fb1c73112eef4*)self);
>         }
>
> //////////////////////
> ////  Functions
> //////////////////////
> static PyObject * instantiate(PyObject * self, PyObject *argtuple) {
>   assert(PyTuple_Check(argtuple));
>   if (2 != PyTuple_Size(argtuple)){
>      PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 2, got %i", (int)PyTuple_Size(argtuple));
>      return NULL;
>   }
>   __struct_compiled_op_3442132e74e76f61361fb1c73112eef4* struct_ptr = new __struct_compiled_op_3442132e74e76f61361fb1c73112eef4();
>   if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1) ) != 0) {
>     delete struct_ptr;
>     return NULL;
>   }
>   PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_3442132e74e76f61361fb1c73112eef4_executor), struct_ptr, __struct_compiled_op_3442132e74e76f61361fb1c73112eef4_destructor);
>   return thunk; }
>
> //////////////////////
> ////  Module init
> //////////////////////
> static PyMethodDef MyMethods[] = {
>     {"instantiate", instantiate, METH_VARARGS, "undocumented"} ,
>     {NULL, NULL, 0, NULL}
> };
> PyMODINIT_FUNC init3442132e74e76f61361fb1c73112eef4(void){
>    (void) Py_InitModule("3442132e74e76f61361fb1c73112eef4", MyMethods);
> }
>
> ===============================
> nvcc warning : The 'compute_20', 'sm_20', and 'sm_21' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
> /usr/bin/ld: cannot find -lcudnn
> /usr/bin/ld: cannot find -lcudart
> /usr/bin/ld: cannot find -lcudadevrt
> /usr/bin/ld: cannot find -lcudart_static
> collect2: error: ld returned 1 exit status
> Using gpu device 0: Tesla K40m (CNMeM is enabled with initial size: 95.0% of memory, cuDNN None)
>
> ['nvcc', '-shared', '-O3', '-Xlinker', '-rpath,/usr/local/cuda-8.0/lib64', '-m64', '-Xcompiler',
> '-fno-math-errno,-Wno-unused-label,-Wno-unused-variable,-Wno-write-strings,-DCUDA_NDARRAY_CUH=c72d035fdf91890f3b36710688069b2e,-DNPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION,-fPIC,-fvisibility=hidden',
> '-Xlinker', '-rpath,/space/xzhang/.theano/compiledir_Linux-3.19--generic-x86_64-with-Ubuntu-14.04-trusty-x86_64-2.7.6-64/cuda_ndarray',
> '-I/usr/local/cuda-8.0/include', '-I/usr/local/lib/python2.7/dist-packages/numpy/core/include', '-I/usr/include/python2.7',
> '-I/usr/local/lib/python2.7/dist-packages/theano/gof', '-I/usr/local/lib/python2.7/dist-packages/theano/sandbox/cuda',
> '-L/usr/lib', '-o', '/space/xzhang/.theano/compiledir_Linux-3.19--generic-x86_64-with-Ubuntu-14.04-trusty-x86_64-2.7.6-64/tmpjR_LxZ/3442132e74e76f61361fb1c73112eef4.so',
> 'mod.cu', '-lcudnn', '-lpython2.7', '-lcudart']
> Loading data...
> Building model and compiling functions...
> number of parameters in model: 145376
>
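
Separately, the deprecation warning at the top of that log (the old 
sandbox.cuda backend) is not what is breaking the build, but once the CUDA 
libraries are found again it is probably worth switching to the gpuarray 
backend, as the warning's wiki link suggests. A minimal sketch of how I'd 
select it, assuming libgpuarray/pygpu are installed (set the flags before 
importing theano; adjust the device index to your setup):

# Minimal sketch, assuming the new gpuarray backend (libgpuarray + pygpu)
# is installed. THEANO_FLAGS must be set before theano is imported.
import os
os.environ["THEANO_FLAGS"] = "device=cuda0,floatX=float32"

import theano
print(theano.config.device)  # should report cuda0 rather than the old gpu0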
