Re: [theano-users] Error using floatX = float16 to save memory

2016-10-14 Thread luca.wagner.0812
Hi Pascal,
I don't know how to see what happens during "raise_with_op" in that call.

This is the output using pdb inside Spyder:


Python 2.7.12 |Continuum Analytics, Inc.| (default, Jul  2 2016, 17:42:40) 
[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
Anaconda is brought to you by Continuum Analytics.
Please check out: http://continuum.io/thanks and https://anaconda.org
>>> debugfile('/home/luca/data/DeepLearningTutorials/Theano-3D-Convnet-master/convnet3d/core/run_multi_conv.py', wdir='/home/luca/data/DeepLearningTutorials/Theano-3D-Convnet-master/convnet3d/core')
> /home/luca/data/DeepLearningTutorials/Theano-3D-Convnet-master/convnet3d/core/run_multi_conv.py(1)<module>()
-> import mpr_convnet_class as conv
(Pdb) continue
Mapped name None to device cuda: Tesla K40c
Using cuDNN version 5103 on context None
> /home/luca/data/DeepLearningTutorials/Theano-3D-Convnet-master/convnet3d/core/run_multi_conv.py(16)run_experiments()
-> conv.mpr_convnet(
(Pdb) continue
> /home/luca/data/DeepLearningTutorials/Theano-3D-Convnet-master/convnet3d/core/mpr_convnet_class.py(163)__init__()
-> dataset_x = np.asarray(dataset_x, dtype=floatX)
(Pdb) next
> /home/luca/data/DeepLearningTutorials/Theano-3D-Convnet-master/convnet3d/core/mpr_convnet_class.py(164)__init__()
-> dataset_y = np.asarray(dataset_y, dtype=np.int32)
(Pdb) continue
continue
Disabling C code for Elemwise{mul,no_inplace} due to unsupported float16
Disabling C code for Elemwise{Cast{float32}} due to unsupported float16
Disabling C code for MaxAndArgmax due to unsupported float16


start time:
14/10/2016
12:08:01


Image_dim_1: 90
Image_dim_2: 90
Image_dim_3: 90


training @ iter =  0
> /home/luca/data/DeepLearningTutorials/Theano-3D-Convnet-master/convnet3d/core/mpr_convnet_class.py(331)__init__()
-> a = train_set_x[minibatch_index:minibatch_index+batch_size]
(Pdb) Traceback (most recent call last):
  File "", line 1, in 
  File "/home/luca/anaconda2/lib/python2.7/site-packages/spyder/utils/site/sitecustomize.py", line 888, in debugfile
    debugger.run("runfile(%r, args=%r, wdir=%r)" % (filename, args, wdir))
  File "/home/luca/anaconda2/lib/python2.7/bdb.py", line 400, in run
    exec cmd in globals, locals
  File "", line 1, in 
  File "/home/luca/anaconda2/lib/python2.7/site-packages/spyder/utils/site/sitecustomize.py", line 866, in runfile
    execfile(filename, namespace)
  File "/home/luca/anaconda2/lib/python2.7/site-packages/spyder/utils/site/sitecustomize.py", line 94, in execfile
    builtins.execfile(filename, *where)
  File "/home/luca/data/DeepLearningTutorials/Theano-3D-Convnet-master/convnet3d/core/run_multi_conv.py", line 42, in <module>
    run_experiments()
  File "/home/luca/data/DeepLearningTutorials/Theano-3D-Convnet-master/convnet3d/core/run_multi_conv.py", line 33, in run_experiments
    Zoom = 0.0
  File "mpr_convnet_class.py", line 333, in __init__
    training_cost_ij=train_model(a, b)
  File "/home/luca/data/Theano-master/theano/compile/function_module.py", line 879, in __call__
    storage_map=getattr(self.fn, 'storage_map', None))
  File "/home/luca/data/Theano-master/theano/gof/link.py", line 167, in raise_with_op
    "\nInputs values: %s" % scalar_values)
  File "pygpu/gpuarray.pyx", line 1941, in pygpu.gpuarray.GpuArray.__repr__ (pygpu/gpuarray.c:24742)
  File "/home/luca/anaconda2/lib/python2.7/site-packages/numpy/core/numeric.py", line 482, in asarray
    return array(a, dtype, copy=False, order=order)
  File "pygpu/gpuarray.pyx", line 1572, in pygpu.gpuarray.GpuArray.__array__ (pygpu/gpuarray.c:20224)
  File "pygpu/gpuarray.pyx", line 1320, in pygpu.gpuarray.pygpu_as_ndarray (pygpu/gpuarray.c:17346)
  File "pygpu/gpuarray.pyx", line 347, in pygpu.gpuarray.array_read (pygpu/gpuarray.c:6114)
pygpu.gpuarray.GpuArrayException: an illegal memory access was encountered



Re: [theano-users] Error using floatX = float16 to save memory

2016-10-13 Thread Pascal Lamblin
On Thu, Oct 13, 2016, luca.wagner.0...@gmail.com wrote:
> whether I use float16 or float32, theano.gpuarray.dnn.dnn_conv or
> theano.tensor.nnet.conv3d2d.conv3d, it works only for small images; if I
> increase the image size there are problems with memory.
> I did not have this issue when I was using float32 with the previous
> Theano version and the same parameters and image size.
> 
> I try:
> 
> floatX = float16
> device = cuda
> 
>  #theano.gpuarray.dnn.dnn_conv   
>  out = dnn_conv(img= input, 
> if I increase the size of the images: pygpu.gpuarray.GpuArrayException: an 
> illegal memory access was encountered

This is due to memory being accessed outside of what is allowed, not
because of too much memory being used. It may be that this happens when
trying to report another exception, though.

Can you use pdb and try to see what happens during "raise_with_op" in
that call?
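For example (untested sketch): break on that line in pdb before re-running, so execution stops before the inputs get formatted:

(Pdb) break /home/luca/data/Theano-master/theano/gof/link.py:167
(Pdb) continue

Once it stops there, "p node" and "p exc_info" (names taken from raise_with_op's signature in gof/link.py) should show the failing Apply node and the original exception before GpuArray.__repr__ gets a chance to crash.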

>   File "/home/luca/data/Theano-master/theano/gof/link.py", line 167, in 
> raise_with_op
> "\nInputs values: %s" % scalar_values)
>   File "pygpu/gpuarray.pyx", line 1941, in pygpu.gpuarray.GpuArray.__repr__ 
> (pygpu/gpuarray.c:24742)
>   File 
> "/home/luca/anaconda2/lib/python2.7/site-packages/numpy/core/numeric.py", 
> line 482, in asarray
> return array(a, dtype, copy=False, order=order)
>   File "pygpu/gpuarray.pyx", line 1572, in 
> pygpu.gpuarray.GpuArray.__array__ (pygpu/gpuarray.c:20224)
>   File "pygpu/gpuarray.pyx", line 1320, in pygpu.gpuarray.pygpu_as_ndarray 
> (pygpu/gpuarray.c:17346)
>   File "pygpu/gpuarray.pyx", line 347, in pygpu.gpuarray.array_read 
> (pygpu/gpuarray.c:6114)
> pygpu.gpuarray.GpuArrayException: an illegal memory access was encountered


> -
> If I try float 32 and theano.gpuarray.dnn.dnn_conv  I also have memory 
> problems:  pygpu.gpuarray.GpuArrayException: out of memory

This seems to happen even before the function is running, when you are
transferring the shared parameters to the GPU. This is quite strange,
because it should have failed regardless of the Theano version, back-end
or type of convolution.

> --
> If I try 
> theano.tensor.nnet.conv3d2d.conv3d, 
> floatX = float32,
> device = gpu
> 
> I also have memory problems: MemoryError: ('Error allocating 14224896000 
> bytes of device memory (out of memory).', "you might consider using 
> 'theano.shared(..., borrow=True)'")

This is a 14 GB shared variable that you are trying to transfer to the GPU.
Is that what you expected?
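For scale (illustrative arithmetic only, not from the thread):

bytes_requested = 14224896000
print(bytes_requested // 4)         # 3556224000 float32 elements
print(bytes_requested / 1024.0**3)  # ~13.25 GiB in a single shared variable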

>   File "mpr_convnet_class.py", line 242, in __init__
> b)
>   File "mlp.py", line 199, in __init__
> borrow=True,  
>   File "mlp.py", line 138, in __init__
> self.W = shared(value=W_val, borrow=borrow, name=layer_name+'_W')
>   File "/home/luca/data/Theano-master/theano/compile/sharedvalue.py", line 
> 247, in shared
> allow_downcast=allow_downcast, **kwargs)
>   File "/home/luca/data/Theano-master/theano/sandbox/cuda/var.py", line 
> 242, in float32_shared_constructor
> deviceval = type_support_filter(value, type.broadcastable, False, None)
> MemoryError: ('Error allocating 14224896000 bytes of device memory (out of 
> memory).', "you might consider using 'theano.shared(..., borrow=True)'")
> 
> 
> If I try 
> theano.tensor.nnet.conv3d2d.conv3d, 
> floatX = float16,
> device = gpu
> 
> I have TypeError

That is normal: the old back-end (device=gpu) does not support float16.

-- 
Pascal



Re: [theano-users] Error using floatX = float16 to save memory

2016-10-13 Thread Frédéric Bastien
For the memory error, the problem is that you are trying to allocate 14 GB for
a shared variable on a 12 GB GPU. This is probably not what you want to do.

Use theano.tensor.nnet.conv3d now (not conv3d2d.conv3d() or dnn_conv3d).
But we need to fix the memory problem. conv3d2d.conv3d probably causes an
upcast to float32 in the computation. That would explain the last error.
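Something along these lines (untested sketch; argument names follow the abstract conv3d interface added in PR #4862, with a 5D (batch, channels, depth, rows, cols) input, mirroring the dnn_conv call quoted below):

from theano.tensor.nnet import conv3d

out = conv3d(input, self.W,
             border_mode='valid',
             subsample=(1, 1, 1))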

On Thu, Oct 13, 2016 at 6:14 AM,  wrote:

> Hi,
> I'm doing tests on Tesla K40 and  Theano==0.9.0.dev3:
> whether I use float16 or float32, theano.gpuarray.dnn.dnn_conv or
> theano.tensor.nnet.conv3d2d.conv3d, it works only for small images; but if
> I increase the image size there are problems with memory.
> I did not have this issue when I was using float32 with the previous Theano
> version and the same parameters and image size.
>
> I try:
>
> floatX = float16
> device = cuda
>
>  #theano.gpuarray.dnn.dnn_conv
>  out = dnn_conv(img=input,
>                 kerns=self.W,
>                 border_mode='valid',
>                 subsample=(1,1,1),
>                 conv_mode='conv',
>                 direction_hint=None,
>                 workmem=None,
>                 algo=None,
>                 precision=None)
>
>
> This is the output for small 3d images, the convnet is working:
>
> Python 2.7.12 |Continuum Analytics, Inc.| (default, Jul  2 2016, 17:42:40)
> [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
> Type "help", "copyright", "credits" or "license" for more information.
> Anaconda is brought to you by Continuum Analytics.
> Please check out: http://continuum.io/thanks and https://anaconda.org
> >>> runfile('/home/luca/data/DeepLearningTutorials/Theano-
> 3D-Convnet-master/convnet3d/core/run_multi_conv.py',
> wdir='/home/luca/data/DeepLearningTutorials/Theano-
> 3D-Convnet-master/convnet3d/core')
> Mapped name None to device cuda: Tesla K40c
> Using cuDNN version 5103 on context None
> Disabling C code for Elemwise{mul,no_inplace} due to unsupported float16
> Disabling C code for Elemwise{Cast{float32}} due to unsupported float16
> Disabling C code for MaxAndArgmax due to unsupported float16
>
>
> start time:
> 13/10/2016
> 11:43:00
>
>
> Image_dim_1: 30
> Image_dim_2: 30
> Image_dim_3: 30
>
>
> training @ iter =  0
> training @ iter =  400
> training cost 0.701
> epoch 1, training batch 574/574, validation error 48.04 %
> ---
>
>
> if I increase the size of the images: pygpu.gpuarray.GpuArrayException:
> an illegal memory access was encountered
>
> Python 2.7.12 |Continuum Analytics, Inc.| (default, Jul  2 2016, 17:42:40)
> [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
> Type "help", "copyright", "credits" or "license" for more information.
> Anaconda is brought to you by Continuum Analytics.
> Please check out: http://continuum.io/thanks and https://anaconda.org
> >>> runfile('/home/luca/data/DeepLearningTutorials/Theano-
> 3D-Convnet-master/convnet3d/core/run_multi_conv.py',
> wdir='/home/luca/data/DeepLearningTutorials/Theano-
> 3D-Convnet-master/convnet3d/core')
> Mapped name None to device cuda: Tesla K40c
> Using cuDNN version 5103 on context None
> Disabling C code for Elemwise{mul,no_inplace} due to unsupported float16
> Disabling C code for Elemwise{Cast{float32}} due to unsupported float16
> Disabling C code for MaxAndArgmax due to unsupported float16
>
>
> start time:
> 13/10/2016
> 11:52:45
>
>
> Image_dim_1: 90
> Image_dim_2: 90
> Image_dim_3: 90
>
>
> training @ iter =  0
> Traceback (most recent call last):
>   File "", line 1, in 
>   File "/home/luca/anaconda2/lib/python2.7/site-packages/
> spyder/utils/site/sitecustomize.py", line 866, in runfile
> execfile(filename, namespace)
>   File "/home/luca/anaconda2/lib/python2.7/site-packages/
> spyder/utils/site/sitecustomize.py", line 94, in execfile
> builtins.execfile(filename, *where)
>   File "/home/luca/data/DeepLearningTutorials/Theano-
> 3D-Convnet-master/convnet3d/core/run_multi_conv.py", line 42, in 
> run_experiments()
>   File "/home/luca/data/DeepLearningTutorials/Theano-
> 3D-Convnet-master/convnet3d/core/run_multi_conv.py", line 33, in
> run_experiments
> Zoom = 0.0
>   File "mpr_convnet_class.py", line 322, in __init__
> training_cost_ij=train_model(a, b)
>   File "/home/luca/data/Theano-master/theano/compile/function_module.py",
> line 879, in __call__
> storage_map=getattr(self.fn, 'storage_map', None))
>   File "/home/luca/data/Theano-master/theano/gof/link.py", line 167, in
> raise_with_op
> "\nInputs values: %s" % scalar_values)
>   File "pygpu/gpuarray.pyx", line 1941, in pygpu.gpuarray.GpuArray.__repr__
> (pygpu/gpuarray.c:24742)
>   File 
> "/home/luca/anaconda2/lib/python2.7/site-packages/numpy/core/numeric.py",
> line 482, in asarray
> return array(a, dtype, copy=False, order=order)
>   File "pygpu/gpuarray.pyx", line 1572, in pygpu.gpuarray.GpuArray.__array__
> (pygpu/gpuarray.c:20224)
>   File "pygpu/gpuarray.pyx", 

Re: [theano-users] Error using floatX = float16 to save memory

2016-10-11 Thread luca.wagner.0812
Hi Fred,
I installed the Theano version that contains "Adding an AbstractConv3d
interface #4862": https://github.com/Theano/Theano/pull/4862

but now it doesn't work, because in the previous Theano version class Pool
took the parameters ds, ignore_border, st, padding, mode, openmp, while in
the latest Theano version class Pool has no ds: only ignore_border, mode,
openmp.

In maxpool3d.py I was calling op = DownsampleFactorMax((ds[1], ds[2]),
ignore_border), where DownsampleFactorMax = pool.Pool.

I tried Pool(mode=..., ...)(input, ws=ws) but it doesn't work.
How can I call Pool passing (ds[1], ds[2])?
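One possible translation (untested sketch, assuming the refactored interface where the pool_2d helper builds the Pool op and takes the window size directly at the call site):

from theano.tensor.signal.pool import pool_2d

# was: op = DownsampleFactorMax((ds[1], ds[2]), ignore_border); output = op(input_4D)
output = pool_2d(input_4D, (ds[1], ds[2]), ignore_border=ignore_border)

Depending on the exact commit, the window-size argument may still be called ds or already ws.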

Many Thanks
Luca

""" Max pooling spatio-temporal inputs for Theano """

from theano import tensor
from theano.tensor.signal.downsample import DownsampleFactorMax

#it was originally ignore_border=False and then corrected as suggested by Pascal
'''Pascal update on ignore_border'''   
def max_pool_3d(input, ds, ignore_border=True):
"""
Takes as input a N-D tensor, where N >= 3. It downscales the input video by
the specified factor, by keeping only the maximum value of non-overlapping
patches of size (ds[0],ds[1],ds[2]) (time, height, width)

:type input: N-D theano tensor of input images.
:param input: input images. Max pooling will be done over the 3 last dimensions.
:type ds: tuple of length 3
:param ds: factor by which to downscale. (2,2,2) will halve the video in each dimension.
:param ignore_border: boolean value. When True, (5,5,5) input with ds=(2,2,2) will generate a
  (2,2,2) output. (3,3,3) otherwise.
"""

if input.ndim < 3:
raise NotImplementedError('max_pool_3d requires a dimension >= 3')

# extract nr dimensions
vid_dim = input.ndim
# max pool in two different steps, so we can use the 2d implementation of 
# downsamplefactormax. First maxpool frames as usual. 
# Then maxpool the time dimension. Shift the time dimension to the third 
# position, so rows and cols are in the back

# extract dimensions
frame_shape = input.shape[-2:]

# count the number of "leading" dimensions, store as dmatrix
# tensor.prod: product of every term in x along axis
batch_size = tensor.prod(input.shape[:-2])
# Reshape x by right padding the shape with n_ones 1s. 
batch_size = tensor.shape_padright(batch_size,1)

# store as 4D tensor with shape: (batch_size,1,height,width)
#tensor.cast
# Cast any tensor x to a Tensor of the same shape, but with a different numerical type dtype.
new_shape = tensor.cast(tensor.join(0, batch_size,
tensor.as_tensor([1,]), 
frame_shape), 'int32')
input_4D = tensor.reshape(input, new_shape, ndim=4)

# downsample mini-batch of videos in rows and cols
op = DownsampleFactorMax((ds[1],ds[2]), ignore_border)
output = op(input_4D)
# restore to original shape
outshape = tensor.join(0, input.shape[:-2], output.shape[-2:])
out = tensor.reshape(output, outshape, ndim=input.ndim)

# now maxpool time

# output (time, rows, cols), reshape so that time is in the back
shufl = (list(range(vid_dim-3)) + [vid_dim-2]+[vid_dim-1]+[vid_dim-3])
input_time = out.dimshuffle(shufl)
# reset dimensions
vid_shape = input_time.shape[-2:]

# count the number of "leading" dimensions, store as dmatrix
batch_size = tensor.prod(input_time.shape[:-2])
batch_size = tensor.shape_padright(batch_size,1)

# store as 4D tensor with shape: (batch_size,1,width,time)
new_shape = tensor.cast(tensor.join(0, batch_size,
tensor.as_tensor([1,]), 
vid_shape), 'int32')
input_4D_time = tensor.reshape(input_time, new_shape, ndim=4)
# downsample mini-batch of videos in time
op = DownsampleFactorMax((1,ds[0]), ignore_border)
outtime = op(input_4D_time)
# output 
# restore to original shape (xxx, rows, cols, time)
outshape = tensor.join(0, input_time.shape[:-2], outtime.shape[-2:])
shufl = (list(range(vid_dim-3)) + [vid_dim-1]+[vid_dim-3]+[vid_dim-2])
return tensor.reshape(outtime, outshape, ndim=input.ndim).dimshuffle(shufl)
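For reference, a minimal usage sketch of the helper above (illustrative only; assumes a 5D (batch, channels, time, rows, cols) video tensor and the old downsample module this file imports):

import theano
import theano.tensor as T

vid = T.TensorType(theano.config.floatX, (False,) * 5)('vid')
pooled = max_pool_3d(vid, ds=(2, 2, 2))  # downscale time, rows and cols by 2
f = theano.function([vid], pooled)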

Re: [theano-users] Error using floatX = float16 to save memory

2016-10-10 Thread luca.wagner.0812


On Friday, October 7, 2016 at 5:38:14 PM UTC+2, nouiz wrote:
>
>
>
> On Fri, Oct 7, 2016 at 11:31 AM, Pascal Lamblin wrote:
>
>> On Fri, Oct 07, 2016, luca.wag...@gmail.com  wrote:
>> > Hi Fred,
>> > I did a test using:
>> >
>> > from theano.tensor.nnet.conv3d2d import conv3d
>>
>> That's the old conv3d2d code, that should not be needed with cuDNN, and
>> that has some pieces that do not work in float16.
>> These are not the problems we should try to solve, we should focus on
>> what happens when using dnn_conv3d instead.
>>
>
> not dnn_conv3d, but the new conv interface: theano.tensor.nnet.conv3d().
> Use that one, with floatX=float16 and device=cuda.
>

I can't find this new conv interface theano.tensor.nnet.conv3d().
Thanks
Luca
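A quick way to check whether the installed tree actually has it (plain import check):

python -c "from theano.tensor.nnet import conv3d; print(conv3d)"

If that import fails, the PR #4862 code is probably not the Theano version that is actually installed.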

>  
>
>>
>> >
>> > this PR: https://github.com/Theano/Theano/pull/4862
>> >
>> > [global]
>> > floatX = float16
>> > device=cuda
>> > [cuda]
>> > root = /usr/local/cuda-7.5
>> >
>> > [nvcc]
>> > fastmath=True
>> >
>> > optimizer = fast_compile
>> >
>> > [dnn.conv]
>> > algo_fwd =  time_once
>> > algo_bwd_filter = time_once
>> > algo_bwd_data = time_once
>> >
>> > The output is much slower than using float32:
>> >
>> > Python 2.7.12 |Anaconda custom (64-bit)| (default, Jul  2 2016, 
>> 17:42:40)
>> > [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
>> > Type "help", "copyright", "credits" or "license" for more information.
>> > Anaconda is brought to you by Continuum Analytics.
>> > Please check out: http://continuum.io/thanks and https://anaconda.org
>> > >>>
>> > 
>> runfile('/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv_t.py',
>> > 
>> wdir='/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core')
>> > Mapped name None to device cuda: GeForce 840M
>> > WARNING (theano.gof.compilelock): Overriding existing lock by dead 
>> process
>> > '3119' (I am process '3598')
>> > Using cuDNN version 5103 on context None
>> > /home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6:
>> > UserWarning: downsample module has been moved to the
>> > theano.tensor.signal.pool module.
>> >   "downsample module has been moved to the theano.tensor.signal.pool
>> > module.")
>> > Disabling C code for Elemwise{mul,no_inplace} due to unsupported float16
>> > Disabling C code for Elemwise{Cast{float32}} due to unsupported float16
>> > Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
>> > Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
>> > Disabling C code for Alloc due to unsupported float16
>> > ERROR (theano.gof.opt): SeqOptimizer apply 
>> > > object at 0x7f3944076110>
>> > ERROR (theano.gof.opt): Traceback:
>> > ERROR (theano.gof.opt): Traceback (most recent call last):
>> >   File "/home/luca/data/Theano-master/theano/gof/opt.py", line 235, in 
>> apply
>> > sub_prof = optimizer.optimize(fgraph)
>> >   File "/home/luca/data/Theano-master/theano/gof/opt.py", line 90, in
>> > optimize
>> > ret = self.apply(fgraph, *args, **kwargs)
>> >   File "/home/luca/data/Theano-master/theano/gpuarray/opt.py", line 
>> 355, in
>> > apply
>> > node.outputs)
>> >   File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line 
>> 1874,
>> > in local_gpua_pool_dnn_alternative
>> > img, ws, stride, pad = inputs
>> > ValueError: need more than 1 value to unpack
>> >
>> > ERROR (theano.gof.opt): Optimization failure due to:
>> > local_gpua_pool_dnn_grad_stride
>> > ERROR (theano.gof.opt): node: MaxPoolGrad{ds=(3, 3), ignore_border=True,
>> > st=(3, 3), padding=(0, 0), mode='max'}(sigmoid.0, Pool{ds=(3, 3),
>> > ignore_border=True, st=(3, 3), padding=(0, 0), mode='max'}.0, 
>> Reshape{4}.0)
>> > ERROR (theano.gof.opt): TRACEBACK:
>> > ERROR (theano.gof.opt): Traceback (most recent call last):
>> >   File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1820, in
>> > process_node
>> > replacements = lopt.transform(node)
>> >   File "/home/luca/data/Theano-master/theano/gpuarray/opt.py", line 
>> 203, in
>> > local_opt
>> > new_op = maker(node.op, context_name, node.inputs, node.outputs)
>> >   File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line 
>> 1888,
>> > in local_gpua_pool_dnn_grad_stride
>> > inp, out, out_grad, ws, stride, pad = inputs
>> > ValueError: need more than 3 values to unpack
>> >
>> > ERROR (theano.gof.opt): Optimization failure due to:
>> > local_gpua_pool_dnn_grad_stride
>> > ERROR (theano.gof.opt): node: MaxPoolGrad{ds=(3, 3), ignore_border=True,
>> > st=(3, 3), padding=(0, 0), mode='max'}(HostFromGpu(gpuarray).0, 
>> Pool{ds=(3,
>> > 3), ignore_border=True, st=(3, 3), padding=(0, 0), mode='max'}.0,
>> > Reshape{4}.0)
>> > ERROR (theano.gof.opt): TRACEBACK:
>> > ERROR (theano.gof.opt): Traceback (most recent call last):
>> >   File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1820, in
>> > process_node
>> > replacements = lopt.transform(node)
>> >   File 

Re: [theano-users] Error using floatX = float16 to save memory

2016-10-10 Thread luca.wagner.0812
Hi Fred,
first I created an h5py dataset, put the files into it, and then loaded the
data.

I don't use pickle but h5py to load the data:

import h5py
import numpy as np

# lists initialised here for completeness
dataset_list = []
dataset_x = []
dataset_yy = []

with h5py.File(h5py_dataset, 'r') as f:
    f.visit(dataset_list.append)
    for j in range(len(dataset_list)):
        dataset_x.append(np.array(f.get(dataset_list[j])))
        dataset_attributes = f.get(dataset_list[j]).attrs.values()
        dataset_yy.append(dataset_attributes[2])




Re: [theano-users] Error using floatX = float16 to save memory

2016-10-07 Thread Frédéric Bastien
On Fri, Oct 7, 2016 at 11:31 AM, Pascal Lamblin wrote:

> On Fri, Oct 07, 2016, luca.wagner.0...@gmail.com wrote:
> > Hi Fred,
> > I did a test using:
> >
> > from theano.tensor.nnet.conv3d2d import conv3d
>
> That's the old conv3d2d code, that should not be needed with cuDNN, and
> that has some pieces that do not work in float16.
> These are not the problems we should try to solve, we should focus on
> what happens when using dnn_conv3d instead.
>

not dnn_conv3d, but the new conv interface: theano.tensor.nnet.conv3d().
Use that one, with floatX=float16 and device=cuda.


>
> >
> > this PR: https://github.com/Theano/Theano/pull/4862
> >
> > [global]
> > floatX = float16
> > device=cuda
> > [cuda]
> > root = /usr/local/cuda-7.5
> >
> > [nvcc]
> > fastmath=True
> >
> > optimizer = fast_compile
> >
> > [dnn.conv]
> > algo_fwd =  time_once
> > algo_bwd_filter = time_once
> > algo_bwd_data = time_once
> >
> > The output is much slower than using float32:
> >
> > Python 2.7.12 |Anaconda custom (64-bit)| (default, Jul  2 2016, 17:42:40)
> > [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
> > Type "help", "copyright", "credits" or "license" for more information.
> > Anaconda is brought to you by Continuum Analytics.
> > Please check out: http://continuum.io/thanks and https://anaconda.org
> > >>>
> > runfile('/home/luca/data/DeepLearningTutorials/Theano-3D-
> ConvNet-master/convnet3d/core/run_multi_conv_t.py',
> > wdir='/home/luca/data/DeepLearningTutorials/Theano-3D-
> ConvNet-master/convnet3d/core')
> > Mapped name None to device cuda: GeForce 840M
> > WARNING (theano.gof.compilelock): Overriding existing lock by dead
> process
> > '3119' (I am process '3598')
> > Using cuDNN version 5103 on context None
> > /home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6:
> > UserWarning: downsample module has been moved to the
> > theano.tensor.signal.pool module.
> >   "downsample module has been moved to the theano.tensor.signal.pool
> > module.")
> > Disabling C code for Elemwise{mul,no_inplace} due to unsupported float16
> > Disabling C code for Elemwise{Cast{float32}} due to unsupported float16
> > Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
> > Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
> > Disabling C code for Alloc due to unsupported float16
> > ERROR (theano.gof.opt): SeqOptimizer apply  U
> > object at 0x7f3944076110>
> > ERROR (theano.gof.opt): Traceback:
> > ERROR (theano.gof.opt): Traceback (most recent call last):
> >   File "/home/luca/data/Theano-master/theano/gof/opt.py", line 235, in
> apply
> > sub_prof = optimizer.optimize(fgraph)
> >   File "/home/luca/data/Theano-master/theano/gof/opt.py", line 90, in
> > optimize
> > ret = self.apply(fgraph, *args, **kwargs)
> >   File "/home/luca/data/Theano-master/theano/gpuarray/opt.py", line
> 355, in
> > apply
> > node.outputs)
> >   File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line
> 1874,
> > in local_gpua_pool_dnn_alternative
> > img, ws, stride, pad = inputs
> > ValueError: need more than 1 value to unpack
> >
> > ERROR (theano.gof.opt): Optimization failure due to:
> > local_gpua_pool_dnn_grad_stride
> > ERROR (theano.gof.opt): node: MaxPoolGrad{ds=(3, 3), ignore_border=True,
> > st=(3, 3), padding=(0, 0), mode='max'}(sigmoid.0, Pool{ds=(3, 3),
> > ignore_border=True, st=(3, 3), padding=(0, 0), mode='max'}.0,
> Reshape{4}.0)
> > ERROR (theano.gof.opt): TRACEBACK:
> > ERROR (theano.gof.opt): Traceback (most recent call last):
> >   File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1820, in
> > process_node
> > replacements = lopt.transform(node)
> >   File "/home/luca/data/Theano-master/theano/gpuarray/opt.py", line
> 203, in
> > local_opt
> > new_op = maker(node.op, context_name, node.inputs, node.outputs)
> >   File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line
> 1888,
> > in local_gpua_pool_dnn_grad_stride
> > inp, out, out_grad, ws, stride, pad = inputs
> > ValueError: need more than 3 values to unpack
> >
> > ERROR (theano.gof.opt): Optimization failure due to:
> > local_gpua_pool_dnn_grad_stride
> > ERROR (theano.gof.opt): node: MaxPoolGrad{ds=(3, 3), ignore_border=True,
> > st=(3, 3), padding=(0, 0), mode='max'}(HostFromGpu(gpuarray).0,
> Pool{ds=(3,
> > 3), ignore_border=True, st=(3, 3), padding=(0, 0), mode='max'}.0,
> > Reshape{4}.0)
> > ERROR (theano.gof.opt): TRACEBACK:
> > ERROR (theano.gof.opt): Traceback (most recent call last):
> >   File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1820, in
> > process_node
> > replacements = lopt.transform(node)
> >   File "/home/luca/data/Theano-master/theano/gpuarray/opt.py", line
> 203, in
> > local_opt
> > new_op = maker(node.op, context_name, node.inputs, node.outputs)
> >   File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line
> 1888,
> > in local_gpua_pool_dnn_grad_stride
> > inp, out, out_grad, ws, 

Re: [theano-users] Error using floatX = float16 to save memory

2016-10-07 Thread Pascal Lamblin
On Fri, Oct 07, 2016, luca.wagner.0...@gmail.com wrote:
> Hi Fred,
> I did a test using:
> 
> from theano.tensor.nnet.conv3d2d import conv3d

That's the old conv3d2d code, that should not be needed with cuDNN, and
that has some pieces that do not work in float16.
These are not the problems we should try to solve, we should focus on
what happens when using dnn_conv3d instead.

> 
> this PR: https://github.com/Theano/Theano/pull/4862
> 
> [global]
> floatX = float16
> device=cuda
> [cuda] 
> root = /usr/local/cuda-7.5
> 
> [nvcc]
> fastmath=True
> 
> optimizer = fast_compile
> 
> [dnn.conv]
> algo_fwd =  time_once
> algo_bwd_filter = time_once
> algo_bwd_data = time_once 
> 
> The output is much slower than using float32:
> 
> Python 2.7.12 |Anaconda custom (64-bit)| (default, Jul  2 2016, 17:42:40) 
> [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
> Type "help", "copyright", "credits" or "license" for more information.
> Anaconda is brought to you by Continuum Analytics.
> Please check out: http://continuum.io/thanks and https://anaconda.org
> >>> 
> runfile('/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv_t.py',
>  
> wdir='/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core')
> Mapped name None to device cuda: GeForce 840M
> WARNING (theano.gof.compilelock): Overriding existing lock by dead process 
> '3119' (I am process '3598')
> Using cuDNN version 5103 on context None
> /home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6: 
> UserWarning: downsample module has been moved to the 
> theano.tensor.signal.pool module.
>   "downsample module has been moved to the theano.tensor.signal.pool 
> module.")
> Disabling C code for Elemwise{mul,no_inplace} due to unsupported float16
> Disabling C code for Elemwise{Cast{float32}} due to unsupported float16
> Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
> Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
> Disabling C code for Alloc due to unsupported float16
> ERROR (theano.gof.opt): SeqOptimizer apply  object at 0x7f3944076110>
> ERROR (theano.gof.opt): Traceback:
> ERROR (theano.gof.opt): Traceback (most recent call last):
>   File "/home/luca/data/Theano-master/theano/gof/opt.py", line 235, in apply
> sub_prof = optimizer.optimize(fgraph)
>   File "/home/luca/data/Theano-master/theano/gof/opt.py", line 90, in 
> optimize
> ret = self.apply(fgraph, *args, **kwargs)
>   File "/home/luca/data/Theano-master/theano/gpuarray/opt.py", line 355, in 
> apply
> node.outputs)
>   File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line 1874, 
> in local_gpua_pool_dnn_alternative
> img, ws, stride, pad = inputs
> ValueError: need more than 1 value to unpack
> 
> ERROR (theano.gof.opt): Optimization failure due to: 
> local_gpua_pool_dnn_grad_stride
> ERROR (theano.gof.opt): node: MaxPoolGrad{ds=(3, 3), ignore_border=True, 
> st=(3, 3), padding=(0, 0), mode='max'}(sigmoid.0, Pool{ds=(3, 3), 
> ignore_border=True, st=(3, 3), padding=(0, 0), mode='max'}.0, Reshape{4}.0)
> ERROR (theano.gof.opt): TRACEBACK:
> ERROR (theano.gof.opt): Traceback (most recent call last):
>   File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1820, in 
> process_node
> replacements = lopt.transform(node)
>   File "/home/luca/data/Theano-master/theano/gpuarray/opt.py", line 203, in 
> local_opt
> new_op = maker(node.op, context_name, node.inputs, node.outputs)
>   File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line 1888, 
> in local_gpua_pool_dnn_grad_stride
> inp, out, out_grad, ws, stride, pad = inputs
> ValueError: need more than 3 values to unpack
> 
> ERROR (theano.gof.opt): Optimization failure due to: 
> local_gpua_pool_dnn_grad_stride
> ERROR (theano.gof.opt): node: MaxPoolGrad{ds=(3, 3), ignore_border=True, 
> st=(3, 3), padding=(0, 0), mode='max'}(HostFromGpu(gpuarray).0, Pool{ds=(3, 
> 3), ignore_border=True, st=(3, 3), padding=(0, 0), mode='max'}.0, 
> Reshape{4}.0)
> ERROR (theano.gof.opt): TRACEBACK:
> ERROR (theano.gof.opt): Traceback (most recent call last):
>   File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1820, in 
> process_node
> replacements = lopt.transform(node)
>   File "/home/luca/data/Theano-master/theano/gpuarray/opt.py", line 203, in 
> local_opt
> new_op = maker(node.op, context_name, node.inputs, node.outputs)
>   File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line 1888, 
> in local_gpua_pool_dnn_grad_stride
> inp, out, out_grad, ws, stride, pad = inputs
> ValueError: need more than 3 values to unpack
> 
> ERROR (theano.gof.opt): Optimization failure due to: 
> local_gpua_pool_dnn_alternative
> ERROR (theano.gof.opt): node: Pool{ds=(3, 3), ignore_border=True, st=(3, 
> 3), padding=(0, 0), mode='max'}(HostFromGpu(gpuarray).0)
> ERROR (theano.gof.opt): TRACEBACK:
> ERROR (theano.gof.opt): Traceback (most recent call last):
>   File 

Re: [theano-users] Error using floatX = float16 to save memory

2016-10-07 Thread luca.wagner.0812
Hi Fred,
I did a test using:

from theano.tensor.nnet.conv3d2d import conv3d

this PR: https://github.com/Theano/Theano/pull/4862

[global]
floatX = float16
device=cuda
[cuda] 
root = /usr/local/cuda-7.5

[nvcc]
fastmath=True

optimizer = fast_compile

[dnn.conv]
algo_fwd =  time_once
algo_bwd_filter = time_once
algo_bwd_data = time_once 

The output is much slower than using float32:

Python 2.7.12 |Anaconda custom (64-bit)| (default, Jul  2 2016, 17:42:40) 
[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
Anaconda is brought to you by Continuum Analytics.
Please check out: http://continuum.io/thanks and https://anaconda.org
>>> runfile('/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv_t.py', wdir='/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core')
Mapped name None to device cuda: GeForce 840M
WARNING (theano.gof.compilelock): Overriding existing lock by dead process 
'3119' (I am process '3598')
Using cuDNN version 5103 on context None
/home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6: 
UserWarning: downsample module has been moved to the 
theano.tensor.signal.pool module.
  "downsample module has been moved to the theano.tensor.signal.pool 
module.")
Disabling C code for Elemwise{mul,no_inplace} due to unsupported float16
Disabling C code for Elemwise{Cast{float32}} due to unsupported float16
Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
Disabling C code for Alloc due to unsupported float16
ERROR (theano.gof.opt): SeqOptimizer apply 
ERROR (theano.gof.opt): Traceback:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 235, in apply
sub_prof = optimizer.optimize(fgraph)
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 90, in 
optimize
ret = self.apply(fgraph, *args, **kwargs)
  File "/home/luca/data/Theano-master/theano/gpuarray/opt.py", line 355, in 
apply
node.outputs)
  File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line 1874, 
in local_gpua_pool_dnn_alternative
img, ws, stride, pad = inputs
ValueError: need more than 1 value to unpack

ERROR (theano.gof.opt): Optimization failure due to: 
local_gpua_pool_dnn_grad_stride
ERROR (theano.gof.opt): node: MaxPoolGrad{ds=(3, 3), ignore_border=True, 
st=(3, 3), padding=(0, 0), mode='max'}(sigmoid.0, Pool{ds=(3, 3), 
ignore_border=True, st=(3, 3), padding=(0, 0), mode='max'}.0, Reshape{4}.0)
ERROR (theano.gof.opt): TRACEBACK:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1820, in 
process_node
replacements = lopt.transform(node)
  File "/home/luca/data/Theano-master/theano/gpuarray/opt.py", line 203, in 
local_opt
new_op = maker(node.op, context_name, node.inputs, node.outputs)
  File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line 1888, 
in local_gpua_pool_dnn_grad_stride
inp, out, out_grad, ws, stride, pad = inputs
ValueError: need more than 3 values to unpack

ERROR (theano.gof.opt): Optimization failure due to: 
local_gpua_pool_dnn_grad_stride
ERROR (theano.gof.opt): node: MaxPoolGrad{ds=(3, 3), ignore_border=True, 
st=(3, 3), padding=(0, 0), mode='max'}(HostFromGpu(gpuarray).0, Pool{ds=(3, 
3), ignore_border=True, st=(3, 3), padding=(0, 0), mode='max'}.0, 
Reshape{4}.0)
ERROR (theano.gof.opt): TRACEBACK:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1820, in 
process_node
replacements = lopt.transform(node)
  File "/home/luca/data/Theano-master/theano/gpuarray/opt.py", line 203, in 
local_opt
new_op = maker(node.op, context_name, node.inputs, node.outputs)
  File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line 1888, 
in local_gpua_pool_dnn_grad_stride
inp, out, out_grad, ws, stride, pad = inputs
ValueError: need more than 3 values to unpack

ERROR (theano.gof.opt): Optimization failure due to: 
local_gpua_pool_dnn_alternative
ERROR (theano.gof.opt): node: Pool{ds=(3, 3), ignore_border=True, st=(3, 
3), padding=(0, 0), mode='max'}(HostFromGpu(gpuarray).0)
ERROR (theano.gof.opt): TRACEBACK:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1820, in 
process_node
replacements = lopt.transform(node)
  File "/home/luca/data/Theano-master/theano/gpuarray/opt.py", line 203, in 
local_opt
new_op = maker(node.op, context_name, node.inputs, node.outputs)
  File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line 1874, 
in local_gpua_pool_dnn_alternative
img, ws, stride, pad = inputs
ValueError: need more than 1 value to unpack

ERROR (theano.gof.opt): Optimization failure due to: 

Re: [theano-users] Error using floatX = float16 to save memory

2016-10-07 Thread luca.wagner.0812
Hi Fred,
I did the test using:

theano.tensor.nnet.conv3d2d.conv3d

I updated the code with https://github.com/Theano/Theano/pull/4862 


.theanorc:
[global]
floatX = float16
device=cuda
[cuda] 
root = /usr/local/cuda-7.5
[nvcc]
fastmath=True

optimizer = fast_compile

[dnn.conv]
algo_fwd =  time_once
algo_bwd_filter = time_once
algo_bwd_data = time_once 

This is the output, much slower than using float32:

Python 2.7.12 |Anaconda custom (64-bit)| (default, Jul  2 2016, 17:42:40) 
[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
Anaconda is brought to you by Continuum Analytics.
Please check out: http://continuum.io/thanks and https://anaconda.org
>>> runfile('/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv_t.py', wdir='/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core')
Mapped name None to device cuda: GeForce 840M
WARNING (theano.gof.compilelock): Overriding existing lock by dead process 
'3119' (I am process '3598')
Using cuDNN version 5103 on context None
/home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6: 
UserWarning: downsample module has been moved to the 
theano.tensor.signal.pool module.
  "downsample module has been moved to the theano.tensor.signal.pool 
module.")
Disabling C code for Elemwise{mul,no_inplace} due to unsupported float16
Disabling C code for Elemwise{Cast{float32}} due to unsupported float16
Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
Disabling C code for Alloc due to unsupported float16
ERROR (theano.gof.opt): SeqOptimizer apply 
ERROR (theano.gof.opt): Traceback:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 235, in apply
sub_prof = optimizer.optimize(fgraph)
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 90, in 
optimize
ret = self.apply(fgraph, *args, **kwargs)
  File "/home/luca/data/Theano-master/theano/gpuarray/opt.py", line 355, in 
apply
node.outputs)
  File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line 1874, 
in local_gpua_pool_dnn_alternative
img, ws, stride, pad = inputs
ValueError: need more than 1 value to unpack

ERROR (theano.gof.opt): Optimization failure due to: 
local_gpua_pool_dnn_grad_stride
ERROR (theano.gof.opt): node: MaxPoolGrad{ds=(3, 3), ignore_border=True, 
st=(3, 3), padding=(0, 0), mode='max'}(sigmoid.0, Pool{ds=(3, 3), 
ignore_border=True, st=(3, 3), padding=(0, 0), mode='max'}.0, Reshape{4}.0)
ERROR (theano.gof.opt): TRACEBACK:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1820, in 
process_node
replacements = lopt.transform(node)
  File "/home/luca/data/Theano-master/theano/gpuarray/opt.py", line 203, in 
local_opt
new_op = maker(node.op, context_name, node.inputs, node.outputs)
  File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line 1888, 
in local_gpua_pool_dnn_grad_stride
inp, out, out_grad, ws, stride, pad = inputs
ValueError: need more than 3 values to unpack

ERROR (theano.gof.opt): Optimization failure due to: 
local_gpua_pool_dnn_grad_stride
ERROR (theano.gof.opt): node: MaxPoolGrad{ds=(3, 3), ignore_border=True, 
st=(3, 3), padding=(0, 0), mode='max'}(HostFromGpu(gpuarray).0, Pool{ds=(3, 
3), ignore_border=True, st=(3, 3), padding=(0, 0), mode='max'}.0, 
Reshape{4}.0)
ERROR (theano.gof.opt): TRACEBACK:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1820, in 
process_node
replacements = lopt.transform(node)
  File "/home/luca/data/Theano-master/theano/gpuarray/opt.py", line 203, in 
local_opt
new_op = maker(node.op, context_name, node.inputs, node.outputs)
  File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line 1888, 
in local_gpua_pool_dnn_grad_stride
inp, out, out_grad, ws, stride, pad = inputs
ValueError: need more than 3 values to unpack

ERROR (theano.gof.opt): Optimization failure due to: 
local_gpua_pool_dnn_alternative
ERROR (theano.gof.opt): node: Pool{ds=(3, 3), ignore_border=True, st=(3, 
3), padding=(0, 0), mode='max'}(HostFromGpu(gpuarray).0)
ERROR (theano.gof.opt): TRACEBACK:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1820, in 
process_node
replacements = lopt.transform(node)
  File "/home/luca/data/Theano-master/theano/gpuarray/opt.py", line 203, in 
local_opt
new_op = maker(node.op, context_name, node.inputs, node.outputs)
  File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line 1874, 
in local_gpua_pool_dnn_alternative
img, 

Re: [theano-users] Error using floatX = float16 to save memory

2016-10-06 Thread Frédéric Bastien
For float16, always use
device=cuda

Not device=gpu. This could be your problem. Can you test that?
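For example, a minimal .theanorc sketch (other sections from the configs quoted earlier in the thread stay as they are):

[global]
floatX = float16
device = cuda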

thanks

Fred

On Tue, Oct 4, 2016 at 10:21 AM,  wrote:

> Hi Fred,
>  I tested the convnet using
>
>  floatX= float32,
> device=gpu
> theano.tensor.nnet.conv3d2d.conv3d
> updated theano/sandbox/cuda/blas.py  downloaded from
> https://github.com/Theano/Theano/pull/5050
> 
>
> The convnet converges:
>
> Python 2.7.12 |Anaconda custom (64-bit)| (default, Jul  2 2016, 17:42:40)
> [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
> Type "help", "copyright", "credits" or "license" for more information.
> Anaconda is brought to you by Continuum Analytics.
> Please check out: http://continuum.io/thanks and https://anaconda.org
> >>> runfile('/home/luca/data/DeepLearningTutorials/Theano-
> 3D-ConvNet-master/convnet3d/core/run_multi_conv_t.py',
> wdir='/home/luca/data/DeepLearningTutorials/Theano-
> 3D-ConvNet-master/convnet3d/core')
> Using gpu device 0: GeForce 840M (CNMeM is disabled, cuDNN 5103)
> /home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6:
> UserWarning: downsample module has been moved to the
> theano.tensor.signal.pool module.
>   "downsample module has been moved to the theano.tensor.signal.pool
> module.")
>
>
> start time:
> 04/10/2016
> 16:18:13
>
>
> Images for training: 316
> Images for validation: 56
>
> training @ iter =  0
> training cost 0.69672
> epoch 1, training batch 316/316, validation error 37.500 %
> --
>
> If I make the same test using:
> floatX = float16
> device=gpu
> theano.tensor.nnet.conv3d2d.conv3d
> updated theano/sandbox/cuda/blas.py  downloaded from
> https://github.com/Theano/Theano/pull/5050
> 
>
> I have an error running the  convnet:
> Python 2.7.12 |Anaconda custom (64-bit)| (default, Jul  2 2016, 17:42:40)
> [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
> Type "help", "copyright", "credits" or "license" for more information.
> Anaconda is brought to you by Continuum Analytics.
> Please check out: http://continuum.io/thanks and https://anaconda.org
> >>> runfile('/home/luca/data/DeepLearningTutorials/Theano-
> 3D-ConvNet-master/convnet3d/core/run_multi_conv_t.py',
> wdir='/home/luca/data/DeepLearningTutorials/Theano-
> 3D-ConvNet-master/convnet3d/core')
> Using gpu device 0: GeForce 840M (CNMeM is disabled, cuDNN 5103)
> /home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6:
> UserWarning: downsample module has been moved to the
> theano.tensor.signal.pool module.
>   "downsample module has been moved to the theano.tensor.signal.pool
> module.")
> Disabling C code for Elemwise{mul,no_inplace} due to unsupported float16
> Disabling C code for Elemwise{Cast{float32}} due to unsupported float16
> Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
> Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
> Disabling C code for Alloc due to unsupported float16
> Disabling C code for Elemwise{abs_,no_inplace} due to unsupported float16
> Disabling C code for Sum{acc_dtype=float32} due to unsupported float16
> Disabling C code for mrg_uniform{TensorType(float16, matrix),inplace} due
> to unsupported float16
> Disabling C code for mrg_uniform{TensorType(float16, matrix),inplace} due
> to unsupported float16
> Disabling C code for Elemwise{Composite{(-Cast{float16}((i0 / i1)))}} due
> to unsupported float16
> Disabling C code for Elemwise{Composite{Cast{float16}(Cast{int64}(LT(i0,
> i1)))}}[(0, 0)] due to unsupported float16
> Disabling C code for Elemwise{Composite{Cast{float16}(Cast{int64}(LT(i0,
> i1)))}}[(0, 0)] due to unsupported float16
> Disabling C code for CorrMM{valid, (1, 1), (1, 1)} due to unsupported
> float16
> Disabling C code for DiagonalSubtensor{inplace} due to unsupported float16
> Disabling C code for Sum{axis=[3], acc_dtype=float32} due to unsupported
> float16
> Disabling C code for Elemwise{Add}[(0, 0)] due to unsupported float16
> Disabling C code for sigmoid due to unsupported float16
> Disabling C code for Pool{ds=(3, 3), ignore_border=True, st=(3, 3),
> padding=(0, 0), mode='max'} due to unsupported float16
> Disabling C code for Pool{ds=(1, 3), ignore_border=True, st=(1, 3),
> padding=(0, 0), mode='max'} due to unsupported float16
> Disabling C code for dot due to unsupported float16
> Disabling C code for Elemwise{Composite{scalar_sigmoid((i0 + i1))}}[(0,
> 0)] due to unsupported float16
> Disabling C code for Elemwise{mul,no_inplace} due to unsupported float16
> Disabling C code for dot due to unsupported float16
> Disabling C code for CrossentropySoftmaxArgmax1HotWithBias due to
> unsupported float16
> Disabling C code for CrossentropySoftmax1HotWithBiasDx due to unsupported
> float16
> Disabling C code 

Re: [theano-users] Error using floatX = float16 to save memory

2016-10-04 Thread luca.wagner.0812
Hi Fred,
I tested the convnet using

floatX = float32
device = gpu
theano.tensor.nnet.conv3d2d.conv3d
updated theano/sandbox/cuda/blas.py downloaded from
https://github.com/Theano/Theano/pull/5050


The convnet converges:

Python 2.7.12 |Anaconda custom (64-bit)| (default, Jul  2 2016, 17:42:40) 
[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
Anaconda is brought to you by Continuum Analytics.
Please check out: http://continuum.io/thanks and https://anaconda.org
>>> runfile('/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv_t.py', wdir='/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core')
Using gpu device 0: GeForce 840M (CNMeM is disabled, cuDNN 5103)
/home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6: 
UserWarning: downsample module has been moved to the 
theano.tensor.signal.pool module.
  "downsample module has been moved to the theano.tensor.signal.pool 
module.")


start time:
04/10/2016
16:18:13


Images for training: 316
Images for validation: 56

training @ iter =  0
training cost 0.69672
epoch 1, training batch 316/316, validation error 37.500 %
--

If I make the same test using:
floatX = float16
device = gpu
theano.tensor.nnet.conv3d2d.conv3d
updated theano/sandbox/cuda/blas.py downloaded from
https://github.com/Theano/Theano/pull/5050


I have an error running the convnet:
Python 2.7.12 |Anaconda custom (64-bit)| (default, Jul  2 2016, 17:42:40) 
[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
Anaconda is brought to you by Continuum Analytics.
Please check out: http://continuum.io/thanks and https://anaconda.org
>>> runfile('/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv_t.py', wdir='/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core')
Using gpu device 0: GeForce 840M (CNMeM is disabled, cuDNN 5103)
/home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6: 
UserWarning: downsample module has been moved to the 
theano.tensor.signal.pool module.
  "downsample module has been moved to the theano.tensor.signal.pool 
module.")
Disabling C code for Elemwise{mul,no_inplace} due to unsupported float16
Disabling C code for Elemwise{Cast{float32}} due to unsupported float16
Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
Disabling C code for Alloc due to unsupported float16
Disabling C code for Elemwise{abs_,no_inplace} due to unsupported float16
Disabling C code for Sum{acc_dtype=float32} due to unsupported float16
Disabling C code for mrg_uniform{TensorType(float16, matrix),inplace} due 
to unsupported float16
Disabling C code for mrg_uniform{TensorType(float16, matrix),inplace} due 
to unsupported float16
Disabling C code for Elemwise{Composite{(-Cast{float16}((i0 / i1)))}} due 
to unsupported float16
Disabling C code for Elemwise{Composite{Cast{float16}(Cast{int64}(LT(i0, 
i1)))}}[(0, 0)] due to unsupported float16
Disabling C code for Elemwise{Composite{Cast{float16}(Cast{int64}(LT(i0, 
i1)))}}[(0, 0)] due to unsupported float16
Disabling C code for CorrMM{valid, (1, 1), (1, 1)} due to unsupported 
float16
Disabling C code for DiagonalSubtensor{inplace} due to unsupported float16
Disabling C code for Sum{axis=[3], acc_dtype=float32} due to unsupported 
float16
Disabling C code for Elemwise{Add}[(0, 0)] due to unsupported float16
Disabling C code for sigmoid due to unsupported float16
Disabling C code for Pool{ds=(3, 3), ignore_border=True, st=(3, 3), 
padding=(0, 0), mode='max'} due to unsupported float16
Disabling C code for Pool{ds=(1, 3), ignore_border=True, st=(1, 3), 
padding=(0, 0), mode='max'} due to unsupported float16
Disabling C code for dot due to unsupported float16
Disabling C code for Elemwise{Composite{scalar_sigmoid((i0 + i1))}}[(0, 0)] 
due to unsupported float16
Disabling C code for Elemwise{mul,no_inplace} due to unsupported float16
Disabling C code for dot due to unsupported float16
Disabling C code for CrossentropySoftmaxArgmax1HotWithBias due to 
unsupported float16
Disabling C code for CrossentropySoftmax1HotWithBiasDx due to unsupported 
float16
Disabling C code for Sum{acc_dtype=float32} due to unsupported float16
Disabling C code for Sum{axis=[0], acc_dtype=float32} due to unsupported 
float16
Disabling C code for dot due to unsupported float16
Disabling C code for dot due to unsupported float16
Disabling C code for Elemwise{Composite{(i0 - (i1 * i2))}}[(0, 0)] due to 
unsupported float16
Disabling C code for 

Re: [theano-users] Error using floatX = float16 to save memory

2016-09-30 Thread Pascal Lamblin
Forwarding the response to the ML:

The default algo selected by Theano is not correct for conv3d (it only
exists for 2D); this should be fixed.
In the meantime, try:
[dnn.conv]
algo_fwd = time_once
algo_bwd_filter = time_once
algo_bwd_data = time_once

On Fri, Sep 30, 2016, luca.wagner.0...@gmail.com wrote:
> Hi Pascal,
> 
> I did the previous test using
> [global]
> floatX = float32
> device=gpu
> [cuda] 
> 
> Following your answer I did another test with
> floatX = float32
> device=cuda0
> 
> 
> but it doesn't work: ValueError: ("convolution algo %s can't be used for 3d 
> convolutions", ('small',))
> 
> Using
> floatX = float16
> device=cuda0
> I have the same error: ValueError: ("convolution algo %s can't be used for 
> 3d convolutions", ('small',))
>  
> 
> 
> This is the output:
> Python 2.7.12 |Anaconda custom (64-bit)| (default, Jul  2 2016, 17:42:40) 
> [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
> Type "help", "copyright", "credits" or "license" for more information.
> Anaconda is brought to you by Continuum Analytics.
> Please check out: http://continuum.io/thanks and https://anaconda.org
> >>> runfile('/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv_t.py', wdir='/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core')
> Mapped name None to device cuda0: GeForce 840M
> Using cuDNN version 5103 on context None
> /home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6: 
> UserWarning: downsample module has been moved to the 
> theano.tensor.signal.pool module.
>   "downsample module has been moved to the theano.tensor.signal.pool 
> module.")
> Traceback (most recent call last):
>   File "", line 1, in 
>   File "/home/luca/anaconda2/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 714, in runfile
>     execfile(filename, namespace)
>   File "/home/luca/anaconda2/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 81, in execfile
>     builtins.execfile(filename, *where)
>   File "/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv_t.py", line 32, in <module>
>     run_experiments()
>   File "/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv_t.py", line 25, in run_experiments
>     Learning_rate=0.001
>   File "mpr_convnet_class_t.py", line 169, in __init__
>     b )
>   File "cuddn_convnet3d.py", line 113, in __init__
>     precision=None)
>   File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line 958, in dnn_conv
>     return gpu_dnn_conv(algo=algo)(img, kerns, out, desc)
>   File "/home/luca/data/Theano-master/theano/gof/op.py", line 602, in __call__
>     node = self.make_node(*inputs, **kwargs)
>   File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line 513, in make_node
>     "3d convolutions", (self.algo,))
> ValueError: ("convolution algo %s can't be used for 3d convolutions", ('small',))
> 


-- 
Pascal



Re: [theano-users] Error using floatX = float16 to save memory

2016-09-30 Thread luca.wagner.0812
Hi Pascal,

I did the previous test using
[global]
floatX = float32
device=gpu
[cuda] 

Following your answer I did another test with
floatX = float32
device=cuda0


but it doesn't work: ValueError: ("convolution algo %s can't be used for 3d 
convolutions", ('small',))

Using
floatX = float16
device=cuda0
I have the same error: ValueError: ("convolution algo %s can't be used for 
3d convolutions", ('small',))
 


This is the output:
Python 2.7.12 |Anaconda custom (64-bit)| (default, Jul  2 2016, 17:42:40) 
[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
Anaconda is brought to you by Continuum Analytics.
Please check out: http://continuum.io/thanks and https://anaconda.org
>>> runfile('/home/luca/data/
DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv_t.py',
 
wdir='/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core')
Mapped name None to device cuda0: GeForce 840M
Using cuDNN version 5103 on context None
/home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6: 
UserWarning: downsample module has been moved to the 
theano.tensor.signal.pool module.
  "downsample module has been moved to the theano.tensor.signal.pool 
module.")
Traceback (most recent call last):
  File "", line 1, in 
  File 
"/home/luca/anaconda2/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py",
 
line 714, in runfile
execfile(filename, namespace)
  File 
"/home/luca/anaconda2/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py",
 
line 81, in execfile
builtins.execfile(filename, *where)
  File 
"/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv_t.py",
 
line 32, in 
run_experiments()
  File 
"/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv_t.py",
 
line 25, in run_experiments
Learning_rate=0.001 
  File "mpr_convnet_class_t.py", line 169, in __init__
b )
  File "cuddn_convnet3d.py", line 113, in __init__
precision=None)   
  File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line 958, in 
dnn_conv
return gpu_dnn_conv(algo=algo)(img, kerns, out, desc)
  File "/home/luca/data/Theano-master/theano/gof/op.py", line 602, in 
__call__
node = self.make_node(*inputs, **kwargs)
  File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line 513, in 
make_node
"3d convolutions", (self.algo,))
ValueError: ("convolution algo %s can't be used for 3d convolutions", 
('small',))



Re: [theano-users] Error using floatX = float16 to save memory

2016-09-29 Thread luca . wagner . 0812
Fred,
I used dnn_conv3d from theano.sandbox.cuda.dnn instead of 
theano.tensor.nnet.conv3d2d.conv3d.
It works with floatX=float32 and device=gpu, but it doesn't work with 
floatX=float16 and device=cuda:
TypeError: CudaNdarrayType only supports dtype float32 for now. Tried using 
dtype float16 for variable None

I also tried to pass precision='float16' to dnn_conv3d, but nothing changed.



  out = dnn_conv3d(
      img=input,
      kerns=self.W,
      border_mode='valid',
      subsample=(1, 1, 1),
      conv_mode='conv',
      direction_hint=None,
      workmem=None,
      algo=None,
      precision=None
  )

This is the output when testing the small 3D convnet:

Python 2.7.12 |Anaconda custom (64-bit)| (default, Jul  2 2016, 17:42:40)
[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
Anaconda is brought to you by Continuum Analytics.
Please check out: http://continuum.io/thanks and https://anaconda.org
>>> 
runfile('/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv_t.py',
 
wdir='/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core')
Mapped name None to device cuda: GeForce 840M
Using cuDNN version 5103 on context None
/home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6: 
UserWarning: downsample module has been moved to the 
theano.tensor.signal.pool module.
  "downsample module has been moved to the theano.tensor.signal.pool 
module.")
Traceback (most recent call last):
  File "", line 1, in 
  File 
"/home/luca/anaconda2/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py",
 
line 714, in runfile
execfile(filename, namespace)
  File 
"/home/luca/anaconda2/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py",
 
line 81, in execfile
builtins.execfile(filename, *where)
  File 
"/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv_t.py",
 
line 33, in 
run_experiments()
  File 
"/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv_t.py",
 
line 26, in run_experiments
Zoom=0.5
  File "mpr_convnet_class_t.py", line 171, in __init__
b )
  File "cuddn_convnet3d.py", line 100, in __init__
precision=None
  File "/home/luca/data/Theano-master/theano/sandbox/cuda/dnn.py", line 
1283, in dnn_conv3d
img = gpu_contiguous(img)
  File "/home/luca/data/Theano-master/theano/gof/op.py", line 602, in 
__call__
node = self.make_node(*inputs, **kwargs)
  File "/home/luca/data/Theano-master/theano/sandbox/cuda/basic_ops.py", 
line 3963, in make_node
input = as_cuda_ndarray_variable(input)
  File "/home/luca/data/Theano-master/theano/sandbox/cuda/basic_ops.py", 
line 46, in as_cuda_ndarray_variable
return gpu_from_host(tensor_x)
  File "/home/luca/data/Theano-master/theano/gof/op.py", line 602, in 
__call__
node = self.make_node(*inputs, **kwargs)
  File "/home/luca/data/Theano-master/theano/sandbox/cuda/basic_ops.py", 
line 139, in make_node
dtype=x.dtype)()])
  File "/home/luca/data/Theano-master/theano/sandbox/cuda/type.py", line 
95, in __init__
(self.__class__.__name__, dtype, name))
TypeError: CudaNdarrayType only supports dtype float32 for now. Tried using 
dtype float16 for variable None
>>>
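The TypeError above comes from the old backend: CudaNdarrayType stores float32 only, so no precision value can make dnn_conv3d accept float16 inputs. A hedged sketch of an upcast workaround (assuming input and self.W are float16 symbolic variables; not a fix confirmed in this thread):

    import theano.tensor as T
    from theano.sandbox.cuda.dnn import dnn_conv3d

    # Upcast to float32 before the old-backend conv; precision only
    # selects cuDNN's internal compute type, not the storage dtype.
    out = dnn_conv3d(
        img=T.cast(input, 'float32'),
        kerns=T.cast(self.W, 'float32'),
        border_mode='valid',
        subsample=(1, 1, 1),
        conv_mode='conv',
        precision='float32')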




Re: [theano-users] Error using floatX = float16 to save memory

2016-09-29 Thread luca . wagner . 0812
Fred,
I started to look into cuDNN:
I can't find dnn_conv3d in theano.gpuarray.dnn; what I did find is the class 
theano.sandbox.cuda.dnn.GpuDnnConv3d in 
http://deeplearning.net/software/theano/library/sandbox/cuda/dnn.html
while in http://deeplearning.net/software/theano/library/gpuarray/dnn.html
I don't see any 3d convolution op.
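A quick way to check which convolution ops a given checkout actually exposes is plain introspection (nothing here is specific to this thread):

    import theano.gpuarray.dnn as gdnn

    # Lists dnn_conv, GpuDnnConv and friends if this development
    # snapshot's new backend provides them.
    print(sorted(n for n in dir(gdnn) if 'conv' in n.lower()))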

Many thanks
Luca



Re: [theano-users] Error using floatX = float16 to save memory

2016-09-23 Thread luca . wagner . 0812
Many thanks

On Thursday, September 22, 2016 at 11:52:59 PM UTC+2, Arnaud Bergeron wrote:
>
> Actually I believe that your code is using conv3d2d in order to get it 
> working with 3d convolutions on the GPU.  This is now directly supported by 
> cudnn if you use dnn_conv() with 5d objects from the new backend.
>
> There is some work around an abstract conv3d interface which will 
> hopefully be complete soon that you might be able to use.
>
> 2016-09-19 4:33 GMT-04:00 :
>
>> Hi Fred,
>> I thank you very much for your help and I hope that  DiagonalSubtensor 
>> and  IncDiagonalSubtensor may be supported on GPU with float16 
>>
>> Many thanks
>> Luca
>>
>
>



Re: [theano-users] Error using floatX = float16 to save memory

2016-09-22 Thread Arnaud Bergeron
Actually I believe that your code is using conv3d2d in order to get it
working with 3d convolutions on the GPU.  This is now directly supported by
cudnn if you use dnn_conv() with 5d objects from the new backend.

There is some work around an abstract conv3d interface which will hopefully
be complete soon that you might be able to use.
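A minimal sketch of that suggestion (illustrative names; it assumes this development version's theano.gpuarray.dnn.dnn_conv accepts 5d inputs, as the tracebacks in this thread suggest):

    import theano
    import theano.tensor as T
    from theano.gpuarray.dnn import dnn_conv

    # 5d tensors laid out as (batch, channels, depth, height, width).
    ftensor5 = T.TensorType(theano.config.floatX, (False,) * 5)
    x = ftensor5('x')
    w = ftensor5('w')

    y = dnn_conv(x, w, border_mode='valid', subsample=(1, 1, 1))
    f = theano.function([x, w], y)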

2016-09-19 4:33 GMT-04:00 :

> Hi Fred,
> I thank you very much for your help and I hope that  DiagonalSubtensor
> and  IncDiagonalSubtensor may be supported on GPU with float16
>
> Many thanks
> Luca
>



Re: [theano-users] Error using floatX = float16 to save memory

2016-09-19 Thread luca . wagner . 0812
Hi Fred,
I thank you very much for your help, and I hope that DiagonalSubtensor and
IncDiagonalSubtensor may be supported on GPU with float16.

Many thanks
Luca



Re: [theano-users] Error using floatX = float16 to save memory

2016-08-29 Thread Pascal Lamblin
It is likely that some operations are not yet supported on GPU
with float16. From your messages, I would guess at least the following
ones:
- DiagonalSubtensor
- IncDiagonalSubtensor

I thought that random sampling was supported, but I see
"RandomFunction{binomial}", which is surprising. Are you using
shared_randomstreams.RandomStreams, or MRG_RandomStreams?
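If the code uses shared_randomstreams, switching to MRG is a small change; a minimal sketch (the float16 dtype is an assumption here; if it is rejected, sample float32 and cast):

    from theano.sandbox.rng_mrg import MRG_RandomStreams

    srng = MRG_RandomStreams(seed=1234)
    # Dropout-style mask sampled on the GPU instead of through the
    # CPU-bound RandomFunction{binomial} op seen in the log below.
    mask = srng.binomial(size=(128, 64), p=0.5, dtype='float16')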

On Mon, Aug 29, 2016, luca.wagner.0...@gmail.com wrote:
> 
> Fred, 
> I entered cnmem = 1 in .theanorc with float16, but no message is shown as 
> with float32 (CNMeM is enabled with initial size: 95.0% of memory) and the 
> speed has not improved.
> These are the outputs:
> 
> 
> USING FLOAT16
> 
> .theanorc:
> 
> [global]
> floatX = float16
> device = cuda
> 
> [lib]
> cnmem=1
> 
> [cuda] 
> root = /usr/local/cuda-7.5
> 
> 
> [nvcc]
> fastmath=True
> 
> optimizer = fast_compile
> 
> output:
> 
> Python 2.7.12 |Anaconda custom (64-bit)| (default, Jul  2 2016, 17:42:40) 
> [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
> Type "help", "copyright", "credits" or "license" for more information.
> Anaconda is brought to you by Continuum Analytics.
> Please check out: http://continuum.io/thanks and https://anaconda.org
> >>> runfile('/run/media/luca/8C9A-AEF4/core/run_multi_conv.py', 
> wdir='/run/media/luca/8C9A-AEF4/core')
> Mapped name None to device cuda: Tesla K40c
> Using cuDNN version 5005 on context None
> /home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6: 
> UserWarning: downsample module has been moved to the 
> theano.tensor.signal.pool module.
>   "downsample module has been moved to the theano.tensor.signal.pool 
> module.")
> Disabling C code for Elemwise{mul,no_inplace} due to unsupported float16
> Disabling C code for Elemwise{Cast{float32}} due to unsupported float16
> Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
> Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
> Disabling C code for Alloc due to unsupported float16
> Disabling C code for RandomFunction{binomial} due to unsupported float16
> Disabling C code for RandomFunction{binomial} due to unsupported float16
> Disabling C code for RandomFunction{binomial} due to unsupported float16
> Disabling C code for DiagonalSubtensor{inplace} due to unsupported float16
> Disabling C code for IncDiagonalSubtensor due to unsupported float16
> Disabling C code for RandomFunction{binomial} due to unsupported float16
> Disabling C code for RandomFunction{binomial} due to unsupported float16
> Disabling C code for RandomFunction{binomial} due to unsupported float16
> Disabling C code for DiagonalSubtensor{inplace} due to unsupported float16
> Disabling C code for MaxAndArgmax due to unsupported float16
> 
> 
> start time:
> 29/08/2016
> 11:30:44
> 
> 
> images for training: 574
> images for validation: 102
> epochs: 1000
> 
> 
> ... training neural network 33
> 
> 
> training @ iter =  0
> 
> 
> USING FLOAT32
> .theanorc:
> 
> [global]
> floatX = float32
> device = gpu
> 
> [lib]
> cnmem=1
> 
> [cuda] 
> root = /usr/local/cuda-7.5
> 
> 
> [nvcc]
> fastmath=True
> 
> optimizer = fast_compile
> 
> output:
> Python 2.7.12 |Anaconda custom (64-bit)| (default, Jul  2 2016, 17:42:40) 
> [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
> Type "help", "copyright", "credits" or "license" for more information.
> Anaconda is brought to you by Continuum Analytics.
> Please check out: http://continuum.io/thanks and https://anaconda.org
> >>> runfile('/run/media/luca/8C9A-AEF4/core/run_multi_conv.py', 
> wdir='/run/media/luca/8C9A-AEF4/core')
> Using gpu device 0: Tesla K40c (CNMeM is enabled with initial size: 95.0% 
> of memory, cuDNN 5005)
> /home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6: 
> UserWarning: downsample module has been moved to the 
> theano.tensor.signal.pool module.
>   "downsample module has been moved to the theano.tensor.signal.pool 
> module.")
> 
> 
> start time:
> 29/08/2016
> 11:32:43
> 
> 
> images for training: 574
> images for validation: 102
> epochs: 1000
> 
> 
> ... training neural network 33
> 
> 
> training @ iter =  0
> 
> 
> 


-- 
Pascal



Re: [theano-users] Error using floatX = float16 to save memory

2016-08-24 Thread Frédéric Bastien
thanks.

On Wed, Aug 24, 2016 at 5:16 AM,  wrote:

> Fred,
> many thanks for your help.
>
> I reinstalled and updated anaconda.
> I reinstalled  theano and gpuarray/pygpu:
> Theano==0.9.0.dev2
> pygpu==0.2.1
>
> Then I tested a small 3D convnet  using first:
> floatX = float32
> device=gpu
> and the neural network converges.
>
>
> Then I tested the convnet using:
> floatX = float16
> device=cuda
>
> and the neural network converges.
>
> Next step I reinstall theano and gpuarray on the TeslaK40 server and test
> the large convnet: I'll give  another feedback next days.
>
> Luca
>
>
>
>



Re: [theano-users] Error using floatX = float16 to save memory

2016-07-21 Thread luca . wagner . 0812
After reinstalling Theano + gpuarray + pygpu, I'm still running tests.
Using flags:
floatX = float32
device=gpu

the error is:

Python 2.7.11 |Anaconda custom (64-bit)| (default, Dec  6 2015, 18:08:32) 
[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
Anaconda is brought to you by Continuum Analytics.
Please check out: http://continuum.io/thanks and https://anaconda.org
>>> 
runfile('/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv.py',
 
wdir='/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core')
Mapped name None to device cuda: GeForce 840M
Using cuDNN version 5005 on context None
Using gpu device 0: GeForce 840M (CNMeM is disabled, cuDNN 5005)
/home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6: 
UserWarning: downsample module has been moved to the 
theano.tensor.signal.pool module.
  "downsample module has been moved to the theano.tensor.signal.pool 
module.")
ERROR (theano.gof.opt): Optimization failure due to: 
LocalOptGroup(local_abstractconv_cudnn,local_conv_dnn,local_abstractconv_gemm,local_abstractconv_gradinputs_gemm,local_abstractconv_gradweight_gemm,local_conv_gemm)
ERROR (theano.gof.opt): node: AbstractConv2d{border_mode='valid', 
subsample=(1, 1), filter_flip=True, imshp=(20, 1, 20, 20), kshp=(100, 1, 5, 
5), filter_dilation=(1, 1)}(GpuFromHost.0, GpuReshape{4}.0)
ERROR (theano.gof.opt): TRACEBACK:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1820, in 
process_node
replacements = lopt.transform(node)
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1265, in 
transform
repl = opt.transform(node)
  File "/home/luca/data/Theano-master/theano/sandbox/cuda/dnn.py", line 
3149, in local_abstractconv_cudnn
conv_mode=conv_mode)
  File "/home/luca/data/Theano-master/theano/sandbox/cuda/dnn.py", line 
1181, in dnn_conv
conv_mode=conv_mode, precision=precision)(img.shape,
  File "/home/luca/data/Theano-master/theano/sandbox/cuda/dnn.py", line 
180, in __init__
assert precision in ['float16', 'float32', 'float64']
AssertionError

Traceback (most recent call last):
  File "", line 1, in 
  File 
"/home/luca/anaconda2/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py",
 
line 714, in runfile
execfile(filename, namespace)
  File 
"/home/luca/anaconda2/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py",
 
line 81, in execfile
builtins.execfile(filename, *where)
  File 
"/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv.py",
 
line 124, in 
run_experiments()
  File 
"/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv.py",
 
line 83, in run_experiments
Pretrained = False
  File "mpr_convnet_class.py", line 291, in __init__
train_model = theano.function([x,y],cost, 
updates=updates) 
  File "/home/luca/data/Theano-master/theano/compile/function.py", line 
322, in function
output_keys=output_keys)
  File "/home/luca/data/Theano-master/theano/compile/pfunc.py", line 480, 
in pfunc
output_keys=output_keys)
  File "/home/luca/data/Theano-master/theano/compile/function_module.py", 
line 1783, in orig_function
output_keys=output_keys).create(
  File "/home/luca/data/Theano-master/theano/compile/function_module.py", 
line 1463, in __init__
optimizer_profile = optimizer(fgraph)
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 102, in 
__call__
return self.optimize(fgraph)
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 90, in 
optimize
ret = self.apply(fgraph, *args, **kwargs)
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 235, in apply
sub_prof = optimizer.optimize(fgraph)
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 90, in 
optimize
ret = self.apply(fgraph, *args, **kwargs)
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 235, in apply
sub_prof = optimizer.optimize(fgraph)
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 90, in 
optimize
ret = self.apply(fgraph, *args, **kwargs)
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 2257, in 
apply
lopt_change = self.process_node(fgraph, node, lopt)
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1825, in 
process_node
lopt, node)
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1719, in 
warn_inplace
return NavigatorOptimizer.warn(exc, nav, repl_pairs, local_opt, node)
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1705, in warn
raise exc
AssertionError
>>> 


Using flags:
floatX = float16
device=cuda

the convnet starts without errors:

luca@cuda:~/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core$ 
python
Python 2.7.11 
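The AssertionError with device=gpu happens because dnn_conv is reached with precision=None in this snapshot. A hedged workaround sketch (the dnn.conv.precision flag name is taken from later Theano releases and may not be consulted by this snapshot; the resolution that actually worked in this thread was reinstalling the development version):

    import theano

    # Pin cuDNN's compute precision so the old-backend optimizer does
    # not hand an unresolved None down to GpuDnnConvDesc.
    theano.config.dnn.conv.precision = 'float32'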

Re: [theano-users] Error using floatX = float16 to save memory

2016-07-21 Thread luca . wagner . 0812
Frederic,
I had to reinstall the updated version of Theano, plus gpuarray and pygpu.
If the flags are:
floatX = float16
device=cuda

the convnet starts without errors:

Python 2.7.11 |Anaconda custom (64-bit)| (default, Dec  6 2015, 18:08:32) 
[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
Anaconda is brought to you by Continuum Analytics.
Please check out: http://continuum.io/thanks and https://anaconda.org
>>> 
runfile('/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv.py',
 
wdir='/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core')
Mapped name None to device cuda: GeForce 840M
Using cuDNN version 5005 on context None
/home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6: 
UserWarning: downsample module has been moved to the 
theano.tensor.signal.pool module.
  "downsample module has been moved to the theano.tensor.signal.pool 
module.")
Disabling C code for Elemwise{mul,no_inplace} due to unsupported float16
Disabling C code for Elemwise{Cast{float32}} due to unsupported float16
Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
Disabling C code for Alloc due to unsupported float16
Disabling C code for DiagonalSubtensor{inplace} due to unsupported float16
Disabling C code for IncDiagonalSubtensor due to unsupported float16
Disabling C code for DiagonalSubtensor{inplace} due to unsupported float16
Disabling C code for MaxAndArgmax due to unsupported float16


start time:
21/07/2016
14:32:07


images for training: 594
images for validation: 82
epochs: 200


... training neural network 13


training @ iter =  0
training @ iter =  200
training @ iter =  400


training cost 0.69336
epoch 1, training batch 594/594,validation error 45.122 %
training @ iter =  600
..


but if I instead use
floatX = float32
device=gpu

the error is:

Python 2.7.11 |Anaconda custom (64-bit)| (default, Dec  6 2015, 18:08:32) 
[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
Anaconda is brought to you by Continuum Analytics.
Please check out: http://continuum.io/thanks and https://anaconda.org
>>> 
runfile('/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv.py',
 
wdir='/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core')
Mapped name None to device cuda: GeForce 840M
Using cuDNN version 5005 on context None
Using gpu device 0: GeForce 840M (CNMeM is disabled, cuDNN 5005)
/home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6: 
UserWarning: downsample module has been moved to the 
theano.tensor.signal.pool module.
  "downsample module has been moved to the theano.tensor.signal.pool 
module.")
ERROR (theano.gof.opt): Optimization failure due to: 
LocalOptGroup(local_abstractconv_cudnn,local_conv_dnn,local_abstractconv_gemm,local_abstractconv_gradinputs_gemm,local_abstractconv_gradweight_gemm,local_conv_gemm)
ERROR (theano.gof.opt): node: AbstractConv2d{border_mode='valid', 
subsample=(1, 1), filter_flip=True, imshp=(20, 1, 20, 20), kshp=(100, 1, 5, 
5), filter_dilation=(1, 1)}(GpuFromHost.0, GpuReshape{4}.0)
ERROR (theano.gof.opt): TRACEBACK:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1820, in 
process_node
replacements = lopt.transform(node)
  File "/home/luca/data/Theano-master/theano/gof/opt.py", line 1265, in 
transform
repl = opt.transform(node)
  File "/home/luca/data/Theano-master/theano/sandbox/cuda/dnn.py", line 
3149, in local_abstractconv_cudnn
conv_mode=conv_mode)
  File "/home/luca/data/Theano-master/theano/sandbox/cuda/dnn.py", line 
1181, in dnn_conv
conv_mode=conv_mode, precision=precision)(img.shape,
  File "/home/luca/data/Theano-master/theano/sandbox/cuda/dnn.py", line 
180, in __init__
assert precision in ['float16', 'float32', 'float64']
AssertionError

Traceback (most recent call last):
  File "", line 1, in 
  File 
"/home/luca/anaconda2/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py",
 
line 714, in runfile
execfile(filename, namespace)
  File 
"/home/luca/anaconda2/lib/python2.7/site-packages/spyderlib/widgets/externalshell/sitecustomize.py",
 
line 81, in execfile
builtins.execfile(filename, *where)
  File 
"/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv.py",
 
line 124, in 
run_experiments()
  File 
"/home/luca/data/DeepLearningTutorials/Theano-3D-ConvNet-master/convnet3d/core/run_multi_conv.py",
 
line 83, in run_experiments
Pretrained = False
  File "mpr_convnet_class.py", line 291, in __init__
train_model = theano.function([x,y],cost, 
updates=updates) 
  File 

Re: [theano-users] Error using floatX = float16 to save memory

2016-07-21 Thread luca . wagner . 0812
Frederic,
In ops.py I can't find shape_i_op.
thanks
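That ImportError usually means a stale Theano is being picked up, so that gpuarray/dnn.py is newer than the theano.compile package actually on sys.path. A quick check (plain Python, nothing thread-specific):

    import theano

    # Should point at the updated checkout, e.g. /home/luca/data/Theano-master
    print(theano.__version__)
    print(theano.__file__)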

On Thursday, July 21, 2016 at 11:50:51 AM UTC+2, luca.wag...@gmail.com 
wrote:
>
> Frederic,
> this is the feedback after the upgrades for float16.
>
> Python 2.7.11 |Anaconda custom (64-bit)| (default, Dec  6 2015, 18:08:32) 
> [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
> Type "help", "copyright", "credits" or "license" for more information.
> Anaconda is brought to you by Continuum Analytics.
> Please check out: http://continuum.io/thanks and https://anaconda.org
> >>> import run_multi_conv
> Traceback (most recent call last):
>   File "", line 1, in 
>   File "run_multi_conv.py", line 1, in 
> import mpr_convnet_class as conv
>   File "mpr_convnet_class.py", line 2, in 
> from convnet3d import ConvLayer, PoolLayer
>   File "convnet3d.py", line 3, in 
> from theano.tensor.nnet.conv3d2d import conv3d
>   File "/home/luca/data/Theano-master/theano/__init__.py", line 125, in 
> 
> import theano.gpuarray
>   File "/home/luca/data/Theano-master/theano/gpuarray/__init__.py", line 
> 31, in 
> from . import fft, dnn, opt, nerv, extra_ops
>   File "/home/luca/data/Theano-master/theano/gpuarray/dnn.py", line 17, in 
> 
> from theano.compile.ops import shape_i, shape_i_op
> ImportError: cannot import name shape_i_op
> >>> 
>
>
>
>
>
> On Thursday, July 21, 2016 at 11:15:06 AM UTC+2, luca.wag...@gmail.com 
> wrote:
>>
>> Frederic,
>> I'll do it and give you a feedback,
>> many thanks
>> Luca
>>
>> On Tuesday, July 19, 2016 at 10:09:21 PM UTC+2, nouiz wrote:
>>>
>>> We have a PR that upgrades some stuff about float16:
>>>
>>> https://github.com/Theano/Theano/pull/4764/files
>>>
>>> It probably fixes your problem. Can you try it to confirm that you don't 
>>> have a different problem?
>>>
>>> thanks
>>>
>>> Frédéric
>>>
>>> On Fri, Jul 15, 2016 at 4:55 AM,  wrote:
>>>
 ok I try.
 thanks

 On Thursday, July 14, 2016 at 11:44:41 PM UTC+2, Arnaud Bergeron wrote:
>
> I can't reproduce your problem using a simple convolution in float16.
>
> Either this is because your code is doing something unexpected or 
> because the problem has been fixed in the development version.
>
> In any case the development version is a much better option for the 
> new backend and float16 so I encourage you to upgrade and try again: 
> http://deeplearning.net/software/theano/install.html#bleeding-edge-install-instructions
> .
>
> 2016-07-14 4:22 GMT-04:00 :
>
>> Here is .theanorc:
>>
>> [global]
>> floatX = float16
>> device=cuda
>> [cuda] 
>> root = /usr/local/cuda-7.5
>>
>>
>> [nvcc]
>> fastmath=True
>>
>> optimizer = fast_compile
>>
>> On Thursday, July 14, 2016 at 10:19:56 AM UTC+2, 
>> luca.wag...@gmail.com wrote:
>>>
>>> Hi Arnaud,
>>> I put _f16_ok = True in dnn.py ( attached).
>>>
>>> This is the error I received:
>>>
>>> Python 2.7.11 |Anaconda custom (64-bit)| (default, Dec  6 2015, 
>>> 18:08:32) 
>>> [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
>>> Type "help", "copyright", "credits" or "license" for more 
>>> information.
>>> Anaconda is brought to you by Continuum Analytics.
>>> Please check out: http://continuum.io/thanks and 
>>> https://anaconda.org
>>> >>> import run_multi_conv
>>>
>>> Mapped name None to device cuda: GeForce 840M
>>> WARNING (theano.gof.compilelock): Overriding existing lock by dead 
>>> process '3202' (I am process '3351')
>>> Using cuDNN version 5005 on context None
>>> /home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6: 
>>> UserWarning: downsample module has been moved to the 
>>> theano.tensor.signal.pool module.
>>>   "downsample module has been moved to the theano.tensor.signal.pool 
>>> module.")
>>> >>> 
>>> >>> run_multi_conv.run_experiments()
>>> Disabling C code for Elemwise{mul,no_inplace} due to unsupported 
>>> float16
>>> Disabling C code for Elemwise{Cast{float32}} due to unsupported 
>>> float16
>>> Disabling C code for Elemwise{Cast{float16}} due to unsupported 
>>> float16
>>> Disabling C code for Elemwise{Cast{float16}} due to unsupported 
>>> float16
>>> Disabling C code for Alloc due to unsupported float16
>>> Disabling C code for Cast{float16} due to unsupported float16
>>> Disabling C code for Cast{float16} due to unsupported float16
>>> Disabling C code for Cast{float16} due to unsupported float16
>>> Disabling C code for Cast{float16} due to unsupported float16
>>> Disabling C code for RandomFunction{binomial} due to unsupported 
>>> float16
>>> Disabling C code for RandomFunction{binomial} due to unsupported 
>>> float16
>>> ===
>>> 1#include 
>>> 2#include 
>>> 3

Re: [theano-users] Error using floatX = float16 to save memory

2016-07-21 Thread luca . wagner . 0812
Frederic,
I'll do it and give you feedback,
many thanks
Luca

On Tuesday, July 19, 2016 at 10:09:21 PM UTC+2, nouiz wrote:
>
> We have a PR that upgrades some stuff about float16:
>
> https://github.com/Theano/Theano/pull/4764/files
>
> It probably fixes your problem. Can you try it to confirm that you don't 
> have a different problem?
>
> thanks
>
> Frédéric
>
> On Fri, Jul 15, 2016 at 4:55 AM,  
> wrote:
>
>> ok I try.
>> thanks
>>
>> On Thursday, July 14, 2016 at 11:44:41 PM UTC+2, Arnaud Bergeron wrote:
>>>
>>> I can't reproduce your problem using a simple convolution in float16.
>>>
>>> Either this is because your code is doing something unexpected or 
>>> because the problem has been fixed in the development version.
>>>
>>> In any case the development version is a much better option for the new 
>>> backend and float16 so I encourage you to upgrade and try again: 
>>> http://deeplearning.net/software/theano/install.html#bleeding-edge-install-instructions
>>> .
>>>
>>> 2016-07-14 4:22 GMT-04:00 :
>>>
 Here is .theanorc:

 [global]
 floatX = float16
 device=cuda
 [cuda] 
 root = /usr/local/cuda-7.5


 [nvcc]
 fastmath=True

 optimizer = fast_compile

 On Thursday, July 14, 2016 at 10:19:56 AM UTC+2, luca.wag...@gmail.com 
 wrote:
>
> Hi Arnaud,
> I put _f16_ok = True in dnn.py ( attached).
>
> This is the error I received:
>
> Python 2.7.11 |Anaconda custom (64-bit)| (default, Dec  6 2015, 
> 18:08:32) 
> [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
> Type "help", "copyright", "credits" or "license" for more information.
> Anaconda is brought to you by Continuum Analytics.
> Please check out: http://continuum.io/thanks and https://anaconda.org
> >>> import run_multi_conv
>
> Mapped name None to device cuda: GeForce 840M
> WARNING (theano.gof.compilelock): Overriding existing lock by dead 
> process '3202' (I am process '3351')
> Using cuDNN version 5005 on context None
> /home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6: 
> UserWarning: downsample module has been moved to the 
> theano.tensor.signal.pool module.
>   "downsample module has been moved to the theano.tensor.signal.pool 
> module.")
> >>> 
> >>> run_multi_conv.run_experiments()
> Disabling C code for Elemwise{mul,no_inplace} due to unsupported 
> float16
> Disabling C code for Elemwise{Cast{float32}} due to unsupported float16
> Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
> Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
> Disabling C code for Alloc due to unsupported float16
> Disabling C code for Cast{float16} due to unsupported float16
> Disabling C code for Cast{float16} due to unsupported float16
> Disabling C code for Cast{float16} due to unsupported float16
> Disabling C code for Cast{float16} due to unsupported float16
> Disabling C code for RandomFunction{binomial} due to unsupported 
> float16
> Disabling C code for RandomFunction{binomial} due to unsupported 
> float16
> ===
> 1#include 
> 2#include 
> 3#include "theano_mod_helper.h"
> 4#include 
> 5#include 
> 6#include 
> 7#include 
> 8#include 
> 9#include 
> 00010#include 
> 00011#include 
> 00012#include 
> 00013#include "cudnn.h"
> 00014#include "cudnn_helper.h"
> 00015#include "gpuarray_helper.h"
> 00016#include "gpuarray/types.h"
> 00017#include "gpuarray/array.h"
> 00018#include "gpuarray/util.h"
> 00019#include "gpuarray/ext_cuda.h"
> 00020#include "gpuarray_api.h"
> 00021#include "numpy_compat.h"
> 00022//
> 00023  Support Code
> 00024//
> 00025
> 00026
> 00027
> 00028static int
> 00029c_set_tensorNd(PyGpuArrayObject *var, cudnnTensorDescriptor_t 
> desc) {
> 00030  cudnnDataType_t dt;
> 00031  size_t ds;
> 00032  switch (var->ga.typecode) {
> 00033  case GA_FLOAT:
> 00034dt = CUDNN_DATA_FLOAT;
> 00035break;
> 00036  case GA_DOUBLE:
> 00037dt = CUDNN_DATA_DOUBLE;
> 00038break;
> 00039#if CUDNN_VERSION > 3000
> 00040  case GA_HALF:
> 00041dt = CUDNN_DATA_HALF;
> 00042break;
> 00043#endif
> 00044  default:
> 00045PyErr_SetString(PyExc_TypeError, "Non-float datatype in 
> c_set_tensorNd");
> 00046return -1;
> 00047  }
> 00048  ds = gpuarray_get_elsize(var->ga.typecode);
> 00049
> 00050  int 

Re: [theano-users] Error using floatX = float16 to save memory

2016-07-19 Thread Frédéric Bastien
We have a PR that upgrades some stuff about float16:

https://github.com/Theano/Theano/pull/4764/files

It probably fixes your problem. Can you try it to confirm that you don't have
a different problem?

thanks

Frédéric

On Fri, Jul 15, 2016 at 4:55 AM,  wrote:

> ok I try.
> thanks
>
> On Thursday, July 14, 2016 at 11:44:41 PM UTC+2, Arnaud Bergeron wrote:
>>
>> I can't reproduce your problem using a simple convolution in float16.
>>
>> Either this is because your code is doing something unexpected or because
>> the problem has been fixed in the development version.
>>
>> In any case the development version is a much better option for the new
>> backend and float16 so I encourage you to upgrade and try again:
>> http://deeplearning.net/software/theano/install.html#bleeding-edge-install-instructions
>> .
>>
>> 2016-07-14 4:22 GMT-04:00 :
>>
>>> Here is .theanorc:
>>>
>>> [global]
>>> floatX = float16
>>> device=cuda
>>> [cuda]
>>> root = /usr/local/cuda-7.5
>>>
>>>
>>> [nvcc]
>>> fastmath=True
>>>
>>> optimizer = fast_compile
>>>
>>> On Thursday, July 14, 2016 at 10:19:56 AM UTC+2, luca.wag...@gmail.com
>>> wrote:

 Hi Arnaud,
 I put _f16_ok = True in dnn.py ( attached).

 This is the error I received:

 Python 2.7.11 |Anaconda custom (64-bit)| (default, Dec  6 2015,
 18:08:32)
 [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux2
 Type "help", "copyright", "credits" or "license" for more information.
 Anaconda is brought to you by Continuum Analytics.
 Please check out: http://continuum.io/thanks and https://anaconda.org
 >>> import run_multi_conv

 Mapped name None to device cuda: GeForce 840M
 WARNING (theano.gof.compilelock): Overriding existing lock by dead
 process '3202' (I am process '3351')
 Using cuDNN version 5005 on context None
 /home/luca/data/Theano-master/theano/tensor/signal/downsample.py:6:
 UserWarning: downsample module has been moved to the
 theano.tensor.signal.pool module.
   "downsample module has been moved to the theano.tensor.signal.pool
 module.")
 >>>
 >>> run_multi_conv.run_experiments()
 Disabling C code for Elemwise{mul,no_inplace} due to unsupported float16
 Disabling C code for Elemwise{Cast{float32}} due to unsupported float16
 Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
 Disabling C code for Elemwise{Cast{float16}} due to unsupported float16
 Disabling C code for Alloc due to unsupported float16
 Disabling C code for Cast{float16} due to unsupported float16
 Disabling C code for Cast{float16} due to unsupported float16
 Disabling C code for Cast{float16} due to unsupported float16
 Disabling C code for Cast{float16} due to unsupported float16
 Disabling C code for RandomFunction{binomial} due to unsupported float16
 Disabling C code for RandomFunction{binomial} due to unsupported float16
 ===
 1#include 
 2#include 
 3#include "theano_mod_helper.h"
 4#include 
 5#include 
 6#include 
 7#include 
 8#include 
 9#include 
 00010#include 
 00011#include 
 00012#include 
 00013#include "cudnn.h"
 00014#include "cudnn_helper.h"
 00015#include "gpuarray_helper.h"
 00016#include "gpuarray/types.h"
 00017#include "gpuarray/array.h"
 00018#include "gpuarray/util.h"
 00019#include "gpuarray/ext_cuda.h"
 00020#include "gpuarray_api.h"
 00021#include "numpy_compat.h"
 00022//
 00023  Support Code
 00024//
 00025
 00026
 00027
 00028static int
 00029c_set_tensorNd(PyGpuArrayObject *var, cudnnTensorDescriptor_t
 desc) {
 00030  cudnnDataType_t dt;
 00031  size_t ds;
 00032  switch (var->ga.typecode) {
 00033  case GA_FLOAT:
 00034dt = CUDNN_DATA_FLOAT;
 00035break;
 00036  case GA_DOUBLE:
 00037dt = CUDNN_DATA_DOUBLE;
 00038break;
 00039#if CUDNN_VERSION > 3000
 00040  case GA_HALF:
 00041dt = CUDNN_DATA_HALF;
 00042break;
 00043#endif
 00044  default:
 00045PyErr_SetString(PyExc_TypeError, "Non-float datatype in
 c_set_tensorNd");
 00046return -1;
 00047  }
 00048  ds = gpuarray_get_elsize(var->ga.typecode);
 00049
 00050  int strs[5], dims[5], default_stride = 1;
 00051  unsigned int nd = PyGpuArray_NDIM(var);
 00052
 00053  if (nd > 5) {
 00054PyErr_SetString(PyExc_TypeError, "Tensor of more than 5d");
 00055return -1;
 00056  }
 00057
 00058  for (unsigned int _i = nd; _i > 0;