I've fixed the problem with Python 3 str vs. bytearray. You can find my changes at git://github.com/rybaktomasz/pycuda.git in the python3 branch.
There is a new problem under Python 3; see the log below.
I do not yet know how to fix it.
Best regards.
_______________________ TestGPUArray.test_divide_scalar ________________________
args = (<test_gpuarray.TestGPUArray object at 0x2f8ad10>,), kwargs = {}
pycuda = <module 'pycuda' from '/usr/lib/python3/dist-packages/pycuda/__init__.py'>
ctx = <pycuda._driver.Context object at 0x2d45de8>
clear_context_caches = <function clear_context_caches at 0x2715c88>
collect = <built-in function collect>

    def f(*args, **kwargs):
        import pycuda.driver
        # appears to be idempotent, i.e. no harm in calling it more than once
        pycuda.driver.init()
        ctx = make_default_context()
        try:
            assert isinstance(ctx.get_device().name(), str)
            assert isinstance(ctx.get_device().compute_capability(), tuple)
            assert isinstance(ctx.get_device().get_attributes(), dict)
>           inner_f(*args, **kwargs)

/usr/lib/python3/dist-packages/pycuda/tools.py:432:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <test_gpuarray.TestGPUArray object at 0x2f8ad10>

    @mark_cuda_test
    def test_divide_scalar(self):
        """Test the division of an array and a scalar."""
        a = np.array([1,2,3,4,5,6,7,8,9,10]).astype(np.float32)
        a_gpu = gpuarray.to_gpu(a)
>       result = (a_gpu/2).get()

test_gpuarray.py:208:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.], dtype=float32)
other = 2

    def __div__(self, other):
        """Divides an array by an array or a scalar::

           x = self / n
        """
        if isinstance(other, GPUArray):
            result = self._new_like_me(_get_common_dtype(self, other))
            return self._div(other, result)
        else:
            if other == 1:
                return self
            else:
                # create a new array for the result
                result = self._new_like_me(_get_common_dtype(self, other))
>               return self._axpbz(1/other, 0, result)

/usr/lib/python3/dist-packages/pycuda/gpuarray.py:474:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.], dtype=float32)
selffac = 0.5, other = 0
out = array([  2.00000000e+00,   1.00000000e+01,  -4.98687756e+36,
         1.32095984e+05,   3.94565056e-34,   1.65436123e-24,
        -4.36901452e+36,   1.31072000e+05,  -2.67645790e+36,
         7.10542736e-15], dtype=float32)
stream = None

    def _axpbz(self, selffac, other, out, stream=None):
        """Compute ``out = selffac * self + other``, where `other` is a scalar."""
        if not self.flags.forc:
            raise RuntimeError("only contiguous arrays may "
                    "be used as arguments to this operation")

        func = elementwise.get_axpbz_kernel(self.dtype, out.dtype)
        func.prepared_async_call(self._grid, self._block, stream,
                selffac, self.gpudata,
>               other, out.gpudata, self.mem_size)

/usr/lib/python3/dist-packages/pycuda/gpuarray.py:319:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

func = <pycuda._driver.Function object at 0x2d65398>, grid = (1, 1)
block = (32, 1, 1), stream = None
args = (0.5, <pycuda._driver.DeviceAllocation object at 0x2d50520>, 0,
        <pycuda._driver.DeviceAllocation object at 0x2d50600>, 10)
kwargs = {}, shared_size = 0, pack = <built-in function pack>

    def function_prepared_async_call(func, grid, block, stream, *args, **kwargs):
        if isinstance(block, tuple):
            func._set_block_shape(*block)
        else:
            from warnings import warn
            warn("Not passing the block size to prepared_async_call is "
                    "deprecated as of version 2011.1.",
                    DeprecationWarning, stacklevel=2)
            args = (stream,) + args
            stream = block

        shared_size = kwargs.pop("shared_size", 0)

        if kwargs:
            raise TypeError("unknown keyword arguments: "
                    + ", ".join(iter(kwargs.keys())))

        from pycuda._pvt_struct import pack
>       arg_buf = pack(func.arg_format, *args)
E       struct.error: required argument is not an integer

/usr/lib/python3/dist-packages/pycuda/driver.py:475: error
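
My guess, which I have not verified against the PyCUDA sources: pack() is choking on the DeviceAllocation arguments. func.arg_format presumably contains a pointer format code for them, and while struct under Python 2 coerces arbitrary objects through __int__, under Python 3 it accepts only real integers (objects implementing __index__). Here is a minimal sketch of the symptom, using the standard struct module instead of pycuda._pvt_struct, with a hypothetical Handle class standing in for DeviceAllocation:

import struct

class Handle:
    # stands in for pycuda._driver.DeviceAllocation: it exposes its
    # device address via __int__ but does not implement __index__
    def __int__(self):
        return 0xdeadbeef

h = Handle()
struct.pack("P", int(h))  # fine: an actual integer
struct.pack("P", h)       # Python 3: struct.error: required argument is not an integer

If that is the cause, converting such arguments with int() before packing makes the sketch pass, but I do not know whether that is the right fix inside PyCUDA.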
--
Tomasz Rybak GPG/PGP key ID: 2AD5 9860
Fingerprint A481 824E 7DD3 9C0E C40A 488E C654 FB33 2AD5 9860
http://member.acm.org/~tomaszrybak
