Commit
Add 'stream_or_out' and 'out' and 'stream' kwargs to cumath unary functions.
untom committed Mar 6, 2014
1 parent a1cc472 commit dd3ff30
Showing 2 changed files with 86 additions and 1 deletion.
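
In short: the unary functions in pycuda.cumath now accept 'out' and 'stream' as keyword arguments, while the old single positional argument keeps working behind a DeprecationWarning. A minimal usage sketch (array values are made up for illustration; cumath.exp stands in for any function built by _make_unary_array_func):

    import numpy as np
    import pycuda.autoinit  # noqa: initializes a CUDA context
    import pycuda.gpuarray as gpuarray
    import pycuda.cumath as cumath
    from pycuda.driver import Stream

    a = gpuarray.to_gpu(np.linspace(-3, 3, 16).astype(np.float32))

    out = gpuarray.empty_like(a)
    cumath.exp(a, out=out)       # result is written into 'out'

    s = Stream()
    cumath.exp(a, stream=s)      # kernel is enqueued on stream 's'
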
20 changes: 19 additions & 1 deletion pycuda/cumath.py
@@ -1,9 +1,27 @@
 import pycuda.gpuarray as gpuarray
 import pycuda.elementwise as elementwise
 import numpy as np
+import warnings
+from pycuda.driver import Stream
 
 
 def _make_unary_array_func(name):
-    def f(array, out=None, stream=None):
+    def f(array, stream_or_out=None, **kwargs):
+        out, stream = None, None
+        # Legacy single positional argument: a Stream selects 'stream',
+        # anything else is treated as 'out'.
+        if stream_or_out is not None:
+            warnings.warn("please use 'out' or 'stream' keyword arguments",
+                          DeprecationWarning)
+            if isinstance(stream_or_out, Stream):
+                stream = stream_or_out
+            else:
+                out = stream_or_out
+
+        if 'out' in kwargs:
+            out = kwargs['out']
+        if 'stream' in kwargs:
+            stream = kwargs['stream']
+
         if array.dtype == np.float32:
             func_name = name + "f"
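The dispatch above keys on the type of the legacy positional argument: a pycuda.driver.Stream is interpreted as 'stream', anything else as 'out'. A small sketch, assuming a working CUDA context, of how one might confirm that the deprecated positional path still computes the result while warning:

    import warnings

    import numpy as np
    import pycuda.autoinit  # noqa: initializes a CUDA context
    import pycuda.gpuarray as gpuarray
    import pycuda.cumath as cumath

    a = gpuarray.to_gpu(np.ones(8, dtype=np.float32))
    out = gpuarray.empty_like(a)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        cumath.exp(a, out)  # old-style single positional argument
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)
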
67 changes: 67 additions & 0 deletions test/test_cumath.py
@@ -165,6 +165,73 @@ def test_frexp(self):
             assert sig_true == significands[i]
             assert ex_true == exponents[i]
 
+    @mark_cuda_test
+    def test_unary_func_kwargs(self):
+        """Test the 'out' and 'stream' kwargs of the unary functions."""
+        from pycuda.driver import Stream
+
+        name, a, b, threshold = ("exp", -3, 3, 1e-5)
+        gpu_func = getattr(cumath, name)
+        cpu_func = getattr(np, numpy_func_names.get(name, name))
+        for s in sizes:
+            for dtype in dtypes:
+                np.random.seed(1)
+                A = (np.random.random(s)*(b-a) + a).astype(dtype)
+                if np.issubdtype(dtype, np.complexfloating):
+                    A += ((np.random.random(s)*(b-a) + a)*1j).astype(dtype)
+                args = gpuarray.to_gpu(A)
+                cpu_results = cpu_func(A)
+
+                # 'out' keyword
+                gpu_results = gpuarray.empty_like(args)
+                gpu_results = gpu_func(args, out=gpu_results).get()
+                max_err = np.max(np.abs(cpu_results - gpu_results))
+                assert max_err <= threshold, (max_err, name, dtype)
+
+                # 'out' passed positionally (deprecated)
+                gpu_results = gpuarray.empty_like(args)
+                gpu_results = gpu_func(args, gpu_results).get()
+                max_err = np.max(np.abs(cpu_results - gpu_results))
+                assert max_err <= threshold, (max_err, name, dtype)
+
+                # 'stream' keyword
+                mystream = Stream()
+                gpu_results = gpu_func(args, stream=mystream).get()
+                max_err = np.max(np.abs(cpu_results - gpu_results))
+                assert max_err <= threshold, (max_err, name, dtype)
+
+                # 'stream' passed positionally (deprecated)
+                mystream = Stream()
+                gpu_results = gpu_func(args, mystream).get()
+                max_err = np.max(np.abs(cpu_results - gpu_results))
+                assert max_err <= threshold, (max_err, name, dtype)
+
+                # 'out' and 'stream' keywords together
+                mystream = Stream()
+                gpu_results = gpuarray.empty_like(args)
+                gpu_results = gpu_func(args, stream=mystream,
+                                       out=gpu_results).get()
+                max_err = np.max(np.abs(cpu_results - gpu_results))
+                assert max_err <= threshold, (max_err, name, dtype)

if __name__ == "__main__":
# make sure that import failures get reported, instead of skipping the tests.
import pycuda.autoinit # noqa
