Skip to content

Commit

Permalink
Merge remote-tracking branch 'upstream/master'
Browse files Browse the repository at this point in the history
  • Loading branch information
davidweichiang committed Jul 11, 2015
2 parents a6551ff + 8a764bf commit 60648a2
Show file tree
Hide file tree
Showing 3 changed files with 36 additions and 12 deletions.
8 changes: 6 additions & 2 deletions doc/source/array.rst
Expand Up @@ -120,11 +120,15 @@ The :class:`GPUArray` Array Class
.. method :: get(ary=None, pagelocked=False)
Transfer the contents of *self* into *ary* or a newly allocated
:mod:`numpy.ndarray`. If *ary* is given, it must have the right
size (not necessarily shape) and dtype. If it is not given,
:mod:`numpy.ndarray`. If *ary* is given, it must have the same
shape and dtype. If it is not given,
*pagelocked* specifies whether the new array is allocated
page-locked.
.. versionchanged:: 2015.2
Passing an *ary* with a shape different from *self* is deprecated.
.. method :: get_async(stream=None, ary=None)
Transfer the contents of *self* into *ary* or a newly allocated
Expand Down
36 changes: 28 additions & 8 deletions pycuda/gpuarray.py
@@ -1,5 +1,5 @@
from __future__ import division
from __future__ import absolute_import
from __future__ import division, absolute_import

import numpy as np
import pycuda.elementwise as elementwise
from pytools import memoize, memoize_method
Expand All @@ -12,9 +12,7 @@
get_common_dtype as _get_common_dtype_base)
from pycuda.characterize import has_double_support
import six
from six.moves import range
from six.moves import zip
from functools import reduce
from six.moves import range, zip, reduce


def _get_common_dtype(obj1, obj2):
Expand Down Expand Up @@ -267,6 +265,12 @@ def get(self, ary=None, pagelocked=False):
assert ary.dtype == self.dtype
assert ary.flags.forc

if self.shape != ary.shape:
from warnings import warn
warn("get() between arrays of different shape is deprecated "
"and will be removed in PyCUDA 2017.x",
DeprecationWarning, stacklevel=2)

assert self.flags.forc, "Array in get() must be contiguous"

if self.size:
Expand Down Expand Up @@ -298,6 +302,12 @@ def get_async(self, stream=None, ary=None):
assert ary.dtype == self.dtype
assert ary.flags.forc

if self.shape != ary.shape:
from warnings import warn
warn("get() between arrays of different shape is deprecated "
"and will be removed in PyCUDA 2017.x",
DeprecationWarning, stacklevel=2)

assert self.flags.forc, "Array in get() must be contiguous"

if self.size:
Expand Down Expand Up @@ -787,14 +797,16 @@ def transpose(self, axes=None):
according to the values given.
:returns: :class:`GPUArray` A view of the array with its axes permuted.
.. versionadded:: 2015.2
"""

if axes is None:
axes = range(self.ndim-1, -1, -1)
if len(axes) != len(self.shape):
raise ValueError("axes don't match array")
new_shape = [self.shape[axes[i]] for i in xrange(len(axes))]
new_strides = [self.strides[axes[i]] for i in xrange(len(axes))]
new_shape = [self.shape[axes[i]] for i in range(len(axes))]
new_strides = [self.strides[axes[i]] for i in range(len(axes))]
return GPUArray(shape=tuple(new_shape),
dtype=self.dtype,
allocator=self.allocator,
Expand All @@ -804,6 +816,9 @@ def transpose(self, axes=None):

@property
def T(self):  # noqa
    """Same as :meth:`transpose` called with no arguments, i.e. a view
    of the array with the order of its axes reversed.

    .. versionadded:: 2015.2
    """
    return self.transpose()

# {{{ slicing
Expand Down Expand Up @@ -1407,12 +1422,17 @@ def transpose(a, axes=None):
according to the values given.
:returns: :class:`GPUArray` A view of the array with its axes permuted.
.. versionadded:: 2015.2
"""
return a.transpose(axes)


def reshape(a, shape):
    """Return a view of *a* with the given *shape*, leaving its data
    untouched (delegates to ``a.reshape``).

    .. versionadded:: 2015.2
    """
    return a.reshape(shape)

Expand Down
4 changes: 2 additions & 2 deletions test/test_gpuarray.py
@@ -1,6 +1,6 @@
from __future__ import absolute_import
from __future__ import print_function
#! /usr/bin/env python

from __future__ import absolute_import, print_function
import numpy as np
import numpy.linalg as la
import sys
Expand Down

0 comments on commit 60648a2

Please sign in to comment.