Merge pull request #6728 from gerritholl/structured_multidim_masked_array_fillvalue
BUG/TST: Fix for #6723 including test: force fill_value.ndim==0
diff --git a/.travis.yml b/.travis.yml
index 2447360..e0887a8 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -29,16 +29,18 @@
- 3.5
matrix:
include:
- - python: 3.3
- env: USE_CHROOT=1 ARCH=i386 DIST=trusty PYTHON=3.4
+ - python: 2.7
+ env: USE_CHROOT=1 ARCH=i386 DIST=trusty PYTHON=2.7
sudo: true
+ dist: trusty
addons:
apt:
packages:
- - *common_packages
- debootstrap
- - python: 3.2
+ - python: 3.4
env: USE_DEBUG=1
+ sudo: true
+ dist: trusty
addons:
apt:
packages:
@@ -48,10 +50,20 @@
- python3-nose
- python: 2.7
env: NPY_RELAXED_STRIDES_CHECKING=0 PYTHON_OO=1
+ - python: 3.5
+ env:
+ - USE_WHEEL=1
+ - WHEELHOUSE_UPLOADER_USERNAME=travis.numpy
+ # The following is generated with the command:
+ # travis encrypt -r numpy/numpy WHEELHOUSE_UPLOADER_SECRET=tH3AP1KeY
+ - secure: "IEicLPrP2uW+jW51GRwkONQpdPqMVtQL5bdroqR/U8r9Tr\
+ XrbCVRhp4AP8JYZT0ptoBpmZWWGjmKBndB68QlMiUjQPow\
+ iFWt9Ka92CaqYdU7nqfWp9VImSndPmssjmCXJ1v1IjZPAM\
+ ahp7Qnm0rWRmA0z9SomuRUQOJQ6s684vU="
- python: 2.7
- env: USE_WHEEL=1
- - python: 2.7
- env: PYTHONOPTIMIZE=2
+ env:
+ - PYTHONOPTIMIZE=2
+ - USE_ASV=1
before_install:
- uname -a
- free -m
@@ -69,7 +81,11 @@
# pip install coverage
# Speed up install by not compiling Cython
- pip install --install-option="--no-cython-compile" Cython
+ - if [ -n "$USE_ASV" ]; then pip install asv; fi
- popd
script:
- ./tools/travis-test.sh
+
+after_success:
+ - ./tools/travis-upload-wheel.sh
diff --git a/LICENSE.txt b/LICENSE.txt
index b4139af..9014534 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1,4 +1,4 @@
-Copyright (c) 2005-2015, NumPy Developers.
+Copyright (c) 2005-2016, NumPy Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..6031279
--- /dev/null
+++ b/README.md
@@ -0,0 +1,24 @@
+[](https://travis-ci.org/numpy/numpy)
+
+NumPy is the fundamental package needed for scientific computing with Python.
+This package contains:
+
+ * a powerful N-dimensional array object
+ * sophisticated (broadcasting) functions
+ * tools for integrating C/C++ and Fortran code
+ * useful linear algebra, Fourier transform, and random number capabilities.
+
+It derives from the old Numeric code base and can be used as a replacement for Numeric. It also adds the features introduced by numarray and can be used to replace numarray.
+
+More information can be found at the website:
+
+* http://www.numpy.org
+
+After installation, tests can be run (if ``nose`` is installed) with:
+
+ python -c 'import numpy; numpy.test()'
+
+The most current development version is always available from our
+git repository:
+
+* http://github.com/numpy/numpy
diff --git a/README.txt b/README.txt
deleted file mode 100644
index a20163a..0000000
--- a/README.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-NumPy is the fundamental package needed for scientific computing with Python.
-This package contains:
-
- * a powerful N-dimensional array object
- * sophisticated (broadcasting) functions
- * tools for integrating C/C++ and Fortran code
- * useful linear algebra, Fourier transform, and random number capabilities.
-
-It derives from the old Numeric code base and can be used as a replacement for Numeric. It also adds the features introduced by numarray and can be used to replace numarray.
-
-More information can be found at the website:
-
-http://www.numpy.org
-
-After installation, tests can be run with:
-
-python -c 'import numpy; numpy.test()'
-
-The most current development version is always available from our
-git repository:
-
-http://github.com/numpy/numpy
diff --git a/appveyor.yml b/appveyor.yml
new file mode 100644
index 0000000..e9467e0
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,25 @@
+skip_tags: true
+clone_depth: 1
+
+os: Visual Studio 2015
+
+environment:
+ matrix:
+ - PY_MAJOR_VER: 2
+ PYTHON_ARCH: "x86"
+ - PY_MAJOR_VER: 3
+ PYTHON_ARCH: "x86_64"
+ - PY_MAJOR_VER: 3
+ PYTHON_ARCH: "x86"
+
+build_script:
+ - ps: Start-FileDownload "https://repo.continuum.io/miniconda/Miniconda$env:PY_MAJOR_VER-latest-Windows-$env:PYTHON_ARCH.exe" C:\Miniconda.exe; echo "Finished downloading miniconda"
+ - cmd: C:\Miniconda.exe /S /D=C:\Py
+ - SET PATH=C:\Py;C:\Py\Scripts;C:\Py\Library\bin;%PATH%
+ - conda config --set always_yes yes
+ - conda update conda
+ - conda install cython nose
+ - pip install . -vvv
+
+test_script:
+ - python runtests.py -v -n
diff --git a/benchmarks/benchmarks/bench_indexing.py b/benchmarks/benchmarks/bench_indexing.py
index d6dc4ed..3e5a2ee 100644
--- a/benchmarks/benchmarks/bench_indexing.py
+++ b/benchmarks/benchmarks/bench_indexing.py
@@ -1,6 +1,6 @@
from __future__ import absolute_import, division, print_function
-from .common import Benchmark, squares_, indexes_, indexes_rand_
+from .common import Benchmark, get_squares_, get_indexes_, get_indexes_rand_
import sys
import six
@@ -17,10 +17,10 @@
def setup(self, indexes, sel, op):
sel = sel.replace('I', indexes)
- ns = {'squares_': squares_,
+ ns = {'squares_': get_squares_(),
'np': np,
- 'indexes_': indexes_,
- 'indexes_rand_': indexes_rand_}
+ 'indexes_': get_indexes_(),
+ 'indexes_rand_': get_indexes_rand_()}
if sys.version_info[0] >= 3:
code = "def run():\n for a in squares_.values(): a[%s]%s"
diff --git a/benchmarks/benchmarks/bench_io.py b/benchmarks/benchmarks/bench_io.py
index 45cdf95..782d4ab 100644
--- a/benchmarks/benchmarks/bench_io.py
+++ b/benchmarks/benchmarks/bench_io.py
@@ -1,6 +1,6 @@
from __future__ import absolute_import, division, print_function
-from .common import Benchmark, squares
+from .common import Benchmark, get_squares
import numpy as np
@@ -57,5 +57,8 @@
class Savez(Benchmark):
+ def setup(self):
+ self.squares = get_squares()
+
def time_vb_savez_squares(self):
- np.savez('tmp.npz', squares)
+ np.savez('tmp.npz', self.squares)
diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py
index c844cc7..3d26b80 100644
--- a/benchmarks/benchmarks/bench_linalg.py
+++ b/benchmarks/benchmarks/bench_linalg.py
@@ -1,6 +1,6 @@
from __future__ import absolute_import, division, print_function
-from .common import Benchmark, squares_, indexes_rand
+from .common import Benchmark, get_squares_, get_indexes_rand, TYPES1
import numpy as np
@@ -8,6 +8,9 @@
class Eindot(Benchmark):
def setup(self):
self.a = np.arange(60000.0).reshape(150, 400)
+ self.ac = self.a.copy()
+ self.at = self.a.T
+ self.atc = self.a.T.copy()
self.b = np.arange(240000.0).reshape(400, 600)
self.c = np.arange(600)
self.d = np.arange(400)
@@ -21,6 +24,24 @@
def time_dot_a_b(self):
np.dot(self.a, self.b)
+ def time_dot_trans_a_at(self):
+ np.dot(self.a, self.at)
+
+ def time_dot_trans_a_atc(self):
+ np.dot(self.a, self.atc)
+
+ def time_dot_trans_at_a(self):
+ np.dot(self.at, self.a)
+
+ def time_dot_trans_atc_a(self):
+ np.dot(self.atc, self.a)
+
+ def time_inner_trans_a_a(self):
+ np.inner(self.a, self.a)
+
+ def time_inner_trans_a_ac(self):
+ np.inner(self.a, self.ac)
+
def time_einsum_i_ij_j(self):
np.einsum('i,ij,j', self.d, self.b, self.c)
@@ -36,7 +57,7 @@
class Linalg(Benchmark):
params = [['svd', 'pinv', 'det', 'norm'],
- list(squares_.keys())]
+ TYPES1]
param_names = ['op', 'type']
def setup(self, op, typename):
@@ -46,10 +67,10 @@
if op == 'cholesky':
# we need a positive definite
- self.a = np.dot(squares_[typename],
- squares_[typename].T)
+ self.a = np.dot(get_squares_()[typename],
+ get_squares_()[typename].T)
else:
- self.a = squares_[typename]
+ self.a = get_squares_()[typename]
# check that dtype is supported at all
try:
@@ -63,8 +84,8 @@
class Lstsq(Benchmark):
def setup(self):
- self.a = squares_['float64']
- self.b = indexes_rand[:100].astype(np.float64)
+ self.a = get_squares_()['float64']
+ self.b = get_indexes_rand()[:100].astype(np.float64)
def time_numpy_linalg_lstsq_a__b_float64(self):
np.linalg.lstsq(self.a, self.b)
diff --git a/benchmarks/benchmarks/bench_random.py b/benchmarks/benchmarks/bench_random.py
index a3c3566..18444b9 100644
--- a/benchmarks/benchmarks/bench_random.py
+++ b/benchmarks/benchmarks/bench_random.py
@@ -3,6 +3,7 @@
from .common import Benchmark
import numpy as np
+from numpy.lib import NumpyVersion
class Random(Benchmark):
@@ -27,3 +28,40 @@
def time_100000(self):
np.random.shuffle(self.a)
+
+
+class Randint(Benchmark):
+
+ def time_randint_fast(self):
+ """Compare to uint32 below"""
+ np.random.randint(0, 2**30, size=10**5)
+
+ def time_randint_slow(self):
+ """Compare to uint32 below"""
+ np.random.randint(0, 2**30 + 1, size=10**5)
+
+
+class Randint_dtype(Benchmark):
+ high = {
+ 'bool': 1,
+ 'uint8': 2**7,
+ 'uint16': 2**15,
+ 'uint32': 2**31,
+ 'uint64': 2**63
+ }
+
+ param_names = ['dtype']
+ params = ['bool', 'uint8', 'uint16', 'uint32', 'uint64']
+
+ def setup(self, name):
+ if NumpyVersion(np.__version__) < '1.11.0.dev0':
+ raise NotImplementedError
+
+ def time_randint_fast(self, name):
+ high = self.high[name]
+ np.random.randint(0, high, size=10**5, dtype=name)
+
+ def time_randint_slow(self, name):
+ high = self.high[name]
+ np.random.randint(0, high + 1, size=10**5, dtype=name)
+
diff --git a/benchmarks/benchmarks/bench_reduce.py b/benchmarks/benchmarks/bench_reduce.py
index a810e82..7040235 100644
--- a/benchmarks/benchmarks/bench_reduce.py
+++ b/benchmarks/benchmarks/bench_reduce.py
@@ -1,16 +1,19 @@
from __future__ import absolute_import, division, print_function
-from .common import Benchmark, TYPES1, squares
+from .common import Benchmark, TYPES1, get_squares
import numpy as np
class AddReduce(Benchmark):
+ def setup(self):
+ self.squares = get_squares().values()
+
def time_axis_0(self):
- [np.add.reduce(a, axis=0) for a in squares.values()]
+ [np.add.reduce(a, axis=0) for a in self.squares]
def time_axis_1(self):
- [np.add.reduce(a, axis=1) for a in squares.values()]
+ [np.add.reduce(a, axis=1) for a in self.squares]
class AddReduceSeparate(Benchmark):
@@ -18,7 +21,7 @@
param_names = ['axis', 'type']
def setup(self, axis, typename):
- self.a = squares[typename]
+ self.a = get_squares()[typename]
def time_reduce(self, axis, typename):
np.add.reduce(self.a, axis=axis)
diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py
index 7946ccf..8f821ce 100644
--- a/benchmarks/benchmarks/bench_ufunc.py
+++ b/benchmarks/benchmarks/bench_ufunc.py
@@ -1,6 +1,6 @@
from __future__ import absolute_import, division, print_function
-from .common import Benchmark, squares_
+from .common import Benchmark, get_squares_
import numpy as np
@@ -39,7 +39,7 @@
class UFunc(Benchmark):
params = [ufuncs]
param_names = ['ufunc']
- timeout = 2
+ timeout = 10
def setup(self, ufuncname):
np.seterr(all='ignore')
@@ -48,7 +48,7 @@
except AttributeError:
raise NotImplementedError()
self.args = []
- for t, a in squares_.items():
+ for t, a in get_squares_().items():
arg = (a,) * self.f.nin
try:
self.f(*arg)
diff --git a/benchmarks/benchmarks/common.py b/benchmarks/benchmarks/common.py
index e98396b..066d4b1 100644
--- a/benchmarks/benchmarks/common.py
+++ b/benchmarks/benchmarks/common.py
@@ -25,40 +25,90 @@
'complex256',
]
+
+def memoize(func):
+ result = []
+ def wrapper():
+ if not result:
+ result.append(func())
+ return result[0]
+ return wrapper
+
+
# values which will be used to construct our sample data matrices
# replicate 10 times to speed up initial imports of this helper
# and generate some redundancy
-values = [random.uniform(0, 100) for x in range(nx*ny//10)]*10
-squares = {t: numpy.array(values,
- dtype=getattr(numpy, t)).reshape((nx, ny))
- for t in TYPES1}
+@memoize
+def get_values():
+ rnd = numpy.random.RandomState(1)
+ values = numpy.tile(rnd.uniform(0, 100, size=nx*ny//10), 10)
+ return values
-# adjust complex ones to have non-degenerated imagery part -- use
-# original data transposed for that
-for t, v in squares.items():
- if t.startswith('complex'):
- v += v.T*1j
-# smaller squares
-squares_ = {t: s[:nxs, :nys] for t, s in squares.items()}
-# vectors
-vectors = {t: s[0] for t, s in squares.items()}
+@memoize
+def get_squares():
+ values = get_values()
+ squares = {t: numpy.array(values,
+ dtype=getattr(numpy, t)).reshape((nx, ny))
+ for t in TYPES1}
-indexes = list(range(nx))
-# so we do not have all items
-indexes.pop(5)
-indexes.pop(95)
+ # adjust complex ones to have non-degenerated imagery part -- use
+ # original data transposed for that
+ for t, v in squares.items():
+ if t.startswith('complex'):
+ v += v.T*1j
+ return squares
-indexes_rand = indexes[:] # copy
-random.shuffle(indexes_rand) # in-place shuffle
-# only now make them arrays
-indexes = numpy.array(indexes)
-indexes_rand = numpy.array(indexes_rand)
-# smaller versions
-indexes_ = indexes[indexes < nxs]
-indexes_rand_ = indexes_rand[indexes_rand < nxs]
+@memoize
+def get_squares_():
+ # smaller squares
+ squares_ = {t: s[:nxs, :nys] for t, s in get_squares().items()}
+ return squares_
+
+
+@memoize
+def get_vectors():
+ # vectors
+ vectors = {t: s[0] for t, s in get_squares().items()}
+ return vectors
+
+
+@memoize
+def get_indexes():
+ indexes = list(range(nx))
+ # so we do not have all items
+ indexes.pop(5)
+ indexes.pop(95)
+
+ indexes = numpy.array(indexes)
+ return indexes
+
+
+@memoize
+def get_indexes_rand():
+ rnd = random.Random(1)
+
+ indexes_rand = get_indexes().tolist() # copy
+ rnd.shuffle(indexes_rand) # in-place shuffle
+ indexes_rand = numpy.array(indexes_rand)
+ return indexes_rand
+
+
+@memoize
+def get_indexes_():
+ # smaller versions
+ indexes = get_indexes()
+ indexes_ = indexes[indexes < nxs]
+ return indexes_
+
+
+@memoize
+def get_indexes_rand_():
+ indexes_rand = get_indexes_rand()
+ indexes_rand_ = indexes_rand[indexes_rand < nxs]
+ return indexes_rand_
class Benchmark(object):
diff --git a/doc/HOWTO_RELEASE.rst.txt b/doc/HOWTO_RELEASE.rst.txt
index b77a6c2..ee05981 100644
--- a/doc/HOWTO_RELEASE.rst.txt
+++ b/doc/HOWTO_RELEASE.rst.txt
@@ -1,10 +1,12 @@
This file gives an overview of what is necessary to build binary releases for
-NumPy on OS X. Windows binaries are built here using Wine, they can of course
-also be built on Windows itself. Building OS X binaries on another platform is
-not possible.
+NumPy. Windows binaries are built here using Wine, they can of course also be
+built on Windows itself. Building OS X binaries on another platform is not
+possible, but our current OSX binary build procedure uses travis-ci virtual
+machines running OSX.
Current build and release info
==============================
+
The current info on building and releasing NumPy and SciPy is scattered in
several places. It should be summarized in one place, updated and where
necessary described in more detail. The sections below list all places where
@@ -34,24 +36,35 @@
---------------
* https://github.com/numpy/numpy-vendor
-
Supported platforms and versions
================================
-Python 2.6-2.7 and >=3.2 are the currently supported versions on all platforms.
+
+Python 2.6-2.7 and >=3.2 are the currently supported versions when building
+from source. We test numpy against all these versions every time we merge
+code to trunk. Binary installers may be available for a subset of these
+versions (see below).
OS X
----
-OS X versions >= 10.5 are supported. Note that there are currently still
-issues with compiling on 10.7, due to Apple moving to gcc-llvm.
-Only the Python from `python.org <http://python.org>`_ is supported. Binaries
-do *not* support Apple Python.
+
+Python 2.7 and >=3.3 are the versions for which we provide binary installers.
+OS X versions >= 10.6 are supported. We build binary wheels for OSX that are
+compatible with Python.org Python, system Python, homebrew and macports - see
+this `OSX wheel building summary
+<https://github.com/MacPython/wiki/wiki/Spinning-wheels>`_ for details.
Windows
-------
-Windows XP, Vista and 7 are supported.
+
+32-bit Python 2.7, 3.3, 3.4 are the versions for which we provide binary
+installers. Windows XP, Vista and 7 are supported. Our current windows mingw
+toolchain is not able to build 64-bit binaries of numpy. We are hoping to
+update to a `mingw-w64 toolchain
+<https://github.com/numpy/numpy/wiki/Mingw-w64-faq>`_ soon.
Linux
-----
+
Many distributions include NumPy. Building from source is also relatively
straightforward. Only tarballs are created for Linux, no specific binary
installers are provided (yet).
@@ -61,28 +74,24 @@
No binaries are provided, but succesful builds on Solaris and BSD have been
reported.
-
Tool chain
==========
+
Compilers
---------
+
The same gcc version is used as the one with which Python itself is built on
each platform. At the moment this means:
-* OS X uses gcc-4.0 (since that is what Python itself is built with) up to
- Python 2.6. Python 2.7 comes in two flavors; the 32-bit version is built with
- gcc-4.0 and the 64-bit version with gcc-4.2. The "release.sh" script
- sets environment variables to pick the right compiler.
- All binaries should be built on OS X 10.5, with the exception of the 64-bit
- Python 2.7 one which should be built on 10.6.
+* OS X builds on travis currently use `clang`. It appears that binary wheels
+  for OSX >= 10.6 can be safely built from OSX 10.9 when building against
+ the Python from the Python.org installers.
* Windows builds use MinGW 3.4.5. Updating this to a more recent MinGW with
GCC 4.x is desired, but there are still practical difficulties in building
the binary installers.
-Cython is not needed for building the binaries, because generated C files from
-Cython sources are checked in at the moment. It is worth keeping an eye on what
-Cython versions have been used to generate all current C files, it should be
-the same and most recent version (0.16 as of now).
+You will need Cython for building the binaries. Cython compiles the ``.pyx``
+files in the numpy distribution to ``.c`` files.
Fortran: on OS X gfortran from `this site <http://r.research.att.com/tools/>`_
is used. On Windows g77 (included in MinGW) is the current default, in the future
@@ -93,13 +102,6 @@
* Python(s) from `python.org <http://python.org>`_
* virtualenv
* paver
-* bdist_mpkg from https://github.com/rgommers/bdist_mpkg (has a necessary
- patch, don't use the unsupported version on PyPi).
-
-Python itself should be installed multiple times - each version a binary is
-built for should be installed. The other dependencies only have to be installed
-for the default Python version on the system. The same applies to the doc-build
-dependencies below.
Building docs
-------------
@@ -113,7 +115,7 @@
For building Windows binaries on OS X Wine can be used. In Wine the following
needs to be installed:
-* Python 2.6-2.7 and 3.2
+* Python 2.6-2.7 and 3.3
* MakeNsis
* CpuId plugin for MakeNsis : this can be found in the NumPy source tree under
tools/win32build/cpucaps and has to be built with MinGW (see SConstruct file in
@@ -167,22 +169,27 @@
Binaries
--------
-Windows binaries in "superpack" form for Python 2.6/2.7/3.2/3.3.
-A superpack contains three builds, for SSE2, SSE3 and no SSE.
-OS X binaries are made in dmg format, targeting only the Python from
-`python.org <http://python.org>`_
+Windows binary installers in "superpack" form for Python 2.7/3.3/3.4. A
+superpack contains three builds, for SSE2, SSE3 and no SSE.
+Wheels
+------
+
+OSX wheels built via travis-ci : see `build OSX wheels`_.
+
+.. _build OSX wheels: https://github.com/MacPython/numpy-wheels
Other
-----
+
* Release Notes
* Changelog
Source distribution
-------------------
-A source release in both .zip and .tar.gz formats is released.
+We build source releases in both .zip and .tar.gz formats.
Release process
===============
@@ -200,6 +207,7 @@
--------------------------------------------------
::
+ git clean -fxd
python setup.py bdist
python setup.py sdist
@@ -270,8 +278,12 @@
Check the release notes
-----------------------
-Check that the release notes are up-to-date, and mention at least the
-following:
+Check that the release notes are up-to-date.
+
+Write or update the release notes in a file named for the release, such as
+``doc/release/1.11.0-notes.rst``.
+
+Mention at least the following:
- major new features
- deprecated and removed features
@@ -289,15 +301,55 @@
::
git co 1b2e1d63ff # gives warning about detached head
-Now, set ``release=True`` in setup.py, then
+First, change/check the following variables in ``pavement.py`` depending on the
+release version::
-::
+ RELEASE_NOTES = 'doc/release/1.7.0-notes.rst'
+ LOG_START = 'v1.6.0'
+ LOG_END = 'maintenance/1.7.x'
- git commit -m "REL: Release." setup.py
+Do any other changes. When you are ready to release, do the following
+changes::
+
+ diff --git a/setup.py b/setup.py
+ index b1f53e3..8b36dbe 100755
+ --- a/setup.py
+ +++ b/setup.py
+ @@ -57,7 +57,7 @@ PLATFORMS = ["Windows", "Linux", "Solaris", "Mac OS-
+ MAJOR = 1
+ MINOR = 7
+ MICRO = 0
+ -ISRELEASED = False
+ +ISRELEASED = True
+ VERSION = '%d.%d.%drc1' % (MAJOR, MINOR, MICRO)
+
+ # Return the git revision as a string
+
+And make sure the ``VERSION`` variable is set properly.
+
+Now you can make the release commit and tag. We recommend you don't push
+the commit or tag immediately, just in case you need to do more cleanup. We
+prefer to defer the push of the tag until we're confident this is the exact
+form of the released code (see: :ref:`push-tag-and-commit`):
+
+ git commit -s -m "REL: Release." setup.py
git tag -s <version>
- git push origin <version>
-Note: ``git tag -s`` creates a signed tag - make sure your PGP key is public.
+The ``-s`` flag makes a PGP (usually GPG) signed tag. Please do sign the
+release tags.
+
+The release tag should have the release number in the annotation (tag
+message). Unfortunately the name of a tag can be changed without breaking the
+signature, the contents of the message cannot.
+
+See : https://github.com/scipy/scipy/issues/4919 for a discussion of signing
+release tags, and http://keyring.debian.org/creating-key.html for instructions
+on creating a GPG key if you do not have one.
+
+To make your key more readily identifiable as you, consider sending your key
+to public keyservers, with a command such as::
+
+ gpg --send-keys <yourkeyid>
Apply patch to fix bogus strides
--------------------------------
@@ -314,8 +366,34 @@
Also create a new version hash in cversions.txt and a corresponding version
define NPY_x_y_API_VERSION in numpyconfig.h
+Trigger the OSX builds on travis
+--------------------------------
+
+See `build OSX wheels`_.
+
+You may need to check the ``.travis.yml`` file of the
+https://github.com/MacPython/numpy-wheels repository.
+
+Make sure that the release tag has been pushed, and that the ``.travis.yml``
+is set thusly::
+
+ - NP_COMMIT=latest-tag # comment out to build version in submodule
+
+Trigger a build by doing an empty (or otherwise) commit to the repository::
+
+ cd /path/to/numpy-wheels
+ git commit --allow-empty
+ git push
+
+The wheels, once built, appear in http://wheels.scipy.org
+
Make the release
----------------
+
+Build the changelog and notes for upload with::
+
+ paver write_release_and_log
+
The tar-files and binary releases for distribution should be uploaded to SourceForge,
together with the Release Notes and the Changelog. Uploading can be done
through a web interface or, more efficiently, through scp/sftp/rsync as
@@ -327,19 +405,41 @@
Update PyPi
-----------
+
The final release (not betas or release candidates) should be uploaded to PyPi.
There are two ways to update PyPi, the first one is::
- $ python setup.py sdist upload
+ $ git clean -fxd # to be safe
+ $ python setup.py sdist --formats=gztar,zip # to check
+ # python setup.py sdist --formats=gztar,zip upload --sign
-and the second one is to upload the PKG_INFO file inside the sdist dir in the
+This will ask for your key PGP passphrase, in order to sign the built source
+packages.
+
+The second way is to upload the PKG_INFO file inside the sdist dir in the
web interface of PyPi. The source tarball can also be uploaded through this
-interface. A simple binary installer for windows, created with
-``bdist_wininst``, should also be uploaded to PyPi so ``easy_install numpy``
-works.
+interface.
+
+To push the travis-ci OSX wheels up to pypi see :
+https://github.com/MacPython/numpy-wheels#uploading-the-built-wheels-to-pypi
+
+.. _push-tag-and-commit:
+
+Push the release tag and commit
+-------------------------------
+
+Finally, now you are confident this tag correctly defines the source code that
+you released you can push the tag and release commit up to github::
+
+ git push # Push release commit
+ git push upstream <version> # Push tag named <version>
+
+where ``upstream`` points to the main https://github.com/numpy/numpy.git
+repository.
Update docs.scipy.org
---------------------
+
All documentation for a release can be updated on http://docs.scipy.org/ with:
make dist
@@ -361,11 +461,16 @@
Update scipy.org
----------------
+
A release announcement with a link to the download site should be placed in the
sidebar of the front page of scipy.org.
+The scipy.org update should be a PR at https://github.com/scipy/scipy.org. The file
+that needs modification is ``www/index.rst``. Search for ``News``.
+
Announce to the lists
---------------------
+
The release should be announced on the mailing lists of
NumPy and SciPy, to python-announce, and possibly also those of
Matplotlib,IPython and/or Pygame.
@@ -374,6 +479,12 @@
several other libraries (SciPy/Matplotlib/Pygame) should be posted on the
mailing list.
+Announce to Linux Weekly News
+-----------------------------
+
+Email the editor of LWN to let them know of the release. Directions at:
+https://lwn.net/op/FAQ.lwn#contact
+
After the final release
-----------------------
After the final release is announced, a few administrative tasks are left to be
diff --git a/doc/neps/npy-format.rst b/doc/neps/npy-format.rst
index bf88c3f..3f12e1b 100644
--- a/doc/neps/npy-format.rst
+++ b/doc/neps/npy-format.rst
@@ -199,7 +199,7 @@
by multiplying the number of elements given by the shape (noting
that shape=() means there is 1 element) by dtype.itemsize.
-Format Specification: Version 1.0
+Format Specification: Version 2.0
---------------------------------
The version 1.0 format only allowed the array header to have a
diff --git a/doc/newdtype_example/floatint.c b/doc/newdtype_example/floatint.c
index cf698a7..0cc1983 100644
--- a/doc/newdtype_example/floatint.c
+++ b/doc/newdtype_example/floatint.c
@@ -1,10 +1,10 @@
#include "Python.h"
-#include "structmember.h" /* for offsetof macro if needed */
+#include "structmember.h" /* for offset of macro if needed */
#include "numpy/arrayobject.h"
-/* Use a Python float as the cannonical type being added
+/* Use a Python float as the canonical type being added
*/
typedef struct _floatint {
@@ -14,10 +14,10 @@
} PyFloatIntObject;
static PyTypeObject PyFloatInt_Type = {
- PyObject_HEAD_INIT(NULL)
- 0, /*ob_size*/
- "floatint.floatint", /*tp_name*/
- sizeof(PyFloatIntObject), /*tp_basicsize*/
+ PyObject_HEAD_INIT(NULL)
+ 0, /*ob_size*/
+ "floatint.floatint", /*tp_name*/
+ sizeof(PyFloatIntObject), /*tp_basicsize*/
};
static PyArray_ArrFuncs _PyFloatInt_Funcs;
@@ -45,17 +45,18 @@
static void
twoint_copyswap(void *dst, void *src, int swap, void *arr)
{
- if (src != NULL)
- memcpy(dst, src, sizeof(double));
-
+ if (src != NULL) {
+ memcpy(dst, src, sizeof(double));
+ }
+
if (swap) {
- register char *a, *b, c;
- a = (char *)dst;
- b = a + 7;
- c = *a; *a++ = *b; *b-- = c;
- c = *a; *a++ = *b; *b-- = c;
- c = *a; *a++ = *b; *b-- = c;
- c = *a; *a++ = *b; *b = c;
+ register char *a, *b, c;
+ a = (char *)dst;
+ b = a + 7;
+ c = *a; *a++ = *b; *b-- = c;
+ c = *a; *a++ = *b; *b-- = c;
+ c = *a; *a++ = *b; *b-- = c;
+ c = *a; *a++ = *b; *b = c;
}
}
@@ -64,12 +65,11 @@
npy_int32 a[2];
if ((ap==NULL) || PyArray_ISBEHAVED_RO(ap)) {
- a[0] = *((npy_int32 *)ip);
- a[1] = *((npy_int32 *)ip + 1);
+ a[0] = *((npy_int32 *)ip);
+ a[1] = *((npy_int32 *)ip + 1);
}
else {
- ap->descr->f->copyswap(a, ip, !PyArray_ISNOTSWAPPED(ap),
- ap);
+ ap->descr->f->copyswap(a, ip, !PyArray_ISNOTSWAPPED(ap), ap);
}
return Py_BuildValue("(ii)", a[0], a[1]);
}
@@ -79,17 +79,16 @@
npy_int32 a[2];
if (!PyTuple_Check(op)) {
- PyErr_SetString(PyExc_TypeError, "must be a tuple");
- return -1;
+ PyErr_SetString(PyExc_TypeError, "must be a tuple");
+ return -1;
}
if (!PyArg_ParseTuple(op, "ii", a, a+1)) return -1;
if (ap == NULL || PyArray_ISBEHAVED(ap)) {
- memcpy(ov, a, sizeof(double));
+ memcpy(ov, a, sizeof(double));
}
else {
- ap->descr->f->copyswap(ov, a, !PyArray_ISNOTSWAPPED(ap),
- ap);
+ ap->descr->f->copyswap(ov, a, !PyArray_ISNOTSWAPPED(ap), ap);
}
return 0;
}
@@ -145,7 +144,7 @@
dtype = _register_dtype();
Py_XINCREF(dtype);
if (dtype != NULL) {
- PyDict_SetItemString(d, "floatint_type", (PyObject *)dtype);
+ PyDict_SetItemString(d, "floatint_type", (PyObject *)dtype);
}
Py_INCREF(&PyFloatInt_Type);
PyDict_SetItemString(d, "floatint", (PyObject *)&PyFloatInt_Type);
diff --git a/doc/release/1.10.0-notes.rst b/doc/release/1.10.0-notes.rst
index e753707..35e967f 100644
--- a/doc/release/1.10.0-notes.rst
+++ b/doc/release/1.10.0-notes.rst
@@ -78,6 +78,12 @@
~~~~~~~~~~~~~~~~~~~~~~~
NPY_RELAXED_STRIDE_CHECKING is now true by default.
+UPDATE: In 1.10.2 the default value of NPY_RELAXED_STRIDE_CHECKING was
+changed to false for back compatibility reasons. More time is needed before
+it can be made the default. As part of the roadmap a deprecation of
+dimension changing views of f_contiguous not c_contiguous arrays was also
+added.
+
Concatenation of 1d arrays along any but ``axis=0`` raises ``IndexError``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Using axis != 0 has raised a DeprecationWarning since NumPy 1.7, it now
diff --git a/doc/release/1.10.2-notes.rst b/doc/release/1.10.2-notes.rst
index 70c9398..02e7564 100644
--- a/doc/release/1.10.2-notes.rst
+++ b/doc/release/1.10.2-notes.rst
@@ -10,24 +10,47 @@
Compatibility notes
===================
-fix swig bug in ``numpy.i``
+Relaxed stride checking is no longer the default
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+There were back compatibility problems involving views changing the dtype of
+multidimensional Fortran arrays that need to be dealt with over a longer
+timeframe.
+
+Fix swig bug in ``numpy.i``
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Relaxed stride checking revealed a bug in ``array_is_fortran(a)``, that was
using PyArray_ISFORTRAN to check for Fortran contiguity instead of
PyArray_IS_F_CONTIGUOUS. You may want to regenerate swigged files using the
updated numpy.i
+Deprecate views changing dimensions in fortran order
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This deprecates assignment of a new descriptor to the dtype attribute of
+a non-C-contiguous array if it result in changing the shape. This
+effectively bars viewing a multidimensional Fortran array using a dtype
+that changes the element size along the first axis.
+
+The reason for the deprecation is that, when relaxed strides checking is
+enabled, arrays that are both C and Fortran contiguous are always treated
+as C contiguous which breaks some code that depended on the two being mutually
+exclusive for non-scalar arrays of ndim > 1. This deprecation prepares the
+way to always enable relaxed stride checking.
+
+
Issues Fixed
============
+* gh-6019 Masked array repr fails for structured array with multi-dimensional column.
* gh-6462 Median of empty array produces IndexError.
* gh-6467 Performance regression for record array access.
+* gh-6468 numpy.interp uses 'left' value even when x[0]==xp[0].
* gh-6475 np.allclose returns a memmap when one of its arguments is a memmap.
* gh-6491 Error in broadcasting stride_tricks array.
* gh-6495 Unrecognized command line option '-ffpe-summary' in gfortran.
* gh-6497 Failure of reduce operation on recarrays.
* gh-6498 Mention change in default casting rule in 1.10 release notes.
* gh-6530 The partition function errors out on empty input.
+* gh-6532 numpy.inner return wrong inaccurate value sometimes.
* gh-6563 Intent(out) broken in recent versions of f2py.
* gh-6569 Cannot run tests after 'python setup.py build_ext -i'
* gh-6572 Error in broadcasting stride_tricks array component.
@@ -39,13 +62,23 @@
* gh-6636 Memory leak in nested dtypes in numpy.recarray
* gh-6641 Subsetting recarray by fields yields a structured array.
* gh-6667 ma.make_mask handles ma.nomask input incorrectly.
+* gh-6675 Optimized blas detection broken in master and 1.10.
+* gh-6678 Getting unexpected error from: X.dtype = complex (or Y = X.view(complex))
+* gh-6718 f2py test fail in pip installed numpy-1.10.1 in virtualenv.
+* gh-6719 Error compiling Cython file: Pythonic division not allowed without gil.
+* gh-6771 Numpy.rec.fromarrays losing dtype metadata between versions 1.9.2 and 1.10.1
+* gh-6781 The travis-ci script in maintenance/1.10.x needs fixing.
+* gh-6807 Windows testing errors for 1.10.2
+
Merged PRs
==========
-The following PRs in master have been backported to 1.10.2
+The following PRs have been merged into 1.10.2. When the PR is a backport,
+the PR number for the original PR against master is listed.
* gh-5773 MAINT: Hide testing helper tracebacks when using them with pytest.
+* gh-6094 BUG: Fixed a bug with string representation of masked structured arrays.
* gh-6208 MAINT: Speedup field access by removing unneeded safety checks.
* gh-6460 BUG: Replacing the os.environ.clear by less invasive procedure.
* gh-6470 BUG: Fix AttributeError in numpy distutils.
@@ -80,6 +113,23 @@
* gh-6643 ENH: make recarray.getitem return a recarray.
* gh-6653 BUG: Fix ma dot to always return masked array.
* gh-6668 BUG: ma.make_mask should always return nomask for nomask argument.
+* gh-6686 BUG: Fix a bug in assert_string_equal.
+* gh-6695 BUG: Fix removing tempdirs created during build.
+* gh-6697 MAINT: Fix spurious semicolon in macro definition of PyArray_FROM_OT.
+* gh-6698 TST: test np.rint bug for large integers.
+* gh-6717 BUG: Readd fallback CBLAS detection on linux.
+* gh-6721 BUG: Fix for #6719.
+* gh-6726 BUG: Fix bugs exposed by relaxed stride rollback.
+* gh-6757 BUG: link cblas library if cblas is detected.
+* gh-6756 TST: only test f2py, not f2py2.7 etc, fixes #6718.
+* gh-6747 DEP: Deprecate changing shape of non-C-contiguous array via descr.
+* gh-6775 MAINT: Include from __future__ boilerplate in some files missing it.
+* gh-6780 BUG: metadata is not copied to base_dtype.
+* gh-6783 BUG: Fix travis ci testing for new google infrastructure.
+* gh-6785 BUG: Quick and dirty fix for interp.
+* gh-6813 TST,BUG: Make test_mvoid_multidim_print work for 32 bit systems.
+* gh-6817 BUG: Disable 32-bit msvc9 compiler optimizations for npy_rint.
+* gh-6819 TST: Fix test_mvoid_multidim_print failures on Python 2.x for Windows.
Initial support for mingwpy was reverted as it was causing problems for
non-windows builds.
@@ -87,11 +137,19 @@
* gh-6536 BUG: Revert gh-5614 to fix non-windows build problems
A fix for np.lib.split was reverted because it resulted in "fixing"
-behavior will be present in the Numpy 1.11 and was already present in
-Numpy 1.9. See the discussion of the issue at gh-6575 for clarification.
+behavior that will be present in Numpy 1.11 and that was already
+present in Numpy 1.9. See the discussion of the issue at gh-6575 for
+clarification.
* gh-6576 BUG: Revert gh-6376 to fix split behavior for empty arrays.
+Relaxed stride checking was reverted. There were back compatibility
+problems involving views changing the dtype of multidimensional Fortran
+arrays that need to be dealt with over a longer timeframe.
+
+* gh-6735 MAINT: Make no relaxed stride checking the default for 1.10.
+
+
Notes
=====
A bug in the Numpy 1.10.1 release resulted in exceptions being raised for
diff --git a/doc/release/1.10.3-notes.rst b/doc/release/1.10.3-notes.rst
new file mode 100644
index 0000000..0368272
--- /dev/null
+++ b/doc/release/1.10.3-notes.rst
@@ -0,0 +1,4 @@
+NumPy 1.10.3 Release Notes
+**************************
+
+N/A this release did not happen due to various screwups involving PyPi.
diff --git a/doc/release/1.10.4-notes.rst b/doc/release/1.10.4-notes.rst
new file mode 100644
index 0000000..03eaf5e
--- /dev/null
+++ b/doc/release/1.10.4-notes.rst
@@ -0,0 +1,39 @@
+NumPy 1.10.4 Release Notes
+**************************
+
+This release is a bugfix source release motivated by a segfault regression.
+No windows binaries are provided for this release, as there appear to be
+bugs in the toolchain we use to generate those files. Hopefully that
+problem will be fixed for the next release. In the meantime, we suggest
+using one of the providers of windows binaries.
+
+Compatibility notes
+===================
+
+* The trace function now calls the trace method on subclasses of ndarray,
+ except for matrix, for which the current behavior is preserved. This is
+ to help with the units package of AstroPy and hopefully will not cause
+ problems.
+
+Issues Fixed
+============
+
+* gh-6922 BUG: numpy.recarray.sort segfaults on Windows.
+* gh-6937 BUG: busday_offset does the wrong thing with modifiedpreceding roll.
+* gh-6949 BUG: Type is lost when slicing a subclass of recarray.
+
+Merged PRs
+==========
+
+The following PRs have been merged into 1.10.4. When the PR is a backport,
+the PR number for the original PR against master is listed.
+
+* gh-6840 TST: Update travis testing script in 1.10.x
+* gh-6843 BUG: Fix use of python 3 only FileNotFoundError in test_f2py.
+* gh-6884 REL: Update pavement.py and setup.py to reflect current version.
+* gh-6916 BUG: Fix test_f2py so it runs correctly in runtests.py.
+* gh-6924 BUG: Fix segfault gh-6922.
+* gh-6942 Fix datetime roll='modifiedpreceding' bug.
+* gh-6943 DOC,BUG: Fix some latex generation problems.
+* gh-6950 BUG trace is not subclass aware, np.trace(ma) != ma.trace().
+* gh-6952 BUG recarray slices should preserve subclass.
diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst
index fac868c..73beab5 100644
--- a/doc/release/1.11.0-notes.rst
+++ b/doc/release/1.11.0-notes.rst
@@ -18,7 +18,8 @@
Future Changes
==============
-* Relaxed stride checking will become the default.
+* Relaxed stride checking will become the default in 1.12.0.
+* Support for Python 2.6, 3.2, and 3.3 will be dropped in 1.12.0.
Compatibility notes
@@ -38,6 +39,7 @@
* Non-integers used as index values raise TypeError,
e.g., in reshape, take, and specifying reduce axis.
+
FutureWarning to changed behavior
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -61,32 +63,59 @@
New Features
============
-* `np.histogram` now provides plugin estimators for automatically estimating the optimal
-number of bins. Passing one of ['auto', 'fd', 'scott', 'rice', 'sturges']
-as the argument to 'bins' results in the corresponding estimator being used.
+* `np.histogram` now provides plugin estimators for automatically
+ estimating the optimal number of bins. Passing one of ['auto', 'fd',
+ 'scott', 'rice', 'sturges'] as the argument to 'bins' results in the
+ corresponding estimator being used.
-* A benchmark suite using `Airspeed Velocity <http://spacetelescope.github.io/asv/>`__
-has been added, converting the previous vbench-based one. You can run the suite locally
-via ``python runtests.py --bench``. For more details, see ``benchmarks/README.rst``.
+* A benchmark suite using `Airspeed Velocity
+ <http://spacetelescope.github.io/asv/>`__ has been added, converting the
+ previous vbench-based one. You can run the suite locally via ``python
+ runtests.py --bench``. For more details, see ``benchmarks/README.rst``.
* A new function ``np.shares_memory`` that can check exactly whether two
-arrays have memory overlap is added. ``np.may_share_memory`` also now
-has an option to spend more effort to reduce false positives.
+ arrays have memory overlap is added. ``np.may_share_memory`` also now has
+ an option to spend more effort to reduce false positives.
-* ``SkipTest`` and ``KnownFailureException`` exception classes are exposed in the
-``numpy.testing`` namespace. Raise them in a test function to mark the test to
-be skipped or mark it as a known failure, respectively.
+* ``SkipTest`` and ``KnownFailureException`` exception classes are exposed
+ in the ``numpy.testing`` namespace. Raise them in a test function to mark
+ the test to be skipped or mark it as a known failure, respectively.
+
+* ``f2py.compile`` has a new ``extension`` keyword parameter that allows the
+ fortran extension to be specified for generated temp files. For instance,
+  the files can be specified to be ``*.f90``. The ``verbose`` argument is
+  also activated; it was previously ignored.
+
+* A ``dtype`` parameter has been added to ``np.random.randint``.
+ Random ndarrays of the following types can now be generated:
+
+ - np.bool,
+ - np.int8, np.uint8,
+ - np.int16, np.uint16,
+ - np.int32, np.uint32,
+ - np.int64, np.uint64,
+ - np.int_ (long), np.intp
+
+ The specification is by precision rather than by C type. Hence, on some
+ platforms np.int64 may be a `long` instead of `long long` even if the
+ specified dtype is `long long` because the two may have the same
+ precision. The resulting type depends on which C type numpy uses for the
+ given precision. The byteorder specification is also ignored, the
+ generated arrays are always in native byte order.
+
+* ``np.moveaxis`` allows for moving one or more array axes to a new position
+ by explicitly providing source and destination axes.
Improvements
============
*np.gradient* now supports an ``axis`` argument
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``axis`` parameter was added to *np.gradient* for consistency.
It allows to specify over which axes the gradient is calculated.
*np.lexsort* now supports arrays with object data-type
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The function now internally calls the generic ``npy_amergesort``
when the type does not implement a merge-sort kind of ``argsort``
method.
@@ -98,9 +127,43 @@
a big memory peak. Another optimization was done to avoid a memory peak and
useless computations when printing a masked array.
+*ndarray.tofile* now uses fallocate on linux
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The function now uses the fallocate system call to reserve sufficient
+diskspace on filesystems that support it.
+
+``np.dot`` optimized for operations of the form ``A.T @ A`` and ``A @ A.T``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Previously, ``gemm`` BLAS operations were used for all matrix products. Now,
+if the matrix product is between a matrix and its transpose, it will use
+``syrk`` BLAS operations for a performance boost.
+
+**Note:** Requires the transposed and non-transposed matrices to share data.
+
Changes
=======
+Pyrex support was removed from ``numpy.distutils``. The method
+``build_src.generate_a_pyrex_source`` will remain available; it has been
+monkeypatched by users to support Cython instead of Pyrex. It's recommended to
+switch to a better supported method of building Cython extensions though.
+*np.broadcast* can now be called with a single argument
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The resulting object in that case will simply mimic iteration over
+a single array. This change obsoletes distinctions like
+
+ if len(x) == 1:
+ shape = x[0].shape
+ else:
+ shape = np.broadcast(*x).shape
+
+Instead, ``np.broadcast`` can be used in all cases.
+
+*np.trace* now respects array subclasses
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This behaviour mimics that of other functions such as ``np.diagonal`` and
+ensures, e.g., that for masked arrays ``np.trace(ma)`` and ``ma.trace()`` give
+the same result.
Deprecations
============
@@ -110,7 +173,7 @@
The f_contiguous flag was used to signal that views as a dtypes that
changed the element size would change the first index. This was always a
bit problematical for arrays that were both f_contiguous and c_contiguous
-because c_contiguous took precendence. Relaxed stride checking results in
+because c_contiguous took precedence. Relaxed stride checking results in
more such dual contiguous arrays and breaks some existing code as a result.
Note that this also affects changing the dtype by assigning to the dtype
attribute of an array. The aim of this deprecation is to restrict views to
@@ -118,3 +181,26 @@
compatible is to use `a.T.view(...).T` instead. A parameter will also be
added to the view method to explicitly ask for Fortran order views, but
that will not be backward compatible.
+
+Invalid arguments for array ordering
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+It is currently possible to pass in arguments for the ``order``
+parameter in methods like ``array.flatten`` or ``array.ravel``
+that were not one of the following: 'C', 'F', 'A', 'K' (note that
+all of these possible values are unicode- and case-insensitive).
+Such behaviour will not be allowed in future releases.
+
+Random number generator in the ``testing`` namespace
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Python standard library random number generator was previously exposed in the
+``testing`` namespace as ``testing.rand``. Using this generator is not
+recommended and it will be removed in a future release. Use generators from
+``numpy.random`` namespace instead.
+
+Random integer generation on a closed interval
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+In accordance with the Python C API, which gives preference to the half-open
+interval over the closed one, ``np.random.random_integers`` is being
+deprecated in favor of calling ``np.random.randint``, which has been
+enhanced with the ``dtype`` parameter as described under "New Features".
+However, ``np.random.random_integers`` will not be removed anytime soon.
diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst
index b09728e..0fb5a66 100644
--- a/doc/source/dev/development_environment.rst
+++ b/doc/source/dev/development_environment.rst
@@ -137,6 +137,9 @@
For more extensive info on running and writing tests, see
https://github.com/numpy/numpy/blob/master/doc/TESTS.rst.txt .
+*Note: do not run the tests from the root directory of your numpy git repo,
+that will result in strange test errors.*
+
Rebuilding & cleaning the workspace
-----------------------------------
diff --git a/doc/source/dev/governance/index.rst b/doc/source/dev/governance/index.rst
index 9a611a2..3919e5e 100644
--- a/doc/source/dev/governance/index.rst
+++ b/doc/source/dev/governance/index.rst
@@ -1,5 +1,5 @@
#####################
-Contributing to Numpy
+NumPy governance
#####################
.. toctree::
diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst
index 5716f45..b82f7d3 100644
--- a/doc/source/reference/arrays.classes.rst
+++ b/doc/source/reference/arrays.classes.rst
@@ -39,9 +39,9 @@
Numpy provides several hooks that classes can customize:
-.. function:: class.__numpy_ufunc__(self, ufunc, method, i, inputs, **kwargs)
+.. method:: class.__numpy_ufunc__(ufunc, method, i, inputs, **kwargs)
- .. versionadded:: 1.10
+ .. versionadded:: 1.11
Any class (ndarray subclass or not) can define this method to
override behavior of Numpy's ufuncs. This works quite similarly to
@@ -109,7 +109,7 @@
your_obj)`` always calls only your ``__numpy_ufunc__``, as
expected.
-.. function:: class.__array_finalize__(self)
+.. method:: class.__array_finalize__(obj)
This method is called whenever the system internally allocates a
new array from *obj*, where *obj* is a subclass (subtype) of the
@@ -118,7 +118,7 @@
to update meta-information from the "parent." Subclasses inherit
a default implementation of this method that does nothing.
-.. function:: class.__array_prepare__(array, context=None)
+.. method:: class.__array_prepare__(array, context=None)
At the beginning of every :ref:`ufunc <ufuncs.output-type>`, this
method is called on the input object with the highest array
@@ -130,7 +130,7 @@
the subclass and update metadata before returning the array to the
ufunc for computation.
-.. function:: class.__array_wrap__(array, context=None)
+.. method:: class.__array_wrap__(array, context=None)
At the end of every :ref:`ufunc <ufuncs.output-type>`, this method
is called on the input object with the highest array priority, or
@@ -149,7 +149,7 @@
possibility for the Python type of the returned object. Subclasses
inherit a default value of 0.0 for this attribute.
-.. function:: class.__array__([dtype])
+.. method:: class.__array__([dtype])
If a class (ndarray subclass or not) having the :func:`__array__`
method is used as the output object of an :ref:`ufunc
diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst
index 14bac44..0f5fb92 100644
--- a/doc/source/reference/arrays.ndarray.rst
+++ b/doc/source/reference/arrays.ndarray.rst
@@ -45,8 +45,8 @@
The array can be indexed using Python container-like syntax:
- >>> x[1,2] # i.e., the element of x in the *second* row, *third*
- column, namely, 6.
+ >>> # The element of x in the *second* row, *third* column, namely, 6.
+ >>> x[1, 2]
For example :ref:`slicing <arrays.indexing>` can produce views of
the array:
diff --git a/doc/source/reference/routines.array-manipulation.rst b/doc/source/reference/routines.array-manipulation.rst
index a8aa2d0..f3ce488 100644
--- a/doc/source/reference/routines.array-manipulation.rst
+++ b/doc/source/reference/routines.array-manipulation.rst
@@ -26,6 +26,7 @@
.. autosummary::
:toctree: generated/
+ moveaxis
rollaxis
swapaxes
ndarray.T
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 9e908dd..6da6176 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -3,6 +3,8 @@
*************
.. include:: ../release/1.11.0-notes.rst
+.. include:: ../release/1.10.4-notes.rst
+.. include:: ../release/1.10.3-notes.rst
.. include:: ../release/1.10.2-notes.rst
.. include:: ../release/1.10.1-notes.rst
.. include:: ../release/1.10.0-notes.rst
diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst
index c5f8fea..8acb2fa 100644
--- a/doc/source/user/building.rst
+++ b/doc/source/user/building.rst
@@ -60,8 +60,8 @@
``setuptools`` is only used when building via ``pip`` or with ``python
setupegg.py``. Using ``virtualenv`` should work as expected.
-*Note: for build instructions to do development work on NumPy itself, see
-:ref:`development-environment`*.
+*Note: for build instructions to do development work on NumPy itself, see*
+:ref:`development-environment`.
.. _parallel-builds:
diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst
index d94233a..c3179b1 100644
--- a/doc/source/user/numpy-for-matlab-users.rst
+++ b/doc/source/user/numpy-for-matlab-users.rst
@@ -7,12 +7,12 @@
Introduction
============
-MATLAB® and NumPy/SciPy have a lot in common. But
-there are many differences. NumPy and SciPy were created to do numerical
-and scientific computing in the most natural way with Python, not to be
-MATLAB® clones. This page is intended to be a place to collect wisdom
-about the differences, mostly for the purpose of helping proficient
-MATLAB® users become proficient NumPy and SciPy users.
+MATLAB® and NumPy/SciPy have a lot in common. But there are many
+differences. NumPy and SciPy were created to do numerical and scientific
+computing in the most natural way with Python, not to be MATLAB® clones.
+This page is intended to be a place to collect wisdom about the
+differences, mostly for the purpose of helping proficient MATLAB® users
+become proficient NumPy and SciPy users.
.. raw:: html
@@ -25,18 +25,39 @@
.. list-table::
- * - In MATLAB®, the basic data type is a multidimensional array of double precision floating point numbers. Most expressions take such arrays and return such arrays. Operations on the 2-D instances of these arrays are designed to act more or less like matrix operations in linear algebra.
- - In NumPy the basic type is a multidimensional ``array``. Operations on these arrays in all dimensionalities including 2D are elementwise operations. However, there is a special ``matrix`` type for doing linear algebra, which is just a subclass of the ``array`` class. Operations on matrix-class arrays are linear algebra operations.
-
- * - MATLAB® uses 1 (one) based indexing. The initial element of a sequence is found using a(1).
+ * - In MATLAB®, the basic data type is a multidimensional array of
+ double precision floating point numbers. Most expressions take such
+ arrays and return such arrays. Operations on the 2-D instances of
+ these arrays are designed to act more or less like matrix operations
+ in linear algebra.
+ - In NumPy the basic type is a multidimensional ``array``. Operations
+ on these arrays in all dimensionalities including 2D are elementwise
+ operations. However, there is a special ``matrix`` type for doing
+ linear algebra, which is just a subclass of the ``array`` class.
+ Operations on matrix-class arrays are linear algebra operations.
+
+ * - MATLAB® uses 1 (one) based indexing. The initial element of a
+ sequence is found using a(1).
:ref:`See note INDEXING <numpy-for-matlab-users.notes>`
- - Python uses 0 (zero) based indexing. The initial element of a sequence is found using a[0].
-
- * - MATLAB®'s scripting language was created for doing linear algebra. The syntax for basic matrix operations is nice and clean, but the API for adding GUIs and making full-fledged applications is more or less an afterthought.
- - NumPy is based on Python, which was designed from the outset to be an excellent general-purpose programming language. While Matlab's syntax for some array manipulations is more compact than NumPy's, NumPy (by virtue of being an add-on to Python) can do many things that Matlab just cannot, for instance subclassing the main array type to do both array and matrix math cleanly.
-
- * - In MATLAB®, arrays have pass-by-value semantics, with a lazy copy-on-write scheme to prevent actually creating copies until they are actually needed. Slice operations copy parts of the array.
- - In NumPy arrays have pass-by-reference semantics. Slice operations are views into an array.
+ - Python uses 0 (zero) based indexing. The initial element of a
+ sequence is found using a[0].
+
+ * - MATLAB®'s scripting language was created for doing linear algebra.
+ The syntax for basic matrix operations is nice and clean, but the API
+ for adding GUIs and making full-fledged applications is more or less
+ an afterthought.
+ - NumPy is based on Python, which was designed from the outset to be
+ an excellent general-purpose programming language. While Matlab's
+ syntax for some array manipulations is more compact than
+ NumPy's, NumPy (by virtue of being an add-on to Python) can do many
+ things that Matlab just cannot, for instance subclassing the main
+ array type to do both array and matrix math cleanly.
+
+ * - In MATLAB®, arrays have pass-by-value semantics, with a lazy
+ copy-on-write scheme to prevent actually creating copies until they
+ are actually needed. Slice operations copy parts of the array.
+ - In NumPy arrays have pass-by-reference semantics. Slice operations
+ are views into an array.
'array' or 'matrix'? Which should I use?
@@ -76,20 +97,20 @@
- For ``matrix``, **'``*``\ ' means matrix multiplication**, and the
``multiply()`` function is used for element-wise multiplication.
-- Handling of vectors (rank-1 arrays)
+- Handling of vectors (one-dimensional arrays)
- - For ``array``, the **vector shapes 1xN, Nx1, and N are all
- different things**. Operations like ``A[:,1]`` return a rank-1
- array of shape N, not a rank-2 of shape Nx1. Transpose on a rank-1
- ``array`` does nothing.
- - For ``matrix``, **rank-1 arrays are always upconverted to 1xN or
- Nx1 matrices** (row or column vectors). ``A[:,1]`` returns a
- rank-2 matrix of shape Nx1.
+ - For ``array``, the **vector shapes 1xN, Nx1, and N are all different
+ things**. Operations like ``A[:,1]`` return a one-dimensional array of
+ shape N, not a two-dimensional array of shape Nx1. Transpose on a
+ one-dimensional ``array`` does nothing.
+ - For ``matrix``, **one-dimensional arrays are always upconverted to 1xN
+ or Nx1 matrices** (row or column vectors). ``A[:,1]`` returns a
+ two-dimensional matrix of shape Nx1.
-- Handling of higher-rank arrays (rank > 2)
+- Handling of higher-dimensional arrays (ndim > 2)
- - ``array`` objects **can have rank > 2**.
- - ``matrix`` objects **always have exactly rank 2**.
+ - ``array`` objects **can have number of dimensions > 2**;
+ - ``matrix`` objects **always have exactly two dimensions**.
- Convenience attributes
@@ -110,17 +131,17 @@
- ``array``
- - ``:)`` You can treat rank-1 arrays as *either* row or column
+ - ``:)`` You can treat one-dimensional arrays as *either* row or column
vectors. ``dot(A,v)`` treats ``v`` as a column vector, while
- ``dot(v,A)`` treats ``v`` as a row vector. This can save you
- having to type a lot of transposes.
+ ``dot(v,A)`` treats ``v`` as a row vector. This can save you having to
+ type a lot of transposes.
- ``<:(`` Having to use the ``dot()`` function for matrix-multiply is
messy -- ``dot(dot(A,B),C)`` vs. ``A*B*C``.
- ``:)`` Element-wise multiplication is easy: ``A*B``.
- ``:)`` ``array`` is the "default" NumPy type, so it gets the most
testing, and is the type most likely to be returned by 3rd party
code that uses NumPy.
- - ``:)`` Is quite at home handling data of any rank.
+ - ``:)`` Is quite at home handling data of any number of dimensions.
- ``:)`` Closer in semantics to tensor algebra, if you are familiar
with that.
- ``:)`` *All* operations (``*``, ``/``, ``+``, ``-`` etc.) are
@@ -129,9 +150,9 @@
- ``matrix``
- ``:\\`` Behavior is more like that of MATLAB® matrices.
- - ``<:(`` Maximum of rank-2. To hold rank-3 data you need ``array`` or
- perhaps a Python list of ``matrix``.
- - ``<:(`` Minimum of rank-2. You cannot have vectors. They must be
+ - ``<:(`` Maximum of two-dimensional. To hold three-dimensional data you
+ need ``array`` or perhaps a Python list of ``matrix``.
+ - ``<:(`` Minimum of two-dimensional. You cannot have vectors. They must be
cast as single-column or single-row matrices.
- ``<:(`` Since ``array`` is the default in NumPy, some functions may
return an ``array`` even if you give them a ``matrix`` as an
@@ -201,7 +222,7 @@
import scipy.linalg
Also assume below that if the Notes talk about "matrix" that the
-arguments are rank 2 entities.
+arguments are two-dimensional entities.
General Purpose Equivalents
---------------------------
@@ -212,30 +233,41 @@
* - **MATLAB**
- **numpy**
- **Notes**
+
* - ``help func``
- ``info(func)`` or ``help(func)`` or ``func?`` (in Ipython)
- get help on the function *func*
+
* - ``which func``
- `see note HELP <numpy-for-matlab-users.notes>`__
- find out where *func* is defined
+
* - ``type func``
- ``source(func)`` or ``func??`` (in Ipython)
- print source for *func* (if not a native function)
+
* - ``a && b``
- ``a and b``
- - short-circuiting logical AND operator (Python native operator); scalar arguments only
+ - short-circuiting logical AND operator (Python native operator);
+ scalar arguments only
+
* - ``a || b``
- ``a or b``
- - short-circuiting logical OR operator (Python native operator); scalar arguments only
+ - short-circuiting logical OR operator (Python native operator);
+ scalar arguments only
+
* - ``1*i``, ``1*j``, ``1i``, ``1j``
- ``1j``
- complex numbers
+
* - ``eps``
- ``np.spacing(1)``
- - Distance between 1 and the nearest floating point number
+ - Distance between 1 and the nearest floating point number.
+
* - ``ode45``
- ``scipy.integrate.ode(f).set_integrator('dopri5')``
- integrate an ODE with Runge-Kutta 4,5
+
* - ``ode15s``
- ``scipy.integrate.ode(f).set_integrator('vode', method='bdf', order=5)``
- integrate an ODE with BDF method
@@ -252,7 +284,7 @@
* - ``ndims(a)``
- ``ndim(a)`` or ``a.ndim``
- - get the number of dimensions of ``a`` (tensor rank)
+ - get the number of dimensions of an array
* - ``numel(a)``
- ``size(a)`` or ``a.size``
@@ -264,7 +296,9 @@
* - ``size(a,n)``
- ``a.shape[n-1]``
- - get the number of elements of the n-th dimension of array ``a``. (Note that MATLAB® uses 1 based indexing while Python uses 0 based indexing, See note :ref:`INDEXING <numpy-for-matlab-users.notes>`)
+ - get the number of elements of the n-th dimension of array ``a``. (Note
+ that MATLAB® uses 1 based indexing while Python uses 0 based indexing,
+ See note :ref:`INDEXING <numpy-for-matlab-users.notes>`)
* - ``[ 1 2 3; 4 5 6 ]``
- ``array([[1.,2.,3.], [4.,5.,6.]])``
@@ -297,15 +331,18 @@
* - ``a(1:3,5:9)``
- ``a[0:3][:,4:9]``
- - rows one to three and columns five to nine of ``a``. This gives read-only access.
+ - rows one to three and columns five to nine of ``a``. This gives
+ read-only access.
* - ``a([2,4,5],[1,3])``
- ``a[ix_([1,3,4],[0,2])]``
- - rows 2,4 and 5 and columns 1 and 3. This allows the matrix to be modified, and doesn't require a regular slice.
+ - rows 2,4 and 5 and columns 1 and 3. This allows the matrix to be
+ modified, and doesn't require a regular slice.
* - ``a(3:2:21,:)``
- ``a[ 2:21:2,:]``
- - every other row of ``a``, starting with the third and going to the twenty-first
+ - every other row of ``a``, starting with the third and going to the
+ twenty-first
* - ``a(1:2:end,:)``
- ``a[ ::2,:]``
@@ -345,8 +382,8 @@
* - ``(a>0.5)``
- ``(a>0.5)``
- - matrix whose i,jth element is (a_ij > 0.5). The Matlab result is
- an array of 0s and 1s. The NumPy result is an array of the boolean
+ - matrix whose i,jth element is (a_ij > 0.5). The Matlab result is an
+ array of 0s and 1s. The NumPy result is an array of the boolean
values ``False`` and ``True``.
* - ``find(a>0.5)``
@@ -387,11 +424,13 @@
* - ``1:10``
- ``arange(1.,11.)`` or ``r_[1.:11.]`` or ``r_[1:10:10j]``
- - create an increasing vector (see note :ref:`RANGES <numpy-for-matlab-users.notes>`)
+ - create an increasing vector (see note :ref:`RANGES
+ <numpy-for-matlab-users.notes>`)
* - ``0:9``
- ``arange(10.)`` or ``r_[:10.]`` or ``r_[:9:10j]``
- - create an increasing vector (see note :ref:`RANGES <numpy-for-matlab-users.notes>`)
+ - create an increasing vector (see note :ref:`RANGES
+ <numpy-for-matlab-users.notes>`)
* - ``[1:10]'``
- ``arange(1.,11.)[:, newaxis]``
@@ -399,15 +438,15 @@
* - ``zeros(3,4)``
- ``zeros((3,4))``
- - 3x4 rank-2 array full of 64-bit floating point zeros
+ - 3x4 two-dimensional array full of 64-bit floating point zeros
* - ``zeros(3,4,5)``
- ``zeros((3,4,5))``
- - 3x4x5 rank-3 array full of 64-bit floating point zeros
+ - 3x4x5 three-dimensional array full of 64-bit floating point zeros
* - ``ones(3,4)``
- ``ones((3,4))``
- - 3x4 rank-2 array full of 64-bit floating point ones
+ - 3x4 two-dimensional array full of 64-bit floating point ones
* - ``eye(3)``
- ``eye(3)``
@@ -419,7 +458,8 @@
* - ``diag(a,0)``
- ``diag(a,0)``
- - square diagonal matrix whose nonzero values are the elements of ``a``
+ - square diagonal matrix whose nonzero values are the elements of
+ ``a``
* - ``rand(3,4)``
- ``random.rand(3,4)``
@@ -450,7 +490,8 @@
- create m by n copies of ``a``
* - ``[a b]``
- - ``concatenate((a,b),1)`` or ``hstack((a,b))`` or ``column_stack((a,b))`` or ``c_[a,b]``
+ - ``concatenate((a,b),1)`` or ``hstack((a,b))`` or
+ ``column_stack((a,b))`` or ``c_[a,b]``
- concatenate columns of ``a`` and ``b``
* - ``[a; b]``
@@ -471,7 +512,8 @@
* - ``max(a,b)``
- ``maximum(a, b)``
- - compares ``a`` and ``b`` element-wise, and returns the maximum value from each pair
+ - compares ``a`` and ``b`` element-wise, and returns the maximum value
+ from each pair
* - ``norm(v)``
- ``sqrt(dot(v,v))`` or ``np.linalg.norm(v)``
@@ -479,11 +521,13 @@
* - ``a & b``
- ``logical_and(a,b)``
- - element-by-element AND operator (Numpy ufunc) :ref:`See note LOGICOPS <numpy-for-matlab-users.notes>`
+ - element-by-element AND operator (Numpy ufunc) :ref:`See note
+ LOGICOPS <numpy-for-matlab-users.notes>`
* - ``a | b``
- ``logical_or(a,b)``
- - element-by-element OR operator (Numpy ufunc) :ref:`See note LOGICOPS <numpy-for-matlab-users.notes>`
+ - element-by-element OR operator (Numpy ufunc) :ref:`See note LOGICOPS
+ <numpy-for-matlab-users.notes>`
* - ``bitand(a,b)``
- ``a & b``
@@ -503,10 +547,11 @@
* - ``rank(a)``
- ``linalg.matrix_rank(a)``
- - rank of a matrix ``a``
+ - matrix rank of a 2D array / matrix ``a``
* - ``a\b``
- - ``linalg.solve(a,b)`` if ``a`` is square; ``linalg.lstsq(a,b)`` otherwise
+ - ``linalg.solve(a,b)`` if ``a`` is square; ``linalg.lstsq(a,b)``
+ otherwise
- solution of a x = b for x
* - ``b/a``
@@ -519,7 +564,9 @@
* - ``chol(a)``
- ``linalg.cholesky(a).T``
- - cholesky factorization of a matrix (``chol(a)`` in matlab returns an upper triangular matrix, but ``linalg.cholesky(a)`` returns a lower triangular matrix)
+ - cholesky factorization of a matrix (``chol(a)`` in matlab returns an
+ upper triangular matrix, but ``linalg.cholesky(a)`` returns a lower
+ triangular matrix)
* - ``[V,D]=eig(a)``
- ``D,V = linalg.eig(a)``
@@ -622,6 +669,7 @@
inputs. Matlab treats any non-zero value as 1 and returns the logical
AND. For example (3 & 4) in Numpy is 0, while in Matlab both 3 and 4
are considered logical true and (3 & 4) returns 1.
+
- Precedence: Numpy's & operator is higher precedence than logical
operators like < and >; Matlab's is the reverse.
@@ -657,6 +705,7 @@
- To modify your Python search path to include the locations of your
own modules, define the ``PYTHONPATH`` environment variable.
+
- To have a particular script file executed when the interactive Python
interpreter is started, define the ``PYTHONSTARTUP`` environment
variable to contain the name of your startup script.
diff --git a/numpy/__init__.py b/numpy/__init__.py
index d4ef54d..0fcd509 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -184,9 +184,11 @@
pkgload.__doc__ = PackageLoader.__call__.__doc__
+ # We don't actually use this ourselves anymore, but I'm not 100% sure that
+ # no-one else in the world is using it (though I hope not)
from .testing import Tester
- test = Tester().test
- bench = Tester().bench
+ test = testing.nosetester._numpy_tester().test
+ bench = testing.nosetester._numpy_tester().bench
from . import core
from .core import *
diff --git a/numpy/_build_utils/apple_accelerate.py b/numpy/_build_utils/apple_accelerate.py
index d7351f4..2d5bbab 100644
--- a/numpy/_build_utils/apple_accelerate.py
+++ b/numpy/_build_utils/apple_accelerate.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
import sys
import re
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index c140360..e79720c 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -49,7 +49,7 @@
>>> type(fl)
<type 'numpy.flatiter'>
>>> for item in fl:
- ... print item
+ ... print(item)
...
0
1
@@ -1548,7 +1548,7 @@
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
- >>> print ind
+ >>> print(ind)
[2 0 4 6 5 3 1]
>>> [(a[i],b[i]) for i in ind]
@@ -3567,10 +3567,14 @@
Parameters
----------
- order : {'C', 'F', 'A'}, optional
- Whether to flatten in row-major (C-style) or
- column-major (Fortran-style) order or preserve the
- C/Fortran ordering from `a`. The default is 'C'.
+ order : {'C', 'F', 'A', 'K'}, optional
+ 'C' means to flatten in row-major (C-style) order.
+ 'F' means to flatten in column-major (Fortran-
+ style) order. 'A' means to flatten in column-major
+ order if `a` is Fortran *contiguous* in memory,
+ row-major order otherwise. 'K' means to flatten
+ `a` in the order the elements occur in memory.
+ The default is 'C'.
Returns
-------
@@ -3884,13 +3888,13 @@
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
- above. `new_order` codes can be any of::
+ below. `new_order` codes can be any of:
- * 'S' - swap dtype from current to opposite endian
- * {'<', 'L'} - little endian
- * {'>', 'B'} - big endian
- * {'=', 'N'} - native order
- * {'|', 'I'} - ignore (no change to byte order)
+ * 'S' - swap dtype from current to opposite endian
+ * {'<', 'L'} - little endian
+ * {'>', 'B'} - big endian
+ * {'=', 'N'} - native order
+ * {'|', 'I'} - ignore (no change to byte order)
The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
@@ -4769,7 +4773,7 @@
>>> y = x.view(dtype=np.int16, type=np.matrix)
>>> y
matrix([[513]], dtype=int16)
- >>> print type(y)
+ >>> print(type(y))
<class 'numpy.matrixlib.defmatrix.matrix'>
Creating a view on a structured array so it can be used in calculations
@@ -4785,7 +4789,7 @@
Making changes to the view changes the underlying array
>>> xv[0,1] = 20
- >>> print x
+ >>> print(x)
[(1, 20) (3, 4)]
Using a view to convert an array to a recarray:
@@ -4911,7 +4915,7 @@
[10000, 0, None]
>>> def err_handler(type, flag):
- ... print "Floating point error (%s), with flag %s" % (type, flag)
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> old_bufsize = np.setbufsize(20000)
>>> old_err = np.seterr(divide='raise')
@@ -4975,7 +4979,7 @@
[10000, 0, None]
>>> def err_handler(type, flag):
- ... print "Floating point error (%s), with flag %s" % (type, flag)
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> new_errobj = [20000, 12, err_handler]
>>> np.seterrobj(new_errobj)
@@ -5060,7 +5064,7 @@
>>> inds
array([1, 4, 3, 2])
>>> for n in range(x.size):
- ... print bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]]
+ ... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]])
...
0.0 <= 0.2 < 1.0
4.0 <= 6.4 < 10.0
@@ -5469,7 +5473,7 @@
1
>>> np.power.identity
1
- >>> print np.exp.identity
+ >>> print(np.exp.identity)
None
"""))
@@ -6177,7 +6181,7 @@
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
- >>> print dt.fields
+ >>> print(dt.fields)
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
"""))
@@ -6355,16 +6359,15 @@
Parameters
----------
new_order : string, optional
- Byte order to force; a value from the byte order
- specifications below. The default value ('S') results in
- swapping the current byte order.
- `new_order` codes can be any of::
+ Byte order to force; a value from the byte order specifications
+ below. The default value ('S') results in swapping the current
+ byte order. `new_order` codes can be any of:
- * 'S' - swap dtype from current to opposite endian
- * {'<', 'L'} - little endian
- * {'>', 'B'} - big endian
- * {'=', 'N'} - native order
- * {'|', 'I'} - ignore (no change to byte order)
+ * 'S' - swap dtype from current to opposite endian
+ * {'<', 'L'} - little endian
+ * {'>', 'B'} - big endian
+ * {'=', 'N'} - native order
+ * {'|', 'I'} - ignore (no change to byte order)
The code does a case-insensitive check on the first letter of
`new_order` for these alternatives. For example, any of '>'
@@ -7227,10 +7230,10 @@
The `new_order` code can be any from the following:
+ * 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
- * 'S' - swap dtype from current to opposite endian
* {'|', 'I'} - ignore (no change to byte order)
Parameters
diff --git a/numpy/compat/tests/test_compat.py b/numpy/compat/tests/test_compat.py
index 9822ab3..1ac2440 100644
--- a/numpy/compat/tests/test_compat.py
+++ b/numpy/compat/tests/test_compat.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from os.path import join
from numpy.compat import isfileobj
diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py
index 16dcbe0..e8719ca 100644
--- a/numpy/core/__init__.py
+++ b/numpy/core/__init__.py
@@ -55,9 +55,9 @@
__all__ += shape_base.__all__
-from numpy.testing import Tester
-test = Tester().test
-bench = Tester().bench
+from numpy.testing.nosetester import _numpy_tester
+test = _numpy_tester().test
+bench = _numpy_tester().bench
# Make it possible so that ufuncs can be pickled
# Here are the loading and unloading functions
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index a28b5a8..fefcb64 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -114,13 +114,13 @@
Floating point precision can be set:
>>> np.set_printoptions(precision=4)
- >>> print np.array([1.123456789])
+ >>> print(np.array([1.123456789]))
[ 1.1235]
Long arrays can be summarised:
>>> np.set_printoptions(threshold=5)
- >>> print np.arange(10)
+ >>> print(np.arange(10))
[0 1 2 ..., 7 8 9]
Small results can be suppressed:
@@ -420,8 +420,8 @@
Examples
--------
>>> x = np.array([1e-16,1,2,3])
- >>> print np.array2string(x, precision=2, separator=',',
- ... suppress_small=True)
+ >>> print(np.array2string(x, precision=2, separator=',',
+ ... suppress_small=True))
[ 0., 1., 2., 3.]
>>> x = np.arange(3.)
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 1975132..67d2c5b 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -518,7 +518,7 @@
See Also
--------
- rollaxis
+ moveaxis
argsort
Notes
@@ -1367,7 +1367,11 @@
(2, 3)
"""
- return asarray(a).trace(offset, axis1, axis2, dtype, out)
+ if isinstance(a, np.matrix):
+ # Get trace of matrix via an array to preserve backward compatibility.
+ return asarray(a).trace(offset, axis1, axis2, dtype, out)
+ else:
+ return asanyarray(a).trace(offset, axis1, axis2, dtype, out)
def ravel(a, order='C'):
@@ -1434,20 +1438,20 @@
It is equivalent to ``reshape(-1, order=order)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
- >>> print np.ravel(x)
+ >>> print(np.ravel(x))
[1 2 3 4 5 6]
- >>> print x.reshape(-1)
+ >>> print(x.reshape(-1))
[1 2 3 4 5 6]
- >>> print np.ravel(x, order='F')
+ >>> print(np.ravel(x, order='F'))
[1 4 2 5 3 6]
When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering:
- >>> print np.ravel(x.T)
+ >>> print(np.ravel(x.T))
[1 4 2 5 3 6]
- >>> print np.ravel(x.T, order='A')
+ >>> print(np.ravel(x.T, order='A'))
[1 2 3 4 5 6]
When ``order`` is 'K', it will preserve orderings that are neither 'C'
@@ -1739,31 +1743,30 @@
a : array_like
Elements to sum.
axis : None or int or tuple of ints, optional
- Axis or axes along which a sum is performed.
- The default (`axis` = `None`) is perform a sum over all
- the dimensions of the input array. `axis` may be negative, in
- which case it counts from the last to the first axis.
+ Axis or axes along which a sum is performed. The default,
+ axis=None, will sum all of the elements of the input array. If
+ axis is negative it counts from the last to the first axis.
.. versionadded:: 1.7.0
- If this is a tuple of ints, a sum is performed on multiple
- axes, instead of a single axis or all the axes as before.
+ If axis is a tuple of ints, a sum is performed on all of the axes
+ specified in the tuple instead of a single axis or all the axes as
+ before.
dtype : dtype, optional
- The type of the returned array and of the accumulator in which
- the elements are summed. By default, the dtype of `a` is used.
- An exception is when `a` has an integer type with less precision
- than the default platform integer. In that case, the default
- platform integer is used instead.
+ The type of the returned array and of the accumulator in which the
+ elements are summed. The dtype of `a` is used by default unless `a`
+ has an integer dtype of less precision than the default platform
+ integer. In that case, if `a` is signed then the platform integer
+ is used while if `a` is unsigned then an unsigned integer of the
+ same precision as the platform integer is used.
out : ndarray, optional
- Array into which the output is placed. By default, a new array is
- created. If `out` is given, it must be of the appropriate shape
- (the shape of `a` with `axis` removed, i.e.,
- ``numpy.delete(a.shape, axis)``). Its type is preserved. See
- `doc.ufuncs` (Section "Output arguments") for more details.
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output, but the type of the output
+ values will be cast if necessary.
keepdims : bool, optional
- If this is set to True, the axes which are reduced are left
- in the result as dimensions with size one. With this option,
- the result will broadcast correctly against the original `arr`.
+ If this is set to True, the axes which are reduced are left in the
+ result as dimensions with size one. With this option, the result
+ will broadcast correctly against the input array.
Returns
-------
@@ -2392,29 +2395,31 @@
a : array_like
Input data.
axis : None or int or tuple of ints, optional
- Axis or axes along which a product is performed.
- The default (`axis` = `None`) is perform a product over all
- the dimensions of the input array. `axis` may be negative, in
- which case it counts from the last to the first axis.
+ Axis or axes along which a product is performed. The default,
+ axis=None, will calculate the product of all the elements in the
+ input array. If axis is negative it counts from the last to the
+ first axis.
.. versionadded:: 1.7.0
- If this is a tuple of ints, a product is performed on multiple
- axes, instead of a single axis or all the axes as before.
- dtype : data-type, optional
- The data-type of the returned array, as well as of the accumulator
- in which the elements are multiplied. By default, if `a` is of
- integer type, `dtype` is the default platform integer. (Note: if
- the type of `a` is unsigned, then so is `dtype`.) Otherwise,
- the dtype is the same as that of `a`.
+ If axis is a tuple of ints, a product is performed on all of the
+ axes specified in the tuple instead of a single axis or all the
+ axes as before.
+ dtype : dtype, optional
+ The type of the returned array, as well as of the accumulator in
+ which the elements are multiplied. The dtype of `a` is used by
+ default unless `a` has an integer dtype of less precision than the
+ default platform integer. In that case, if `a` is signed then the
+ platform integer is used while if `a` is unsigned then an unsigned
+ integer of the same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
- the same shape as the expected output, but the type of the
- output values will be cast if necessary.
+ the same shape as the expected output, but the type of the output
+ values will be cast if necessary.
keepdims : bool, optional
- If this is set to True, the axes which are reduced are left
- in the result as dimensions with size one. With this option,
- the result will broadcast correctly against the original `arr`.
+ If this is set to True, the axes which are reduced are left in the
+ result as dimensions with size one. With this option, the result
+ will broadcast correctly against the input array.
Returns
-------
diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h
index 47ef94c..baf5549 100644
--- a/numpy/core/include/numpy/npy_common.h
+++ b/numpy/core/include/numpy/npy_common.h
@@ -7,6 +7,9 @@
#include <npy_config.h>
#endif
+/* need Python.h for npy_intp, npy_uintp */
+#include <Python.h>
+
/*
* gcc does not unroll even with -O3
* use with care, unrolling on modern cpus rarely speeds things up
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 3b442ea..a18b380 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -1,6 +1,7 @@
from __future__ import division, absolute_import, print_function
import sys
+import operator
import warnings
import collections
from numpy.core import multiarray
@@ -15,8 +16,10 @@
if sys.version_info[0] >= 3:
import pickle
basestring = str
+ import builtins
else:
import cPickle as pickle
+ import __builtin__ as builtins
loads = pickle.loads
@@ -31,15 +34,15 @@
'ascontiguousarray', 'asfortranarray', 'isfortran', 'empty_like',
'zeros_like', 'ones_like', 'correlate', 'convolve', 'inner', 'dot',
'einsum', 'outer', 'vdot', 'alterdot', 'restoredot', 'roll',
- 'rollaxis', 'cross', 'tensordot', 'array2string', 'get_printoptions',
- 'set_printoptions', 'array_repr', 'array_str', 'set_string_function',
- 'little_endian', 'require', 'fromiter', 'array_equal', 'array_equiv',
- 'indices', 'fromfunction', 'isclose', 'load', 'loads', 'isscalar',
- 'binary_repr', 'base_repr', 'ones', 'identity', 'allclose',
- 'compare_chararrays', 'putmask', 'seterr', 'geterr', 'setbufsize',
- 'getbufsize', 'seterrcall', 'geterrcall', 'errstate', 'flatnonzero',
- 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_', 'True_',
- 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE',
+ 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'array2string',
+ 'get_printoptions', 'set_printoptions', 'array_repr', 'array_str',
+ 'set_string_function', 'little_endian', 'require', 'fromiter',
+ 'array_equal', 'array_equiv', 'indices', 'fromfunction', 'isclose', 'load',
+ 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones', 'identity',
+ 'allclose', 'compare_chararrays', 'putmask', 'seterr', 'geterr',
+ 'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall', 'errstate',
+ 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_',
+ 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE',
'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like', 'matmul',
'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT',
'TooHardError',
@@ -1422,6 +1425,7 @@
See Also
--------
+ moveaxis : Move array axes to new positions.
roll : Roll the elements of an array by a number of positions along a
given axis.
@@ -1457,6 +1461,91 @@
return a.transpose(axes)
+def _validate_axis(axis, ndim, argname):
+ try:
+ axis = [operator.index(axis)]
+ except TypeError:
+ axis = list(axis)
+ axis = [a + ndim if a < 0 else a for a in axis]
+ if not builtins.all(0 <= a < ndim for a in axis):
+ raise ValueError('invalid axis for this array in `%s` argument' %
+ argname)
+ if len(set(axis)) != len(axis):
+ raise ValueError('repeated axis in `%s` argument' % argname)
+ return axis
+
+
+def moveaxis(a, source, destination):
+ """
+ Move axes of an array to new positions.
+
+ Other axes remain in their original order.
+
+ .. versionadded:: 1.11.0
+
+ Parameters
+ ----------
+ a : np.ndarray
+ The array whose axes should be reordered.
+ source : int or sequence of int
+ Original positions of the axes to move. These must be unique.
+ destination : int or sequence of int
+ Destination positions for each of the original axes. These must also be
+ unique.
+
+ Returns
+ -------
+ result : np.ndarray
+ Array with moved axes. This array is a view of the input array.
+
+ See Also
+ --------
+ transpose: Permute the dimensions of an array.
+ swapaxes: Interchange two axes of an array.
+
+ Examples
+ --------
+
+ >>> x = np.zeros((3, 4, 5))
+ >>> np.moveaxis(x, 0, -1).shape
+ (4, 5, 3)
+ >>> np.moveaxis(x, -1, 0).shape
+ (5, 3, 4)
+
+ These all achieve the same result:
+
+ >>> np.transpose(x).shape
+ (5, 4, 3)
+ >>> np.swapaxes(x, 0, -1).shape
+ (5, 4, 3)
+ >>> np.moveaxis(x, [0, 1], [-1, -2]).shape
+ (5, 4, 3)
+ >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
+ (5, 4, 3)
+
+ """
+ try:
+ # allow duck-array types if they define transpose
+ transpose = a.transpose
+ except AttributeError:
+ a = asarray(a)
+ transpose = a.transpose
+
+ source = _validate_axis(source, a.ndim, 'source')
+ destination = _validate_axis(destination, a.ndim, 'destination')
+ if len(source) != len(destination):
+ raise ValueError('`source` and `destination` arguments must have '
+ 'the same number of elements')
+
+ order = [n for n in range(a.ndim) if n not in source]
+
+ for dest, src in sorted(zip(destination, source)):
+ order.insert(dest, src)
+
+ result = transpose(order)
+ return result
+
+
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
return rollaxis(a, axis, 0)
@@ -1808,7 +1897,7 @@
>>> a = np.arange(10)
>>> a
HA! - What are you going to do now?
- >>> print a
+ >>> print(a)
[0 1 2 3 4 5 6 7 8 9]
We can reset the function to the default:
@@ -2710,7 +2799,7 @@
Callback upon error:
>>> def err_handler(type, flag):
- ... print "Floating point error (%s), with flag %s" % (type, flag)
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> saved_handler = np.seterrcall(err_handler)
@@ -2729,7 +2818,7 @@
>>> class Log(object):
... def write(self, msg):
- ... print "LOG: %s" % msg
+ ... print("LOG: %s" % msg)
...
>>> log = Log()
@@ -2787,7 +2876,7 @@
>>> oldsettings = np.seterr(all='call')
>>> def err_handler(type, flag):
- ... print "Floating point error (%s), with flag %s" % (type, flag)
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
>>> oldhandler = np.seterrcall(err_handler)
>>> np.array([1, 2, 3]) / 0.0
Floating point error (divide by zero), with flag 1
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index 7dc6e0b..1b6551e 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -822,7 +822,7 @@
Examples
--------
>>> for sctype in [np.int32, np.float, np.complex, np.string_, np.ndarray]:
- ... print np.sctype2char(sctype)
+ ... print(np.sctype2char(sctype))
l
d
D
diff --git a/numpy/core/records.py b/numpy/core/records.py
index b077553..9f5dcc8 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -425,7 +425,7 @@
def __array_finalize__(self, obj):
if self.dtype.type is not record:
- # if self.dtype is not np.record, invoke __setattr__ which will
+ # if self.dtype is not np.record, invoke __setattr__ which will
# convert it to a record if it is a void dtype.
self.dtype = self.dtype
@@ -496,13 +496,13 @@
return self.setfield(val, *res)
def __getitem__(self, indx):
- obj = ndarray.__getitem__(self, indx)
+ obj = super(recarray, self).__getitem__(indx)
# copy behavior of getattr, except that here
# we might also be returning a single element
if isinstance(obj, ndarray):
if obj.dtype.fields:
- obj = obj.view(recarray)
+ obj = obj.view(type(self))
if issubclass(obj.dtype.type, nt.void):
return obj.view(dtype=(self.dtype.type, obj.dtype))
return obj
@@ -567,7 +567,7 @@
>>> x2=np.array(['a','dd','xyz','12'])
>>> x3=np.array([1.1,2,3,4])
>>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c')
- >>> print r[1]
+ >>> print(r[1])
(2, 'dd', 2.0)
>>> x1[1]=34
>>> r.a
@@ -643,7 +643,7 @@
>>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)],
... names='col1,col2,col3')
- >>> print r[0]
+ >>> print(r[0])
(456, 'dbe', 1.2)
>>> r.col1
array([456, 2])
@@ -651,7 +651,7 @@
array(['dbe', 'de'],
dtype='|S3')
>>> import pickle
- >>> print pickle.loads(pickle.dumps(r))
+ >>> print(pickle.loads(pickle.dumps(r)))
[(456, 'dbe', 1.2) (2, 'de', 1.3)]
"""
@@ -736,7 +736,7 @@
>>> fd.seek(0)
>>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10,
... byteorder='<')
- >>> print r[5]
+ >>> print(r[5])
(0.5, 10, 'abcde')
>>> r.shape
(10,)
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index d93e475..57ddf33 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -104,7 +104,7 @@
OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh",
"rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow",
"copysign", "nextafter", "ftello", "fseeko",
- "strtoll", "strtoull", "cbrt", "strtold_l",]
+ "strtoll", "strtoull", "cbrt", "strtold_l", "fallocate"]
OPTIONAL_HEADERS = [
@@ -192,7 +192,7 @@
if sys.platform == "win32" and not mingw32():
try:
cmd.compiler.compile_options.remove("/GL")
- except ValueError:
+ except (AttributeError, ValueError):
pass
# We need to use _compile because we need the object filename
diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index 0dd2e16..599b48d 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -150,7 +150,7 @@
True
>>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
- ... print arr, arr.shape
+ ... print(arr, arr.shape)
...
[[[1]
[2]]] (1, 2, 1)
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 060f250..b2ba831 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -2866,7 +2866,9 @@
if (nip1 == NULL) {
goto finish;
}
- new->f->copyswap(nip1, ip1 + offset, swap, dummy);
+ memcpy(nip1, ip1 + offset, new->elsize);
+ if (swap)
+ new->f->copyswap(nip1, NULL, swap, dummy);
}
if (swap || !npy_is_aligned(nip2, new->alignment)) {
/* create buffer and copy */
@@ -2877,7 +2879,9 @@
}
goto finish;
}
- new->f->copyswap(nip2, ip2 + offset, swap, dummy);
+ memcpy(nip2, ip2 + offset, new->elsize);
+ if (swap)
+ new->f->copyswap(nip2, NULL, swap, dummy);
}
}
res = new->f->compare(nip1, nip2, dummy);
diff --git a/numpy/core/src/multiarray/cblasfuncs.c b/numpy/core/src/multiarray/cblasfuncs.c
index 67f325b..b11505c 100644
--- a/numpy/core/src/multiarray/cblasfuncs.c
+++ b/numpy/core/src/multiarray/cblasfuncs.c
@@ -111,6 +111,66 @@
}
+/*
+ * Helper: dispatch to appropriate cblas_?syrk for typenum.
+ */
+static void
+syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans,
+ int n, int k,
+ PyArrayObject *A, int lda, PyArrayObject *R)
+{
+ const void *Adata = PyArray_DATA(A);
+ void *Rdata = PyArray_DATA(R);
+ int ldc = PyArray_DIM(R, 1) > 1 ? PyArray_DIM(R, 1) : 1;
+
+ npy_intp i;
+ npy_intp j;
+
+ switch (typenum) {
+ case NPY_DOUBLE:
+ cblas_dsyrk(order, CblasUpper, trans, n, k, 1.,
+ Adata, lda, 0., Rdata, ldc);
+
+ for (i = 0; i < n; i++) {
+ for (j = i + 1; j < n; j++) {
+ *((npy_double*)PyArray_GETPTR2(R, j, i)) = *((npy_double*)PyArray_GETPTR2(R, i, j));
+ }
+ }
+ break;
+ case NPY_FLOAT:
+ cblas_ssyrk(order, CblasUpper, trans, n, k, 1.f,
+ Adata, lda, 0.f, Rdata, ldc);
+
+ for (i = 0; i < n; i++) {
+ for (j = i + 1; j < n; j++) {
+ *((npy_float*)PyArray_GETPTR2(R, j, i)) = *((npy_float*)PyArray_GETPTR2(R, i, j));
+ }
+ }
+ break;
+ case NPY_CDOUBLE:
+ cblas_zsyrk(order, CblasUpper, trans, n, k, oneD,
+ Adata, lda, zeroD, Rdata, ldc);
+
+ for (i = 0; i < n; i++) {
+ for (j = i + 1; j < n; j++) {
+ *((npy_cdouble*)PyArray_GETPTR2(R, j, i)) = *((npy_cdouble*)PyArray_GETPTR2(R, i, j));
+ }
+ }
+ break;
+ case NPY_CFLOAT:
+ cblas_csyrk(order, CblasUpper, trans, n, k, oneF,
+ Adata, lda, zeroF, Rdata, ldc);
+
+ for (i = 0; i < n; i++) {
+ for (j = i + 1; j < n; j++) {
+ *((npy_cfloat*)PyArray_GETPTR2(R, j, i)) = *((npy_cfloat*)PyArray_GETPTR2(R, i, j));
+ }
+ }
+ break;
+ }
+}
+
+
typedef enum {_scalar, _column, _row, _matrix} MatrixShape;
@@ -647,7 +707,30 @@
Trans2 = CblasTrans;
ldb = (PyArray_DIM(ap2, 0) > 1 ? PyArray_DIM(ap2, 0) : 1);
}
- gemm(typenum, Order, Trans1, Trans2, L, N, M, ap1, lda, ap2, ldb, ret);
+
+ /*
+ * Use syrk if we have a case of a matrix times its transpose.
+ * Otherwise, use gemm for all other cases.
+ */
+ if (
+ (PyArray_BYTES(ap1) == PyArray_BYTES(ap2)) &&
+ (PyArray_DIM(ap1, 0) == PyArray_DIM(ap2, 1)) &&
+ (PyArray_DIM(ap1, 1) == PyArray_DIM(ap2, 0)) &&
+ (PyArray_STRIDE(ap1, 0) == PyArray_STRIDE(ap2, 1)) &&
+ (PyArray_STRIDE(ap1, 1) == PyArray_STRIDE(ap2, 0)) &&
+ ((Trans1 == CblasTrans) ^ (Trans2 == CblasTrans)) &&
+ ((Trans1 == CblasNoTrans) ^ (Trans2 == CblasNoTrans))
+ ) {
+ if (Trans1 == CblasNoTrans) {
+ syrk(typenum, Order, Trans1, N, M, ap1, lda, ret);
+ }
+ else {
+ syrk(typenum, Order, Trans1, N, M, ap2, ldb, ret);
+ }
+ }
+ else {
+ gemm(typenum, Order, Trans1, Trans2, L, N, M, ap1, lda, ap2, ldb, ret);
+ }
NPY_END_ALLOW_THREADS;
}
@@ -662,169 +745,3 @@
Py_XDECREF(ret);
return NULL;
}
-
-
-/*
- * innerproduct(a,b)
- *
- * Returns the inner product of a and b for arrays of
- * floating point types. Like the generic NumPy equivalent the product
- * sum is over the last dimension of a and b.
- * NB: The first argument is not conjugated.
- *
- * This is for use by PyArray_InnerProduct. It is assumed on entry that the
- * arrays ap1 and ap2 have a common data type given by typenum that is
- * float, double, cfloat, or cdouble and have dimension <= 2.
- * The * __numpy_ufunc__ nonsense is also assumed to
- * have been taken care of.
- */
-
-NPY_NO_EXPORT PyObject *
-cblas_innerproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2)
-{
- int j, l, lda, ldb;
- int nd;
- double prior1, prior2;
- PyArrayObject *ret = NULL;
- npy_intp dimensions[NPY_MAXDIMS];
- PyTypeObject *subtype;
-
- /* assure contiguous arrays */
- if (!PyArray_IS_C_CONTIGUOUS(ap1)) {
- PyObject *op1 = PyArray_NewCopy(ap1, NPY_CORDER);
- Py_DECREF(ap1);
- ap1 = (PyArrayObject *)op1;
- if (ap1 == NULL) {
- goto fail;
- }
- }
- if (!PyArray_IS_C_CONTIGUOUS(ap2)) {
- PyObject *op2 = PyArray_NewCopy(ap2, NPY_CORDER);
- Py_DECREF(ap2);
- ap2 = (PyArrayObject *)op2;
- if (ap2 == NULL) {
- goto fail;
- }
- }
-
- if (PyArray_NDIM(ap1) == 0 || PyArray_NDIM(ap2) == 0) {
- /* One of ap1 or ap2 is a scalar */
- if (PyArray_NDIM(ap1) == 0) {
- /* Make ap2 the scalar */
- PyArrayObject *t = ap1;
- ap1 = ap2;
- ap2 = t;
- }
- for (l = 1, j = 0; j < PyArray_NDIM(ap1); j++) {
- dimensions[j] = PyArray_DIM(ap1, j);
- l *= dimensions[j];
- }
- nd = PyArray_NDIM(ap1);
- }
- else {
- /*
- * (PyArray_NDIM(ap1) <= 2 && PyArray_NDIM(ap2) <= 2)
- * Both ap1 and ap2 are vectors or matrices
- */
- l = PyArray_DIM(ap1, PyArray_NDIM(ap1) - 1);
-
- if (PyArray_DIM(ap2, PyArray_NDIM(ap2) - 1) != l) {
- dot_alignment_error(ap1, PyArray_NDIM(ap1) - 1,
- ap2, PyArray_NDIM(ap2) - 1);
- goto fail;
- }
- nd = PyArray_NDIM(ap1) + PyArray_NDIM(ap2) - 2;
-
- if (nd == 1)
- dimensions[0] = (PyArray_NDIM(ap1) == 2) ?
- PyArray_DIM(ap1, 0) : PyArray_DIM(ap2, 0);
- else if (nd == 2) {
- dimensions[0] = PyArray_DIM(ap1, 0);
- dimensions[1] = PyArray_DIM(ap2, 0);
- }
- }
-
- /* Choose which subtype to return */
- prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0);
- prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0);
- subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1));
-
- ret = (PyArrayObject *)PyArray_New(subtype, nd, dimensions,
- typenum, NULL, NULL, 0, 0,
- (PyObject *)
- (prior2 > prior1 ? ap2 : ap1));
-
- if (ret == NULL) {
- goto fail;
- }
-
- NPY_BEGIN_ALLOW_THREADS;
- memset(PyArray_DATA(ret), 0, PyArray_NBYTES(ret));
-
- if (PyArray_NDIM(ap2) == 0) {
- /* Multiplication by a scalar -- Level 1 BLAS */
- if (typenum == NPY_DOUBLE) {
- cblas_daxpy(l,
- *((double *)PyArray_DATA(ap2)),
- (double *)PyArray_DATA(ap1), 1,
- (double *)PyArray_DATA(ret), 1);
- }
- else if (typenum == NPY_CDOUBLE) {
- cblas_zaxpy(l,
- (double *)PyArray_DATA(ap2),
- (double *)PyArray_DATA(ap1), 1,
- (double *)PyArray_DATA(ret), 1);
- }
- else if (typenum == NPY_FLOAT) {
- cblas_saxpy(l,
- *((float *)PyArray_DATA(ap2)),
- (float *)PyArray_DATA(ap1), 1,
- (float *)PyArray_DATA(ret), 1);
- }
- else if (typenum == NPY_CFLOAT) {
- cblas_caxpy(l,
- (float *)PyArray_DATA(ap2),
- (float *)PyArray_DATA(ap1), 1,
- (float *)PyArray_DATA(ret), 1);
- }
- }
- else if (PyArray_NDIM(ap1) == 1 && PyArray_NDIM(ap2) == 1) {
- /* Dot product between two vectors -- Level 1 BLAS */
- blas_dot(typenum, l,
- PyArray_DATA(ap1), PyArray_ITEMSIZE(ap1),
- PyArray_DATA(ap2), PyArray_ITEMSIZE(ap2),
- PyArray_DATA(ret));
- }
- else if (PyArray_NDIM(ap1) == 2 && PyArray_NDIM(ap2) == 1) {
- /* Matrix-vector multiplication -- Level 2 BLAS */
- lda = (PyArray_DIM(ap1, 1) > 1 ? PyArray_DIM(ap1, 1) : 1);
- gemv(typenum, CblasRowMajor, CblasNoTrans, ap1, lda, ap2, 1, ret);
- }
- else if (PyArray_NDIM(ap1) == 1 && PyArray_NDIM(ap2) == 2) {
- /* Vector matrix multiplication -- Level 2 BLAS */
- lda = (PyArray_DIM(ap2, 1) > 1 ? PyArray_DIM(ap2, 1) : 1);
- gemv(typenum, CblasRowMajor, CblasNoTrans, ap2, lda, ap1, 1, ret);
- }
- else {
- /*
- * (PyArray_NDIM(ap1) == 2 && PyArray_NDIM(ap2) == 2)
- * Matrix matrix multiplication -- Level 3 BLAS
- */
- lda = (PyArray_DIM(ap1, 1) > 1 ? PyArray_DIM(ap1, 1) : 1);
- ldb = (PyArray_DIM(ap2, 1) > 1 ? PyArray_DIM(ap2, 1) : 1);
- gemm(typenum, CblasRowMajor, CblasNoTrans, CblasTrans,
- PyArray_DIM(ap1, 0), PyArray_DIM(ap2, 0), PyArray_DIM(ap1, 1),
- ap1, lda, ap2, ldb, ret);
- }
- NPY_END_ALLOW_THREADS;
-
- Py_DECREF(ap1);
- Py_DECREF(ap2);
- return PyArray_Return(ret);
-
- fail:
- Py_XDECREF(ap1);
- Py_XDECREF(ap2);
- Py_XDECREF(ret);
- return NULL;
-}
diff --git a/numpy/core/src/multiarray/cblasfuncs.h b/numpy/core/src/multiarray/cblasfuncs.h
index d3ec08d..66ce4ca 100644
--- a/numpy/core/src/multiarray/cblasfuncs.h
+++ b/numpy/core/src/multiarray/cblasfuncs.h
@@ -4,7 +4,4 @@
NPY_NO_EXPORT PyObject *
cblas_matrixproduct(int, PyArrayObject *, PyArrayObject *, PyArrayObject *);
-NPY_NO_EXPORT PyObject *
-cblas_innerproduct(int, PyArrayObject *, PyArrayObject *);
-
#endif
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index 8ffeeda..b9db3bb 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -529,14 +529,15 @@
}
/*
- * It would seem that for the following code to work, 'len' should
- * at least be 4. But because of the way 'guess' is normalized, it
- * will always be set to 1 if len <= 4. Given that, and that keys
- * outside of the 'arr' bounds have already been handled, and the
- * order in which comparisons happen below, it should become obvious
- * that it will work with any array of at least 2 items.
+ * If len <= 4 use linear search.
+ * From above we know key >= arr[0] when we start.
*/
- assert (len >= 2);
+ if (len <= 4) {
+ npy_intp i;
+
+ for (i = 1; i < len && key >= arr[i]; ++i);
+ return i - 1;
+ }
if (guess > len - 3) {
guess = len - 3;
@@ -546,36 +547,36 @@
}
/* check most likely values: guess - 1, guess, guess + 1 */
- if (key <= arr[guess]) {
- if (key <= arr[guess - 1]) {
+ if (key < arr[guess]) {
+ if (key < arr[guess - 1]) {
imax = guess - 1;
/* last attempt to restrict search to items in cache */
if (guess > LIKELY_IN_CACHE_SIZE &&
- key > arr[guess - LIKELY_IN_CACHE_SIZE]) {
+ key >= arr[guess - LIKELY_IN_CACHE_SIZE]) {
imin = guess - LIKELY_IN_CACHE_SIZE;
}
}
else {
- /* key > arr[guess - 1] */
+ /* key >= arr[guess - 1] */
return guess - 1;
}
}
else {
- /* key > arr[guess] */
- if (key <= arr[guess + 1]) {
+ /* key >= arr[guess] */
+ if (key < arr[guess + 1]) {
return guess;
}
else {
- /* key > arr[guess + 1] */
- if (key <= arr[guess + 2]) {
+ /* key >= arr[guess + 1] */
+ if (key < arr[guess + 2]) {
return guess + 1;
}
else {
- /* key > arr[guess + 2] */
+ /* key >= arr[guess + 2] */
imin = guess + 2;
/* last attempt to restrict search to items in cache */
if (guess < len - LIKELY_IN_CACHE_SIZE - 1 &&
- key <= arr[guess + LIKELY_IN_CACHE_SIZE]) {
+ key < arr[guess + LIKELY_IN_CACHE_SIZE]) {
imax = guess + LIKELY_IN_CACHE_SIZE;
}
}
@@ -673,7 +674,7 @@
}
}
- /* binary_search_with_guess needs at least a 2 item long array */
+ /* binary_search_with_guess needs at least a 3 item long array */
if (lenxp == 1) {
const npy_double xp_val = dx[0];
const npy_double fp_val = dy[0];
diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c
index 88064c1..d7a6178 100644
--- a/numpy/core/src/multiarray/conversion_utils.c
+++ b/numpy/core/src/multiarray/conversion_utils.c
@@ -540,6 +540,15 @@
return ret;
}
else if (!PyBytes_Check(object) || PyBytes_GET_SIZE(object) < 1) {
+ /* 2015-12-14, 1.11 */
+ int ret = DEPRECATE("Non-string object detected for "
+ "the array ordering. Please pass "
+ "in 'C', 'F', 'A', or 'K' instead");
+
+ if (ret < 0) {
+ return -1;
+ }
+
if (PyObject_IsTrue(object)) {
*val = NPY_FORTRANORDER;
}
@@ -553,6 +562,18 @@
}
else {
str = PyBytes_AS_STRING(object);
+ if (strlen(str) != 1) {
+ /* 2015-12-14, 1.11 */
+ int ret = DEPRECATE("Non length-one string passed "
+ "in for the array ordering. "
+ "Please pass in 'C', 'F', 'A', "
+ "or 'K' instead");
+
+ if (ret < 0) {
+ return -1;
+ }
+ }
+
if (str[0] == 'C' || str[0] == 'c') {
*val = NPY_CORDER;
}
diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c
index 7cb2758..5499160 100644
--- a/numpy/core/src/multiarray/convert.c
+++ b/numpy/core/src/multiarray/convert.c
@@ -2,6 +2,8 @@
#include <Python.h>
#include "structmember.h"
+#include <npy_config.h>
+
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
#include "numpy/arrayobject.h"
@@ -19,6 +21,44 @@
#include "convert.h"
+int
+fallocate(int fd, int mode, off_t offset, off_t len);
+
+/*
+ * allocate nbytes of diskspace for file fp
+ * this allows the filesystem to make smarter allocation decisions and gives a
+ * fast exit on not enough free space
+ * returns -1 and raises exception on no space, ignores all other errors
+ */
+static int
+npy_fallocate(npy_intp nbytes, FILE * fp)
+{
+ /*
+ * unknown behavior on non-linux so don't try it
+ * we don't want explicit zeroing to happen
+ */
+#if defined(HAVE_FALLOCATE) && defined(__linux__)
+ int r;
+ /* small files not worth the system call */
+ if (nbytes < 16 * 1024 * 1024) {
+ return 0;
+ }
+ /* btrfs can take a while to allocate making release worthwhile */
+ NPY_BEGIN_ALLOW_THREADS;
+ r = fallocate(fileno(fp), 0, npy_ftell(fp), nbytes);
+ NPY_END_ALLOW_THREADS;
+ /*
+ * early exit on no space, other errors will also get found during fwrite
+ */
+ if (r == -1 && errno == ENOSPC) {
+ PyErr_Format(PyExc_IOError, "Not enough free space to write "
+ "%"NPY_INTP_FMT" bytes", nbytes);
+ return -1;
+ }
+#endif
+ return 0;
+}
+
/*
* Converts a subarray of 'self' into lists, with starting data pointer
* 'dataptr' and from dimension 'startdim' to the last dimension of 'self'.
@@ -92,6 +132,9 @@
"cannot write object arrays to a file in binary mode");
return -1;
}
+ if (npy_fallocate(PyArray_NBYTES(self), fp) != 0) {
+ return -1;
+ }
if (PyArray_ISCONTIGUOUS(self)) {
size = PyArray_SIZE(self);
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index e23cbe3..2b8c352 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -1926,14 +1926,34 @@
/* Raise an error if the casting rule isn't followed */
if (!PyArray_CanCastArrayTo(arr, newtype, casting)) {
PyObject *errmsg;
+ PyArray_Descr *arr_descr = NULL;
+ PyObject *arr_descr_repr = NULL;
+ PyObject *newtype_repr = NULL;
+ PyErr_Clear();
errmsg = PyUString_FromString("Cannot cast array data from ");
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(arr)));
+ arr_descr = PyArray_DESCR(arr);
+ if (arr_descr == NULL) {
+ Py_DECREF(newtype);
+ Py_DECREF(errmsg);
+ return NULL;
+ }
+ arr_descr_repr = PyObject_Repr((PyObject *)arr_descr);
+ if (arr_descr_repr == NULL) {
+ Py_DECREF(newtype);
+ Py_DECREF(errmsg);
+ return NULL;
+ }
+ PyUString_ConcatAndDel(&errmsg, arr_descr_repr);
PyUString_ConcatAndDel(&errmsg,
PyUString_FromString(" to "));
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)newtype));
+ newtype_repr = PyObject_Repr((PyObject *)newtype);
+ if (newtype_repr == NULL) {
+ Py_DECREF(newtype);
+ Py_DECREF(errmsg);
+ return NULL;
+ }
+ PyUString_ConcatAndDel(&errmsg, newtype_repr);
PyUString_ConcatAndDel(&errmsg,
PyUString_FromFormat(" according to the rule %s",
npy_casting_to_string(casting)));
diff --git a/numpy/core/src/multiarray/datetime_busday.c b/numpy/core/src/multiarray/datetime_busday.c
index 331e104..4fade4d 100644
--- a/numpy/core/src/multiarray/datetime_busday.c
+++ b/numpy/core/src/multiarray/datetime_busday.c
@@ -889,7 +889,7 @@
break;
case 'p':
if (strcmp(str, "modifiedpreceding") == 0) {
- *roll = NPY_BUSDAY_MODIFIEDFOLLOWING;
+ *roll = NPY_BUSDAY_MODIFIEDPRECEDING;
goto finish;
}
break;
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 829994b..5099e3e 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -1456,10 +1456,10 @@
int i, ntot, err=0;
ntot = n + nadd;
- if (ntot < 2 || ntot > NPY_MAXARGS) {
+ if (ntot < 1 || ntot > NPY_MAXARGS) {
PyErr_Format(PyExc_ValueError,
- "Need between 2 and (%d) " \
- "array objects (inclusive).", NPY_MAXARGS);
+ "Need at least 1 and at most %d "
+ "array objects.", NPY_MAXARGS);
return NULL;
}
multi = PyArray_malloc(sizeof(PyArrayMultiIterObject));
@@ -1522,10 +1522,10 @@
int i, err = 0;
- if (n < 2 || n > NPY_MAXARGS) {
+ if (n < 1 || n > NPY_MAXARGS) {
PyErr_Format(PyExc_ValueError,
- "Need between 2 and (%d) " \
- "array objects (inclusive).", NPY_MAXARGS);
+ "Need at least 1 and at most %d "
+ "array objects.", NPY_MAXARGS);
return NULL;
}
@@ -1603,12 +1603,12 @@
++n;
}
}
- if (n < 2 || n > NPY_MAXARGS) {
+ if (n < 1 || n > NPY_MAXARGS) {
if (PyErr_Occurred()) {
return NULL;
}
PyErr_Format(PyExc_ValueError,
- "Need at least two and fewer than (%d) "
+ "Need at least 1 and at most %d "
"array objects.", NPY_MAXARGS);
return NULL;
}
diff --git a/numpy/core/src/multiarray/multiarray_tests.c.src b/numpy/core/src/multiarray/multiarray_tests.c.src
index 5e247e1..45092dc 100644
--- a/numpy/core/src/multiarray/multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/multiarray_tests.c.src
@@ -778,7 +778,7 @@
test_as_c_array(PyObject *NPY_UNUSED(self), PyObject *args)
{
PyArrayObject *array_obj;
- npy_intp dims[3]; // max 3-dim
+ npy_intp dims[3]; /* max 3-dim */
npy_intp i=0, j=0, k=0;
npy_intp num_dims = 0;
PyArray_Descr *descr = NULL;
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index b9d7902..2c17ebe 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -813,121 +813,69 @@
NPY_NO_EXPORT PyObject *
PyArray_InnerProduct(PyObject *op1, PyObject *op2)
{
- PyArrayObject *ap1, *ap2, *ret = NULL;
- PyArrayIterObject *it1, *it2;
- npy_intp i, j, l;
- int typenum, nd, axis;
- npy_intp is1, is2, os;
- char *op;
- npy_intp dimensions[NPY_MAXDIMS];
- PyArray_DotFunc *dot;
- PyArray_Descr *typec;
- NPY_BEGIN_THREADS_DEF;
+ PyArrayObject *ap1 = NULL;
+ PyArrayObject *ap2 = NULL;
+ int typenum;
+ PyArray_Descr *typec = NULL;
+ PyObject* ap2t = NULL;
+ npy_intp dims[NPY_MAXDIMS];
+ PyArray_Dims newaxes = {dims, 0};
+ int i;
+ PyObject* ret = NULL;
typenum = PyArray_ObjectType(op1, 0);
typenum = PyArray_ObjectType(op2, typenum);
-
typec = PyArray_DescrFromType(typenum);
if (typec == NULL) {
- return NULL;
- }
- Py_INCREF(typec);
- ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0,
- NPY_ARRAY_ALIGNED, NULL);
- if (ap1 == NULL) {
- Py_DECREF(typec);
- return NULL;
- }
- ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0,
- NPY_ARRAY_ALIGNED, NULL);
- if (ap2 == NULL) {
- Py_DECREF(ap1);
- return NULL;
- }
-
-#if defined(HAVE_CBLAS)
- if (PyArray_NDIM(ap1) <= 2 && PyArray_NDIM(ap2) <= 2 &&
- (NPY_DOUBLE == typenum || NPY_CDOUBLE == typenum ||
- NPY_FLOAT == typenum || NPY_CFLOAT == typenum)) {
- return cblas_innerproduct(typenum, ap1, ap2);
- }
-#endif
-
- if (PyArray_NDIM(ap1) == 0 || PyArray_NDIM(ap2) == 0) {
- ret = (PyArray_NDIM(ap1) == 0 ? ap1 : ap2);
- ret = (PyArrayObject *)Py_TYPE(ret)->tp_as_number->nb_multiply(
- (PyObject *)ap1, (PyObject *)ap2);
- Py_DECREF(ap1);
- Py_DECREF(ap2);
- return (PyObject *)ret;
- }
-
- l = PyArray_DIMS(ap1)[PyArray_NDIM(ap1) - 1];
- if (PyArray_DIMS(ap2)[PyArray_NDIM(ap2) - 1] != l) {
- dot_alignment_error(ap1, PyArray_NDIM(ap1) - 1,
- ap2, PyArray_NDIM(ap2) - 1);
goto fail;
}
- nd = PyArray_NDIM(ap1) + PyArray_NDIM(ap2) - 2;
- j = 0;
- for (i = 0; i < PyArray_NDIM(ap1) - 1; i++) {
- dimensions[j++] = PyArray_DIMS(ap1)[i];
+ Py_INCREF(typec);
+ ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0,
+ NPY_ARRAY_ALIGNED, NULL);
+ if (ap1 == NULL) {
+ Py_DECREF(typec);
+ goto fail;
}
- for (i = 0; i < PyArray_NDIM(ap2) - 1; i++) {
- dimensions[j++] = PyArray_DIMS(ap2)[i];
+ ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0,
+ NPY_ARRAY_ALIGNED, NULL);
+ if (ap2 == NULL) {
+ goto fail;
}
- /*
- * Need to choose an output array that can hold a sum
- * -- use priority to determine which subtype.
- */
- ret = new_array_for_sum(ap1, ap2, NULL, nd, dimensions, typenum);
+ newaxes.len = PyArray_NDIM(ap2);
+ if ((PyArray_NDIM(ap1) >= 1) && (newaxes.len >= 2)) {
+ for (i = 0; i < newaxes.len - 2; i++) {
+ dims[i] = (npy_intp)i;
+ }
+ dims[newaxes.len - 2] = newaxes.len - 1;
+ dims[newaxes.len - 1] = newaxes.len - 2;
+
+ ap2t = PyArray_Transpose(ap2, &newaxes);
+ if (ap2t == NULL) {
+ goto fail;
+ }
+ }
+ else {
+ ap2t = (PyObject *)ap2;
+ Py_INCREF(ap2);
+ }
+
+ ret = PyArray_MatrixProduct2((PyObject *)ap1, ap2t, NULL);
if (ret == NULL) {
goto fail;
}
- /* Ensure that multiarray.inner(<Nx0>,<Mx0>) -> zeros((N,M)) */
- if (PyArray_SIZE(ap1) == 0 && PyArray_SIZE(ap2) == 0) {
- memset(PyArray_DATA(ret), 0, PyArray_NBYTES(ret));
- }
- dot = (PyArray_DESCR(ret)->f->dotfunc);
- if (dot == NULL) {
- PyErr_SetString(PyExc_ValueError,
- "dot not available for this type");
- goto fail;
- }
- is1 = PyArray_STRIDES(ap1)[PyArray_NDIM(ap1) - 1];
- is2 = PyArray_STRIDES(ap2)[PyArray_NDIM(ap2) - 1];
- op = PyArray_DATA(ret);
- os = PyArray_DESCR(ret)->elsize;
- axis = PyArray_NDIM(ap1) - 1;
- it1 = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *)ap1, &axis);
- axis = PyArray_NDIM(ap2) - 1;
- it2 = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *)ap2, &axis);
- NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(ap2));
- while (it1->index < it1->size) {
- while (it2->index < it2->size) {
- dot(it1->dataptr, is1, it2->dataptr, is2, op, l, ret);
- op += os;
- PyArray_ITER_NEXT(it2);
- }
- PyArray_ITER_NEXT(it1);
- PyArray_ITER_RESET(it2);
- }
- NPY_END_THREADS_DESCR(PyArray_DESCR(ap2));
- Py_DECREF(it1);
- Py_DECREF(it2);
- if (PyErr_Occurred()) {
- goto fail;
- }
+
Py_DECREF(ap1);
Py_DECREF(ap2);
- return (PyObject *)ret;
+ Py_DECREF(ap2t);
+ return ret;
fail:
Py_XDECREF(ap1);
Py_XDECREF(ap2);
+ Py_XDECREF(ap2t);
Py_XDECREF(ret);
return NULL;
}
diff --git a/numpy/core/src/npymath/npy_math.c.src b/numpy/core/src/npymath/npy_math.c.src
index 7f62810..4dcb019 100644
--- a/numpy/core/src/npymath/npy_math.c.src
+++ b/numpy/core/src/npymath/npy_math.c.src
@@ -221,7 +221,20 @@
#ifndef HAVE_ACOSH
double npy_acosh(double x)
{
- return 2*npy_log(npy_sqrt((x + 1.0)/2) + npy_sqrt((x - 1.0)/2));
+ if (x < 1.0) {
+ return NPY_NAN;
+ }
+
+ if (npy_isfinite(x)) {
+ if (x > 1e8) {
+ return npy_log(x) + NPY_LOGE2;
+ }
+ else {
+ double u = x - 1.0;
+ return npy_log1p(u + npy_sqrt(2*u + u*u));
+ }
+ }
+ return x;
}
#endif
@@ -260,6 +273,9 @@
#endif
#ifndef HAVE_RINT
+#if defined(_MSC_VER) && (_MSC_VER == 1500) && !defined(_WIN64)
+#pragma optimize("", off)
+#endif
double npy_rint(double x)
{
double y, r;
@@ -280,6 +296,9 @@
}
return y;
}
+#if defined(_MSC_VER) && (_MSC_VER == 1500) && !defined(_WIN64)
+#pragma optimize("", on)
+#endif
#endif
#ifndef HAVE_TRUNC
diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/private/npy_config.h
index fa20eb4..eb9c1e1 100644
--- a/numpy/core/src/private/npy_config.h
+++ b/numpy/core/src/private/npy_config.h
@@ -93,6 +93,12 @@
#undef HAVE_CATANH
#undef HAVE_CATANHF
#undef HAVE_CATANHL
+#undef HAVE_CACOS
+#undef HAVE_CACOSF
+#undef HAVE_CACOSL
+#undef HAVE_CACOSH
+#undef HAVE_CACOSHF
+#undef HAVE_CACOSHL
#endif
#undef TRIG_OK
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index aff6180..fc9ffec 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -1558,14 +1558,11 @@
/**begin repeat1
* #kind = isnan, isinf, isfinite, signbit#
* #func = npy_isnan, npy_isinf, npy_isfinite, npy_signbit#
- * #isnan = 1, 0*3#
**/
NPY_NO_EXPORT void
@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- char * margs[] = {args[0], args[0], args[1]};
- npy_intp msteps[] = {steps[0], steps[0], steps[1]};
- if (!@isnan@ || !run_binary_simd_not_equal_@TYPE@(margs, dimensions, msteps)) {
+ if (!run_@kind@_simd_@TYPE@(args, dimensions, steps)) {
UNARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
*((npy_bool *)op1) = @func@(in1) != 0;
diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src
index 84695f5..5da87ef 100644
--- a/numpy/core/src/umath/simd.inc.src
+++ b/numpy/core/src/umath/simd.inc.src
@@ -25,6 +25,7 @@
#endif
#include <assert.h>
#include <stdlib.h>
+#include <float.h>
#include <string.h> /* for memcpy */
/* Figure out the right abs function for pointer addresses */
@@ -259,6 +260,32 @@
/**end repeat1**/
+/**begin repeat1
+ * #kind = isnan, isfinite, isinf, signbit#
+ */
+
+#if @vector@ && defined NPY_HAVE_SSE2_INTRINSICS
+
+static void
+sse2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, npy_intp n);
+
+#endif
+
+static NPY_INLINE int
+run_@kind@_simd_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps)
+{
+#if @vector@ && defined NPY_HAVE_SSE2_INTRINSICS
+ if (steps[0] == sizeof(@type@) && steps[1] == 1 &&
+ npy_is_aligned(args[0], sizeof(@type@))) {
+ sse2_@kind@_@TYPE@((npy_bool*)args[1], (@type@*)args[0], dimensions[0]);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/**end repeat1**/
+
/**end repeat**/
/*
@@ -528,11 +555,104 @@
#endif
}
+static void
+sse2_signbit_@TYPE@(npy_bool * op, @type@ * ip1, npy_intp n)
+{
+ LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) {
+ op[i] = npy_signbit(ip1[i]) != 0;
+ }
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ a = @vpre@_load_@vsuf@(&ip1[i]);
+ int r = @vpre@_movemask_@vsuf@(a);
+ if (sizeof(@type@) == 8) {
+ op[i] = r & 1;
+ op[i + 1] = (r >> 1);
+ }
+ else {
+ op[i] = r & 1;
+ op[i + 1] = (r >> 1) & 1;
+ op[i + 2] = (r >> 2) & 1;
+ op[i + 3] = (r >> 3);
+ }
+ }
+ LOOP_BLOCKED_END {
+ op[i] = npy_signbit(ip1[i]) != 0;
+ }
+}
+
+/**begin repeat1
+ * #kind = isnan, isfinite, isinf#
+ * #var = 0, 1, 2#
+ */
+
+static void
+sse2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, npy_intp n)
+{
+#if @var@ != 0 /* isinf/isfinite */
+ /* signbit mask 0x7FFFFFFF after andnot */
+ const @vtype@ mask = @vpre@_set1_@vsuf@(-0.@c@);
+ const @vtype@ ones = @vpre@_cmpeq_@vsuf@(@vpre@_setzero_@vsuf@(),
+ @vpre@_setzero_@vsuf@());
+#if @double@
+ const @vtype@ fltmax = @vpre@_set1_@vsuf@(DBL_MAX);
+#else
+ const @vtype@ fltmax = @vpre@_set1_@vsuf@(FLT_MAX);
+#endif
+#endif
+ LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) {
+ op[i] = npy_@kind@(ip1[i]) != 0;
+ }
+ LOOP_BLOCKED(@type@, 64) {
+ @vtype@ a = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]);
+ @vtype@ b = @vpre@_load_@vsuf@(&ip1[i + 1 * 16 / sizeof(@type@)]);
+ @vtype@ c = @vpre@_load_@vsuf@(&ip1[i + 2 * 16 / sizeof(@type@)]);
+ @vtype@ d = @vpre@_load_@vsuf@(&ip1[i + 3 * 16 / sizeof(@type@)]);
+ @vtype@ r1, r2, r3, r4;
+#if @var@ != 0 /* isinf/isfinite */
+ /* fabs via masking of sign bit */
+ r1 = @vpre@_andnot_@vsuf@(mask, a);
+ r2 = @vpre@_andnot_@vsuf@(mask, b);
+ r3 = @vpre@_andnot_@vsuf@(mask, c);
+ r4 = @vpre@_andnot_@vsuf@(mask, d);
+#if @var@ == 1 /* isfinite */
+ /* negative compare against max float, nan is always true */
+ r1 = @vpre@_cmpnle_@vsuf@(r1, fltmax);
+ r2 = @vpre@_cmpnle_@vsuf@(r2, fltmax);
+ r3 = @vpre@_cmpnle_@vsuf@(r3, fltmax);
+ r4 = @vpre@_cmpnle_@vsuf@(r4, fltmax);
+#else /* isinf */
+ r1 = @vpre@_cmpnlt_@vsuf@(fltmax, r1);
+ r2 = @vpre@_cmpnlt_@vsuf@(fltmax, r2);
+ r3 = @vpre@_cmpnlt_@vsuf@(fltmax, r3);
+ r4 = @vpre@_cmpnlt_@vsuf@(fltmax, r4);
+#endif
+ /* flip results to what we want (andnot as there is no sse not) */
+ r1 = @vpre@_andnot_@vsuf@(r1, ones);
+ r2 = @vpre@_andnot_@vsuf@(r2, ones);
+ r3 = @vpre@_andnot_@vsuf@(r3, ones);
+ r4 = @vpre@_andnot_@vsuf@(r4, ones);
+#endif
+#if @var@ == 0 /* isnan */
+ r1 = @vpre@_cmpneq_@vsuf@(a, a);
+ r2 = @vpre@_cmpneq_@vsuf@(b, b);
+ r3 = @vpre@_cmpneq_@vsuf@(c, c);
+ r4 = @vpre@_cmpneq_@vsuf@(d, d);
+#endif
+ sse2_compress4_to_byte_@TYPE@(r1, r2, r3, &r4, &op[i]);
+ }
+ LOOP_BLOCKED_END {
+ op[i] = npy_@kind@(ip1[i]) != 0;
+ }
+ /* silence exceptions from comparisons */
+ npy_clear_floatstatus();
+}
+
+/**end repeat1**/
+
/**begin repeat1
* #kind = equal, not_equal, less, less_equal, greater, greater_equal#
* #OP = ==, !=, <, <=, >, >=#
* #VOP = cmpeq, cmpneq, cmplt, cmple, cmpgt, cmpge#
- * #neq = 0, 1, 0*4#
*/
/* sets invalid fpu flag on QNaN for consistency with packed compare */
@@ -554,36 +674,20 @@
LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) {
op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[i], ip2[i]);
}
- /* isnan special unary case */
- if (@neq@ && ip1 == ip2) {
- LOOP_BLOCKED(@type@, 64) {
- @vtype@ a = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]);
- @vtype@ b = @vpre@_load_@vsuf@(&ip1[i + 1 * 16 / sizeof(@type@)]);
- @vtype@ c = @vpre@_load_@vsuf@(&ip1[i + 2 * 16 / sizeof(@type@)]);
- @vtype@ d = @vpre@_load_@vsuf@(&ip1[i + 3 * 16 / sizeof(@type@)]);
- @vtype@ r1 = @vpre@_@VOP@_@vsuf@(a, a);
- @vtype@ r2 = @vpre@_@VOP@_@vsuf@(b, b);
- @vtype@ r3 = @vpre@_@VOP@_@vsuf@(c, c);
- @vtype@ r4 = @vpre@_@VOP@_@vsuf@(d, d);
- sse2_compress4_to_byte_@TYPE@(r1, r2, r3, &r4, &op[i]);
- }
- }
- else {
- LOOP_BLOCKED(@type@, 64) {
- @vtype@ a1 = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]);
- @vtype@ b1 = @vpre@_load_@vsuf@(&ip1[i + 1 * 16 / sizeof(@type@)]);
- @vtype@ c1 = @vpre@_load_@vsuf@(&ip1[i + 2 * 16 / sizeof(@type@)]);
- @vtype@ d1 = @vpre@_load_@vsuf@(&ip1[i + 3 * 16 / sizeof(@type@)]);
- @vtype@ a2 = @vpre@_loadu_@vsuf@(&ip2[i + 0 * 16 / sizeof(@type@)]);
- @vtype@ b2 = @vpre@_loadu_@vsuf@(&ip2[i + 1 * 16 / sizeof(@type@)]);
- @vtype@ c2 = @vpre@_loadu_@vsuf@(&ip2[i + 2 * 16 / sizeof(@type@)]);
- @vtype@ d2 = @vpre@_loadu_@vsuf@(&ip2[i + 3 * 16 / sizeof(@type@)]);
- @vtype@ r1 = @vpre@_@VOP@_@vsuf@(a1, a2);
- @vtype@ r2 = @vpre@_@VOP@_@vsuf@(b1, b2);
- @vtype@ r3 = @vpre@_@VOP@_@vsuf@(c1, c2);
- @vtype@ r4 = @vpre@_@VOP@_@vsuf@(d1, d2);
- sse2_compress4_to_byte_@TYPE@(r1, r2, r3, &r4, &op[i]);
- }
+ LOOP_BLOCKED(@type@, 64) {
+ @vtype@ a1 = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]);
+ @vtype@ b1 = @vpre@_load_@vsuf@(&ip1[i + 1 * 16 / sizeof(@type@)]);
+ @vtype@ c1 = @vpre@_load_@vsuf@(&ip1[i + 2 * 16 / sizeof(@type@)]);
+ @vtype@ d1 = @vpre@_load_@vsuf@(&ip1[i + 3 * 16 / sizeof(@type@)]);
+ @vtype@ a2 = @vpre@_loadu_@vsuf@(&ip2[i + 0 * 16 / sizeof(@type@)]);
+ @vtype@ b2 = @vpre@_loadu_@vsuf@(&ip2[i + 1 * 16 / sizeof(@type@)]);
+ @vtype@ c2 = @vpre@_loadu_@vsuf@(&ip2[i + 2 * 16 / sizeof(@type@)]);
+ @vtype@ d2 = @vpre@_loadu_@vsuf@(&ip2[i + 3 * 16 / sizeof(@type@)]);
+ @vtype@ r1 = @vpre@_@VOP@_@vsuf@(a1, a2);
+ @vtype@ r2 = @vpre@_@VOP@_@vsuf@(b1, b2);
+ @vtype@ r3 = @vpre@_@VOP@_@vsuf@(c1, c2);
+ @vtype@ r4 = @vpre@_@VOP@_@vsuf@(d1, d2);
+ sse2_compress4_to_byte_@TYPE@(r1, r2, r3, &r4, &op[i]);
}
LOOP_BLOCKED_END {
op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[i], ip2[i]);
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index 5fa2818..8a8eafe 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -571,9 +571,9 @@
"Verify that datetime dtype __setstate__ can handle bad arguments"
dt = np.dtype('>M8[us]')
assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1))
- assert (dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
+ assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))
- assert (dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
+ assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
def test_dtype_promotion(self):
# datetime <op> datetime computes the metadata gcd
@@ -1524,6 +1524,12 @@
assert_equal(
np.busday_offset('2010-10-30', 0, roll='modifiedpreceding'),
np.datetime64('2010-10-29'))
+ assert_equal(
+ np.busday_offset('2010-10-16', 0, roll='modifiedfollowing'),
+ np.datetime64('2010-10-18'))
+ assert_equal(
+ np.busday_offset('2010-10-16', 0, roll='modifiedpreceding'),
+ np.datetime64('2010-10-15'))
# roll='raise' by default
assert_raises(ValueError, np.busday_offset, '2011-06-04', 0)
diff --git a/numpy/core/tests/test_defchararray.py b/numpy/core/tests/test_defchararray.py
index 9ef3164..e828b87 100644
--- a/numpy/core/tests/test_defchararray.py
+++ b/numpy/core/tests/test_defchararray.py
@@ -680,15 +680,15 @@
dtype='S4').view(np.chararray)
sl1 = arr[:]
assert_array_equal(sl1, arr)
- assert sl1.base is arr
- assert sl1.base.base is arr.base
+ assert_(sl1.base is arr)
+ assert_(sl1.base.base is arr.base)
sl2 = arr[:, :]
assert_array_equal(sl2, arr)
- assert sl2.base is arr
- assert sl2.base.base is arr.base
+ assert_(sl2.base is arr)
+ assert_(sl2.base.base is arr.base)
- assert arr[0, 0] == asbytes('abc')
+ assert_(arr[0, 0] == asbytes('abc'))
def test_empty_indexing():
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 8f7e55d..65ddc1e 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -89,7 +89,7 @@
if num is not None and num_found != num:
msg = "%i warnings found but %i expected." % (len(self.log), num)
lst = [w.category for w in self.log]
- raise AssertionError("\n".join([msg] + [lst]))
+ raise AssertionError("\n".join([msg] + lst))
with warnings.catch_warnings():
warnings.filterwarnings("error", message=self.message,
@@ -163,8 +163,8 @@
class TestComparisonDeprecations(_DeprecationTestCase):
- """This tests the deprecation, for non-elementwise comparison logic.
- This used to mean that when an error occured during element-wise comparison
+ """This tests the deprecation, for non-element-wise comparison logic.
+ This used to mean that when an error occurred during element-wise comparison
(i.e. broadcasting) NotImplemented was returned, but also in the comparison
itself, False was given instead of the error.
@@ -192,13 +192,13 @@
b = np.array(['a', 'b', 'c'])
assert_raises(ValueError, lambda x, y: x == y, a, b)
- # The empty list is not cast to string, this is only to document
+ # The empty list is not cast to string, as this is only to document
# that fact (it likely should be changed). This means that the
# following works (and returns False) due to dtype mismatch:
a == []
def test_none_comparison(self):
- # Test comparison of None, which should result in elementwise
+ # Test comparison of None, which should result in element-wise
# comparison in the future. [1, 2] == None should be [False, False].
with warnings.catch_warnings():
warnings.filterwarnings('always', '', FutureWarning)
@@ -211,7 +211,7 @@
assert_raises(FutureWarning, operator.ne, np.arange(3), None)
def test_scalar_none_comparison(self):
- # Scalars should still just return false and not give a warnings.
+ # Scalars should still just return False and not give a warnings.
# The comparisons are flagged by pep8, ignore that.
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', FutureWarning)
@@ -226,9 +226,9 @@
assert_(np.datetime64('NaT') != None)
assert_(len(w) == 0)
- # For documentaiton purpose, this is why the datetime is dubious.
+ # For documentation purposes, this is why the datetime is dubious.
# At the time of deprecation this was no behaviour change, but
- # it has to be considered when the deprecations is done.
+ # it has to be considered when the deprecations are done.
assert_(np.equal(np.datetime64('NaT'), None))
def test_void_dtype_equality_failures(self):
@@ -277,7 +277,7 @@
with warnings.catch_warnings() as l:
warnings.filterwarnings("always")
assert_raises(TypeError, f, arg1, arg2)
- assert not l
+ assert_(not l)
else:
# py2
assert_warns(DeprecationWarning, f, arg1, arg2)
@@ -338,8 +338,8 @@
class TestAlterdotRestoredotDeprecations(_DeprecationTestCase):
"""The alterdot/restoredot functions are deprecated.
- These functions no longer do anything in numpy 1.10, so should not be
- used.
+ These functions no longer do anything in numpy 1.10, so
+ they should not be used.
"""
@@ -350,7 +350,7 @@
class TestBooleanIndexShapeMismatchDeprecation():
"""Tests deprecation for boolean indexing where the boolean array
- does not match the input array along the given diemsions.
+ does not match the input array along the given dimensions.
"""
message = r"boolean index did not match indexed array"
@@ -400,5 +400,48 @@
self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,))
+class TestInvalidOrderParameterInputForFlattenArrayDeprecation(_DeprecationTestCase):
+ """Invalid arguments to the ORDER parameter in array.flatten() should not be
+ allowed and should raise an error. However, in the interests of not breaking
+ code that may inadvertently pass invalid arguments to this parameter, a
+ DeprecationWarning will be issued instead for the time being to give developers
+ time to refactor relevant code.
+ """
+
+ def test_flatten_array_non_string_arg(self):
+ x = np.zeros((3, 5))
+ self.message = ("Non-string object detected for "
+ "the array ordering. Please pass "
+ "in 'C', 'F', 'A', or 'K' instead")
+ self.assert_deprecated(x.flatten, args=(np.pi,))
+
+ def test_flatten_array_invalid_string_arg(self):
+ # Tests that a DeprecationWarning is raised
+ # when a string of length greater than one
+ # starting with "C", "F", "A", or "K" (case-
+ # and unicode-insensitive) is passed in for
+ # the ORDER parameter. Otherwise, a TypeError
+ # will be raised!
+
+ x = np.zeros((3, 5))
+ self.message = ("Non length-one string passed "
+ "in for the array ordering. Please "
+ "pass in 'C', 'F', 'A', or 'K' instead")
+ self.assert_deprecated(x.flatten, args=("FACK",))
+
+
+class TestTestDeprecated(object):
+ def test_assert_deprecated(self):
+ test_case_instance = _DeprecationTestCase()
+ test_case_instance.setUp()
+ assert_raises(AssertionError,
+ test_case_instance.assert_deprecated,
+ lambda: None)
+
+ def foo():
+ warnings.warn("foo", category=DeprecationWarning)
+
+ test_case_instance.assert_deprecated(foo)
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index 38280d0..deb2130 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -895,10 +895,7 @@
+ arr.shape[ax + len(indx[1:]):]))
# Check if broadcasting works
- if len(indx[1:]) != 1:
- res = np.broadcast(*indx[1:]) # raises ValueError...
- else:
- res = indx[1]
+ res = np.broadcast(*indx[1:])
# unfortunately the indices might be out of bounds. So check
# that first, and use mode='wrap' then. However only if
# there are any indices...
diff --git a/numpy/core/tests/test_longdouble.py b/numpy/core/tests/test_longdouble.py
index fcc79ec..1c561a4 100644
--- a/numpy/core/tests/test_longdouble.py
+++ b/numpy/core/tests/test_longdouble.py
@@ -1,12 +1,11 @@
from __future__ import division, absolute_import, print_function
import locale
-from tempfile import NamedTemporaryFile
import numpy as np
from numpy.testing import (
run_module_suite, assert_, assert_equal, dec, assert_raises,
- assert_array_equal, TestCase
+ assert_array_equal, TestCase, temppath,
)
from numpy.compat import sixu
from test_print import in_foreign_locale
@@ -109,66 +108,48 @@
class FileBased(TestCase):
- def setUp(self):
- self.o = 1 + np.finfo(np.longdouble).eps
- self.f = NamedTemporaryFile(mode="wt")
- def tearDown(self):
- self.f.close()
- del self.f
+ ldbl = 1 + np.finfo(np.longdouble).eps
+ tgt = np.array([ldbl]*5)
+ out = ''.join([repr(t) + '\n' for t in tgt])
def test_fromfile_bogus(self):
- self.f.write("1. 2. 3. flop 4.\n")
- self.f.flush()
- F = open(self.f.name, "rt")
- try:
- assert_equal(np.fromfile(F, dtype=float, sep=" "),
- np.array([1., 2., 3.]))
- finally:
- F.close()
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1. 2. 3. flop 4.\n")
+ res = np.fromfile(path, dtype=float, sep=" ")
+ assert_equal(res, np.array([1., 2., 3.]))
@dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
def test_fromfile(self):
- for i in range(5):
- self.f.write(repr(self.o) + "\n")
- self.f.flush()
- a = np.array([self.o]*5)
- F = open(self.f.name, "rt")
- b = np.fromfile(F,
- dtype=np.longdouble,
- sep="\n")
- F.close()
- F = open(self.f.name, "rt")
- s = F.read()
- F.close()
- assert_equal(b, a, err_msg="decoded %s as %s" % (repr(s), repr(b)))
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write(self.out)
+ res = np.fromfile(path, dtype=np.longdouble, sep="\n")
+ assert_equal(res, self.tgt)
@dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
def test_genfromtxt(self):
- for i in range(5):
- self.f.write(repr(self.o) + "\n")
- self.f.flush()
- a = np.array([self.o]*5)
- assert_equal(np.genfromtxt(self.f.name, dtype=np.longdouble), a)
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write(self.out)
+ res = np.genfromtxt(path, dtype=np.longdouble)
+ assert_equal(res, self.tgt)
@dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
def test_loadtxt(self):
- for i in range(5):
- self.f.write(repr(self.o) + "\n")
- self.f.flush()
- a = np.array([self.o]*5)
- assert_equal(np.loadtxt(self.f.name, dtype=np.longdouble), a)
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write(self.out)
+ res = np.loadtxt(path, dtype=np.longdouble)
+ assert_equal(res, self.tgt)
@dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
def test_tofile_roundtrip(self):
- a = np.array([self.o]*3)
- a.tofile(self.f.name, sep=" ")
- F = open(self.f.name, "rt")
- try:
- assert_equal(np.fromfile(F, dtype=np.longdouble, sep=" "),
- a)
- finally:
- F.close()
+ with temppath() as path:
+ self.tgt.tofile(path, sep=" ")
+ res = np.fromfile(path, dtype=np.longdouble, sep=" ")
+ assert_equal(res, self.tgt)
@in_foreign_locale
diff --git a/numpy/core/tests/test_mem_overlap.py b/numpy/core/tests/test_mem_overlap.py
index 8d39fa4..82e66db 100644
--- a/numpy/core/tests/test_mem_overlap.py
+++ b/numpy/core/tests/test_mem_overlap.py
@@ -79,7 +79,8 @@
cpy[dstidx] = arr[srcidx]
arr[dstidx] = arr[srcidx]
- assert np.all(arr == cpy), 'assigning arr[%s] = arr[%s]' % (dstidx, srcidx)
+ assert_(np.all(arr == cpy),
+ 'assigning arr[%s] = arr[%s]' % (dstidx, srcidx))
def test_overlapping_assignments():
@@ -109,17 +110,19 @@
numbers = []
while min(feasible_count, infeasible_count) < min_count:
# Ensure big and small integer problems
- A_max = 1 + rng.randint(0, 11)**6
- U_max = rng.randint(0, 11)**6
+ A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6
+ U_max = rng.randint(0, 11, dtype=np.intp)**6
A_max = min(max_int, A_max)
U_max = min(max_int-1, U_max)
- A = tuple(rng.randint(1, A_max+1) for j in range(ndim))
- U = tuple(rng.randint(0, U_max+2) for j in range(ndim))
+ A = tuple(rng.randint(1, A_max+1, dtype=np.intp)
+ for j in range(ndim))
+ U = tuple(rng.randint(0, U_max+2, dtype=np.intp)
+ for j in range(ndim))
b_ub = min(max_int-2, sum(a*ub for a, ub in zip(A, U)))
- b = rng.randint(-1, b_ub+2)
+ b = rng.randint(-1, b_ub+2, dtype=np.intp)
if ndim == 0 and feasible_count < min_count:
b = 0
@@ -129,7 +132,7 @@
if X is None:
# Check the simplified decision problem agrees
X_simplified = solve_diophantine(A, U, b, simplify=1)
- assert X_simplified is None, (A, U, b, X_simplified)
+ assert_(X_simplified is None, (A, U, b, X_simplified))
# Check no solution exists (provided the problem is
# small enough so that brute force checking doesn't
@@ -149,7 +152,7 @@
else:
# Check the simplified decision problem agrees
X_simplified = solve_diophantine(A, U, b, simplify=1)
- assert X_simplified is not None, (A, U, b, X_simplified)
+ assert_(X_simplified is not None, (A, U, b, X_simplified))
# Check validity
assert_(sum(a*x for a, x in zip(A, X)) == b)
@@ -257,9 +260,9 @@
rng = np.random.RandomState(1234)
def random_slice(n, step):
- start = rng.randint(0, n+1)
- stop = rng.randint(start, n+1)
- if rng.randint(0, 2) == 0:
+ start = rng.randint(0, n+1, dtype=np.intp)
+ stop = rng.randint(start, n+1, dtype=np.intp)
+ if rng.randint(0, 2, dtype=np.intp) == 0:
stop, start = start, stop
step *= -1
return slice(start, stop, step)
@@ -268,12 +271,14 @@
infeasible = 0
while min(feasible, infeasible) < min_count:
- steps = tuple(rng.randint(1, 11) if rng.randint(0, 5) == 0 else 1
+ steps = tuple(rng.randint(1, 11, dtype=np.intp)
+ if rng.randint(0, 5, dtype=np.intp) == 0 else 1
for j in range(x.ndim))
if same_steps:
steps2 = steps
else:
- steps2 = tuple(rng.randint(1, 11) if rng.randint(0, 5) == 0 else 1
+ steps2 = tuple(rng.randint(1, 11, dtype=np.intp)
+ if rng.randint(0, 5, dtype=np.intp) == 0 else 1
for j in range(x.ndim))
t1 = np.arange(x.ndim)
@@ -373,9 +378,9 @@
rng = np.random.RandomState(1234)
def random_slice(n, step):
- start = rng.randint(0, n+1)
- stop = rng.randint(start, n+1)
- if rng.randint(0, 2) == 0:
+ start = rng.randint(0, n+1, dtype=np.intp)
+ stop = rng.randint(start, n+1, dtype=np.intp)
+ if rng.randint(0, 2, dtype=np.intp) == 0:
stop, start = start, stop
step *= -1
return slice(start, stop, step)
@@ -384,14 +389,15 @@
min_count = 5000
while cases < min_count:
- steps = tuple(rng.randint(1, 11) if rng.randint(0, 5) == 0 else 1
+ steps = tuple(rng.randint(1, 11, dtype=np.intp)
+ if rng.randint(0, 5, dtype=np.intp) == 0 else 1
for j in range(x.ndim))
t1 = np.arange(x.ndim)
rng.shuffle(t1)
s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))
a = x[s1].transpose(t1)
- assert not internal_overlap(a)
+ assert_(not internal_overlap(a))
cases += 1
@@ -468,10 +474,12 @@
rng = np.random.RandomState(1234)
while min(overlap, no_overlap) < min_count:
- ndim = rng.randint(1, 4)
+ ndim = rng.randint(1, 4, dtype=np.intp)
- strides = tuple(rng.randint(-1000, 1000) for j in range(ndim))
- shape = tuple(rng.randint(1, 30) for j in range(ndim))
+ strides = tuple(rng.randint(-1000, 1000, dtype=np.intp)
+ for j in range(ndim))
+ shape = tuple(rng.randint(1, 30, dtype=np.intp)
+ for j in range(ndim))
a = as_strided(x, strides=strides, shape=shape)
result = check_internal_overlap(a)
diff --git a/numpy/core/tests/test_memmap.py b/numpy/core/tests/test_memmap.py
index 1585586..e41758c 100644
--- a/numpy/core/tests/test_memmap.py
+++ b/numpy/core/tests/test_memmap.py
@@ -103,28 +103,28 @@
shape=self.shape)
tmp = (fp + 10)
if isinstance(tmp, memmap):
- assert tmp._mmap is not fp._mmap
+ assert_(tmp._mmap is not fp._mmap)
def test_indexing_drops_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = fp[[(1, 2), (2, 3)]]
if isinstance(tmp, memmap):
- assert tmp._mmap is not fp._mmap
+ assert_(tmp._mmap is not fp._mmap)
def test_slicing_keeps_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
- assert fp[:2, :2]._mmap is fp._mmap
+ assert_(fp[:2, :2]._mmap is fp._mmap)
def test_view(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
new1 = fp.view()
new2 = new1.view()
- assert(new1.base is fp)
- assert(new2.base is fp)
+ assert_(new1.base is fp)
+ assert_(new2.base is fp)
new_array = asarray(fp)
- assert(new_array.base is fp)
+ assert_(new_array.base is fp)
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 6938472..c9e610c 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -9,6 +9,7 @@
import io
import itertools
import ctypes
+import os
if sys.version_info[0] >= 3:
import builtins
else:
@@ -1935,7 +1936,80 @@
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
+ d = np.arange(24).reshape(4, 6)
+ ddt = np.array(
+ [[ 55, 145, 235, 325],
+ [ 145, 451, 757, 1063],
+ [ 235, 757, 1279, 1801],
+ [ 325, 1063, 1801, 2539]]
+ )
+ dtd = np.array(
+ [[504, 540, 576, 612, 648, 684],
+ [540, 580, 620, 660, 700, 740],
+ [576, 620, 664, 708, 752, 796],
+ [612, 660, 708, 756, 804, 852],
+ [648, 700, 752, 804, 856, 908],
+ [684, 740, 796, 852, 908, 964]]
+ )
+
+ # gemm vs syrk optimizations
+ for et in [np.float32, np.float64, np.complex64, np.complex128]:
+ eaf = a.astype(et)
+ assert_equal(np.dot(eaf, eaf), eaf)
+ assert_equal(np.dot(eaf.T, eaf), eaf)
+ assert_equal(np.dot(eaf, eaf.T), eaf)
+ assert_equal(np.dot(eaf.T, eaf.T), eaf)
+ assert_equal(np.dot(eaf.T.copy(), eaf), eaf)
+ assert_equal(np.dot(eaf, eaf.T.copy()), eaf)
+ assert_equal(np.dot(eaf.T.copy(), eaf.T.copy()), eaf)
+
+ # syrk validations
+ for et in [np.float32, np.float64, np.complex64, np.complex128]:
+ eaf = a.astype(et)
+ ebf = b.astype(et)
+ assert_equal(np.dot(ebf, ebf), eaf)
+ assert_equal(np.dot(ebf.T, ebf), eaf)
+ assert_equal(np.dot(ebf, ebf.T), eaf)
+ assert_equal(np.dot(ebf.T, ebf.T), eaf)
+
+ # syrk - different shape, stride, and view validations
+ for et in [np.float32, np.float64, np.complex64, np.complex128]:
+ edf = d.astype(et)
+ assert_equal(
+ np.dot(edf[::-1, :], edf.T),
+ np.dot(edf[::-1, :].copy(), edf.T.copy())
+ )
+ assert_equal(
+ np.dot(edf[:, ::-1], edf.T),
+ np.dot(edf[:, ::-1].copy(), edf.T.copy())
+ )
+ assert_equal(
+ np.dot(edf, edf[::-1, :].T),
+ np.dot(edf, edf[::-1, :].T.copy())
+ )
+ assert_equal(
+ np.dot(edf, edf[:, ::-1].T),
+ np.dot(edf, edf[:, ::-1].T.copy())
+ )
+ assert_equal(
+ np.dot(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
+ np.dot(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
+ )
+ assert_equal(
+ np.dot(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
+ np.dot(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
+ )
+
+ # syrk - different shape
+ for et in [np.float32, np.float64, np.complex64, np.complex128]:
+ edf = d.astype(et)
+ eddtf = ddt.astype(et)
+ edtdf = dtd.astype(et)
+ assert_equal(np.dot(edf, edf.T), eddtf)
+ assert_equal(np.dot(edf.T, edf), edtdf)
+
+ # function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
@@ -1967,6 +2041,13 @@
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
+ def test_dot_type_mismatch(self):
+ c = 1.
+ A = np.array((1,1), dtype='i,i')
+
+ assert_raises(ValueError, np.dot, c, A)
+ assert_raises(TypeError, np.dot, A, c)
+
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
@@ -2009,6 +2090,33 @@
a.diagonal()
assert_(sys.getrefcount(a) < 50)
+ def test_trace(self):
+ a = np.arange(12).reshape((3, 4))
+ assert_equal(a.trace(), 15)
+ assert_equal(a.trace(0), 15)
+ assert_equal(a.trace(1), 18)
+ assert_equal(a.trace(-1), 13)
+
+ b = np.arange(8).reshape((2, 2, 2))
+ assert_equal(b.trace(), [6, 8])
+ assert_equal(b.trace(0), [6, 8])
+ assert_equal(b.trace(1), [2, 3])
+ assert_equal(b.trace(-1), [4, 5])
+ assert_equal(b.trace(0, 0, 1), [6, 8])
+ assert_equal(b.trace(0, 0, 2), [5, 9])
+ assert_equal(b.trace(0, 1, 2), [3, 11])
+ assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
+
+ def test_trace_subclass(self):
+ # The class would need to overwrite trace to ensure single-element
+ # output also has the right subclass.
+ class MyArray(np.ndarray):
+ pass
+
+ b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
+ t = b.trace()
+ assert_(isinstance(t, MyArray))
+
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
@@ -3377,6 +3485,18 @@
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
+ def test_largish_file(self):
+ # check the fallocate path on files > 16MB
+ d = np.zeros(4 * 1024 ** 2)
+ d.tofile(self.filename)
+ assert_equal(os.path.getsize(self.filename), d.nbytes)
+ assert_array_equal(d, np.fromfile(self.filename))
+ # check offset
+ with open(self.filename, "r+b") as f:
+ f.seek(d.nbytes)
+ d.tofile(f)
+ assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
+
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
@@ -3588,8 +3708,8 @@
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
- assert testpassed
- assert self.a.flat[12] == 12.0
+ assert_(testpassed)
+ assert_(self.a.flat[12] == 12.0)
def test_discontiguous(self):
testpassed = False
@@ -3597,8 +3717,8 @@
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
- assert testpassed
- assert self.b.flat[4] == 12.0
+ assert_(testpassed)
+ assert_(self.b.flat[4] == 12.0)
def test___array__(self):
c = self.a.flat.__array__()
@@ -3606,16 +3726,16 @@
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
- assert c.flags.writeable is False
- assert d.flags.writeable is False
- assert e.flags.writeable is True
- assert f.flags.writeable is True
+ assert_(c.flags.writeable is False)
+ assert_(d.flags.writeable is False)
+ assert_(e.flags.writeable is True)
+ assert_(f.flags.writeable is True)
- assert c.flags.updateifcopy is False
- assert d.flags.updateifcopy is False
- assert e.flags.updateifcopy is False
- assert f.flags.updateifcopy is True
- assert f.base is self.b0
+ assert_(c.flags.updateifcopy is False)
+ assert_(d.flags.updateifcopy is False)
+ assert_(e.flags.updateifcopy is False)
+ assert_(f.flags.updateifcopy is True)
+ assert_(f.base is self.b0)
class TestResize(TestCase):
def test_basic(self):
@@ -4712,6 +4832,29 @@
class TestInner(TestCase):
+ def test_inner_type_mismatch(self):
+ c = 1.
+ A = np.array((1,1), dtype='i,i')
+
+ assert_raises(TypeError, np.inner, c, A)
+ assert_raises(TypeError, np.inner, A, c)
+
+ def test_inner_scalar_and_vector(self):
+ for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
+ sca = np.array(3, dtype=dt)[()]
+ vec = np.array([1, 2], dtype=dt)
+ desired = np.array([3, 6], dtype=dt)
+ assert_equal(np.inner(vec, sca), desired)
+ assert_equal(np.inner(sca, vec), desired)
+
+ def test_inner_scalar_and_matrix(self):
+ for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
+ sca = np.array(3, dtype=dt)[()]
+ arr = np.matrix([[1, 2], [3, 4]], dtype=dt)
+ desired = np.matrix([[3, 6], [9, 12]], dtype=dt)
+ assert_equal(np.inner(arr, sca), desired)
+ assert_equal(np.inner(sca, arr), desired)
+
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
@@ -4736,13 +4879,49 @@
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
+ assert_equal(np.inner(C, A.T), desired)
assert_equal(np.inner(B, C), desired)
+ assert_equal(np.inner(C, B), desired)
+ # check a matrix product
+ desired = np.array([[7, 10], [15, 22]], dtype=dt)
+ assert_equal(np.inner(A, B), desired)
+ # check the syrk vs. gemm paths
+ desired = np.array([[5, 11], [11, 25]], dtype=dt)
+ assert_equal(np.inner(A, A), desired)
+ assert_equal(np.inner(A, A.copy()), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
+ def test_3d_tensor(self):
+ for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
+ a = np.arange(24).reshape(2,3,4).astype(dt)
+ b = np.arange(24, 48).reshape(2,3,4).astype(dt)
+ desired = np.array(
+ [[[[ 158, 182, 206],
+ [ 230, 254, 278]],
+
+ [[ 566, 654, 742],
+ [ 830, 918, 1006]],
+
+ [[ 974, 1126, 1278],
+ [1430, 1582, 1734]]],
+
+ [[[1382, 1598, 1814],
+ [2030, 2246, 2462]],
+
+ [[1790, 2070, 2350],
+ [2630, 2910, 3190]],
+
+ [[2198, 2542, 2886],
+ [3230, 3574, 3918]]]],
+ dtype=dt
+ )
+ assert_equal(np.inner(a, b), desired)
+ assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
+
class TestSummarization(TestCase):
def test_1d(self):
@@ -5440,14 +5619,14 @@
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
- assert memoryview(c).strides == (800, 80, 8)
+ assert_(memoryview(c).strides == (800, 80, 8))
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
- assert memoryview(fortran).strides == (8, 80, 800)
+ assert_(memoryview(fortran).strides == (8, 80, 800))
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 43dad42..a114d5a 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -11,7 +11,8 @@
from numpy.random import rand, randint, randn
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_raises,
- assert_array_equal, assert_almost_equal, assert_array_almost_equal, dec
+ assert_raises_regex, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, dec
)
@@ -234,6 +235,31 @@
self.nf[self.ef] = np.nan
self.nd[self.ed] = np.nan
+ self.inff = self.f.copy()
+ self.infd = self.d.copy()
+ self.inff[::3][self.ef[::3]] = np.inf
+ self.infd[::3][self.ed[::3]] = np.inf
+ self.inff[1::3][self.ef[1::3]] = -np.inf
+ self.infd[1::3][self.ed[1::3]] = -np.inf
+ self.inff[2::3][self.ef[2::3]] = np.nan
+ self.infd[2::3][self.ed[2::3]] = np.nan
+ self.efnonan = self.ef.copy()
+ self.efnonan[2::3] = False
+ self.ednonan = self.ed.copy()
+ self.ednonan[2::3] = False
+
+ self.signf = self.f.copy()
+ self.signd = self.d.copy()
+ self.signf[self.ef] *= -1.
+ self.signd[self.ed] *= -1.
+ self.signf[1::6][self.ef[1::6]] = -np.inf
+ self.signd[1::6][self.ed[1::6]] = -np.inf
+ self.signf[3::6][self.ef[3::6]] = -np.nan
+ self.signd[3::6][self.ed[3::6]] = -np.nan
+ self.signf[4::6][self.ef[4::6]] = -0.
+ self.signd[4::6][self.ed[4::6]] = -0.
+
+
def test_float(self):
# offset for alignment test
for i in range(4):
@@ -255,6 +281,10 @@
# isnan on amd64 takes the same codepath
assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
+ assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])
+ assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])
+ assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])
+ assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:])
def test_double(self):
# offset for alignment test
@@ -277,6 +307,10 @@
# isnan on amd64 takes the same codepath
assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
+ assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])
+ assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])
+ assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])
+ assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
class TestSeterr(TestCase):
@@ -328,8 +362,8 @@
def log_err(*args):
self.called += 1
extobj_err = args
- assert (len(extobj_err) == 2)
- assert ("divide" in extobj_err[0])
+ assert_(len(extobj_err) == 2)
+ assert_("divide" in extobj_err[0])
with np.errstate(divide='ignore'):
np.seterrobj([20000, 3, log_err])
@@ -2029,6 +2063,80 @@
assert_(not res.flags['OWNDATA'])
+class TestMoveaxis(TestCase):
+ def test_move_to_end(self):
+ x = np.random.randn(5, 6, 7)
+ for source, expected in [(0, (6, 7, 5)),
+ (1, (5, 7, 6)),
+ (2, (5, 6, 7)),
+ (-1, (5, 6, 7))]:
+ actual = np.moveaxis(x, source, -1).shape
+ assert_equal(actual, expected)
+
+ def test_move_new_position(self):
+ x = np.random.randn(1, 2, 3, 4)
+ for source, destination, expected in [
+ (0, 1, (2, 1, 3, 4)),
+ (1, 2, (1, 3, 2, 4)),
+ (1, -1, (1, 3, 4, 2)),
+ ]:
+ actual = np.moveaxis(x, source, destination).shape
+ assert_equal(actual, expected)
+
+ def test_preserve_order(self):
+ x = np.zeros((1, 2, 3, 4))
+ for source, destination in [
+ (0, 0),
+ (3, -1),
+ (-1, 3),
+ ([0, -1], [0, -1]),
+ ([2, 0], [2, 0]),
+ (range(4), range(4)),
+ ]:
+ actual = np.moveaxis(x, source, destination).shape
+ assert_equal(actual, (1, 2, 3, 4))
+
+ def test_move_multiples(self):
+ x = np.zeros((0, 1, 2, 3))
+ for source, destination, expected in [
+ ([0, 1], [2, 3], (2, 3, 0, 1)),
+ ([2, 3], [0, 1], (2, 3, 0, 1)),
+ ([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)),
+ ([3, 0], [1, 0], (0, 3, 1, 2)),
+ ([0, 3], [0, 1], (0, 3, 1, 2)),
+ ]:
+ actual = np.moveaxis(x, source, destination).shape
+ assert_equal(actual, expected)
+
+ def test_errors(self):
+ x = np.random.randn(1, 2, 3)
+ assert_raises_regex(ValueError, 'invalid axis .* `source`',
+ np.moveaxis, x, 3, 0)
+ assert_raises_regex(ValueError, 'invalid axis .* `source`',
+ np.moveaxis, x, -4, 0)
+ assert_raises_regex(ValueError, 'invalid axis .* `destination`',
+ np.moveaxis, x, 0, 5)
+ assert_raises_regex(ValueError, 'repeated axis in `source`',
+ np.moveaxis, x, [0, 0], [0, 1])
+ assert_raises_regex(ValueError, 'repeated axis in `destination`',
+ np.moveaxis, x, [0, 1], [1, 1])
+ assert_raises_regex(ValueError, 'must have the same number',
+ np.moveaxis, x, 0, [0, 1])
+ assert_raises_regex(ValueError, 'must have the same number',
+ np.moveaxis, x, [0, 1], [0])
+
+ def test_array_likes(self):
+ x = np.ma.zeros((1, 2, 3))
+ result = np.moveaxis(x, 0, 0)
+ assert_equal(x.shape, result.shape)
+ assert_(isinstance(result, np.ma.MaskedArray))
+
+ x = [1, 2, 3]
+ result = np.moveaxis(x, 0, 0)
+ assert_equal(x, list(result))
+ assert_(isinstance(result, np.ndarray))
+
+
class TestCross(TestCase):
def test_2x2(self):
u = [1, 2]
@@ -2207,11 +2315,20 @@
for a, ia in zip(arrs, mit.iters):
assert_(a is ia.base)
+ def test_broadcast_single_arg(self):
+ # gh-6899
+ arrs = [np.empty((5, 6, 7))]
+ mit = np.broadcast(*arrs)
+ assert_equal(mit.shape, (5, 6, 7))
+ assert_equal(mit.nd, 3)
+ assert_equal(mit.numiter, 1)
+ assert_(arrs[0] is mit.iters[0].base)
+
def test_number_of_arguments(self):
arr = np.empty((5,))
for j in range(35):
arrs = [arr] * j
- if j < 2 or j > 32:
+ if j < 1 or j > 32:
assert_raises(ValueError, np.broadcast, *arrs)
else:
mit = np.broadcast(*arrs)
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index e0f0a3a..9fbdf51 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -122,13 +122,20 @@
assert_equal(rv.dtype.type, np.record)
#check that getitem also preserves np.recarray and np.record
- r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'),
+ r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'),
('c', 'i4,i4')]))
assert_equal(r['c'].dtype.type, np.record)
assert_equal(type(r['c']), np.recarray)
assert_equal(r[['a', 'b']].dtype.type, np.record)
assert_equal(type(r[['a', 'b']]), np.recarray)
+ #and that it preserves subclasses (gh-6949)
+ class C(np.recarray):
+ pass
+
+ c = r.view(C)
+ assert_equal(type(c['c']), C)
+
# check that accessing nested structures keep record type, but
# not for subarrays, non-void structures, non-structured voids
test_dtype = [('a', 'f4,f4'), ('b', 'V8'), ('c', ('f4',2)),
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index ac34cfa..a61e64d 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -2177,5 +2177,11 @@
# gh-6530 / gh-6553
assert_array_equal(np.percentile(np.arange(10), []), np.array([]))
+ def test_void_compare_segfault(self):
+ # gh-6922. The following should not segfault
+ a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')])
+ a.sort()
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_scalarinherit.py b/numpy/core/tests/test_scalarinherit.py
index a2ca3e4..e8cf7fd 100644
--- a/numpy/core/tests/test_scalarinherit.py
+++ b/numpy/core/tests/test_scalarinherit.py
@@ -2,9 +2,10 @@
""" Test printing of scalar types.
"""
+from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import TestCase, run_module_suite
+from numpy.testing import TestCase, run_module_suite, assert_
class A(object):
@@ -25,17 +26,17 @@
class TestInherit(TestCase):
def test_init(self):
x = B(1.0)
- assert str(x) == '1.0'
+ assert_(str(x) == '1.0')
y = C(2.0)
- assert str(y) == '2.0'
+ assert_(str(y) == '2.0')
z = D(3.0)
- assert str(z) == '3.0'
+ assert_(str(z) == '3.0')
def test_init2(self):
x = B0(1.0)
- assert str(x) == '1.0'
+ assert_(str(x) == '1.0')
y = C0(2.0)
- assert str(y) == '2.0'
+ assert_(str(y) == '2.0')
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py
index cba0838..0d163c1 100644
--- a/numpy/core/tests/test_shape_base.py
+++ b/numpy/core/tests/test_shape_base.py
@@ -295,8 +295,8 @@
for axis, expected_shape in zip(axes, expected_shapes):
assert_equal(np.stack(arrays, axis).shape, expected_shape)
# empty arrays
- assert stack([[], [], []]).shape == (3, 0)
- assert stack([[], [], []], axis=1).shape == (0, 3)
+ assert_(stack([[], [], []]).shape == (3, 0))
+ assert_(stack([[], [], []], axis=1).shape == (0, 3))
# edge cases
assert_raises_regex(ValueError, 'need at least one array', stack, [])
assert_raises_regex(ValueError, 'must have the same shape',
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 934d91e..eb09853 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -37,17 +37,17 @@
class TestUfunc(TestCase):
def test_pickle(self):
import pickle
- assert pickle.loads(pickle.dumps(np.sin)) is np.sin
+ assert_(pickle.loads(pickle.dumps(np.sin)) is np.sin)
# Check that ufunc not defined in the top level numpy namespace such as
# numpy.core.test_rational.test_add can also be pickled
- assert pickle.loads(pickle.dumps(test_add)) is test_add
+ assert_(pickle.loads(pickle.dumps(test_add)) is test_add)
def test_pickle_withstring(self):
import pickle
astring = asbytes("cnumpy.core\n_ufunc_reconstruct\np0\n"
"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
- assert pickle.loads(astring) is np.cos
+ assert_(pickle.loads(astring) is np.cos)
def test_reduceat_shifting_sum(self):
L = 6
diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py
index 9297185..766439d 100644
--- a/numpy/distutils/__init__.py
+++ b/numpy/distutils/__init__.py
@@ -18,6 +18,6 @@
_INSTALLED = False
if _INSTALLED:
- from numpy.testing import Tester
- test = Tester().test
- bench = Tester().bench
+ from numpy.testing.nosetester import _numpy_tester
+ test = _numpy_tester().test
+ bench = _numpy_tester().bench
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index ad235ed..2f2d63b 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -1,23 +1,22 @@
from __future__ import division, absolute_import, print_function
-import re
import os
+import re
import sys
import types
from copy import copy
-
-from distutils.ccompiler import *
from distutils import ccompiler
+from distutils.ccompiler import *
from distutils.errors import DistutilsExecError, DistutilsModuleError, \
DistutilsPlatformError
from distutils.sysconfig import customize_compiler
from distutils.version import LooseVersion
from numpy.distutils import log
+from numpy.distutils.compat import get_exception
from numpy.distutils.exec_command import exec_command
from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
quote_args, get_num_build_jobs
-from numpy.distutils.compat import get_exception
def replace_method(klass, method_name, func):
@@ -634,7 +633,6 @@
# that removing this fix causes f2py problems on Windows XP (see ticket #723).
# Specifically, on WinXP when gfortran is installed in a directory path, which
# contains spaces, then f2py is unable to find it.
-import re
import string
_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py
index 7463a0e..2efcdea 100644
--- a/numpy/distutils/command/build_src.py
+++ b/numpy/distutils/command/build_src.py
@@ -1,4 +1,4 @@
-""" Build swig, f2py, pyrex sources.
+""" Build swig and f2py sources.
"""
from __future__ import division, absolute_import, print_function
@@ -13,12 +13,6 @@
from distutils.util import get_platform
from distutils.errors import DistutilsError, DistutilsSetupError
-def have_pyrex():
- try:
- import Pyrex.Compiler.Main
- return True
- except ImportError:
- return False
# this import can't be done here, as it uses numpy stuff only available
# after it's installed
@@ -327,13 +321,9 @@
self.ext_target_dir = self.get_package_dir(package)
sources = self.generate_sources(sources, ext)
-
sources = self.template_sources(sources, ext)
-
sources = self.swig_sources(sources, ext)
-
sources = self.f2py_sources(sources, ext)
-
sources = self.pyrex_sources(sources, ext)
sources, py_files = self.filter_py_files(sources)
@@ -450,6 +440,7 @@
return new_sources
def pyrex_sources(self, sources, extension):
+ """Pyrex not supported; this remains for Cython support (see below)"""
new_sources = []
ext_name = extension.name.split('.')[-1]
for source in sources:
@@ -464,34 +455,12 @@
return new_sources
def generate_a_pyrex_source(self, base, ext_name, source, extension):
- if self.inplace or not have_pyrex():
- target_dir = os.path.dirname(base)
- else:
- target_dir = appendpath(self.build_src, os.path.dirname(base))
- target_file = os.path.join(target_dir, ext_name + '.c')
- depends = [source] + extension.depends
- if self.force or newer_group(depends, target_file, 'newer'):
- if have_pyrex():
- import Pyrex.Compiler.Main
- log.info("pyrexc:> %s" % (target_file))
- self.mkpath(target_dir)
- options = Pyrex.Compiler.Main.CompilationOptions(
- defaults=Pyrex.Compiler.Main.default_options,
- include_path=extension.include_dirs,
- output_file=target_file)
- pyrex_result = Pyrex.Compiler.Main.compile(source,
- options=options)
- if pyrex_result.num_errors != 0:
- raise DistutilsError("%d errors while compiling %r with Pyrex" \
- % (pyrex_result.num_errors, source))
- elif os.path.isfile(target_file):
- log.warn("Pyrex required for compiling %r but not available,"\
- " using old target %r"\
- % (source, target_file))
- else:
- raise DistutilsError("Pyrex required for compiling %r"\
- " but notavailable" % (source,))
- return target_file
+ """Pyrex is not supported, but some projects monkeypatch this method.
+
+ That allows compiling Cython code, see gh-6955.
+ This method will remain here for compatibility reasons.
+ """
+ return []
def f2py_sources(self, sources, extension):
new_sources = []
diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py
index 2dd08e7..c4f15a0 100644
--- a/numpy/distutils/fcompiler/intel.py
+++ b/numpy/distutils/fcompiler/intel.py
@@ -55,8 +55,8 @@
def get_flags(self):
return ['-fPIC']
- def get_flags_opt(self):
- return ['-xhost -openmp -fp-model strict']
+ def get_flags_opt(self): # Scipy test failures with -O2
+ return ['-xhost -openmp -fp-model strict -O1']
def get_flags_arch(self):
return []
@@ -119,8 +119,8 @@
def get_flags(self):
return ['-fPIC']
- def get_flags_opt(self):
- return ['-openmp -fp-model strict']
+ def get_flags_opt(self): # Scipy test failures with -O2
+ return ['-openmp -fp-model strict -O1']
def get_flags_arch(self):
return ['-xSSE4.2']
diff --git a/numpy/distutils/msvc9compiler.py b/numpy/distutils/msvc9compiler.py
index 636165b..c53f455 100644
--- a/numpy/distutils/msvc9compiler.py
+++ b/numpy/distutils/msvc9compiler.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
import distutils.msvc9compiler
from distutils.msvc9compiler import *
diff --git a/numpy/distutils/msvccompiler.py b/numpy/distutils/msvccompiler.py
index 4c3658d..78a386d 100644
--- a/numpy/distutils/msvccompiler.py
+++ b/numpy/distutils/msvccompiler.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
import distutils.msvccompiler
from distutils.msvccompiler import *
diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py
index 1c801fd..fe64709 100644
--- a/numpy/distutils/npy_pkg_config.py
+++ b/numpy/distutils/npy_pkg_config.py
@@ -366,7 +366,7 @@
>>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath')
>>> type(npymath_info)
<class 'numpy.distutils.npy_pkg_config.LibraryInfo'>
- >>> print npymath_info
+ >>> print(npymath_info)
Name: npymath
Description: Portable, core math library implementing C99 standard
Requires:
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 9443624..dde18df 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -677,11 +677,6 @@
exts.append('.dll.a')
if sys.platform == 'darwin':
exts.append('.dylib')
- # Debian and Ubuntu added a g3f suffix to shared library to deal with
- # g77 -> gfortran ABI transition
- # XXX: disabled, it hides more problem than it solves.
- #if sys.platform[:5] == 'linux':
- # exts.append('.so.3gf')
return exts
def check_libs(self, lib_dirs, libs, opt_libs=[]):
@@ -995,13 +990,10 @@
l = 'mkl' # use shared library
if cpu.is_Itanium():
plt = '64'
- #l = 'mkl_ipf'
elif cpu.is_Xeon():
plt = 'intel64'
- #l = 'mkl_intel64'
else:
plt = '32'
- #l = 'mkl_ia32'
if l not in self._lib_mkl:
self._lib_mkl.insert(0, l)
system_info.__init__(
@@ -1243,8 +1235,6 @@
class atlas_3_10_threads_info(atlas_3_10_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['tatlas']
- #if sys.platfcorm[:7] == 'freebsd':
- ## I don't think freebsd supports 3.10 at this time - 2014
_lib_atlas = _lib_names
_lib_lapack = _lib_names
@@ -1535,7 +1525,6 @@
('HAVE_CBLAS', None)])
return
- #atlas_info = {} ## uncomment for testing
need_lapack = 0
need_blas = 0
info = {}
@@ -1567,7 +1556,6 @@
if need_blas:
blas_info = get_info('blas')
- #blas_info = {} ## uncomment for testing
if blas_info:
dict_append(info, **blas_info)
else:
@@ -1941,13 +1929,6 @@
'"\\"%s\\""' % (vrs)),
(self.modulename.upper(), None)]
break
-## try:
-## macros.append(
-## (self.modulename.upper()+'_VERSION_HEX',
-## hex(vstr2hex(module.__version__))),
-## )
-## except Exception as msg:
-## print msg
dict_append(info, define_macros=macros)
include_dirs = self.get_include_dirs()
inc_dir = None
@@ -2322,17 +2303,6 @@
self.set_info(**info)
return
-## def vstr2hex(version):
-## bits = []
-## n = [24,16,8,4,0]
-## r = 0
-## for s in version.split('.'):
-## r |= int(s) << n[0]
-## del n[0]
-## return r
-
-#--------------------------------------------------------------------
-
def combine_paths(*args, **kws):
""" Return a list of existing paths composed by all combinations of
diff --git a/numpy/distutils/tests/f2py_ext/__init__.py b/numpy/distutils/tests/f2py_ext/__init__.py
deleted file mode 100644
index 1d0f69b..0000000
--- a/numpy/distutils/tests/f2py_ext/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from __future__ import division, absolute_import, print_function
diff --git a/numpy/distutils/tests/f2py_ext/setup.py b/numpy/distutils/tests/f2py_ext/setup.py
deleted file mode 100644
index bb7d4bc..0000000
--- a/numpy/distutils/tests/f2py_ext/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-from __future__ import division, print_function
-
-def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('f2py_ext', parent_package, top_path)
- config.add_extension('fib2', ['src/fib2.pyf', 'src/fib1.f'])
- config.add_data_dir('tests')
- return config
-
-if __name__ == "__main__":
- from numpy.distutils.core import setup
- setup(configuration=configuration)
diff --git a/numpy/distutils/tests/f2py_ext/src/fib1.f b/numpy/distutils/tests/f2py_ext/src/fib1.f
deleted file mode 100644
index cfbb1ee..0000000
--- a/numpy/distutils/tests/f2py_ext/src/fib1.f
+++ /dev/null
@@ -1,18 +0,0 @@
-C FILE: FIB1.F
- SUBROUTINE FIB(A,N)
-C
-C CALCULATE FIRST N FIBONACCI NUMBERS
-C
- INTEGER N
- REAL*8 A(N)
- DO I=1,N
- IF (I.EQ.1) THEN
- A(I) = 0.0D0
- ELSEIF (I.EQ.2) THEN
- A(I) = 1.0D0
- ELSE
- A(I) = A(I-1) + A(I-2)
- ENDIF
- ENDDO
- END
-C END FILE FIB1.F
diff --git a/numpy/distutils/tests/f2py_ext/src/fib2.pyf b/numpy/distutils/tests/f2py_ext/src/fib2.pyf
deleted file mode 100644
index 90a8cf0..0000000
--- a/numpy/distutils/tests/f2py_ext/src/fib2.pyf
+++ /dev/null
@@ -1,9 +0,0 @@
-! -*- f90 -*-
-python module fib2
- interface
- subroutine fib(a,n)
- real*8 dimension(n),intent(out),depend(n) :: a
- integer intent(in) :: n
- end subroutine fib
- end interface
-end python module fib2
diff --git a/numpy/distutils/tests/f2py_ext/tests/test_fib2.py b/numpy/distutils/tests/f2py_ext/tests/test_fib2.py
deleted file mode 100644
index 0e5bab9..0000000
--- a/numpy/distutils/tests/f2py_ext/tests/test_fib2.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-from numpy.testing import TestCase, run_module_suite, assert_array_equal
-from f2py_ext import fib2
-
-class TestFib2(TestCase):
-
- def test_fib(self):
- assert_array_equal(fib2.fib(6), [0, 1, 1, 2, 3, 5])
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/distutils/tests/f2py_f90_ext/__init__.py b/numpy/distutils/tests/f2py_f90_ext/__init__.py
deleted file mode 100644
index 1d0f69b..0000000
--- a/numpy/distutils/tests/f2py_f90_ext/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from __future__ import division, absolute_import, print_function
diff --git a/numpy/distutils/tests/f2py_f90_ext/include/body.f90 b/numpy/distutils/tests/f2py_f90_ext/include/body.f90
deleted file mode 100644
index 90b44e2..0000000
--- a/numpy/distutils/tests/f2py_f90_ext/include/body.f90
+++ /dev/null
@@ -1,5 +0,0 @@
- subroutine bar13(a)
- !f2py intent(out) a
- integer a
- a = 13
- end subroutine bar13
diff --git a/numpy/distutils/tests/f2py_f90_ext/setup.py b/numpy/distutils/tests/f2py_f90_ext/setup.py
deleted file mode 100644
index 7cca816..0000000
--- a/numpy/distutils/tests/f2py_f90_ext/setup.py
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env python
-from __future__ import division, print_function
-
-def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('f2py_f90_ext', parent_package, top_path)
- config.add_extension('foo',
- ['src/foo_free.f90'],
- include_dirs=['include'],
- f2py_options=['--include_paths',
- config.paths('include')[0]]
- )
- config.add_data_dir('tests')
- return config
-
-if __name__ == "__main__":
- from numpy.distutils.core import setup
- setup(configuration=configuration)
diff --git a/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 b/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90
deleted file mode 100644
index c7713be..0000000
--- a/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90
+++ /dev/null
@@ -1,6 +0,0 @@
-module foo_free
-contains
-
-include "body.f90"
-
-end module foo_free
diff --git a/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py b/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py
deleted file mode 100644
index 499b9eb..0000000
--- a/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-from numpy.testing import TestCase, run_module_suite, assert_equal
-from f2py_f90_ext import foo
-
-class TestFoo(TestCase):
- def test_foo_free(self):
- assert_equal(foo.foo_free.bar13(), 13)
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/distutils/tests/gen_ext/__init__.py b/numpy/distutils/tests/gen_ext/__init__.py
deleted file mode 100644
index 1d0f69b..0000000
--- a/numpy/distutils/tests/gen_ext/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from __future__ import division, absolute_import, print_function
diff --git a/numpy/distutils/tests/gen_ext/setup.py b/numpy/distutils/tests/gen_ext/setup.py
deleted file mode 100644
index de6b941..0000000
--- a/numpy/distutils/tests/gen_ext/setup.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env python
-from __future__ import division, print_function
-
-fib3_f = '''
-C FILE: FIB3.F
- SUBROUTINE FIB(A,N)
-C
-C CALCULATE FIRST N FIBONACCI NUMBERS
-C
- INTEGER N
- REAL*8 A(N)
-Cf2py intent(in) n
-Cf2py intent(out) a
-Cf2py depend(n) a
- DO I=1,N
- IF (I.EQ.1) THEN
- A(I) = 0.0D0
- ELSEIF (I.EQ.2) THEN
- A(I) = 1.0D0
- ELSE
- A(I) = A(I-1) + A(I-2)
- ENDIF
- ENDDO
- END
-C END FILE FIB3.F
-'''
-
-def source_func(ext, build_dir):
- import os
- from distutils.dep_util import newer
- target = os.path.join(build_dir, 'fib3.f')
- if newer(__file__, target):
- f = open(target, 'w')
- f.write(fib3_f)
- f.close()
- return [target]
-
-def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('gen_ext', parent_package, top_path)
- config.add_extension('fib3',
- [source_func]
- )
- return config
-
-if __name__ == "__main__":
- from numpy.distutils.core import setup
- setup(configuration=configuration)
diff --git a/numpy/distutils/tests/gen_ext/tests/test_fib3.py b/numpy/distutils/tests/gen_ext/tests/test_fib3.py
deleted file mode 100644
index e02ca81..0000000
--- a/numpy/distutils/tests/gen_ext/tests/test_fib3.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-from gen_ext import fib3
-from numpy.testing import TestCase, run_module_suite, assert_array_equal
-
-class TestFib3(TestCase):
- def test_fib(self):
- assert_array_equal(fib3.fib(6), [0, 1, 1, 2, 3, 5])
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/distutils/tests/pyrex_ext/__init__.py b/numpy/distutils/tests/pyrex_ext/__init__.py
deleted file mode 100644
index 1d0f69b..0000000
--- a/numpy/distutils/tests/pyrex_ext/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from __future__ import division, absolute_import, print_function
diff --git a/numpy/distutils/tests/pyrex_ext/primes.pyx b/numpy/distutils/tests/pyrex_ext/primes.pyx
deleted file mode 100644
index 2ada0c5..0000000
--- a/numpy/distutils/tests/pyrex_ext/primes.pyx
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# Calculate prime numbers
-#
-
-def primes(int kmax):
- cdef int n, k, i
- cdef int p[1000]
- result = []
- if kmax > 1000:
- kmax = 1000
- k = 0
- n = 2
- while k < kmax:
- i = 0
- while i < k and n % p[i] <> 0:
- i = i + 1
- if i == k:
- p[k] = n
- k = k + 1
- result.append(n)
- n = n + 1
- return result
diff --git a/numpy/distutils/tests/pyrex_ext/setup.py b/numpy/distutils/tests/pyrex_ext/setup.py
deleted file mode 100644
index 819dd31..0000000
--- a/numpy/distutils/tests/pyrex_ext/setup.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env python
-from __future__ import division, print_function
-
-def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('pyrex_ext', parent_package, top_path)
- config.add_extension('primes',
- ['primes.pyx'])
- config.add_data_dir('tests')
- return config
-
-if __name__ == "__main__":
- from numpy.distutils.core import setup
- setup(configuration=configuration)
diff --git a/numpy/distutils/tests/pyrex_ext/tests/test_primes.py b/numpy/distutils/tests/pyrex_ext/tests/test_primes.py
deleted file mode 100644
index 1ae436b..0000000
--- a/numpy/distutils/tests/pyrex_ext/tests/test_primes.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-from numpy.testing import TestCase, run_module_suite, assert_equal
-from pyrex_ext.primes import primes
-
-class TestPrimes(TestCase):
- def test_simple(self, level=1):
- l = primes(10)
- assert_equal(l, [2, 3, 5, 7, 11, 13, 17, 19, 23, 29])
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/distutils/tests/setup.py b/numpy/distutils/tests/setup.py
deleted file mode 100644
index 135de7c..0000000
--- a/numpy/distutils/tests/setup.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-from __future__ import division, print_function
-
-def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('testnumpydistutils', parent_package, top_path)
- config.add_subpackage('pyrex_ext')
- config.add_subpackage('f2py_ext')
- #config.add_subpackage('f2py_f90_ext')
- config.add_subpackage('swig_ext')
- config.add_subpackage('gen_ext')
- return config
-
-if __name__ == "__main__":
- from numpy.distutils.core import setup
- setup(configuration=configuration)
diff --git a/numpy/distutils/tests/swig_ext/__init__.py b/numpy/distutils/tests/swig_ext/__init__.py
deleted file mode 100644
index 1d0f69b..0000000
--- a/numpy/distutils/tests/swig_ext/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from __future__ import division, absolute_import, print_function
diff --git a/numpy/distutils/tests/swig_ext/setup.py b/numpy/distutils/tests/swig_ext/setup.py
deleted file mode 100644
index f6e0730..0000000
--- a/numpy/distutils/tests/swig_ext/setup.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env python
-from __future__ import division, print_function
-
-def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('swig_ext', parent_package, top_path)
- config.add_extension('_example',
- ['src/example.i', 'src/example.c']
- )
- config.add_extension('_example2',
- ['src/zoo.i', 'src/zoo.cc'],
- depends=['src/zoo.h'],
- include_dirs=['src']
- )
- config.add_data_dir('tests')
- return config
-
-if __name__ == "__main__":
- from numpy.distutils.core import setup
- setup(configuration=configuration)
diff --git a/numpy/distutils/tests/swig_ext/src/example.c b/numpy/distutils/tests/swig_ext/src/example.c
deleted file mode 100644
index be15172..0000000
--- a/numpy/distutils/tests/swig_ext/src/example.c
+++ /dev/null
@@ -1,14 +0,0 @@
-/* File : example.c */
-
-double My_variable = 3.0;
-
-/* Compute factorial of n */
-int fact(int n) {
- if (n <= 1) return 1;
- else return n*fact(n-1);
-}
-
-/* Compute n mod m */
-int my_mod(int n, int m) {
- return(n % m);
-}
diff --git a/numpy/distutils/tests/swig_ext/src/example.i b/numpy/distutils/tests/swig_ext/src/example.i
deleted file mode 100644
index f4fc11e..0000000
--- a/numpy/distutils/tests/swig_ext/src/example.i
+++ /dev/null
@@ -1,14 +0,0 @@
-/* -*- c -*- */
-
-/* File : example.i */
-%module example
-%{
-/* Put headers and other declarations here */
-extern double My_variable;
-extern int fact(int);
-extern int my_mod(int n, int m);
-%}
-
-extern double My_variable;
-extern int fact(int);
-extern int my_mod(int n, int m);
diff --git a/numpy/distutils/tests/swig_ext/src/zoo.cc b/numpy/distutils/tests/swig_ext/src/zoo.cc
deleted file mode 100644
index 0a643d1..0000000
--- a/numpy/distutils/tests/swig_ext/src/zoo.cc
+++ /dev/null
@@ -1,23 +0,0 @@
-#include "zoo.h"
-#include <cstdio>
-#include <cstring>
-
-Zoo::Zoo()
-{
- n = 0;
-}
-
-void Zoo::shut_up(char *animal)
-{
- if (n < 10) {
- strcpy(animals[n], animal);
- n++;
- }
-}
-
-void Zoo::display()
-{
- int i;
- for(i = 0; i < n; i++)
- printf("%s\n", animals[i]);
-}
diff --git a/numpy/distutils/tests/swig_ext/src/zoo.h b/numpy/distutils/tests/swig_ext/src/zoo.h
deleted file mode 100644
index cb26e6c..0000000
--- a/numpy/distutils/tests/swig_ext/src/zoo.h
+++ /dev/null
@@ -1,9 +0,0 @@
-
-class Zoo{
- int n;
- char animals[10][50];
-public:
- Zoo();
- void shut_up(char *animal);
- void display();
-};
diff --git a/numpy/distutils/tests/swig_ext/src/zoo.i b/numpy/distutils/tests/swig_ext/src/zoo.i
deleted file mode 100644
index a029c03..0000000
--- a/numpy/distutils/tests/swig_ext/src/zoo.i
+++ /dev/null
@@ -1,10 +0,0 @@
-// -*- c++ -*-
-// Example copied from http://linuxgazette.net/issue49/pramode.html
-
-%module example2
-
-%{
-#include "zoo.h"
-%}
-
-%include "zoo.h"
diff --git a/numpy/distutils/tests/swig_ext/tests/test_example.py b/numpy/distutils/tests/swig_ext/tests/test_example.py
deleted file mode 100644
index 81b82c8..0000000
--- a/numpy/distutils/tests/swig_ext/tests/test_example.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-from numpy.testing import TestCase, run_module_suite, assert_equal
-from swig_ext import example
-
-class TestExample(TestCase):
- def test_fact(self):
- assert_equal(example.fact(10), 3628800)
-
- def test_cvar(self):
- assert_equal(example.cvar.My_variable, 3.0)
- example.cvar.My_variable = 5
- assert_equal(example.cvar.My_variable, 5.0)
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/distutils/tests/swig_ext/tests/test_example2.py b/numpy/distutils/tests/swig_ext/tests/test_example2.py
deleted file mode 100644
index 381b30d..0000000
--- a/numpy/distutils/tests/swig_ext/tests/test_example2.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-from numpy.testing import TestCase, run_module_suite
-from swig_ext import example2
-
-class TestExample2(TestCase):
- def test_zoo(self):
- z = example2.Zoo()
- z.shut_up('Tiger')
- z.shut_up('Lion')
- z.display()
-
-
-if __name__ == "__main__":
- run_module_suite()
diff --git a/numpy/distutils/tests/test_npy_pkg_config.py b/numpy/distutils/tests/test_npy_pkg_config.py
index 9a72842..bdef471 100644
--- a/numpy/distutils/tests/test_npy_pkg_config.py
+++ b/numpy/distutils/tests/test_npy_pkg_config.py
@@ -1,10 +1,9 @@
from __future__ import division, absolute_import, print_function
import os
-from tempfile import mkstemp
from numpy.distutils.npy_pkg_config import read_config, parse_flags
-from numpy.testing import TestCase, run_module_suite
+from numpy.testing import TestCase, run_module_suite, temppath
simple = """\
[meta]
@@ -39,41 +38,30 @@
class TestLibraryInfo(TestCase):
def test_simple(self):
- fd, filename = mkstemp('foo.ini')
- try:
- pkg = os.path.splitext(filename)[0]
- try:
- os.write(fd, simple.encode('ascii'))
- finally:
- os.close(fd)
-
+ with temppath('foo.ini') as path:
+ with open(path, 'w') as f:
+ f.write(simple)
+ pkg = os.path.splitext(path)[0]
out = read_config(pkg)
- self.assertTrue(out.cflags() == simple_d['cflags'])
- self.assertTrue(out.libs() == simple_d['libflags'])
- self.assertTrue(out.name == simple_d['name'])
- self.assertTrue(out.version == simple_d['version'])
- finally:
- os.remove(filename)
+
+ self.assertTrue(out.cflags() == simple_d['cflags'])
+ self.assertTrue(out.libs() == simple_d['libflags'])
+ self.assertTrue(out.name == simple_d['name'])
+ self.assertTrue(out.version == simple_d['version'])
def test_simple_variable(self):
- fd, filename = mkstemp('foo.ini')
- try:
- pkg = os.path.splitext(filename)[0]
- try:
- os.write(fd, simple_variable.encode('ascii'))
- finally:
- os.close(fd)
-
+ with temppath('foo.ini') as path:
+ with open(path, 'w') as f:
+ f.write(simple_variable)
+ pkg = os.path.splitext(path)[0]
out = read_config(pkg)
- self.assertTrue(out.cflags() == simple_variable_d['cflags'])
- self.assertTrue(out.libs() == simple_variable_d['libflags'])
- self.assertTrue(out.name == simple_variable_d['name'])
- self.assertTrue(out.version == simple_variable_d['version'])
- out.vars['prefix'] = '/Users/david'
- self.assertTrue(out.cflags() == '-I/Users/david/include')
- finally:
- os.remove(filename)
+ self.assertTrue(out.cflags() == simple_variable_d['cflags'])
+ self.assertTrue(out.libs() == simple_variable_d['libflags'])
+ self.assertTrue(out.name == simple_variable_d['name'])
+ self.assertTrue(out.version == simple_variable_d['version'])
+ out.vars['prefix'] = '/Users/david'
+ self.assertTrue(out.cflags() == '-I/Users/david/include')
class TestParseFlags(TestCase):
def test_simple_cflags(self):
diff --git a/numpy/doc/glossary.py b/numpy/doc/glossary.py
index 9dacd1c..4a32384 100644
--- a/numpy/doc/glossary.py
+++ b/numpy/doc/glossary.py
@@ -109,7 +109,7 @@
>>> def log(f):
... def new_logging_func(*args, **kwargs):
- ... print "Logging call with parameters:", args, kwargs
+ ... print("Logging call with parameters:", args, kwargs)
... return f(*args, **kwargs)
...
... return new_logging_func
@@ -185,7 +185,7 @@
It is often used in combintion with ``enumerate``::
>>> keys = ['a','b','c']
>>> for n, k in enumerate(keys):
- ... print "Key %d: %s" % (n, k)
+ ... print("Key %d: %s" % (n, k))
...
Key 0: a
Key 1: b
@@ -315,7 +315,7 @@
... color = 'blue'
...
... def paint(self):
- ... print "Painting the city %s!" % self.color
+ ... print("Painting the city %s!" % self.color)
...
>>> p = Paintbrush()
>>> p.color = 'red'
diff --git a/numpy/doc/misc.py b/numpy/doc/misc.py
index 1709ad6..e30caf0 100644
--- a/numpy/doc/misc.py
+++ b/numpy/doc/misc.py
@@ -86,7 +86,7 @@
>>> np.sqrt(np.array([-1.]))
FloatingPointError: invalid value encountered in sqrt
>>> def errorhandler(errstr, errflag):
- ... print "saw stupid error!"
+ ... print("saw stupid error!")
>>> np.seterrcall(errorhandler)
<function err_handler at 0x...>
>>> j = np.seterr(all='call')
diff --git a/numpy/doc/structured_arrays.py b/numpy/doc/structured_arrays.py
index fe17c13..1135c13 100644
--- a/numpy/doc/structured_arrays.py
+++ b/numpy/doc/structured_arrays.py
@@ -27,7 +27,7 @@
Conveniently, one can access any field of the array by indexing using the
string that names that field. ::
- >>> y = x['foo']
+ >>> y = x['bar']
>>> y
array([ 2., 3.], dtype=float32)
>>> y[:] = 2*y
diff --git a/numpy/doc/subclassing.py b/numpy/doc/subclassing.py
index a62fc2d..85327fe 100644
--- a/numpy/doc/subclassing.py
+++ b/numpy/doc/subclassing.py
@@ -123,13 +123,13 @@
class C(object):
def __new__(cls, *args):
- print 'Cls in __new__:', cls
- print 'Args in __new__:', args
+ print('Cls in __new__:', cls)
+ print('Args in __new__:', args)
return object.__new__(cls, *args)
def __init__(self, *args):
- print 'type(self) in __init__:', type(self)
- print 'Args in __init__:', args
+ print('type(self) in __init__:', type(self))
+ print('Args in __init__:', args)
meaning that we get:
@@ -159,13 +159,13 @@
class D(C):
def __new__(cls, *args):
- print 'D cls is:', cls
- print 'D args in __new__:', args
+ print('D cls is:', cls)
+ print('D args in __new__:', args)
return C.__new__(C, *args)
def __init__(self, *args):
# we never get here
- print 'In D __init__'
+ print('In D __init__')
meaning that:
@@ -242,18 +242,18 @@
class C(np.ndarray):
def __new__(cls, *args, **kwargs):
- print 'In __new__ with class %s' % cls
+ print('In __new__ with class %s' % cls)
return np.ndarray.__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
# in practice you probably will not need or want an __init__
# method for your subclass
- print 'In __init__ with class %s' % self.__class__
+ print('In __init__ with class %s' % self.__class__)
def __array_finalize__(self, obj):
- print 'In array_finalize:'
- print ' self type is %s' % type(self)
- print ' obj type is %s' % type(obj)
+ print('In array_finalize:')
+ print(' self type is %s' % type(self))
+ print(' obj type is %s' % type(obj))
Now:
@@ -441,16 +441,16 @@
return obj
def __array_finalize__(self, obj):
- print 'In __array_finalize__:'
- print ' self is %s' % repr(self)
- print ' obj is %s' % repr(obj)
+ print('In __array_finalize__:')
+ print(' self is %s' % repr(self))
+ print(' obj is %s' % repr(obj))
if obj is None: return
self.info = getattr(obj, 'info', None)
def __array_wrap__(self, out_arr, context=None):
- print 'In __array_wrap__:'
- print ' self is %s' % repr(self)
- print ' arr is %s' % repr(out_arr)
+ print('In __array_wrap__:')
+ print(' self is %s' % repr(self))
+ print(' arr is %s' % repr(out_arr))
# then just call the parent
return np.ndarray.__array_wrap__(self, out_arr, context)
diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py
index 17a5759..50566cc 100644
--- a/numpy/f2py/__init__.py
+++ b/numpy/f2py/__init__.py
@@ -19,16 +19,32 @@
def compile(source,
modulename='untitled',
extra_args='',
- verbose=1,
- source_fn=None
+ verbose=True,
+ source_fn=None,
+ extension='.f'
):
''' Build extension module from processing source with f2py.
- Read the source of this function for more information.
+
+ Parameters
+ ----------
+ source : str
+ Fortran source of module / subroutine to compile
+ modulename : str, optional
+ the name of compiled python module
+ extra_args: str, optional
+ additional parameters passed to f2py
+ verbose: bool, optional
+ print f2py output to screen
+ extension: {'.f', '.f90'}, optional
+ filename extension influences the fortran compiler behavior
+
+ .. versionadded:: 1.11.0
+
'''
from numpy.distutils.exec_command import exec_command
import tempfile
if source_fn is None:
- f = tempfile.NamedTemporaryFile(suffix='.f')
+ f = tempfile.NamedTemporaryFile(suffix=extension)
else:
f = open(source_fn, 'w')
@@ -36,14 +52,16 @@
f.write(source)
f.flush()
- args = ' -c -m %s %s %s' % (modulename, f.name, extra_args)
- c = '%s -c "import numpy.f2py as f2py2e;f2py2e.main()" %s' % \
- (sys.executable, args)
- s, o = exec_command(c)
+ args = ' -c -m {} {} {}'.format(modulename, f.name, extra_args)
+ c = '{} -c "import numpy.f2py as f2py2e;f2py2e.main()" {}'
+ c = c.format(sys.executable, args)
+ status, output = exec_command(c)
+ if verbose:
+ print(output)
finally:
f.close()
- return s
+ return status
-from numpy.testing import Tester
-test = Tester().test
-bench = Tester().bench
+from numpy.testing.nosetester import _numpy_tester
+test = _numpy_tester().test
+bench = _numpy_tester().bench
diff --git a/numpy/f2py/__main__.py b/numpy/f2py/__main__.py
index 8f6d256..cb8f261 100644
--- a/numpy/f2py/__main__.py
+++ b/numpy/f2py/__main__.py
@@ -1,4 +1,6 @@
# See http://cens.ioc.ee/projects/f2py2e/
+from __future__ import division, print_function
+
import os
import sys
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py
index b64aaa5..d27b959 100644
--- a/numpy/f2py/auxfuncs.py
+++ b/numpy/f2py/auxfuncs.py
@@ -430,9 +430,6 @@
def isintent_c(var):
return 'c' in var.get('intent', [])
-# def isintent_f(var):
-# return not isintent_c(var)
-
def isintent_cache(var):
return 'cache' in var.get('intent', [])
@@ -673,7 +670,6 @@
proto_args = ','.join(arg_types + arg_types2)
if not proto_args:
proto_args = 'void'
- # print proto_args
return proto_args
diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py
index ec3a248..85eae80 100644
--- a/numpy/f2py/f90mod_rules.py
+++ b/numpy/f2py/f90mod_rules.py
@@ -49,7 +49,7 @@
fgetdims1 = """\
external f2pysetdata
logical ns
- integer r,i,j
+ integer r,i
integer(%d) s(*)
ns = .FALSE.
if (allocated(d)) then
diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py
index 8d06d96..0c9e915 100644
--- a/numpy/f2py/tests/util.py
+++ b/numpy/f2py/tests/util.py
@@ -19,7 +19,7 @@
from numpy.compat import asbytes, asstr
import numpy.f2py
-from numpy.testing import SkipTest
+from numpy.testing import SkipTest, temppath
try:
from hashlib import md5
@@ -159,16 +159,11 @@
"""
if suffix is None:
suffix = '.f'
-
- fd, tmp_fn = tempfile.mkstemp(suffix=suffix)
- os.write(fd, asbytes(source_code))
- os.close(fd)
-
- try:
- return build_module([tmp_fn], options=options, skip=skip, only=only,
+ with temppath(suffix=suffix) as path:
+ with open(path, 'w') as f:
+ f.write(source_code)
+ return build_module([path], options=options, skip=skip, only=only,
module_name=module_name)
- finally:
- os.unlink(tmp_fn)
#
# Check if compilers are available at all...
@@ -209,22 +204,19 @@
"""
code = code % dict(syspath=repr(sys.path))
- fd, script = tempfile.mkstemp(suffix='.py')
- os.write(fd, asbytes(code))
- os.close(fd)
+ with temppath(suffix='.py') as script:
+ with open(script, 'w') as f:
+ f.write(code)
- try:
cmd = [sys.executable, script, 'config']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = p.communicate()
- m = re.search(asbytes(r'COMPILERS:(\d+),(\d+),(\d+)'), out)
- if m:
- _compiler_status = (bool(int(m.group(1))), bool(int(m.group(2))),
- bool(int(m.group(3))))
- finally:
- os.unlink(script)
+ m = re.search(asbytes(r'COMPILERS:(\d+),(\d+),(\d+)'), out)
+ if m:
+ _compiler_status = (bool(int(m.group(1))), bool(int(m.group(2))),
+ bool(int(m.group(3))))
# Finished
return _compiler_status
diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py
index 96809a9..a1f9e90 100644
--- a/numpy/fft/__init__.py
+++ b/numpy/fft/__init__.py
@@ -6,6 +6,6 @@
from .fftpack import *
from .helper import *
-from numpy.testing import Tester
-test = Tester().test
-bench = Tester().bench
+from numpy.testing.nosetester import _numpy_tester
+test = _numpy_tester().test
+bench = _numpy_tester().bench
diff --git a/numpy/fft/fftpack.py b/numpy/fft/fftpack.py
index 4ad4f68..c3bb732 100644
--- a/numpy/fft/fftpack.py
+++ b/numpy/fft/fftpack.py
@@ -203,10 +203,16 @@
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
- i.e., ``a[0]`` should contain the zero frequency term,
- ``a[1:n/2+1]`` should contain the positive-frequency terms, and
- ``a[n/2+1:]`` should contain the negative-frequency terms, in order of
- decreasingly negative frequency. See `numpy.fft` for details.
+ i.e.,
+
+ * ``a[0]`` should contain the zero frequency term,
+ * ``a[1:n//2]`` should contain the positive-frequency terms,
+ * ``a[n//2 + 1:]`` should contain the negative-frequency terms, in
+ increasing order starting from the most negative frequency.
+
+ For an even number of input points, ``A[n//2]`` represents the sum of
+ the values at the positive and negative Nyquist frequencies, as the two
+ are aliased together. See `numpy.fft` for details.
Parameters
----------
@@ -263,9 +269,9 @@
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
- [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
+ ...
>>> plt.legend(('real', 'imaginary'))
- <matplotlib.legend.Legend object at 0x...>
+ ...
>>> plt.show()
"""
diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py
index 0606dfb..1d65db5 100644
--- a/numpy/lib/__init__.py
+++ b/numpy/lib/__init__.py
@@ -41,6 +41,6 @@
__all__ += financial.__all__
__all__ += nanfunctions.__all__
-from numpy.testing import Tester
-test = Tester().test
-bench = Tester().bench
+from numpy.testing.nosetester import _numpy_tester
+test = _numpy_tester().test
+bench = _numpy_tester().bench
diff --git a/numpy/lib/arrayterator.py b/numpy/lib/arrayterator.py
index 80b369b..fb52ada 100644
--- a/numpy/lib/arrayterator.py
+++ b/numpy/lib/arrayterator.py
@@ -80,7 +80,7 @@
>>> for subarr in a_itor:
... if not subarr.all():
- ... print subarr, subarr.shape
+ ... print(subarr, subarr.shape)
...
[[[[0 1]]]] (1, 1, 1, 2)
@@ -158,7 +158,7 @@
>>> for subarr in a_itor.flat:
... if not subarr:
- ... print subarr, type(subarr)
+ ... print(subarr, type(subarr))
...
0 <type 'numpy.int32'>
diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py
index a7e4e60..c42424d 100644
--- a/numpy/lib/financial.py
+++ b/numpy/lib/financial.py
@@ -247,7 +247,7 @@
If you only had $150/month to pay towards the loan, how long would it take
to pay-off a loan of $8,000 at 7% annual interest?
- >>> print round(np.nper(0.07/12, -150, 8000), 5)
+ >>> print(round(np.nper(0.07/12, -150, 8000), 5))
64.07335
So, over 64 months would be required to pay off the loan.
@@ -347,7 +347,7 @@
>>> for payment in per:
... index = payment - 1
... principal = principal + ppmt[index]
- ... print fmt.format(payment, ppmt[index], ipmt[index], principal)
+ ... print(fmt.format(payment, ppmt[index], ipmt[index], principal))
1 -200.58 -17.17 2299.42
2 -201.96 -15.79 2097.46
3 -203.35 -14.40 1894.11
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 9261dba..9bc128f 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -268,14 +268,14 @@
large datasets respectively. Switchover point is usually x.size~1000.
'FD' (Freedman Diaconis Estimator)
- .. math:: h = 2 \\frac{IQR}{n^{-1/3}}
+ .. math:: h = 2 \\frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good
for large datasets. The IQR is very robust to outliers.
'Scott'
- .. math:: h = \\frac{3.5\\sigma}{n^{-1/3}}
+ .. math:: h = \\frac{3.5\\sigma}{n^{1/3}}
The binwidth is proportional to the standard deviation (sd) of the data
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good
@@ -833,7 +833,7 @@
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
- ... print 'ValueError'
+ ... print('ValueError')
...
ValueError
@@ -2097,7 +2097,8 @@
return _res
-def cov(m, y=None, rowvar=1, bias=0, ddof=None, fweights=None, aweights=None):
+def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
+ aweights=None):
"""
Estimate a covariance matrix, given data and weights.
@@ -2118,14 +2119,14 @@
y : array_like, optional
An additional set of variables and observations. `y` has the same form
as that of `m`.
- rowvar : int, optional
- If `rowvar` is non-zero (default), then each row represents a
+ rowvar : bool, optional
+ If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
- bias : int, optional
- Default normalization is by ``(N - 1)``, where ``N`` corresponds to the
- number of observations given (unbiased estimate). If `bias` is 1, then
+ bias : bool, optional
+ Default normalization (False) is by ``(N - 1)``, where ``N`` is the
+ number of observations given (unbiased estimate). If `bias` is True, then
normalization is by ``N``. These values can be overridden by using the
keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
@@ -2199,13 +2200,13 @@
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
- >>> print np.cov(X)
+ >>> print(np.cov(X))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
- >>> print np.cov(x, y)
+ >>> print(np.cov(x, y))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
- >>> print np.cov(x)
+ >>> print(np.cov(x))
11.71
"""
@@ -3645,11 +3646,13 @@
y : array_like
Input array to integrate.
x : array_like, optional
- If `x` is None, then spacing between all `y` elements is `dx`.
+ The sample points corresponding to the `y` values. If `x` is None,
+ the sample points are assumed to be evenly spaced `dx` apart. The
+ default is None.
dx : scalar, optional
- If `x` is None, spacing given by `dx` is assumed. Default is 1.
+ The spacing between sample points when `x` is None. The default is 1.
axis : int, optional
- Specify the axis.
+ The axis along which to integrate.
Returns
-------
@@ -3966,6 +3969,7 @@
arr = asarray(arr)
ndim = arr.ndim
+ arrorder = 'F' if arr.flags.fnc else 'C'
if axis is None:
if ndim != 1:
arr = arr.ravel()
@@ -4003,7 +4007,7 @@
stop = xr[0] + 1
newshape[axis] -= numtodel
- new = empty(newshape, arr.dtype, arr.flags.fnc)
+ new = empty(newshape, arr.dtype, arrorder)
# copy initial chunk
if start == 0:
pass
@@ -4054,7 +4058,7 @@
if (obj < 0):
obj += N
newshape[axis] -= 1
- new = empty(newshape, arr.dtype, arr.flags.fnc)
+ new = empty(newshape, arr.dtype, arrorder)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj, None)
@@ -4197,6 +4201,7 @@
arr = asarray(arr)
ndim = arr.ndim
+ arrorder = 'F' if arr.flags.fnc else 'C'
if axis is None:
if ndim != 1:
arr = arr.ravel()
@@ -4265,7 +4270,7 @@
values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
numnew = values.shape[axis]
newshape[axis] += numnew
- new = empty(newshape, arr.dtype, arr.flags.fnc)
+ new = empty(newshape, arr.dtype, arrorder)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
slobj[axis] = slice(index, index+numnew)
@@ -4298,7 +4303,7 @@
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
- new = empty(newshape, arr.dtype, arr.flags.fnc)
+ new = empty(newshape, arr.dtype, arrorder)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 8bcc3fb..a0875a2 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -491,7 +491,7 @@
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> for index, x in np.ndenumerate(a):
- ... print index, x
+ ... print(index, x)
(0, 0) 1
(0, 1) 2
(1, 0) 3
@@ -542,7 +542,7 @@
Examples
--------
>>> for index in np.ndindex(3, 2, 1):
- ... print index
+ ... print(index)
(0, 0, 0)
(0, 1, 0)
(1, 0, 0)
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 2f67743..189e591 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -61,7 +61,7 @@
See Also
--------
- polyval : Evaluate a polynomial at a point.
+ polyval : Compute polynomial values.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
@@ -182,7 +182,7 @@
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
- polyval : Evaluate a polynomial at a point.
+ polyval : Compute polynomial values.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
@@ -466,7 +466,7 @@
See Also
--------
- polyval : Computes polynomial values.
+ polyval : Compute polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
@@ -631,7 +631,7 @@
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
- A number, a 1D array of numbers, or an instance of poly1d, "at"
+ A number, an array of numbers, or an instance of poly1d, at
which to evaluate `p`.
Returns
@@ -715,12 +715,12 @@
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
- >>> print p1
+ >>> print(p1)
1 x + 2
- >>> print p2
+ >>> print(p2)
2
9 x + 5 x + 4
- >>> print np.polyadd(p1, p2)
+ >>> print(np.polyadd(p1, p2))
2
9 x + 6 x + 6
@@ -826,13 +826,13 @@
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
- >>> print p1
+ >>> print(p1)
2
1 x + 2 x + 3
- >>> print p2
+ >>> print(p2)
2
9 x + 5 x + 1
- >>> print np.polymul(p1, p2)
+ >>> print(np.polymul(p1, p2))
4 3 2
9 x + 23 x + 38 x + 17 x + 3
@@ -966,7 +966,7 @@
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
- >>> print np.poly1d(p)
+ >>> print(np.poly1d(p))
2
1 x + 2 x + 3
@@ -1022,7 +1022,7 @@
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
- >>> print p
+ >>> print(p)
2
1 z + 2 z + 3
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index f4b43a5..4c23ab3 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -121,9 +121,6 @@
"""
if not args:
raise ValueError('must provide at least one argument')
- if len(args) == 1:
- # a single argument does not work with np.broadcast
- return np.asarray(args[0]).shape
# use the old-iterator because np.nditer does not handle size 0 arrays
# consistently
b = np.broadcast(*args[:32])
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index 1bf65fa..a091ef5 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -112,7 +112,7 @@
>>> for arr in basic_arrays + record_arrays:
... f = BytesIO()
... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it
- ... print repr(f.getvalue())
+ ... print(repr(f.getvalue()))
...
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 88c9326..a5ac78e 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -1974,10 +1974,42 @@
assert_almost_equal(np.interp(x0, x, y), x0)
def test_right_left_behavior(self):
- assert_equal(interp([-1, 0, 1], [0], [1]), [1, 1, 1])
- assert_equal(interp([-1, 0, 1], [0], [1], left=0), [0, 1, 1])
- assert_equal(interp([-1, 0, 1], [0], [1], right=0), [1, 1, 0])
- assert_equal(interp([-1, 0, 1], [0], [1], left=0, right=0), [0, 1, 0])
+ # Needs range of sizes to test different code paths.
+ # size ==1 is special cased, 1 < size < 5 is linear search, and
+ # size >= 5 goes through local search and possibly binary search.
+ for size in range(1, 10):
+ xp = np.arange(size, dtype=np.double)
+ yp = np.ones(size, dtype=np.double)
+ incpts = np.array([-1, 0, size - 1, size], dtype=np.double)
+ decpts = incpts[::-1]
+
+ incres = interp(incpts, xp, yp)
+ decres = interp(decpts, xp, yp)
+ inctgt = np.array([1, 1, 1, 1], dtype=np.float)
+ dectgt = inctgt[::-1]
+ assert_equal(incres, inctgt)
+ assert_equal(decres, dectgt)
+
+ incres = interp(incpts, xp, yp, left=0)
+ decres = interp(decpts, xp, yp, left=0)
+ inctgt = np.array([0, 1, 1, 1], dtype=np.float)
+ dectgt = inctgt[::-1]
+ assert_equal(incres, inctgt)
+ assert_equal(decres, dectgt)
+
+ incres = interp(incpts, xp, yp, right=2)
+ decres = interp(decpts, xp, yp, right=2)
+ inctgt = np.array([1, 1, 1, 2], dtype=np.float)
+ dectgt = inctgt[::-1]
+ assert_equal(incres, inctgt)
+ assert_equal(decres, dectgt)
+
+ incres = interp(incpts, xp, yp, left=0, right=2)
+ decres = interp(decpts, xp, yp, left=0, right=2)
+ inctgt = np.array([0, 1, 1, 2], dtype=np.float)
+ dectgt = inctgt[::-1]
+ assert_equal(incres, inctgt)
+ assert_equal(decres, dectgt)
def test_scalar_interpolation_point(self):
x = np.linspace(0, 1, 5)
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index af904e9..32e0c32 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -4,7 +4,7 @@
import gzip
import os
import threading
-from tempfile import mkstemp, NamedTemporaryFile
+from tempfile import NamedTemporaryFile
import time
import warnings
import gc
@@ -19,7 +19,7 @@
from numpy.testing import (
TestCase, run_module_suite, assert_warns, assert_,
assert_raises_regex, assert_raises, assert_allclose,
- assert_array_equal,
+ assert_array_equal,temppath
)
from numpy.testing.utils import tempdir
@@ -194,8 +194,7 @@
def test_big_arrays(self):
L = (1 << 31) + 100000
a = np.empty(L, dtype=np.uint8)
- with tempdir(prefix="numpy_test_big_arrays_") as tmpdir:
- tmp = os.path.join(tmpdir, "file.npz")
+ with temppath(prefix="numpy_test_big_arrays_") as tmp:
np.savez(tmp, a=a)
del a
npfile = np.load(tmp)
@@ -234,16 +233,12 @@
# and savez functions in multithreaded environment
def writer(error_list):
- fd, tmp = mkstemp(suffix='.npz')
- os.close(fd)
- try:
+ with temppath(suffix='.npz') as tmp:
arr = np.random.randn(500, 500)
try:
np.savez(tmp, arr=arr)
except OSError as err:
error_list.append(err)
- finally:
- os.remove(tmp)
errors = []
threads = [threading.Thread(target=writer, args=(errors,))
@@ -259,26 +254,17 @@
def test_not_closing_opened_fid(self):
# Test that issue #2178 is fixed:
# verify could seek on 'loaded' file
-
- fd, tmp = mkstemp(suffix='.npz')
- os.close(fd)
- try:
- fp = open(tmp, 'wb')
- np.savez(fp, data='LOVELY LOAD')
- fp.close()
-
- fp = open(tmp, 'rb', 10000)
- fp.seek(0)
- assert_(not fp.closed)
- np.load(fp)['data']
- # fp must not get closed by .load
- assert_(not fp.closed)
- fp.seek(0)
- assert_(not fp.closed)
-
- finally:
- fp.close()
- os.remove(tmp)
+ with temppath(suffix='.npz') as tmp:
+ with open(tmp, 'wb') as fp:
+ np.savez(fp, data='LOVELY LOAD')
+ with open(tmp, 'rb', 10000) as fp:
+ fp.seek(0)
+ assert_(not fp.closed)
+ np.load(fp)['data']
+ # fp must not get closed by .load
+ assert_(not fp.closed)
+ fp.seek(0)
+ assert_(not fp.closed)
def test_closing_fid(self):
# Test that issue #1517 (too many opened files) remains closed
@@ -286,13 +272,8 @@
# e.g. Debian sid of 2012 Jul 05 but was reported to
# trigger the failure on Ubuntu 10.04:
# http://projects.scipy.org/numpy/ticket/1517#comment:2
- fd, tmp = mkstemp(suffix='.npz')
- os.close(fd)
-
- try:
- fp = open(tmp, 'wb')
- np.savez(fp, data='LOVELY LOAD')
- fp.close()
+ with temppath(suffix='.npz') as tmp:
+ np.savez(tmp, data='LOVELY LOAD')
# We need to check if the garbage collector can properly close
# numpy npz file returned by np.load when their reference count
# goes to zero. Python 3 running in debug mode raises a
@@ -308,16 +289,14 @@
except Exception as e:
msg = "Failed to load data from a file: %s" % e
raise AssertionError(msg)
- finally:
- os.remove(tmp)
def test_closing_zipfile_after_load(self):
- # Check that zipfile owns file and can close it.
- # This needs to pass a file name to load for the
- # test.
- with tempdir(prefix="numpy_test_closing_zipfile_after_load_") as tmpdir:
- fd, tmp = mkstemp(suffix='.npz', dir=tmpdir)
- os.close(fd)
+ # Check that zipfile owns file and can close it. This needs to
+ # pass a file name to load for the test. On windows failure will
+ # cause a second error will be raised when the attempt to remove
+ # the open file is made.
+ prefix = 'numpy_test_closing_zipfile_after_load_'
+ with temppath(suffix='.npz', prefix=prefix) as tmp:
np.savez(tmp, lab='place holder')
data = np.load(tmp)
fp = data.zip.fp
@@ -425,15 +404,11 @@
asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
def test_file_roundtrip(self):
- f, name = mkstemp()
- os.close(f)
- try:
+ with temppath() as name:
a = np.array([(1, 2), (3, 4)])
np.savetxt(name, a)
b = np.loadtxt(name)
assert_array_equal(a, b)
- finally:
- os.unlink(name)
def test_complex_arrays(self):
ncols = 2
@@ -748,15 +723,11 @@
assert_equal(res, tgt)
def test_universal_newline(self):
- f, name = mkstemp()
- os.write(f, b'1 21\r3 42\r')
- os.close(f)
-
- try:
+ with temppath() as name:
+ with open(name, 'w') as f:
+ f.write('1 21\r3 42\r')
data = np.loadtxt(name)
- assert_array_equal(data, [[1, 21], [3, 42]])
- finally:
- os.unlink(name)
+ assert_array_equal(data, [[1, 21], [3, 42]])
def test_empty_field_after_tab(self):
c = TextIO()
@@ -1769,8 +1740,9 @@
assert_equal(test, control)
def test_gft_using_filename(self):
- # Test that we can load data from a filename as well as a file object
- wanted = np.arange(6).reshape((2, 3))
+ # Test that we can load data from a filename as well as a file
+ # object
+ tgt = np.arange(6).reshape((2, 3))
if sys.version_info[0] >= 3:
# python 3k is known to fail for '\r'
linesep = ('\n', '\r\n')
@@ -1779,15 +1751,11 @@
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
- f, name = mkstemp()
- # We can't use NamedTemporaryFile on windows, because we cannot
- # reopen the file.
- try:
- os.write(f, asbytes(data))
- assert_array_equal(np.genfromtxt(name), wanted)
- finally:
- os.close(f)
- os.unlink(name)
+ with temppath() as name:
+ with open(name, 'w') as f:
+ f.write(data)
+ res = np.genfromtxt(name)
+ assert_array_equal(res, tgt)
def test_gft_using_generator(self):
# gft doesn't work with unicode.
@@ -1815,9 +1783,9 @@
assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
- assert test.dtype['f0'] == np.float
- assert test.dtype['f1'] == np.int64
- assert test.dtype['f2'] == np.integer
+ assert_(test.dtype['f0'] == np.float)
+ assert_(test.dtype['f1'] == np.int64)
+ assert_(test.dtype['f2'] == np.integer)
assert_allclose(test['f0'], 73786976294838206464.)
assert_equal(test['f1'], 17179869184)
@@ -1847,16 +1815,15 @@
g = gzip.GzipFile(fileobj=s, mode='w')
g.write(b'1 2 3\n')
g.close()
- s.seek(0)
- f, name = mkstemp(suffix='.gz')
- try:
- os.write(f, s.read())
- s.close()
- assert_array_equal(np.loadtxt(name), [1, 2, 3])
- finally:
- os.close(f)
- os.unlink(name)
+ s.seek(0)
+ with temppath(suffix='.gz') as name:
+ with open(name, 'wb') as f:
+ f.write(s.read())
+ res = np.loadtxt(name)
+ s.close()
+
+ assert_array_equal(res, [1, 2, 3])
def test_gzip_loadtxt_from_string():
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index f418504..7a7b37b 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -395,12 +395,12 @@
def test_dtype_error(self):
for f in self.nanfuncs:
- for dtype in [np.bool_, np.int_, np.object]:
- assert_raises(TypeError, f, _ndat, axis=1, dtype=np.int)
+ for dtype in [np.bool_, np.int_, np.object_]:
+ assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype)
def test_out_dtype_error(self):
for f in self.nanfuncs:
- for dtype in [np.bool_, np.int_, np.object]:
+ for dtype in [np.bool_, np.int_, np.object_]:
out = np.empty(_ndat.shape[0], dtype=dtype)
assert_raises(TypeError, f, _ndat, axis=1, out=out)
diff --git a/numpy/lib/tests/test_packbits.py b/numpy/lib/tests/test_packbits.py
index 186e896..0de084e 100644
--- a/numpy/lib/tests/test_packbits.py
+++ b/numpy/lib/tests/test_packbits.py
@@ -1,5 +1,6 @@
-import numpy as np
+from __future__ import division, absolute_import, print_function
+import numpy as np
from numpy.testing import assert_array_equal, assert_equal, assert_raises
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 464ffd9..b2f350b 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -664,7 +664,7 @@
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
- >>> print H[::-1] # This shows the bin content in the order as plotted
+ >>> print(H[::-1]) # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 2fe4e7d..1313adf 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -501,7 +501,7 @@
>>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q',
... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q']
>>> for typechar in typechars:
- ... print typechar, ' : ', np.typename(typechar)
+ ... print(typechar, ' : ', np.typename(typechar))
...
S1 : character
? : bool
diff --git a/numpy/lib/user_array.py b/numpy/lib/user_array.py
index bb5bec6..3103da5 100644
--- a/numpy/lib/user_array.py
+++ b/numpy/lib/user_array.py
@@ -1,5 +1,6 @@
"""
Standard container-class for easy multiple-inheritance.
+
Try to inherit from the ndarray instead of using this class as this is not
complete.
@@ -16,7 +17,19 @@
class container(object):
+ """
+ container(data, dtype=None, copy=True)
+ Standard container-class for easy multiple-inheritance.
+
+ Methods
+ -------
+ copy
+ tostring
+ byteswap
+ astype
+
+ """
def __init__(self, data, dtype=None, copy=True):
self.array = array(data, dtype, copy=copy)
@@ -219,15 +232,19 @@
return self._rc(greater_equal(self.array, other))
def copy(self):
+ ""
return self._rc(self.array.copy())
def tostring(self):
+ ""
return self.array.tostring()
def byteswap(self):
+ ""
return self._rc(self.array.byteswap())
def astype(self, typecode):
+ ""
return self._rc(self.array.astype(typecode))
def _rc(self, a):
diff --git a/numpy/linalg/__init__.py b/numpy/linalg/__init__.py
index bc2a1ff..69445f5 100644
--- a/numpy/linalg/__init__.py
+++ b/numpy/linalg/__init__.py
@@ -50,6 +50,6 @@
from .linalg import *
-from numpy.testing import Tester
-test = Tester().test
-bench = Tester().test
+from numpy.testing.nosetester import _numpy_tester
+test = _numpy_tester().test
+bench = _numpy_tester().bench
diff --git a/numpy/linalg/lapack_lite/clapack_scrub.py b/numpy/linalg/lapack_lite/clapack_scrub.py
index 4a517d5..f3d29aa 100644
--- a/numpy/linalg/lapack_lite/clapack_scrub.py
+++ b/numpy/linalg/lapack_lite/clapack_scrub.py
@@ -13,10 +13,6 @@
Scanner.__init__(self, self.lexicon, info, name)
def begin(self, state_name):
-# if self.state_name == '':
-# print '<default>'
-# else:
-# print self.state_name
Scanner.begin(self, state_name)
def sep_seq(sequence, sep):
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 2e96972..9dc879d 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -1853,7 +1853,7 @@
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
- >>> print m, c
+ >>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
diff --git a/numpy/linalg/tests/test_deprecations.py b/numpy/linalg/tests/test_deprecations.py
index 13d2441..9b6fe34 100644
--- a/numpy/linalg/tests/test_deprecations.py
+++ b/numpy/linalg/tests/test_deprecations.py
@@ -1,6 +1,8 @@
"""Test deprecation and future warnings.
"""
+from __future__ import division, absolute_import, print_function
+
import numpy as np
from numpy.testing import assert_warns, run_module_suite
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index afa098f..fc139be 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -61,7 +61,7 @@
class LinalgCase(object):
def __init__(self, name, a, b, exception_cls=None):
- assert isinstance(name, str)
+ assert_(isinstance(name, str))
self.name = name
self.a = a
self.b = b
@@ -267,7 +267,7 @@
xi = xi[slices]
xi[...] = x
xi = xi.view(x.__class__)
- assert np.all(xi == x)
+ assert_(np.all(xi == x))
yield xi, "stride_" + "_".join(["%+d" % j for j in repeats])
# generate also zero strides if possible
diff --git a/numpy/ma/__init__.py b/numpy/ma/__init__.py
index 05b641d..af3468b 100644
--- a/numpy/ma/__init__.py
+++ b/numpy/ma/__init__.py
@@ -51,6 +51,6 @@
__all__ += core.__all__
__all__ += extras.__all__
-from numpy.testing import Tester
-test = Tester().test
-bench = Tester().bench
+from numpy.testing.nosetester import _numpy_tester
+test = _numpy_tester().test
+bench = _numpy_tester().bench
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 0a83284..25b926e 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -2147,12 +2147,12 @@
>>> food = np.array(['green_eggs', 'ham'], dtype=object)
>>> # don't eat spoiled food
>>> eat = ma.masked_object(food, 'green_eggs')
- >>> print eat
+ >>> print(eat)
[-- ham]
>>> # plain ol` ham is boring
>>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object)
>>> eat = ma.masked_object(fresh_food, 'green_eggs')
- >>> print eat
+ >>> print(eat)
[cheese ham pineapple]
Note that `mask` is set to ``nomask`` if possible.
@@ -2548,7 +2548,7 @@
>>> type(fl)
<class 'numpy.ma.core.MaskedIterator'>
>>> for item in fl:
- ... print item
+ ... print(item)
...
0
1
@@ -3064,11 +3064,11 @@
Examples
--------
>>> x = np.ma.array([[1,2,3.1],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
- >>> print x
+ >>> print(x)
[[1.0 -- 3.1]
[-- 5.0 --]
[7.0 -- 9.0]]
- >>> print x.astype(int32)
+ >>> print(x.astype(int32))
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
@@ -3546,6 +3546,8 @@
def filled(self, fill_value=None):
"""
Return a copy of self, with masked values filled with a given value.
+ **However**, if there are no masked values to fill, self will be
+ returned instead as an ndarray.
Parameters
----------
@@ -3557,7 +3559,9 @@
-------
filled_array : ndarray
A copy of ``self`` with invalid entries replaced by *fill_value*
- (be it the function argument or the attribute of ``self``.
+ (be it the function argument or the attribute of ``self``), or
+ ``self`` itself as an ndarray if there are no invalid entries to
+ be replaced.
Notes
-----
@@ -3676,7 +3680,7 @@
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
- >>> print x
+ >>> print(x)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
@@ -4281,11 +4285,11 @@
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
- >>> print x
+ >>> print(x)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
- >>> print x.ravel()
+ >>> print(x.ravel())
[1 -- 3 -- 5 -- 7 -- 9]
"""
@@ -4337,11 +4341,11 @@
Examples
--------
>>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1])
- >>> print x
+ >>> print(x)
[[-- 2]
[3 --]]
>>> x = x.reshape((4,1))
- >>> print x
+ >>> print(x)
[[--]
[2]
[3]
@@ -4402,18 +4406,18 @@
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
- >>> print x
+ >>> print(x)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> x.put([0,4,8],[10,20,30])
- >>> print x
+ >>> print(x)
[[10 -- 3]
[-- 20 --]
[7 -- 30]]
>>> x.put(4,999)
- >>> print x
+ >>> print(x)
[[10 -- 3]
[-- 999 --]
[7 -- 30]]
@@ -4765,17 +4769,17 @@
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
- >>> print x
+ >>> print(x)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
- >>> print x.sum()
+ >>> print(x.sum())
25
- >>> print x.sum(axis=1)
+ >>> print(x.sum(axis=1))
[4 5 16]
- >>> print x.sum(axis=0)
+ >>> print(x.sum(axis=0))
[8 5 12]
- >>> print type(x.sum(axis=0, dtype=np.int64)[0])
+ >>> print(type(x.sum(axis=0, dtype=np.int64)[0]))
<type 'numpy.int64'>
"""
@@ -4843,7 +4847,7 @@
Examples
--------
>>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
- >>> print marr.cumsum()
+ >>> print(marr.cumsum())
[0 1 3 -- -- -- 9 16 24 33]
"""
@@ -5243,12 +5247,12 @@
--------
>>> x = np.ma.array(arange(4), mask=[1,1,0,0])
>>> x.shape = (2,2)
- >>> print x
+ >>> print(x)
[[-- --]
[2 3]]
- >>> print x.argmin(axis=0, fill_value=-1)
+ >>> print(x.argmin(axis=0, fill_value=-1))
[0 0]
- >>> print x.argmin(axis=0, fill_value=9)
+ >>> print(x.argmin(axis=0, fill_value=9))
[1 1]
"""
@@ -5344,19 +5348,19 @@
>>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # Default
>>> a.sort()
- >>> print a
+ >>> print(a)
[1 3 5 -- --]
>>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # Put missing values in the front
>>> a.sort(endwith=False)
- >>> print a
+ >>> print(a)
[-- -- 1 3 5]
>>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # fill_value takes over endwith
>>> a.sort(endwith=False, fill_value=3)
- >>> print a
+ >>> print(a)
[1 -- -- 3 5]
"""
@@ -5472,7 +5476,7 @@
Examples
--------
>>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2)
- >>> print x
+ >>> print(x)
[[0 --]
[2 3]
[4 --]]
@@ -5482,7 +5486,7 @@
masked_array(data = [0 3],
mask = [False False],
fill_value = 999999)
- >>> print x.mini(axis=1)
+ >>> print(x.mini(axis=1))
[0 2 4]
"""
@@ -5761,11 +5765,11 @@
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
- >>> print x
+ >>> print(x)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
- >>> print x.toflex()
+ >>> print(x.toflex())
[[(1, False) (2, True) (3, False)]
[(4, True) (5, False) (6, True)]
[(7, False) (8, True) (9, False)]]
@@ -6934,14 +6938,14 @@
>>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0],
... [1, 0, 1],
... [0, 1, 0]])
- >>> print x
+ >>> print(x)
[[0.0 -- 2.0]
[-- 4.0 --]
[6.0 -- 8.0]]
>>> np.ma.where(x > 5) # return the indices where x > 5
(array([2, 2]), array([0, 2]))
- >>> print np.ma.where(x > 5, x, -3.1416)
+ >>> print(np.ma.where(x > 5, x, -3.1416))
[[-3.1416 -- -3.1416]
[-- -3.1416 --]
[6.0 -- 8.0]]
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index e1d228e..9855b4e 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -439,7 +439,7 @@
>>> a = ma.arange(24).reshape(2,3,4)
>>> a[:,0,1] = ma.masked
>>> a[:,1,:] = ma.masked
- >>> print a
+ >>> print(a)
[[[0 -- 2 3]
[-- -- -- --]
[8 9 10 11]]
@@ -447,14 +447,14 @@
[[12 -- 14 15]
[-- -- -- --]
[20 21 22 23]]]
- >>> print ma.apply_over_axes(ma.sum, a, [0,2])
+ >>> print(ma.apply_over_axes(ma.sum, a, [0,2]))
[[[46]
[--]
[124]]]
Tuple axis arguments to ufuncs are equivalent:
- >>> print ma.sum(a, axis=(0,2)).reshape((1,-1,1))
+ >>> print(ma.sum(a, axis=(0,2)).reshape((1,-1,1)))
[[[46]
[--]
[124]]]
@@ -502,13 +502,13 @@
1.25
>>> x = np.ma.arange(6.).reshape(3, 2)
- >>> print x
+ >>> print(x)
[[ 0. 1.]
[ 2. 3.]
[ 4. 5.]]
>>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3],
... returned=True)
- >>> print avg
+ >>> print(avg)
[2.66666666667 3.66666666667]
"""
@@ -1476,7 +1476,7 @@
array([3, 8])
>>> a[:] = np.ma.masked
- >>> print flatnotmasked_edges(ma)
+ >>> print(flatnotmasked_edges(ma))
None
"""
@@ -1578,7 +1578,7 @@
>>> np.ma.flatnotmasked_contiguous(a)
[slice(3, 5, None), slice(6, 9, None)]
>>> a[:] = np.ma.masked
- >>> print np.ma.flatnotmasked_edges(a)
+ >>> print(np.ma.flatnotmasked_edges(a))
None
"""
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 8d7d9a4..9e5ad51 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -201,7 +201,7 @@
assert_(not np.may_share_memory(x.mask, y.mask))
def test_creation_with_list_of_maskedarrays(self):
- # Tests creaating a masked array from alist of masked arrays.
+ # Tests creating a masked array from a list of masked arrays.
x = array(np.arange(5), mask=[1, 0, 0, 0, 0])
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
@@ -237,11 +237,6 @@
self.assertTrue(str(masked) == '--')
self.assertTrue(x[1] is masked)
assert_equal(filled(x[1], 0), 0)
- # don't know why these should raise an exception...
- #self.assertRaises(Exception, lambda x,y: x+y, masked, masked)
- #self.assertRaises(Exception, lambda x,y: x+y, masked, 2)
- #self.assertRaises(Exception, lambda x,y: x+y, masked, xx)
- #self.assertRaises(Exception, lambda x,y: x+y, xx, masked)
def test_set_element_as_object(self):
# Tests setting elements with object
@@ -360,10 +355,8 @@
x1 = np.arange(5)
y1 = array(x1, mask=m)
- #self.assertTrue( y1._data is x1)
assert_equal(y1._data.__array_interface__, x1.__array_interface__)
self.assertTrue(allequal(x1, y1.data))
- #self.assertTrue( y1.mask is m)
assert_equal(y1._mask.__array_interface__, m.__array_interface__)
y1a = array(y1)
@@ -373,12 +366,10 @@
y2 = array(x1, mask=m)
self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__)
- #self.assertTrue( y2.mask is m)
self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__)
self.assertTrue(y2[2] is masked)
y2[2] = 9
self.assertTrue(y2[2] is not masked)
- #self.assertTrue( y2.mask is not m)
self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__)
self.assertTrue(allequal(y2.mask, 0))
@@ -756,47 +747,47 @@
t_ma = masked_array(data = [([1, 2, 3],)],
mask = [([False, True, False],)],
fill_value = ([999999, 999999, 999999],),
- dtype = [('a', '<i8', (3,))])
- assert str(t_ma[0]) == "([1, --, 3],)"
- assert repr(t_ma[0]) == "([1, --, 3],)"
+ dtype = [('a', '<i4', (3,))])
+ assert_(str(t_ma[0]) == "([1, --, 3],)")
+ assert_(repr(t_ma[0]) == "([1, --, 3],)")
# additonal tests with structured arrays
t_2d = masked_array(data = [([[1, 2], [3,4]],)],
mask = [([[False, True], [True, False]],)],
- dtype = [('a', '<i8', (2,2))])
- assert str(t_2d[0]) == "([[1, --], [--, 4]],)"
- assert repr(t_2d[0]) == "([[1, --], [--, 4]],)"
+ dtype = [('a', '<i4', (2,2))])
+ assert_(str(t_2d[0]) == "([[1, --], [--, 4]],)")
+ assert_(repr(t_2d[0]) == "([[1, --], [--, 4]],)")
t_0d = masked_array(data = [(1,2)],
mask = [(True,False)],
- dtype = [('a', '<i8'), ('b', '<i8')])
- assert str(t_0d[0]) == "(--, 2)"
- assert repr(t_0d[0]) == "(--, 2)"
+ dtype = [('a', '<i4'), ('b', '<i4')])
+ assert_(str(t_0d[0]) == "(--, 2)")
+ assert_(repr(t_0d[0]) == "(--, 2)")
t_2d = masked_array(data = [([[1, 2], [3,4]], 1)],
mask = [([[False, True], [True, False]], False)],
- dtype = [('a', '<i8', (2,2)), ('b', float)])
- assert str(t_2d[0]) == "([[1, --], [--, 4]], 1.0)"
- assert repr(t_2d[0]) == "([[1, --], [--, 4]], 1.0)"
+ dtype = [('a', '<i4', (2,2)), ('b', float)])
+ assert_(str(t_2d[0]) == "([[1, --], [--, 4]], 1.0)")
+ assert_(repr(t_2d[0]) == "([[1, --], [--, 4]], 1.0)")
t_ne = masked_array(data=[(1, (1, 1))],
mask=[(True, (True, False))],
- dtype = [('a', '<i8'), ('b', 'i4,i4')])
- assert str(t_ne[0]) == "(--, (--, 1))"
- assert repr(t_ne[0]) == "(--, (--, 1))"
+ dtype = [('a', '<i4'), ('b', 'i4,i4')])
+ assert_(str(t_ne[0]) == "(--, (--, 1))")
+ assert_(repr(t_ne[0]) == "(--, (--, 1))")
def test_object_with_array(self):
mx1 = masked_array([1.], mask=[True])
mx2 = masked_array([1., 2.])
mx = masked_array([mx1, mx2], mask=[False, True])
- assert mx[0] is mx1
- assert mx[1] is not mx2
- assert np.all(mx[1].data == mx2.data)
- assert np.all(mx[1].mask)
+ assert_(mx[0] is mx1)
+ assert_(mx[1] is not mx2)
+ assert_(np.all(mx[1].data == mx2.data))
+ assert_(np.all(mx[1].mask))
# check that we return a view.
mx[1].data[0] = 0.
- assert mx2[0] == 0.
+ assert_(mx2[0] == 0.)
class TestMaskedArrayArithmetic(TestCase):
@@ -1364,7 +1355,6 @@
xs[[1, 4]] = [10, 40]
assert_equal(xh._data, [0, 10, 2, 3, 4])
assert_equal(xs._data, [0, 10, 2, 3, 40])
- #assert_equal(xh.mask.ctypes._data, m.ctypes._data)
assert_equal(xs.mask, [0, 0, 0, 1, 0])
self.assertTrue(xh._hardmask)
self.assertTrue(not xs._hardmask)
@@ -1372,7 +1362,6 @@
xs[1:4] = [10, 20, 30]
assert_equal(xh._data, [0, 10, 20, 3, 4])
assert_equal(xs._data, [0, 10, 20, 30, 40])
- #assert_equal(xh.mask.ctypes._data, m.ctypes._data)
assert_equal(xs.mask, nomask)
xh[0] = masked
xs[0] = masked
@@ -1416,7 +1405,6 @@
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
xh[4:5] = 999
- #assert_equal(xh.mask.ctypes._data, m.ctypes._data)
xh[0:1] = 999
assert_equal(xh._data, [999, 1, 2, 3, 4])
@@ -1839,9 +1827,7 @@
'arccosh',
'arctanh',
'absolute', 'fabs', 'negative',
- # 'nonzero', 'around',
'floor', 'ceil',
- # 'sometrue', 'alltrue',
'logical_not',
'add', 'subtract', 'multiply',
'divide', 'true_divide', 'floor_divide',
@@ -2064,15 +2050,12 @@
assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data,
[1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
- #assert_equal(z._data, [0.2,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])
xm = xm.copy()
xm /= ym
assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data,
[1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
- #assert_equal(xm._data,
- # [1/5.,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])
def test_datafriendly_add(self):
# Test keeping data w/ (inplace) addition
@@ -2501,7 +2484,7 @@
self.assertTrue(not allclose(a, b))
b[0] = np.inf
self.assertTrue(allclose(a, b))
- # Test all close w/ masked
+ # Test allclose w/ masked
a = masked_array(a)
a[-1] = masked
self.assertTrue(allclose(a, b, masked_equal=True))
@@ -2704,7 +2687,6 @@
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is masked)
x[[1, 4]] = [10, 40]
- #self.assertTrue(x.mask is not m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is not masked)
assert_equal(x, [0, 10, 2, -1, 40])
@@ -3220,6 +3202,7 @@
assert_almost_equal(mX.trace(),
X.trace() - sum(mXdiag.mask * X.diagonal(),
axis=0))
+ assert_equal(np.trace(mX), mX.trace())
def test_dot(self):
# Tests dot on MaskedArrays.
@@ -3879,10 +3862,6 @@
# Using False as input
test = mask_or(mask, False)
assert_equal(test, mask)
- # Using True as input. Won't work, but keep it for the kicks
- # test = mask_or(mask, True)
- # control = np.array([(1, 1), (1, 1), (1, 1), (1, 1)], dtype=mtype)
- # assert_equal(test, control)
# Using another array w / the same dtype
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype)
test = mask_or(mask, other)
@@ -4258,7 +4237,7 @@
def test_default_fill_value_complex():
# regression test for Python 3, where 'unicode' was not defined
- assert default_fill_value(1 + 1j) == 1.e20 + 0.0j
+ assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j)
###############################################################################
if __name__ == "__main__":
diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py
index 84b68ba..574c652 100644
--- a/numpy/ma/tests/test_mrecords.py
+++ b/numpy/ma/tests/test_mrecords.py
@@ -15,7 +15,7 @@
from numpy import recarray
from numpy.compat import asbytes, asbytes_nested
from numpy.ma import masked, nomask
-from numpy.testing import TestCase, run_module_suite
+from numpy.testing import TestCase, run_module_suite, temppath
from numpy.core.records import (
fromrecords as recfromrecords, fromarrays as recfromarrays
)
@@ -476,7 +476,7 @@
def test_fromtextfile(self):
# Tests reading from a text file.
- fcontent = asbytes(
+ fcontent = (
"""#
'One (S)','Two (I)','Three (F)','Four (M)','Five (-)','Six (C)'
'strings',1,1.0,'mixed column',,1
@@ -484,14 +484,10 @@
'strings',3,3.0E5,3,,1
'strings',4,-1e-10,,,1
""")
- import os
- import tempfile
- (tmp_fd, tmp_fl) = tempfile.mkstemp()
- os.write(tmp_fd, fcontent)
- os.close(tmp_fd)
- mrectxt = fromtextfile(tmp_fl, delimitor=',', varnames='ABCDEFG')
- os.remove(tmp_fl)
-
+ with temppath() as path:
+ with open(path, 'w') as f:
+ f.write(fcontent)
+ mrectxt = fromtextfile(path, delimitor=',', varnames='ABCDEFG')
self.assertTrue(isinstance(mrectxt, MaskedRecords))
assert_equal(mrectxt.F, [1, 1, 1, 1])
assert_equal(mrectxt.E._mask, [1, 1, 1, 1])
diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py
index a32f358..6ce29cc 100644
--- a/numpy/ma/tests/test_old_ma.py
+++ b/numpy/ma/tests/test_old_ma.py
@@ -522,11 +522,6 @@
self.assertTrue(str(masked) == '--')
self.assertTrue(xx[1] is masked)
self.assertEqual(filled(xx[1], 0), 0)
- # don't know why these should raise an exception...
- #self.assertRaises(Exception, lambda x,y: x+y, masked, masked)
- #self.assertRaises(Exception, lambda x,y: x+y, masked, 2)
- #self.assertRaises(Exception, lambda x,y: x+y, masked, xx)
- #self.assertRaises(Exception, lambda x,y: x+y, xx, masked)
def test_testAverage1(self):
# Test of average.
@@ -681,9 +676,7 @@
'arccosh',
'arctanh',
'absolute', 'fabs', 'negative',
- # 'nonzero', 'around',
'floor', 'ceil',
- # 'sometrue', 'alltrue',
'logical_not',
'add', 'subtract', 'multiply',
'divide', 'true_divide', 'floor_divide',
@@ -754,7 +747,6 @@
self.d = (x, X, XX, m, mx, mX, mXX)
- #------------------------------------------------------
def test_trace(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
mXdiag = mX.diagonal()
@@ -825,55 +817,5 @@
return m1 is nomask
return (m1 == m2).all()
-#def timingTest():
-# for f in [testf, testinplace]:
-# for n in [1000,10000,50000]:
-# t = testta(n, f)
-# t1 = testtb(n, f)
-# t2 = testtc(n, f)
-# print f.test_name
-# print """\
-#n = %7d
-#numpy time (ms) %6.1f
-#MA maskless ratio %6.1f
-#MA masked ratio %6.1f
-#""" % (n, t*1000.0, t1/t, t2/t)
-
-#def testta(n, f):
-# x=np.arange(n) + 1.0
-# tn0 = time.time()
-# z = f(x)
-# return time.time() - tn0
-
-#def testtb(n, f):
-# x=arange(n) + 1.0
-# tn0 = time.time()
-# z = f(x)
-# return time.time() - tn0
-
-#def testtc(n, f):
-# x=arange(n) + 1.0
-# x[0] = masked
-# tn0 = time.time()
-# z = f(x)
-# return time.time() - tn0
-
-#def testf(x):
-# for i in range(25):
-# y = x **2 + 2.0 * x - 1.0
-# w = x **2 + 1.0
-# z = (y / w) ** 2
-# return z
-#testf.test_name = 'Simple arithmetic'
-
-#def testinplace(x):
-# for i in range(25):
-# y = x**2
-# y += 2.0*x
-# y -= 1.0
-# y /= x
-# return y
-#testinplace.test_name = 'Inplace operations'
-
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/matrixlib/__init__.py b/numpy/matrixlib/__init__.py
index d206961..b2b7683 100644
--- a/numpy/matrixlib/__init__.py
+++ b/numpy/matrixlib/__init__.py
@@ -7,6 +7,6 @@
__all__ = defmatrix.__all__
-from numpy.testing import Tester
-test = Tester().test
-bench = Tester().bench
+from numpy.testing.nosetester import _numpy_tester
+test = _numpy_tester().test
+bench = _numpy_tester().bench
diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py
index ffd4578..1a29fb6 100644
--- a/numpy/matrixlib/defmatrix.py
+++ b/numpy/matrixlib/defmatrix.py
@@ -233,7 +233,7 @@
Examples
--------
>>> a = np.matrix('1 2; 3 4')
- >>> print a
+ >>> print(a)
[[1 2]
[3 4]]
@@ -277,9 +277,9 @@
elif ndim == 1:
shape = (1, shape[0])
- order = False
+ order = 'C'
if (ndim == 2) and arr.flags.fortran:
- order = True
+ order = 'F'
if not (order or arr.flags.contiguous):
arr = arr.copy()
@@ -519,10 +519,12 @@
Parameters
----------
- order : {'C', 'F', 'A'}, optional
- Whether to flatten in C (row-major), Fortran (column-major) order,
- or preserve the C/Fortran ordering from `m`.
- The default is 'C'.
+ order : {'C', 'F', 'A', 'K'}, optional
+ 'C' means to flatten in row-major (C-style) order. 'F' means to
+ flatten in column-major (Fortran-style) order. 'A' means to
+ flatten in column-major order if `m` is Fortran *contiguous* in
+ memory, row-major order otherwise. 'K' means to flatten `m` in
+ the order the elements occur in memory. The default is 'C'.
Returns
-------
@@ -781,7 +783,11 @@
def argmax(self, axis=None, out=None):
"""
- Indices of the maximum values along an axis.
+ Indexes of the maximum values along an axis.
+
+ Return the indexes of the first occurrences of the maximum values
+ along the specified axis. If axis is None, the index is for the
+ flattened matrix.
Parameters
----------
@@ -851,7 +857,11 @@
def argmin(self, axis=None, out=None):
"""
- Return the indices of the minimum values along an axis.
+ Indexes of the minimum values along an axis.
+
+ Return the indexes of the first occurrences of the minimum values
+ along the specified axis. If axis is None, the index is for the
+ flattened matrix.
Parameters
----------
diff --git a/numpy/polynomial/__init__.py b/numpy/polynomial/__init__.py
index 1200d1c..82c350e 100644
--- a/numpy/polynomial/__init__.py
+++ b/numpy/polynomial/__init__.py
@@ -22,6 +22,6 @@
from .hermite_e import HermiteE
from .laguerre import Laguerre
-from numpy.testing import Tester
-test = Tester().test
-bench = Tester().bench
+from numpy.testing.nosetester import _numpy_tester
+test = _numpy_tester().test
+bench = _numpy_tester().bench
diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py
index 388267c..6c7d314 100644
--- a/numpy/random/__init__.py
+++ b/numpy/random/__init__.py
@@ -117,6 +117,6 @@
"""
return RandomState(seed=0)
-from numpy.testing import Tester
-test = Tester().test
-bench = Tester().bench
+from numpy.testing.nosetester import _numpy_tester
+test = _numpy_tester().test
+bench = _numpy_tester().bench
diff --git a/numpy/random/mtrand/distributions.c b/numpy/random/mtrand/distributions.c
index 3900417..7c44088 100644
--- a/numpy/random/mtrand/distributions.c
+++ b/numpy/random/mtrand/distributions.c
@@ -188,7 +188,7 @@
if ((a <= 1.0) && (b <= 1.0))
{
double U, V, X, Y;
- /* Use Jonk's algorithm */
+ /* Use Johnk's algorithm */
while (1)
{
diff --git a/numpy/random/mtrand/mt_compat.h b/numpy/random/mtrand/mt_compat.h
new file mode 100644
index 0000000..ab56a55
--- /dev/null
+++ b/numpy/random/mtrand/mt_compat.h
@@ -0,0 +1,68 @@
+/*
+ * This is a convenience header file providing compatibility utilities
+ * for supporting Python 2 and Python 3 in the same code base.
+ *
+ * It can be removed when Python 2.6 is dropped as PyCapsule is available
+ * in both Python 3.1+ and Python 2.7.
+ */
+
+#ifndef _MT_COMPAT_H_
+#define _MT_COMPAT_H_
+
+#include <Python.h>
+#include <numpy/npy_common.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+ * PyCObject functions adapted to PyCapsules.
+ *
+ * The main job here is to get rid of the improved error handling
+ * of PyCapsules. It's a shame...
+ */
+#if PY_VERSION_HEX >= 0x03000000
+
+static NPY_INLINE PyObject *
+NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
+{
+ PyObject *ret = PyCapsule_New(ptr, NULL, dtor);
+ if (ret == NULL) {
+ PyErr_Clear();
+ }
+ return ret;
+}
+
+static NPY_INLINE void *
+NpyCapsule_AsVoidPtr(PyObject *obj)
+{
+ void *ret = PyCapsule_GetPointer(obj, NULL);
+ if (ret == NULL) {
+ PyErr_Clear();
+ }
+ return ret;
+}
+
+#else
+
+static NPY_INLINE PyObject *
+NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *))
+{
+ return PyCObject_FromVoidPtr(ptr, dtor);
+}
+
+static NPY_INLINE void *
+NpyCapsule_AsVoidPtr(PyObject *ptr)
+{
+ return PyCObject_AsVoidPtr(ptr);
+}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _COMPAT_H_ */
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index 080591e..ff8171d 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -67,6 +67,17 @@
rk_error rk_altfill(void *buffer, size_t size, int strong,
rk_state *state) nogil
double rk_gauss(rk_state *state) nogil
+ void rk_random_uint64(npy_uint64 off, npy_uint64 rng, npy_intp cnt,
+ npy_uint64 *out, rk_state *state) nogil
+ void rk_random_uint32(npy_uint32 off, npy_uint32 rng, npy_intp cnt,
+ npy_uint32 *out, rk_state *state) nogil
+ void rk_random_uint16(npy_uint16 off, npy_uint16 rng, npy_intp cnt,
+ npy_uint16 *out, rk_state *state) nogil
+ void rk_random_uint8(npy_uint8 off, npy_uint8 rng, npy_intp cnt,
+ npy_uint8 *out, rk_state *state) nogil
+ void rk_random_bool(npy_bool off, npy_bool rng, npy_intp cnt,
+ npy_bool *out, rk_state *state) nogil
+
cdef extern from "distributions.h":
# do not need the GIL, but they do need a lock on the state !! */
@@ -131,6 +142,7 @@
import numpy as np
import operator
import warnings
+
try:
from threading import Lock
except ImportError:
@@ -232,7 +244,6 @@
cdef double *oa_data
cdef double *ob_data
cdef ndarray array "arrayObject"
- cdef npy_intp length
cdef npy_intp i
cdef broadcast multi
@@ -288,7 +299,6 @@
cdef double *ob_data
cdef double *oc_data
cdef ndarray array "arrayObject"
- cdef npy_intp length
cdef npy_intp i
cdef broadcast multi
@@ -358,7 +368,6 @@
ndarray on, ndarray op, object lock):
cdef long *array_data
cdef ndarray array "arrayObject"
- cdef npy_intp length
cdef npy_intp i
cdef double *op_data
cdef long *on_data
@@ -412,7 +421,6 @@
ndarray on, ndarray op, object lock):
cdef long *array_data
cdef ndarray array "arrayObject"
- cdef npy_intp length
cdef npy_intp i
cdef double *op_data
cdef double *on_data
@@ -469,7 +477,6 @@
cdef long *om_data
cdef long *oN_data
cdef ndarray array "arrayObject"
- cdef npy_intp length
cdef npy_intp i
cdef broadcast multi
@@ -574,6 +581,300 @@
shape = tuple(size) + (d,)
return shape
+
+# Set up dictionary of integer types and relevant functions.
+#
+# The dictionary is keyed by dtype(...).name and the values
+# are a tuple (low, high, function), where low and high are
+# the bounds of the largest half open interval `[low, high)`
+# and the function is the relevant function to call for
+# that precision.
+#
+# The functions are all the same except for changed types in
+# a few places. It would be easy to template them.
+
+def _rand_bool(low, high, size, rngstate):
+ """
+ _rand_bool(low, high, size, rngstate)
+
+ See `_rand_int32` for documentation, only the return type changes.
+
+ """
+ cdef npy_bool off, rng, buf
+ cdef npy_bool *out
+ cdef ndarray array "arrayObject"
+ cdef npy_intp cnt
+ cdef rk_state *state = <rk_state *>NpyCapsule_AsVoidPtr(rngstate)
+
+ rng = <npy_bool>(high - low)
+ off = <npy_bool>(low)
+ if size is None:
+ rk_random_bool(off, rng, 1, &buf, state)
+ return buf
+ else:
+ array = <ndarray>np.empty(size, np.bool_)
+ cnt = PyArray_SIZE(array)
+ out = <npy_bool *>PyArray_DATA(array)
+ with nogil:
+ rk_random_bool(off, rng, cnt, out, state)
+ return array
+
+
+def _rand_int8(low, high, size, rngstate):
+ """
+ _rand_int8(low, high, size, rngstate)
+
+ See `_rand_int32` for documentation, only the return type changes.
+
+ """
+ cdef npy_uint8 off, rng, buf
+ cdef npy_uint8 *out
+ cdef ndarray array "arrayObject"
+ cdef npy_intp cnt
+ cdef rk_state *state = <rk_state *>NpyCapsule_AsVoidPtr(rngstate)
+
+ rng = <npy_uint8>(high - low)
+ off = <npy_uint8>(<npy_int8>low)
+ if size is None:
+ rk_random_uint8(off, rng, 1, &buf, state)
+ return <npy_int8>buf
+ else:
+ array = <ndarray>np.empty(size, np.int8)
+ cnt = PyArray_SIZE(array)
+ out = <npy_uint8 *>PyArray_DATA(array)
+ with nogil:
+ rk_random_uint8(off, rng, cnt, out, state)
+ return array
+
+
+def _rand_int16(low, high, size, rngstate):
+ """
+ _rand_int16(low, high, size, rngstate)
+
+ See `_rand_int32` for documentation, only the return type changes.
+
+ """
+ cdef npy_uint16 off, rng, buf
+ cdef npy_uint16 *out
+ cdef ndarray array "arrayObject"
+ cdef npy_intp cnt
+ cdef rk_state *state = <rk_state *>NpyCapsule_AsVoidPtr(rngstate)
+
+ rng = <npy_uint16>(high - low)
+ off = <npy_uint16>(<npy_int16>low)
+ if size is None:
+ rk_random_uint16(off, rng, 1, &buf, state)
+ return <npy_int16>buf
+ else:
+ array = <ndarray>np.empty(size, np.int16)
+ cnt = PyArray_SIZE(array)
+ out = <npy_uint16 *>PyArray_DATA(array)
+ with nogil:
+ rk_random_uint16(off, rng, cnt, out, state)
+ return array
+
+
+def _rand_int32(low, high, size, rngstate):
+ """
+ _rand_int32(self, low, high, size, rngstate)
+
+ Return random np.int32 integers between `low` and `high`, inclusive.
+
+ Return random integers from the "discrete uniform" distribution in the
+ closed interval [`low`, `high`]. On entry the arguments are presumed
+ to have been validated for size and order for the np.int32 type.
+
+ Parameters
+ ----------
+ low : int
+ Lowest (signed) integer to be drawn from the distribution.
+ high : int
+ Highest (signed) integer to be drawn from the distribution.
+ size : int or tuple of ints
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ rngstate : encapsulated pointer to rk_state
+ The specific type depends on the python version. In Python 2 it is
+ a PyCObject, in Python 3 a PyCapsule object.
+
+ Returns
+ -------
+ out : python scalar or ndarray of np.int32
+ `size`-shaped array of random integers from the appropriate
+ distribution, or a single such random int if `size` not provided.
+
+ """
+ cdef npy_uint32 off, rng, buf
+ cdef npy_uint32 *out
+ cdef ndarray array "arrayObject"
+ cdef npy_intp cnt
+ cdef rk_state *state = <rk_state *>NpyCapsule_AsVoidPtr(rngstate)
+
+ rng = <npy_uint32>(high - low)
+ off = <npy_uint32>(<npy_int32>low)
+ if size is None:
+ rk_random_uint32(off, rng, 1, &buf, state)
+ return <npy_int32>buf
+ else:
+ array = <ndarray>np.empty(size, np.int32)
+ cnt = PyArray_SIZE(array)
+ out = <npy_uint32 *>PyArray_DATA(array)
+ with nogil:
+ rk_random_uint32(off, rng, cnt, out, state)
+ return array
+
+
+def _rand_int64(low, high, size, rngstate):
+ """
+ _rand_int64(low, high, size, rngstate)
+
+ See `_rand_int32` for documentation, only the return type changes.
+
+ """
+ cdef npy_uint64 off, rng, buf
+ cdef npy_uint64 *out
+ cdef ndarray array "arrayObject"
+ cdef npy_intp cnt
+ cdef rk_state *state = <rk_state *>NpyCapsule_AsVoidPtr(rngstate)
+
+ rng = <npy_uint64>(high - low)
+ off = <npy_uint64>(<npy_int64>low)
+ if size is None:
+ rk_random_uint64(off, rng, 1, &buf, state)
+ return <npy_int64>buf
+ else:
+ array = <ndarray>np.empty(size, np.int64)
+ cnt = PyArray_SIZE(array)
+ out = <npy_uint64 *>PyArray_DATA(array)
+ with nogil:
+ rk_random_uint64(off, rng, cnt, out, state)
+ return array
+
+def _rand_uint8(low, high, size, rngstate):
+ """
+ _rand_uint8(low, high, size, rngstate)
+
+ See `_rand_int32` for documentation, only the return type changes.
+
+ """
+ cdef npy_uint8 off, rng, buf
+ cdef npy_uint8 *out
+ cdef ndarray array "arrayObject"
+ cdef npy_intp cnt
+ cdef rk_state *state = <rk_state *>NpyCapsule_AsVoidPtr(rngstate)
+
+ rng = <npy_uint8>(high - low)
+ off = <npy_uint8>(low)
+ if size is None:
+ rk_random_uint8(off, rng, 1, &buf, state)
+ return buf
+ else:
+ array = <ndarray>np.empty(size, np.uint8)
+ cnt = PyArray_SIZE(array)
+ out = <npy_uint8 *>PyArray_DATA(array)
+ with nogil:
+ rk_random_uint8(off, rng, cnt, out, state)
+ return array
+
+
+def _rand_uint16(low, high, size, rngstate):
+ """
+ _rand_uint16(low, high, size, rngstate)
+
+ See `_rand_int32` for documentation, only the return type changes.
+
+ """
+ cdef npy_uint16 off, rng, buf
+ cdef npy_uint16 *out
+ cdef ndarray array "arrayObject"
+ cdef npy_intp cnt
+ cdef rk_state *state = <rk_state *>NpyCapsule_AsVoidPtr(rngstate)
+
+ rng = <npy_uint16>(high - low)
+ off = <npy_uint16>(low)
+ if size is None:
+ rk_random_uint16(off, rng, 1, &buf, state)
+ return buf
+ else:
+ array = <ndarray>np.empty(size, np.uint16)
+ cnt = PyArray_SIZE(array)
+ out = <npy_uint16 *>PyArray_DATA(array)
+ with nogil:
+ rk_random_uint16(off, rng, cnt, out, state)
+ return array
+
+
+def _rand_uint32(low, high, size, rngstate):
+ """
+ _rand_uint32(self, low, high, size, rngstate)
+
+ See `_rand_int32` for documentation, only the return type changes.
+
+ """
+ cdef npy_uint32 off, rng, buf
+ cdef npy_uint32 *out
+ cdef ndarray array "arrayObject"
+ cdef npy_intp cnt
+ cdef rk_state *state = <rk_state *>NpyCapsule_AsVoidPtr(rngstate)
+
+ rng = <npy_uint32>(high - low)
+ off = <npy_uint32>(low)
+ if size is None:
+ rk_random_uint32(off, rng, 1, &buf, state)
+ return <npy_uint32>buf
+ else:
+ array = <ndarray>np.empty(size, np.uint32)
+ cnt = PyArray_SIZE(array)
+ out = <npy_uint32 *>PyArray_DATA(array)
+ with nogil:
+ rk_random_uint32(off, rng, cnt, out, state)
+ return array
+
+
+def _rand_uint64(low, high, size, rngstate):
+ """
+ _rand_uint64(low, high, size, rngstate)
+
+ See `_rand_int32` for documentation, only the return type changes.
+
+ """
+ cdef npy_uint64 off, rng, buf
+ cdef npy_uint64 *out
+ cdef ndarray array "arrayObject"
+ cdef npy_intp cnt
+ cdef rk_state *state = <rk_state *>NpyCapsule_AsVoidPtr(rngstate)
+
+ rng = <npy_uint64>(high - low)
+ off = <npy_uint64>(low)
+ if size is None:
+ rk_random_uint64(off, rng, 1, &buf, state)
+ return <npy_uint64>buf
+ else:
+ array = <ndarray>np.empty(size, np.uint64)
+ cnt = PyArray_SIZE(array)
+ out = <npy_uint64 *>PyArray_DATA(array)
+ with nogil:
+ rk_random_uint64(off, rng, cnt, out, state)
+ return array
+
+# Look up table for randint functions keyed by type name. The stored data
+# is a tuple (lbnd, ubnd, func), where lbnd is the smallest value for the
+# type, ubnd is one greater than the largest value, and func is the
+# function to call.
+_randint_type = {
+ 'bool': (0, 2, _rand_bool),
+ 'int8': (-2**7, 2**7, _rand_int8),
+ 'int16': (-2**15, 2**15, _rand_int16),
+ 'int32': (-2**31, 2**31, _rand_int32),
+ 'int64': (-2**63, 2**63, _rand_int64),
+ 'uint8': (0, 2**8, _rand_uint8),
+ 'uint16': (0, 2**16, _rand_uint16),
+ 'uint32': (0, 2**32, _rand_uint32),
+ 'uint64': (0, 2**64, _rand_uint64)
+ }
+
+
cdef class RandomState:
"""
RandomState(seed=None)
@@ -618,11 +919,12 @@
"""
cdef rk_state *internal_state
cdef object lock
+ cdef object state_address
poisson_lam_max = np.iinfo('l').max - np.sqrt(np.iinfo('l').max)*10
def __init__(self, seed=None):
self.internal_state = <rk_state*>PyMem_Malloc(sizeof(rk_state))
-
+ self.state_address = NpyCapsule_FromVoidPtr(self.internal_state, NULL)
self.lock = Lock()
self.seed(seed)
@@ -885,15 +1187,15 @@
"""
return disc0_array(self.internal_state, rk_long, size, self.lock)
- def randint(self, low, high=None, size=None):
+ def randint(self, low, high=None, size=None, dtype='l'):
"""
- randint(low, high=None, size=None)
+ randint(low, high=None, size=None, dtype='l')
Return random integers from `low` (inclusive) to `high` (exclusive).
- Return random integers from the "discrete uniform" distribution in the
- "half-open" interval [`low`, `high`). If `high` is None (the default),
- then results are from [0, `low`).
+ Return random integers from the "discrete uniform" distribution of
+ the specified dtype in the "half-open" interval [`low`, `high`). If
+ `high` is None (the default), then results are from [0, `low`).
Parameters
----------
@@ -908,6 +1210,13 @@
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
+ dtype : dtype, optional
+ Desired dtype of the result. All dtypes are determined by their
+ name, i.e., 'int64', 'int', etc, so byteorder is not available
+ and a specific precision may have different C types depending
+ on the platform. The default value is 'l' (C long).
+
+ .. versionadded:: 1.11.0
Returns
-------
@@ -936,37 +1245,24 @@
[3, 2, 2, 0]])
"""
- cdef long lo, hi, rv
- cdef unsigned long diff
- cdef long *array_data
- cdef ndarray array "arrayObject"
- cdef npy_intp length
- cdef npy_intp i
-
if high is None:
- lo = 0
- hi = low
- else:
- lo = low
- hi = high
+ high = low
+ low = 0
- if lo >= hi :
+ key = np.dtype(dtype).name
+ if not key in _randint_type:
+ raise TypeError('Unsupported dtype "%s" for randint' % key)
+ lowbnd, highbnd, randfunc = _randint_type[key]
+
+ if low < lowbnd:
+ raise ValueError("low is out of bounds for %s" % (key,))
+ if high > highbnd:
+ raise ValueError("high is out of bounds for %s" % (key,))
+ if low >= high:
raise ValueError("low >= high")
- diff = <unsigned long>hi - <unsigned long>lo - 1UL
- if size is None:
- with self.lock:
- rv = lo + <long>rk_interval(diff, self. internal_state)
- return rv
- else:
- array = <ndarray>np.empty(size, int)
- length = PyArray_SIZE(array)
- array_data = <long *>PyArray_DATA(array)
- with self.lock, nogil:
- for i from 0 <= i < length:
- rv = lo + <long>rk_interval(diff, self. internal_state)
- array_data[i] = rv
- return array
+ with self.lock:
+ return randfunc(low, high - 1, size, self.state_address)
def bytes(self, npy_intp length):
"""
@@ -1280,7 +1576,7 @@
Random values in a given shape.
- Create an array of the given shape and propagate it with
+ Create an array of the given shape and populate it with
random samples from a uniform distribution
over ``[0, 1)``.
@@ -1379,11 +1675,17 @@
"""
random_integers(low, high=None, size=None)
- Return random integers between `low` and `high`, inclusive.
+ Random integers of type np.int between `low` and `high`, inclusive.
- Return random integers from the "discrete uniform" distribution in the
- closed interval [`low`, `high`]. If `high` is None (the default),
- then results are from [1, `low`].
+ Return random integers of type np.int from the "discrete uniform"
+ distribution in the closed interval [`low`, `high`]. If `high` is
+ None (the default), then results are from [1, `low`]. The np.int
+ type translates to the C long type used by Python 2 for "short"
+ integers and its precision is platform dependent.
+
+ This function has been deprecated. Use randint instead.
+
+ .. deprecated:: 1.11.0
Parameters
----------
@@ -1450,9 +1752,20 @@
"""
if high is None:
+ warnings.warn(("This function is deprecated. Please call "
+ "randint(1, {low} + 1) instead".format(low=low)),
+ DeprecationWarning)
high = low
low = 1
- return self.randint(low, high+1, size)
+
+ else:
+ warnings.warn(("This function is deprecated. Please call "
+ "randint({low}, {high} + 1) instead".format(
+ low=low, high=high)), DeprecationWarning)
+
+ return self.randint(low, high + 1, size=size, dtype='l')
+
+
# Complicated, continuous distributions:
def standard_normal(self, size=None):
@@ -4459,10 +4772,24 @@
For the first run, we threw 3 times 1, 4 times 2, etc. For the second,
we threw 2 times 1, 4 times 2, etc.
- A loaded dice is more likely to land on number 6:
+ A loaded die is more likely to land on number 6:
- >>> np.random.multinomial(100, [1/7.]*5)
- array([13, 16, 13, 16, 42])
+ >>> np.random.multinomial(100, [1/7.]*5 + [2/7.])
+ array([11, 16, 14, 17, 16, 26])
+
+ The probability inputs should be normalized. As an implementation
+ detail, the value of the last entry is ignored and assumed to take
+ up any leftover probability mass, but this should not be relied on.
+ A biased coin which has twice as much weight on one side as on the
+ other should be sampled like so:
+
+ >>> np.random.multinomial(100, [1.0 / 3, 2.0 / 3]) # RIGHT
+ array([38, 62])
+
+ not like:
+
+ >>> np.random.multinomial(100, [1.0, 2.0]) # WRONG
+ array([100, 0])
"""
cdef npy_intp d
diff --git a/numpy/random/mtrand/numpy.pxd b/numpy/random/mtrand/numpy.pxd
index c54f79c..488278d 100644
--- a/numpy/random/mtrand/numpy.pxd
+++ b/numpy/random/mtrand/numpy.pxd
@@ -2,6 +2,12 @@
cdef extern from "numpy/npy_no_deprecated_api.h": pass
+cdef extern from "mt_compat.h":
+
+ object NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(object o))
+ void * NpyCapsule_AsVoidPtr(object o)
+
+
cdef extern from "numpy/arrayobject.h":
cdef enum NPY_TYPES:
@@ -71,7 +77,17 @@
double real
double imag
+ ctypedef int npy_int
ctypedef int npy_intp
+ ctypedef int npy_int64
+ ctypedef int npy_uint64
+ ctypedef int npy_int32
+ ctypedef int npy_uint32
+ ctypedef int npy_int16
+ ctypedef int npy_uint16
+ ctypedef int npy_int8
+ ctypedef int npy_uint8
+ ctypedef int npy_bool
ctypedef extern class numpy.dtype [object PyArray_Descr]: pass
diff --git a/numpy/random/mtrand/randomkit.c b/numpy/random/mtrand/randomkit.c
index b18897e..3a95efe 100644
--- a/numpy/random/mtrand/randomkit.c
+++ b/numpy/random/mtrand/randomkit.c
@@ -70,6 +70,7 @@
#include <errno.h>
#include <limits.h>
#include <math.h>
+#include <assert.h>
#ifdef _WIN32
/*
@@ -115,6 +116,10 @@
#include <unistd.h>
#endif
+/*
+ * Do not move this include. randomkit.h must be included
+ * after windows timeb.h is included.
+ */
#include "randomkit.h"
#ifndef RK_DEV_URANDOM
@@ -207,7 +212,11 @@
#define UPPER_MASK 0x80000000UL
#define LOWER_MASK 0x7fffffffUL
-/* Slightly optimised reference implementation of the Mersenne Twister */
+/*
+ * Slightly optimised reference implementation of the Mersenne Twister
+ * Note that regardless of the precision of long, only 32 bit random
+ * integers are produced
+ */
unsigned long
rk_random(rk_state *state)
{
@@ -240,6 +249,219 @@
return y;
}
+
+/*
+ * Returns an unsigned 64 bit random integer.
+ */
+NPY_INLINE static npy_uint64
+rk_uint64(rk_state *state)
+{
+ npy_uint64 upper = (npy_uint64)rk_random(state) << 32;
+ npy_uint64 lower = (npy_uint64)rk_random(state);
+ return upper | lower;
+}
+
+
+/*
+ * Returns an unsigned 32 bit random integer.
+ */
+NPY_INLINE static npy_uint32
+rk_uint32(rk_state *state)
+{
+ return (npy_uint32)rk_random(state);
+}
+
+
+/*
+ * Fills an array with cnt random npy_uint64 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+void
+rk_random_uint64(npy_uint64 off, npy_uint64 rng, npy_intp cnt,
+ npy_uint64 *out, rk_state *state)
+{
+ npy_uint64 val, mask = rng;
+ npy_intp i;
+
+ if (rng == 0) {
+ for (i = 0; i < cnt; i++) {
+ out[i] = off;
+ }
+ return;
+ }
+
+ /* Smallest bit mask >= max */
+ mask |= mask >> 1;
+ mask |= mask >> 2;
+ mask |= mask >> 4;
+ mask |= mask >> 8;
+ mask |= mask >> 16;
+ mask |= mask >> 32;
+
+ for (i = 0; i < cnt; i++) {
+ if (rng <= 0xffffffffUL) {
+ while ((val = (rk_uint32(state) & mask)) > rng);
+ }
+ else {
+ while ((val = (rk_uint64(state) & mask)) > rng);
+ }
+ out[i] = off + val;
+ }
+}
+
+
+/*
+ * Fills an array with cnt random npy_uint32 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+void
+rk_random_uint32(npy_uint32 off, npy_uint32 rng, npy_intp cnt,
+ npy_uint32 *out, rk_state *state)
+{
+ npy_uint32 val, mask = rng;
+ npy_intp i;
+
+ if (rng == 0) {
+ for (i = 0; i < cnt; i++) {
+ out[i] = off;
+ }
+ return;
+ }
+
+ /* Smallest bit mask >= max */
+ mask |= mask >> 1;
+ mask |= mask >> 2;
+ mask |= mask >> 4;
+ mask |= mask >> 8;
+ mask |= mask >> 16;
+
+ for (i = 0; i < cnt; i++) {
+ while ((val = (rk_uint32(state) & mask)) > rng);
+ out[i] = off + val;
+ }
+}
+
+
+/*
+ * Fills an array with cnt random npy_uint16 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+void
+rk_random_uint16(npy_uint16 off, npy_uint16 rng, npy_intp cnt,
+ npy_uint16 *out, rk_state *state)
+{
+ npy_uint16 val, mask = rng;
+ npy_intp i;
+ npy_uint32 buf;
+ int bcnt = 0;
+
+ if (rng == 0) {
+ for (i = 0; i < cnt; i++) {
+ out[i] = off;
+ }
+ return;
+ }
+
+ /* Smallest bit mask >= max */
+ mask |= mask >> 1;
+ mask |= mask >> 2;
+ mask |= mask >> 4;
+ mask |= mask >> 8;
+
+ for (i = 0; i < cnt; i++) {
+ do {
+ if (!bcnt) {
+ buf = rk_uint32(state);
+ bcnt = 1;
+ }
+ else {
+ buf >>= 16;
+ bcnt--;
+ }
+ val = (npy_uint16)buf & mask;
+ } while (val > rng);
+ out[i] = off + val;
+ }
+}
+
+
+/*
+ * Fills an array with cnt random npy_uint8 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+void
+rk_random_uint8(npy_uint8 off, npy_uint8 rng, npy_intp cnt,
+ npy_uint8 *out, rk_state *state)
+{
+ npy_uint8 val, mask = rng;
+ npy_intp i;
+ npy_uint32 buf;
+ int bcnt = 0;
+
+ if (rng == 0) {
+ for (i = 0; i < cnt; i++) {
+ out[i] = off;
+ }
+ return;
+ }
+
+ /* Smallest bit mask >= max */
+ mask |= mask >> 1;
+ mask |= mask >> 2;
+ mask |= mask >> 4;
+
+ for (i = 0; i < cnt; i++) {
+ do {
+ if (!bcnt) {
+ buf = rk_uint32(state);
+ bcnt = 3;
+ }
+ else {
+ buf >>= 8;
+ bcnt--;
+ }
+ val = (npy_uint8)buf & mask;
+ } while (val > rng);
+ out[i] = off + val;
+ }
+}
+
+
+/*
+ * Fills an array with cnt random npy_bool between off and off + rng
+ * inclusive.
+ */
+void
+rk_random_bool(npy_bool off, npy_bool rng, npy_intp cnt,
+ npy_bool *out, rk_state *state)
+{
+ npy_intp i;
+ npy_uint32 buf;
+ int bcnt = 0;
+
+ if (rng == 0) {
+ for (i = 0; i < cnt; i++) {
+ out[i] = off;
+ }
+ return;
+ }
+
+ /* If we reach here rng and mask are one and off is zero */
+ assert(rng == 1 && off == 0);
+ for (i = 0; i < cnt; i++) {
+ if (!bcnt) {
+ buf = rk_uint32(state);
+ bcnt = 31;
+ }
+ else {
+ buf >>= 1;
+ bcnt--;
+ }
+ out[i] = (buf & 0x00000001) != 0;
+ }
+}
+
+
long
rk_long(rk_state *state)
{
diff --git a/numpy/random/mtrand/randomkit.h b/numpy/random/mtrand/randomkit.h
index e049488..fcdd606 100644
--- a/numpy/random/mtrand/randomkit.h
+++ b/numpy/random/mtrand/randomkit.h
@@ -56,11 +56,13 @@
* defaults to "/dev/urandom"
*/
-#include <stddef.h>
-
#ifndef _RANDOMKIT_
#define _RANDOMKIT_
+#include <stddef.h>
+#include <numpy/npy_common.h>
+
+
#define RK_STATE_LEN 624
typedef struct rk_state_
@@ -149,6 +151,41 @@
extern unsigned long rk_interval(unsigned long max, rk_state *state);
/*
+ * Fills an array with cnt random npy_uint64 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+extern void rk_random_uint64(npy_uint64 off, npy_uint64 rng, npy_intp cnt,
+ npy_uint64 *out, rk_state *state);
+
+/*
+ * Fills an array with cnt random npy_uint32 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+extern void rk_random_uint32(npy_uint32 off, npy_uint32 rng, npy_intp cnt,
+ npy_uint32 *out, rk_state *state);
+
+/*
+ * Fills an array with cnt random npy_uint16 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+extern void rk_random_uint16(npy_uint16 off, npy_uint16 rng, npy_intp cnt,
+ npy_uint16 *out, rk_state *state);
+
+/*
+ * Fills an array with cnt random npy_uint8 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+extern void rk_random_uint8(npy_uint8 off, npy_uint8 rng, npy_intp cnt,
+ npy_uint8 *out, rk_state *state);
+
+/*
+ * Fills an array with cnt random npy_bool between off and off + rng
+ * inclusive. It is assumed tha npy_bool as the same size as npy_uint8.
+ */
+extern void rk_random_bool(npy_bool off, npy_bool rng, npy_intp cnt,
+ npy_bool *out, rk_state *state);
+
+/*
* Returns a random double between 0.0 and 1.0, 1.0 excluded.
*/
extern double rk_double(rk_state *state);
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index ab7f90d..37c1876 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -7,6 +7,8 @@
from numpy import random
from numpy.compat import asbytes
import sys
+import warnings
+
class TestSeed(TestCase):
def test_scalar(self):
@@ -26,12 +28,12 @@
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
- # seed must be a unsigned 32 bit integers
+ # seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
- # seed must be a unsigned 32 bit integers
+ # seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
@@ -128,8 +130,85 @@
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
+class TestRandint(TestCase):
+
+ rfunc = np.random.randint
+
+ # valid integer/boolean types
+ itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
+ np.int32, np.uint32, np.int64, np.uint64]
+
+ def test_unsupported_type(self):
+ assert_raises(TypeError, self.rfunc, 1, dtype=np.float)
+
+ def test_bounds_checking(self):
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
+ assert_raises(ValueError, self.rfunc, lbnd - 1 , ubnd, dtype=dt)
+ assert_raises(ValueError, self.rfunc, lbnd , ubnd + 1, dtype=dt)
+ assert_raises(ValueError, self.rfunc, ubnd , lbnd, dtype=dt)
+ assert_raises(ValueError, self.rfunc, 1 , 0, dtype=dt)
+
+ def test_rng_zero_and_extremes(self):
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
+ tgt = ubnd - 1
+ assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+ tgt = lbnd
+ assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+ tgt = (lbnd + ubnd)//2
+ assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+
+ def test_in_bounds_fuzz(self):
+ # Don't use fixed seed
+ np.random.seed()
+ for dt in self.itype[1:]:
+ for ubnd in [4, 8, 16]:
+ vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
+ assert_(vals.max() < ubnd)
+ assert_(vals.min() >= 2)
+ vals = self.rfunc(0, 2, size=2**16, dtype=np.bool)
+ assert_(vals.max() < 2)
+ assert_(vals.min() >= 0)
+
+ def test_repeatability(self):
+ import hashlib
+ # We use a md5 hash of generated sequences of 1000 samples
+ # in the range [0, 6) for all but np.bool, where the range
+ # is [0, 2). Hashes are for little endian numbers.
+ tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
+ 'int16': '1b7741b80964bb190c50d541dca1cac1',
+ 'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
+ 'int64': '17db902806f448331b5a758d7d2ee672',
+ 'int8': '27dd30c4e08a797063dffac2490b0be6',
+ 'uint16': '1b7741b80964bb190c50d541dca1cac1',
+ 'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
+ 'uint64': '17db902806f448331b5a758d7d2ee672',
+ 'uint8': '27dd30c4e08a797063dffac2490b0be6'}
+
+ for dt in self.itype[1:]:
+ np.random.seed(1234)
+
+ # view as little endian for hash
+ if sys.byteorder == 'little':
+ val = self.rfunc(0, 6, size=1000, dtype=dt)
+ else:
+ val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
+
+ res = hashlib.md5(val.view(np.int8)).hexdigest()
+ assert_(tgt[np.dtype(dt).name] == res)
+
+ # bools do not depend on endianness
+ np.random.seed(1234)
+ val = self.rfunc(0, 2, size=1000, dtype=np.bool).view(np.int8)
+ res = hashlib.md5(val).hexdigest()
+ assert_(tgt[np.dtype(np.bool).name] == res)
+
+
class TestRandomDist(TestCase):
- # Make sure the random distrobution return the correct value for a
+ # Make sure the random distribution returns the correct value for a
# given seed
def setUp(self):
@@ -167,6 +246,31 @@
[-48, -66]])
np.testing.assert_array_equal(actual, desired)
+ def test_random_integers_max_int(self):
+ # Tests whether random_integers can generate the
+ # maximum allowed Python int that can be converted
+ # into a C long. Previous implementations of this
+ # method have thrown an OverflowError when attempting
+ # to generate this integer.
+ actual = np.random.random_integers(np.iinfo('l').max,
+ np.iinfo('l').max)
+ desired = np.iinfo('l').max
+ np.testing.assert_equal(actual, desired)
+
+ def test_random_integers_deprecated(self):
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+
+ # DeprecationWarning raised with high == None
+ assert_raises(DeprecationWarning,
+ np.random.random_integers,
+ np.iinfo('l').max)
+
+ # DeprecationWarning raised with high != None
+ assert_raises(DeprecationWarning,
+ np.random.random_integers,
+ np.iinfo('l').max, np.iinfo('l').max)
+
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
diff --git a/numpy/testing/__init__.py b/numpy/testing/__init__.py
index dcc02ad..625fdec 100644
--- a/numpy/testing/__init__.py
+++ b/numpy/testing/__init__.py
@@ -12,4 +12,4 @@
from . import decorators as dec
from .nosetester import run_module_suite, NoseTester as Tester
from .utils import *
-test = Tester().test
+test = nosetester._numpy_tester().test
diff --git a/numpy/testing/decorators.py b/numpy/testing/decorators.py
index df3d297..6cde298 100644
--- a/numpy/testing/decorators.py
+++ b/numpy/testing/decorators.py
@@ -48,7 +48,7 @@
@dec.slow
def test_big(self):
- print 'Big, slow test'
+ print('Big, slow test')
"""
diff --git a/numpy/testing/noseclasses.py b/numpy/testing/noseclasses.py
index 197e20b..ee9d1b4 100644
--- a/numpy/testing/noseclasses.py
+++ b/numpy/testing/noseclasses.py
@@ -34,33 +34,24 @@
module.
"""
if module is None:
- #print '_fm C1' # dbg
return True
elif inspect.isfunction(object):
- #print '_fm C2' # dbg
return module.__dict__ is object.__globals__
elif inspect.isbuiltin(object):
- #print '_fm C2-1' # dbg
return module.__name__ == object.__module__
elif inspect.isclass(object):
- #print '_fm C3' # dbg
return module.__name__ == object.__module__
elif inspect.ismethod(object):
# This one may be a bug in cython that fails to correctly set the
# __module__ attribute of methods, but since the same error is easy
# to make by extension code writers, having this safety in place
# isn't such a bad idea
- #print '_fm C3-1' # dbg
return module.__name__ == object.__self__.__class__.__module__
elif inspect.getmodule(object) is not None:
- #print '_fm C4' # dbg
- #print 'C4 mod',module,'obj',object # dbg
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
- #print '_fm C5' # dbg
return module.__name__ == object.__module__
elif isinstance(object, property):
- #print '_fm C6' # dbg
return True # [XX] no way not be sure.
else:
raise ValueError("object must be a class or function")
@@ -95,10 +86,7 @@
# Look for tests in a class's contained objects.
if isclass(obj) and self._recurse:
- #print 'RECURSE into class:',obj # dbg
for valname, val in obj.__dict__.items():
- #valname1 = '%s.%s' % (name, valname) # dbg
- #print 'N',name,'VN:',valname,'val:',str(val)[:77] # dbg
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py
index 551e630..e320583 100644
--- a/numpy/testing/nosetester.py
+++ b/numpy/testing/nosetester.py
@@ -57,7 +57,7 @@
""" Import nose only when needed.
"""
fine_nose = True
- minimum_nose_version = (0, 10, 0)
+ minimum_nose_version = (1, 0, 0)
try:
import nose
except ImportError:
@@ -158,34 +158,30 @@
- "develop" : equals ``(DeprecationWarning, RuntimeWarning)``
- "release" : equals ``()``, don't raise on any warnings.
- See Notes for more details.
-
- Notes
- -----
- The default for `raise_warnings` is
- ``(DeprecationWarning, RuntimeWarning)`` for development versions of NumPy,
- and ``()`` for released versions. The purpose of this switching behavior
- is to catch as many warnings as possible during development, but not give
- problems for packaging of released versions.
+ Default is "release".
+ depth : int, optional
+ If `package` is None, then this can be used to initialize from the
+ module of the caller of (the caller of (...)) the code that
+ initializes `NoseTester`. Default of 0 means the module of the
+ immediate caller; higher values are useful for utility routines that
+ want to initialize `NoseTester` objects on behalf of other code.
"""
- # Stuff to exclude from tests. These are from numpy.distutils
- excludes = ['f2py_ext',
- 'f2py_f90_ext',
- 'gen_ext',
- 'pyrex_ext',
- 'swig_ext']
-
- def __init__(self, package=None, raise_warnings=None):
- if raise_warnings is None and (
- not hasattr(np, '__version__') or '.dev0' in np.__version__):
- raise_warnings = "develop"
- elif raise_warnings is None:
+ def __init__(self, package=None, raise_warnings="release", depth=0):
+ # Back-compat: 'None' used to mean either "release" or "develop"
+ # depending on whether this was a release or develop version of
+ # numpy. Those semantics were fine for testing numpy, but not so
+ # helpful for downstream projects like scipy that use
+ # numpy.testing. (They want to set this based on whether *they* are a
+ # release or develop version, not whether numpy is.) So we continue to
+ # accept 'None' for back-compat, but it's now just an alias for the
+ # default "release".
+ if raise_warnings is None:
raise_warnings = "release"
package_name = None
if package is None:
- f = sys._getframe(1)
+ f = sys._getframe(1 + depth)
package_path = f.f_locals.get('__file__', None)
if package_path is None:
raise AssertionError
@@ -292,9 +288,6 @@
import_nose()
# compile argv
argv = self._test_argv(label, verbose, extra_argv)
- # bypass tests noted for exclude
- for ename in self.excludes:
- argv += ['--exclude', ename]
# our way of doing coverage
if coverage:
argv += ['--cover-package=%s' % self.package_name, '--with-coverage',
@@ -514,3 +507,10 @@
add_plugins = [Unplugger('doctest')]
return nose.run(argv=argv, addplugins=add_plugins)
+
+def _numpy_tester():
+ if hasattr(np, "__version__") and ".dev0" in np.__version__:
+ mode = "develop"
+ else:
+ mode = "release"
+ return NoseTester(raise_warnings=mode, depth=1)
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index 13aeffe..23bd491 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -2,6 +2,7 @@
import warnings
import sys
+import os
import numpy as np
from numpy.testing import (
@@ -10,7 +11,7 @@
assert_warns, assert_no_warnings, assert_allclose, assert_approx_equal,
assert_array_almost_equal_nulp, assert_array_max_ulp,
clear_and_catch_warnings, run_module_suite,
- assert_string_equal
+ assert_string_equal, assert_, tempdir, temppath,
)
import unittest
@@ -780,6 +781,40 @@
assert_warn_len_equal(my_mod, 2)
+def test_tempdir():
+ with tempdir() as tdir:
+ fpath = os.path.join(tdir, 'tmp')
+ with open(fpath, 'w'):
+ pass
+ assert_(not os.path.isdir(tdir))
+
+ raised = False
+ try:
+ with tempdir() as tdir:
+ raise ValueError()
+ except ValueError:
+ raised = True
+ assert_(raised)
+ assert_(not os.path.isdir(tdir))
+
+
+
+def test_temppath():
+ with temppath() as fpath:
+ with open(fpath, 'w') as f:
+ pass
+ assert_(not os.path.isfile(fpath))
+
+ raised = False
+ try:
+ with temppath() as fpath:
+ raise ValueError()
+ except ValueError:
+ raised = True
+ assert_(raised)
+ assert_(not os.path.isfile(fpath))
+
+
class my_cacw(clear_and_catch_warnings):
class_modules = (sys.modules[__name__],)
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index 00f7ce4..f545cd3 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -12,10 +12,11 @@
from functools import partial
import shutil
import contextlib
-from tempfile import mkdtemp
+from tempfile import mkdtemp, mkstemp
from .nosetester import import_nose
from numpy.core import float32, empty, arange, array_repr, ndarray
+from numpy.lib.utils import deprecate
if sys.version_info[0] >= 3:
from io import StringIO
@@ -30,7 +31,7 @@
'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
- 'SkipTest', 'KnownFailureException']
+ 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir']
class KnownFailureException(Exception):
@@ -122,6 +123,8 @@
raise TypeError("isinf not supported for this type")
return st
+@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. "
+ "Use numpy.random.rand instead.")
def rand(*args):
"""Returns an array of random numbers with the given shape.
@@ -1293,7 +1296,7 @@
--------
>>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)',
... times=times)
- >>> print "Time for a single execution : ", etime / times, "s"
+ >>> print("Time for a single execution : ", etime / times, "s")
Time for a single execution : 0.005 s
"""
@@ -1810,8 +1813,31 @@
"""
tmpdir = mkdtemp(*args, **kwargs)
- yield tmpdir
- shutil.rmtree(tmpdir)
+ try:
+ yield tmpdir
+ finally:
+ shutil.rmtree(tmpdir)
+
+@contextlib.contextmanager
+def temppath(*args, **kwargs):
+ """Context manager for temporary files.
+
+ Context manager that returns the path to a closed temporary file. Its
+ parameters are the same as for tempfile.mkstemp and are passed directly
+ to that function. The underlying file is removed when the context is
+ exited, so it should be closed at that time.
+
+ Windows does not allow a temporary file to be opened if it is already
+ open, so the underlying file must be closed after opening before it
+ can be opened again.
+
+ """
+ fd, path = mkstemp(*args, **kwargs)
+ os.close(fd)
+ try:
+ yield path
+ finally:
+ os.remove(path)
class clear_and_catch_warnings(warnings.catch_warnings):
diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py
index 552383d..94587e8 100644
--- a/numpy/tests/test_scripts.py
+++ b/numpy/tests/test_scripts.py
@@ -14,7 +14,7 @@
from numpy.testing.decorators import skipif
from numpy.testing import assert_
-skipif_inplace = skipif(isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')))
+is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py'))
def run_command(cmd, check_code=True):
""" Run command sequence `cmd` returning exit code, stdout, stderr
@@ -58,17 +58,18 @@
return proc.returncode, stdout, stderr
-@skipif_inplace
+@skipif(is_inplace)
def test_f2py():
# test that we can run f2py script
if sys.platform == 'win32':
f2py_cmd = r"%s\Scripts\f2py.py" % dirname(sys.executable)
code, stdout, stderr = run_command([sys.executable, f2py_cmd, '-v'])
- assert_equal(stdout.strip(), asbytes('2'))
+ success = stdout.strip() == asbytes('2')
+ assert_(success, "Warning: f2py not found in path")
else:
# unclear what f2py cmd was installed as, check plain (f2py) and
# current python version specific one (f2py3.4)
- f2py_cmds = ['f2py', 'f2py' + basename(sys.executable)[6:]]
+ f2py_cmds = ('f2py', 'f2py' + basename(sys.executable)[6:])
success = False
for f2py_cmd in f2py_cmds:
try:
@@ -76,6 +77,7 @@
assert_equal(stdout.strip(), asbytes('2'))
success = True
break
- except FileNotFoundError:
+ except:
pass
- assert_(success, "wasn't able to find f2py or %s on commandline" % f2py_cmds[1])
+ msg = "Warning: neither %s nor %s found in path" % f2py_cmds
+ assert_(success, msg)
diff --git a/pavement.py b/pavement.py
index f4b1b2b..ef6c6af 100644
--- a/pavement.py
+++ b/pavement.py
@@ -54,7 +54,7 @@
- fix bdist_mpkg: we build the same source twice -> how to make sure we use
the same underlying python for egg install in venv and for bdist_mpkg
"""
-from __future__ import division, absolute_import, print_function
+from __future__ import division, print_function
# What need to be installed to build everything on mac os x:
# - wine: python 2.6 and 2.5 + makensis + cpuid plugin + mingw, all in the PATH
diff --git a/runtests.py b/runtests.py
index 9376ae5..957cbef 100755
--- a/runtests.py
+++ b/runtests.py
@@ -24,6 +24,7 @@
$ python runtests.py --lcov-html
"""
+from __future__ import division, print_function
#
# This is a generic test runner script for projects using Numpy's test
diff --git a/setup.py b/setup.py
index 36889cc..80ddd8a 100755
--- a/setup.py
+++ b/setup.py
@@ -237,9 +237,9 @@
FULLVERSION, GIT_REVISION = get_version_info()
metadata['version'] = FULLVERSION
else:
- if (len(sys.argv) >= 2 and sys.argv[1] == 'bdist_wheel' or
+ if (len(sys.argv) >= 2 and sys.argv[1] in ('bdist_wheel', 'bdist_egg') or
sys.version_info[0] < 3 and sys.platform == "win32"):
- # bdist_wheel and the MS python2.7 VS sdk needs setuptools
+ # bdist_wheel, bdist_egg and the MS python2.7 VS sdk needs setuptools
# the latter can also be triggered by (see python issue23246)
# SET DISTUTILS_USE_SDK=1
# SET MSSdk=1
diff --git a/tools/swig/test/testFortran.py b/tools/swig/test/testFortran.py
index be4134d..b7783be 100644
--- a/tools/swig/test/testFortran.py
+++ b/tools/swig/test/testFortran.py
@@ -24,16 +24,6 @@
self.typeStr = "double"
self.typeCode = "d"
- # This test used to work before the update to avoid deprecated code. Now it
- # doesn't work. As best I can tell, it never should have worked, so I am
- # commenting it out. --WFS
- # def testSecondElementContiguous(self):
- # "Test Fortran matrix initialized from reshaped default array"
- # print >>sys.stderr, self.typeStr, "... ",
- # second = Fortran.__dict__[self.typeStr + "SecondElement"]
- # matrix = np.arange(9).reshape(3, 3).astype(self.typeCode)
- # self.assertEquals(second(matrix), 3)
-
# Test (type* IN_FARRAY2, int DIM1, int DIM2) typemap
def testSecondElementFortran(self):
"Test Fortran matrix initialized from reshaped NumPy fortranarray"
diff --git a/tools/travis-test.sh b/tools/travis-test.sh
index 795915d..d105c15 100755
--- a/tools/travis-test.sh
+++ b/tools/travis-test.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+
set -ex
# Travis legacy boxes give you 1.5 CPUs, container-based boxes give you 2 CPUs
@@ -10,54 +11,97 @@
export LD_PRELOAD=/usr/lib/libeatmydata/libeatmydata.so
fi
+# travis venv tests override python
+PYTHON=${PYTHON:-python}
+PIP=${PIP:-pip}
+
+# explicit python version needed here
+if [ -n "$USE_DEBUG" ]; then
+ PYTHON="python3-dbg"
+fi
+
+if [ -n "$PYTHON_OO" ]; then
+ PYTHON="${PYTHON} -OO"
+fi
+
# make some warnings fatal, mostly to match windows compilers
-werrors="-Werror=declaration-after-statement -Werror=vla -Werror=nonnull"
+werrors="-Werror=declaration-after-statement -Werror=vla "
+werrors+="-Werror=nonnull -Werror=pointer-arith"
setup_base()
{
# We used to use 'setup.py install' here, but that has the terrible
- # behaviour that if a copy of the package is already installed in
- # the install location, then the new copy just gets dropped on top
- # of it. Travis typically has a stable numpy release pre-installed,
- # and if we don't remove it, then we can accidentally end up
- # e.g. running old test modules that were in the stable release but
- # have been removed from master. (See gh-2765, gh-2768.) Using 'pip
- # install' also has the advantage that it tests that numpy is 'pip
- # install' compatible, see e.g. gh-2766...
-if [ -z "$USE_DEBUG" ]; then
- if [ -z "$IN_CHROOT" ]; then
- $PIP install .
+ # behaviour that if a copy of the package is already installed in the
+ # install location, then the new copy just gets dropped on top of it.
+ # Travis typically has a stable numpy release pre-installed, and if we
+ # don't remove it, then we can accidentally end up e.g. running old
+ # test modules that were in the stable release but have been removed
+ # from master. (See gh-2765, gh-2768.) Using 'pip install' also has
+ # the advantage that it tests that numpy is 'pip install' compatible,
+ # see e.g. gh-2766...
+ if [ -z "$USE_DEBUG" ]; then
+ if [ -z "$IN_CHROOT" ]; then
+ $PIP install .
+ else
+ sysflags="$($PYTHON -c "from distutils import sysconfig; \
+ print (sysconfig.get_config_var('CFLAGS'))")"
+ CFLAGS="$sysflags $werrors -Wlogical-op" $PIP install . 2>&1 | tee log
+ grep -v "_configtest" log \
+ | grep -vE "ld returned 1|no previously-included files matching" \
+ | grep -E "warning\>" \
+ | tee warnings
+ # Check for an acceptable number of warnings. Some warnings are out of
+ # our control, so adjust the number as needed. At the moment a
+ # cython generated code produces a warning about '-2147483648L', but
+ # the code seems to compile OK.
+ [[ $(wc -l < warnings) -lt 2 ]]
+ fi
else
- sysflags="$($PYTHON -c "from distutils import sysconfig; print (sysconfig.get_config_var('CFLAGS'))")"
- CFLAGS="$sysflags $werrors -Wlogical-op" $PIP install . 2>&1 | tee log
- grep -v "_configtest" log | grep -vE "ld returned 1|no previously-included files matching" | grep -E "warning\>";
- # accept a mysterious memset warning that shows with -flto
- test $(grep -v "_configtest" log | grep -vE "ld returned 1|no previously-included files matching" | grep -E "warning\>" -c) -lt 2;
+ sysflags="$($PYTHON -c "from distutils import sysconfig; \
+ print (sysconfig.get_config_var('CFLAGS'))")"
+ CFLAGS="$sysflags $werrors" $PYTHON setup.py build_ext --inplace
fi
-else
- sysflags="$($PYTHON -c "from distutils import sysconfig; print (sysconfig.get_config_var('CFLAGS'))")"
- CFLAGS="$sysflags $werrors" $PYTHON setup.py build_ext --inplace
-fi
}
setup_chroot()
{
# this can all be replaced with:
# apt-get install libpython2.7-dev:i386
- # CC="gcc -m32" LDSHARED="gcc -m32 -shared" LDFLAGS="-m32 -shared" linux32 python setup.py build
+ # CC="gcc -m32" LDSHARED="gcc -m32 -shared" LDFLAGS="-m32 -shared" \
+ # linux32 python setup.py build
# when travis updates to ubuntu 14.04
+ #
+ # Numpy may not distinguish between 64 and 32 bit atlas in the
+ # configuration stage.
DIR=$1
set -u
- sudo debootstrap --variant=buildd --include=fakeroot,build-essential --arch=$ARCH --foreign $DIST $DIR
+ sudo debootstrap --variant=buildd --include=fakeroot,build-essential \
+ --arch=$ARCH --foreign $DIST $DIR
sudo chroot $DIR ./debootstrap/debootstrap --second-stage
+
+ # put the numpy repo in the chroot directory
sudo rsync -a $TRAVIS_BUILD_DIR $DIR/
- echo deb http://archive.ubuntu.com/ubuntu/ $DIST main restricted universe multiverse | sudo tee -a $DIR/etc/apt/sources.list
- echo deb http://archive.ubuntu.com/ubuntu/ $DIST-updates main restricted universe multiverse | sudo tee -a $DIR/etc/apt/sources.list
- echo deb http://security.ubuntu.com/ubuntu $DIST-security main restricted universe multiverse | sudo tee -a $DIR/etc/apt/sources.list
+
+ # set up repos in the chroot directory for installing packages
+ echo deb http://archive.ubuntu.com/ubuntu/ \
+ $DIST main restricted universe multiverse \
+ | sudo tee -a $DIR/etc/apt/sources.list
+ echo deb http://archive.ubuntu.com/ubuntu/ \
+ $DIST-updates main restricted universe multiverse \
+ | sudo tee -a $DIR/etc/apt/sources.list
+ echo deb http://security.ubuntu.com/ubuntu \
+ $DIST-security main restricted universe multiverse \
+ | sudo tee -a $DIR/etc/apt/sources.list
+
+ # install needed packages
sudo chroot $DIR bash -c "apt-get update"
- sudo chroot $DIR bash -c "apt-get install -qq -y --force-yes eatmydata"
- echo /usr/lib/libeatmydata/libeatmydata.so | sudo tee -a $DIR/etc/ld.so.preload
- sudo chroot $DIR bash -c "apt-get install -qq -y --force-yes libatlas-dev libatlas-base-dev gfortran python3-dev python3-nose python3-pip cython3 cython"
+ sudo chroot $DIR bash -c "apt-get install -qq -y --force-yes \
+ eatmydata libatlas-dev libatlas-base-dev gfortran \
+ python-dev python-nose python-pip cython"
+
+ # faster operation with preloaded eatmydata
+ echo /usr/lib/libeatmydata/libeatmydata.so | \
+ sudo tee -a $DIR/etc/ld.so.preload
}
run_test()
@@ -70,49 +114,48 @@
# of numpy in the source directory.
mkdir -p empty
cd empty
- INSTALLDIR=$($PYTHON -c "import os; import numpy; print(os.path.dirname(numpy.__file__))")
+ INSTALLDIR=$($PYTHON -c \
+ "import os; import numpy; print(os.path.dirname(numpy.__file__))")
export PYTHONWARNINGS=default
- $PYTHON ../tools/test-installed-numpy.py # --mode=full
- # - coverage run --source=$INSTALLDIR --rcfile=../.coveragerc $(which $PYTHON) ../tools/test-installed-numpy.py
- # - coverage report --rcfile=../.coveragerc --show-missing
+ $PYTHON ../tools/test-installed-numpy.py
+ if [ -n "$USE_ASV" ]; then
+ pushd ../benchmarks
+ $PYTHON `which asv` machine --machine travis
+ $PYTHON `which asv` dev 2>&1| tee asv-output.log
+ if grep -q Traceback asv-output.log; then
+ echo "Some benchmarks have errors!"
+ exit 1
+ fi
+ popd
+ fi
}
-# travis venv tests override python
-PYTHON=${PYTHON:-python}
-PIP=${PIP:-pip}
-
-if [ -n "$USE_DEBUG" ]; then
- PYTHON=python3-dbg
-fi
-
-if [ -n "$PYTHON_OO" ]; then
- PYTHON="$PYTHON -OO"
-fi
-
export PYTHON
export PIP
if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then
# Build wheel
$PIP install wheel
+ # ensure that the pip / setuptools versions deployed inside
+ # the venv are recent enough
+ $PIP install -U virtualenv
$PYTHON setup.py bdist_wheel
# Make another virtualenv to install into
- virtualenv --python=python venv-for-wheel
+ virtualenv --python=`which $PYTHON` venv-for-wheel
. venv-for-wheel/bin/activate
# Move out of source directory to avoid finding local numpy
pushd dist
- $PIP install --pre --no-index --upgrade --find-links=. numpy
- $PIP install nose
+ pip install --pre --no-index --upgrade --find-links=. numpy
+ pip install nose
popd
run_test
-elif [ "$USE_CHROOT" != "1" ]; then
- setup_base
- run_test
elif [ -n "$USE_CHROOT" ] && [ $# -eq 0 ]; then
DIR=/chroot
setup_chroot $DIR
# run again in chroot with this time testing
- sudo linux32 chroot $DIR bash -c "cd numpy && PYTHON=python3 PIP=pip3 IN_CHROOT=1 $0 test"
+ sudo linux32 chroot $DIR bash -c \
+ "cd numpy && PYTHON=python PIP=pip IN_CHROOT=1 $0 test"
else
+ setup_base
run_test
fi
diff --git a/tools/travis-upload-wheel.sh b/tools/travis-upload-wheel.sh
new file mode 100755
index 0000000..06a8f3e
--- /dev/null
+++ b/tools/travis-upload-wheel.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+#
+set -ex
+
+export CLOUD_CONTAINER_NAME=travis-dev-wheels
+
+if [[ ( ${USE_WHEEL} == 1 ) \
+ && ( "${TRAVIS_BRANCH}" == "master" ) \
+ && ( "${TRAVIS_PULL_REQUEST}" == "false" ) ]]; then
+ pip install wheelhouse_uploader
+ python -m wheelhouse_uploader upload --local-folder \
+ ${TRAVIS_BUILD_DIR}/dist/ ${CLOUD_CONTAINER_NAME}
+fi
diff --git a/tools/win32build/build-cpucaps.py b/tools/win32build/build-cpucaps.py
index d6a9dab..0c0a32d 100644
--- a/tools/win32build/build-cpucaps.py
+++ b/tools/win32build/build-cpucaps.py
@@ -1,3 +1,5 @@
+from __future__ import division, print_function
+
import os
import subprocess
# build cpucaps.dll
diff --git a/tools/win32build/misc/x86analysis.py b/tools/win32build/misc/x86analysis.py
index 39b7cca..870e2c9 100644
--- a/tools/win32build/misc/x86analysis.py
+++ b/tools/win32build/misc/x86analysis.py
@@ -132,8 +132,6 @@
return cnt
def main():
- #parser = optparse.OptionParser()
- #parser.add_option("-f", "--filename
args = sys.argv[1:]
filename = args[0]
analyse(filename)
@@ -146,11 +144,6 @@
sse = has_sse(inst)
sse2 = has_sse2(inst)
sse3 = has_sse3(inst)
- #mmx = has_mmx(inst)
- #ppro = has_ppro(inst)
- #print sse
- #print sse2
- #print sse3
print("SSE3 inst %d" % cntset(sse3))
print("SSE2 inst %d" % cntset(sse2))
print("SSE inst %d" % cntset(sse))
@@ -158,5 +151,3 @@
if __name__ == '__main__':
main()
- #filename = "/usr/lib/sse2/libatlas.a"
- ##filename = "/usr/lib/sse2/libcblas.a"