[h5py] 02/10: New upstream version 2.7.1
Ghislain Vaillant
ghisvail-guest at moszumanska.debian.org
Sun Sep 3 16:43:30 UTC 2017
This is an automated email from the git hooks/post-receive script.
ghisvail-guest pushed a commit to branch master
in repository h5py.
commit 474eb812a13317598f4b262bf6cbeb64215813ac
Author: Ghislain Antony Vaillant <ghisvail at gmail.com>
Date: Sun Sep 3 17:45:47 2017 +0200
New upstream version 2.7.1
---
MANIFEST.in | 35 ++-
PKG-INFO | 3 +-
README.rst | 46 +---
docs/build.rst | 152 ++++++++---
docs/conf.py | 6 +-
docs/high/dataset.rst | 2 +-
docs/high/file.rst | 46 ++++
docs/high/group.rst | 9 +-
docs/swmr.rst | 18 +-
docs/whatsnew/2.7.1.rst | 38 +++
docs/whatsnew/index.rst | 1 +
h5py.egg-info/PKG-INFO | 3 +-
h5py.egg-info/SOURCES.txt | 4 +-
h5py/__init__.py | 10 +
h5py/_conv.pyx | 126 ++++++---
h5py/_errors.pyx | 2 +-
h5py/_hl/attrs.py | 40 +--
h5py/_hl/base.py | 33 ++-
h5py/_hl/compat.py | 36 +++
h5py/_hl/dataset.py | 46 ++--
h5py/_hl/files.py | 58 ++--
h5py/_hl/group.py | 62 +++--
h5py/_objects.pyx | 19 +-
h5py/h5.pyx | 2 +
h5py/h5t.pyx | 101 +++----
h5py/tests/common.py | 59 ++--
h5py/tests/hl/test_dataset_getitem.py | 18 +-
h5py/tests/hl/test_datatype.py | 107 +++++++-
h5py/tests/old/common.py | 117 --------
h5py/tests/old/test_attrs.py | 4 +-
h5py/tests/old/test_attrs_data.py | 10 +-
h5py/tests/old/test_base.py | 6 +-
h5py/tests/old/test_dataset.py | 8 +-
h5py/tests/old/test_datatype.py | 2 +-
h5py/tests/old/test_dimension_scales.py | 2 +-
h5py/tests/old/test_file.py | 31 ++-
h5py/tests/old/test_file_image.py | 2 +-
h5py/tests/old/test_group.py | 24 +-
h5py/tests/old/test_h5.py | 9 +-
h5py/tests/old/test_h5d_direct_chunk_write.py | 2 +-
h5py/tests/old/test_h5f.py | 11 +-
h5py/tests/old/test_h5p.py | 10 +-
h5py/tests/old/test_h5t.py | 2 -
h5py/tests/old/test_objects.py | 9 +-
h5py/tests/old/test_selections.py | 2 +-
h5py/tests/old/test_slicing.py | 10 +-
h5py/version.py | 4 +-
pylintrc | 377 ++++++++++++++++++++++++++
setup.cfg | 1 -
setup.py | 6 +-
setup_build.py | 4 +-
tox.ini | 93 +++++++
52 files changed, 1278 insertions(+), 550 deletions(-)
diff --git a/MANIFEST.in b/MANIFEST.in
index 3c76717..7e17ba5 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,19 +1,32 @@
-recursive-include h5py *.h *.pyx *.pxd *.pxi *.py *.txt
-exclude h5py/defs.pyx
-exclude h5py/defs.pxd
-exclude h5py/config.pxi
-recursive-include examples *.py
-recursive-include lzf *
-recursive-include windows *
-recursive-include licenses *
-include MANIFEST.in
+include ANN.rst
include api_gen.py
+include MANIFEST.in
+include pylintrc
+include README.rst
include setup_build.py
include setup_configure.py
-include ANN.rst
-include README.rst
+include tox.ini
+
recursive-include docs *
prune docs/_build
recursive-include docs_api *
prune docs_api/_build
+recursive-include examples *.py
+
+recursive-include h5py *.h *.pyx *.pxd *.pxi *.py *.txt
+exclude h5py/config.pxi
+exclude h5py/defs.pxd
+exclude h5py/defs.pyx
+
+recursive-include licenses *
+recursive-include lzf *
+recursive-include windows *
+
recursive-exclude * .DS_Store
+
+exclude ci other .github
+recursive-exclude ci *
+recursive-exclude other *
+recursive-exclude .github *
+exclude pavement.py
+exclude *.yml
diff --git a/PKG-INFO b/PKG-INFO
index a9e93a2..468d923 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: h5py
-Version: 2.7.0
+Version: 2.7.1
Summary: Read and write HDF5 files from Python
Home-page: http://www.h5py.org
Author: Andrew Collette
@@ -35,6 +35,7 @@ Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Topic :: Scientific/Engineering
Classifier: Topic :: Database
diff --git a/README.rst b/README.rst
index 948cb80..5449e8b 100644
--- a/README.rst
+++ b/README.rst
@@ -5,6 +5,7 @@
HDF5 for Python
===============
+`h5py` is a thin, pythonic wrapper around the `HDF5 <https://support.hdfgroup.org/HDF5/>`_ library, which runs on Python 2 (2.6-2.7) and Python 3 (3.3-3.6).
Websites
--------
@@ -13,43 +14,26 @@ Websites
* Source code: http://github.com/h5py/h5py
* Mailing list: https://groups.google.com/d/forum/h5py
-For advanced installation options, see http://docs.h5py.org.
+Installation
+------------
-Prerequisites
--------------
+Pre-built `h5py` can be installed either via your Python Distribution (e.g.
+`Continuum Anaconda`_, `Enthought Canopy`_) or from `PyPI`_ via `pip`_.
+`h5py` is also distributed in many Linux Distributions (e.g. Ubuntu, Fedora),
+and in the MacOS package managers `Homebrew <https://brew.sh/>`_,
+`Macports <https://www.macports.org/>`_, or `Fink <http://finkproject.org/>`_.
-You need, at a minimum:
+More detailed installation instructions, including how to install `h5py` with
+MPI support, can be found at: http://docs.h5py.org/en/latest/build.html.
-* Python 2.6, 2.7, 3.2, 3.3, or 3.4
-* NumPy 1.6.1 or later
-* The "six" package for Python 2/3 compatibility
-To build on UNIX:
-
-* HDF5 1.8.4 or later (on Windows, HDF5 comes with h5py)
-* Cython 0.19 or later
-* If using Python 2.6, unittest2 is needed to run the tests
-
-Installing on Windows
----------------------
-
-Download an installer from http://www.h5py.org and run it.
-
-Installing on UNIX
-------------------
-
-Via pip (recommended)::
-
- pip install h5py
-
-From a release tarball or Git checkout::
-
- python setup.py build
- python setup.py test # optional
- [sudo] python setup.py install
-
Reporting bugs
--------------
Open a bug at http://github.com/h5py/h5py/issues. For general questions, ask
on the list (https://groups.google.com/d/forum/h5py).
+
+.. _`Continuum Anaconda`: http://continuum.io/downloads
+.. _`Enthought Canopy`: https://www.enthought.com/products/canopy/
+.. _`PyPI`: https://pypi.org/project/h5py/
+.. _`pip`: https://pip.pypa.io/en/stable/
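
As a quick post-install check (a minimal sketch, not part of the upstream
README), a small dataset can be written and read back::

    import numpy as np
    import h5py

    # write a dataset, then read it back to confirm the install works
    with h5py.File("demo.h5", "w") as f:
        f["data"] = np.arange(10)

    with h5py.File("demo.h5", "r") as f:
        assert f["data"][:].sum() == 45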
diff --git a/docs/build.rst b/docs/build.rst
index 5e3c43e..f14f5ec 100644
--- a/docs/build.rst
+++ b/docs/build.rst
@@ -3,45 +3,94 @@
Installation
============
-Pre-configured installation (recommended)
------------------------------------------
+.. _install_recommends:
-It's strongly recommended that you use a Python distribution or package
-manager to install h5py along with its compiled dependencies. Here are some
-which are popular in the Python community:
+It is highly recommended that you use a pre-built version of h5py, either from a
+Python Distribution, an OS-specific package manager, or a pre-built wheel from
+PyPI.
-* `Anaconda <http://continuum.io/downloads>`_ or `Miniconda <http://conda.pydata.org/miniconda.html>`_ (Mac, Windows, Linux)
-* `Enthought Canopy <https://www.enthought.com/products/canopy/>`_ (Mac, Windows, Linux)
-* `PythonXY <https://code.google.com/p/pythonxy/>`_ (Windows)
+Be aware, however, that most pre-built versions lack MPI support, and that they
+are built against a specific version of HDF5. If you require MPI support or
+newer HDF5 features, you will need to build from source.
-::
+After installing h5py, you should run the tests to be sure that everything was
+installed correctly. This can be done in the Python interpreter via::
- conda install h5py # Anaconda/Miniconda
- enpkg h5py # Canopy
+ import h5py
+ h5py.run_tests()
-Or, use your package manager:
+On Python 2.6, unittest2 must be installed to run the tests.
-* apt-get (Linux/Debian, including Ubuntu)
-* yum (Linux/Red Hat, including Fedora and CentOS)
-* Homebrew (OS X)
-* pacman (Arch linux)
+.. _prebuilt_install:
+Pre-built installation (recommended)
+-----------------------------------------
-.. _source_install:
+Pre-built h5py can be installed via many Python Distributions, OS-specific
+package managers, or via h5py wheels.
-Source installation on Linux and OS X
--------------------------------------
+Python Distributions
+....................
+If you do not already use a Python Distribution, we recommend either
+`Anaconda <http://continuum.io/downloads>`_/`Miniconda <http://conda.pydata.org/miniconda.html>`_
+or
+`Enthought Canopy <https://www.enthought.com/products/canopy/>`_, both of which
+support most versions of Microsoft Windows, OSX/MacOS, and a variety of Linux
+Distributions. Installation of h5py can be done on the command line via::
-You need, via apt-get, yum or Homebrew:
+ $ conda install h5py
-* Python 2.6, 2.7, 3.3, 3.4, or 3.5 with development headers (``python-dev`` or similar)
-* HDF5 1.8.4 or newer, shared library version with development headers (``libhdf5-dev`` or similar)
-* NumPy 1.6.1 or later
+for Anaconda/Miniconda, and via::
-::
+ $ enpkg h5py
+
+for Canopy.
+
+Wheels
+......
+If you have an existing Python installation (e.g. a python.org download,
+or one that comes with your OS), then on Windows, MacOS/OSX, and
+Linux on Intel computers, pre-built h5py wheels can be installed via pip from
+PyPI::
$ pip install h5py
+Additionally, for Windows users, `Chris Gohlke provides third-party wheels
+which use Intel's MKL <http://www.lfd.uci.edu/~gohlke/pythonlibs/>`_.
+
+OS-Specific Package Managers
+............................
+On OSX/MacOS, h5py can be installed via `Homebrew <https://brew.sh/>`_,
+`Macports <https://www.macports.org/>`_, or `Fink <http://finkproject.org/>`_.
+
+The current state of h5py in various Linux Distributions can be seen at
+https://pkgs.org/download/python-h5py; where packaged, h5py can be installed
+via the distribution's package manager.
+
+As far as the h5py developers know, none of the Windows package managers (e.g.
+`Chocolatey <https://chocolatey.org/>`_, `nuget <https://www.nuget.org/>`_)
+have h5py included; however, they may assist in installing h5py's requirements
+when building from source.
+
+
+.. _source_install:
+
+Source installation
+-------------------
+To install h5py from source, you need three things installed:
+
+* A supported Python version with development headers
+* HDF5 1.8.4 or newer with development headers
+* A C compiler
+
+OS-specific instructions for installing HDF5, Python and a C compiler are in the next few
+sections.
+
+Additional Python-level requirements should be installed automatically (which
+will require an internet connection).
+
+The actual installation of h5py should be done via::
+
+ $ pip install --no-binary=h5py h5py
+
or, from a tarball or git :ref:`checkout <git_checkout>` ::
$ pip install -v .
@@ -53,24 +102,51 @@ or ::
If you are working on a development version and the underlying cython files change
it may be necessary to force a full rebuild. The easiest way to achieve this is ::
- $ git clean -xfd
+ $ git clean -xfd
from the top of your clone and then rebuilding.
-
+Source installation on OSX/MacOS
+................................
+HDF5 and Python are most likely in your package manager (e.g. `Homebrew <https://brew.sh/>`_,
+`Macports <https://www.macports.org/>`_, or `Fink <http://finkproject.org/>`_).
+Be sure to install the development headers, as sometimes they are not included
+in the main package.
+
+XCode comes with a C compiler (clang), and your package manager will likely have
+other C compilers for you to install.
+
+Source installation on Linux/Other Unix
+.......................................
+HDF5 and Python are most likely in your package manager. A C compiler almost
+certainly is; there is usually some kind of metapackage that installs the
+default build tools, e.g. ``build-essential``, which should be sufficient for
+our needs. Make sure that you have the development headers, as they are
+usually not installed by default. They can usually be found in ``python-dev``
+or similar and ``libhdf5-dev`` or similar.
Source installation on Windows
-------------------------------
-
-Installing from source on Windows is effectively impossible because of the C
-library dependencies involved.
+..............................
+Installing from source on Windows is a much more difficult prospect than
+installing from source on other OSs: not only are you likely to need to
+compile HDF5 from source, but everything must also be built with the correct
+version of Visual Studio. Additional patches to HDF5 are also needed to get
+HDF5 and Python to work together.
-If you don't want to use Anaconda, Canopy, or PythonXY, download
-a `third-party wheel from Chris Gohlke's excellent collection <http://www.lfd.uci.edu/~gohlke/pythonlibs/>`_.
+We recommend examining the AppVeyor build scripts, and using those to build and
+install HDF5 and h5py.
+.. _custom_install:
Custom installation
-------------------
+.. important:: Remember that pip installs wheels by default.
+ To perform a custom installation with pip, you should use::
+
+ $ pip install --no-binary=h5py h5py
+
+ or build from a git checkout or downloaded tarball to avoid getting
+ a pre-built version of h5py.
You can specify build options for h5py with the ``configure`` option to
setup.py. Options may be given together or separately::
@@ -90,9 +166,9 @@ You can reset to the defaults with the ``--reset`` option::
You can also configure h5py using environment variables. This is handy
when installing via ``pip``, as you don't have direct access to setup.py::
- $ HDF5_DIR=/path/to/hdf5 pip install h5py
- $ HDF5_VERSION=X.Y.Z pip install h5py
- $ CC="mpicc" HDF5_MPI="ON" HDF5_DIR=/path/to/parallel-hdf5 pip install h5py
+ $ HDF5_DIR=/path/to/hdf5 pip install --no-binary=h5py h5py
+ $ HDF5_VERSION=X.Y.Z pip install --no-binary=h5py h5py
+ $ CC="mpicc" HDF5_MPI="ON" HDF5_DIR=/path/to/parallel-hdf5 pip install --no-binary=h5py h5py
Here's a list of all the configure options currently supported:
@@ -104,6 +180,7 @@ Force HDF5 version ``--hdf5-version=X.Y.Z`` ``HDF5_VERSION=X.Y.Z``
Enable MPI mode ``--mpi`` ``HDF5_MPI=ON``
======================= =========================== ===========================
+.. _build_mpi:
Building against Parallel HDF5
------------------------------
@@ -112,7 +189,7 @@ If you just want to build with ``mpicc``, and don't care about using Parallel
HDF5 features in h5py itself::
$ export CC=mpicc
- $ python setup.py install
+ $ pip install --no-binary=h5py h5py
If you want access to the full Parallel HDF5 feature set in h5py
(:ref:`parallel`), you will further have to build in MPI mode. This can either
@@ -128,7 +205,6 @@ export ``HDF5_MPI="ON"`` beforehand::
$ export CC=mpicc
$ export HDF5_MPI="ON"
- $ python setup.py configure
- $ python setup.py build
+ $ pip install --no-binary=h5py h5py
See also :ref:`parallel`.
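
To confirm that a finished build actually has MPI mode enabled, a quick check
can be done at runtime (a hedged sketch using the public ``h5py.get_config()``
API)::

    import h5py

    print(h5py.version.info)       # build configuration summary
    print(h5py.get_config().mpi)   # True only for MPI-mode builds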
diff --git a/docs/conf.py b/docs/conf.py
index 837ec95..83c4705 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -28,10 +28,12 @@ import os
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ['sphinx.ext.intersphinx']
+extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.extlinks']
intersphinx_mapping = {'low': ('http://api.h5py.org', None)}
+extlinks = {'issue': ('https://github.com/h5py/h5py/issues/%s',
+ 'GH')}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -55,7 +57,7 @@ copyright = u'2014, Andrew Collette and contributors'
# The short X.Y version.
version = '2.7'
# The full version, including alpha/beta/rc tags.
-release = '2.7.0'
+release = '2.7.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/docs/high/dataset.rst b/docs/high/dataset.rst
index 70a5609..51c3ca0 100644
--- a/docs/high/dataset.rst
+++ b/docs/high/dataset.rst
@@ -317,7 +317,7 @@ Similarly, reading an empty attribute returns ``h5py.Empty``::
>>> obj.attrs["EmptyAttr"]
h5py.Empty(dtype="f")
-Empty datasets can be created by either by defining a ``dtype`` but no
+Empty datasets can be created either by defining a ``dtype`` but no
``shape`` in ``create_dataset``::
>>> grp.create_dataset("EmptyDataset", dtype="f")
diff --git a/docs/high/file.rst b/docs/high/file.rst
index 980c5bf..5414920 100644
--- a/docs/high/file.rst
+++ b/docs/high/file.rst
@@ -118,6 +118,52 @@ of the HDF5 library. However, once the file is closed you are free to read and
write data at the start of the file, provided your modifications don't leave
the user block region.
+
+.. _file_filenames:
+
+Filenames on different systems
+------------------------------
+
+Different operating systems (and different file systems) store filenames with
+different encodings. Additionally, in Python there are at least two different
+representations of filenames, as encoded bytes (via str on Python 2, bytes on
+Python 3) or as a unicode string (via unicode on Python 2 and str on Python 3).
+The safest bet when creating a new file is to use unicode strings on all
+systems.
+
+macOS (OSX)
+...........
+macOS is the simplest system to deal with: it only accepts UTF-8, so using
+unicode paths will just work (and should be preferred).
+
+Linux (and non-macOS Unix)
+..........................
+Unix-like systems use locale settings to determine the correct encoding to use.
+These are set via a number of different environment variables, of which ``LANG``
+and ``LC_ALL`` are the ones of most interest. Of special interest is the ``C``
+locale, which Python will interpret as only allowing ASCII, meaning unicode
+paths should be pre-encoded. This will likely change in Python 3.7 with
+https://www.python.org/dev/peps/pep-0538/, and distributions will likely
+backport the fix to earlier versions.
+
+To summarise, use unicode strings where possible, but be aware that sometimes
+using encoded bytes may be necessary to read incorrectly encoded filenames.
+
+Windows
+.......
+Windows systems have two different APIs to perform file-related operations: an
+ANSI (char, legacy) interface and a unicode (wchar) interface. HDF5 currently
+only supports the ANSI interface, which is limited in what it can encode. This
+means that it may not be possible to open certain files, and because
+:ref:`group_extlinks` do not specify their encoding, it is possible that opening an
+external link may not work. There is work being done to fix this (see
+https://github.com/h5py/h5py/issues/839), but it is likely there will need to be
+breaking changes made to allow Windows to have the same level of support for
+unicode filenames as other operating systems.
+
+The best suggestion is to use unicode strings, but to keep to ASCII for
+filenames to avoid possible breakage.
+
Reference
---------
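
A small illustration of the advice above (a brief sketch; filenames are made
up)::

    import h5py

    # preferred on all systems: pass a unicode string
    with h5py.File(u"data.h5", "w") as f:
        pass

    # escape hatch: encoded bytes, e.g. for badly encoded existing paths
    with h5py.File(b"data.h5", "r") as f:
        pass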
diff --git a/docs/high/group.rst b/docs/high/group.rst
index b522199..d2249c8 100644
--- a/docs/high/group.rst
+++ b/docs/high/group.rst
@@ -147,6 +147,13 @@ link resides.
already open. This is related to how HDF5 manages file permissions
internally.
+.. note::
+
How the filename is processed is operating system dependent; it is
recommended to read :ref:`file_filenames` to understand the potential limitations
on filenames on your operating system. Note especially that Windows is
+ particularly susceptible to problems with external links, due to possible
+ encoding errors and how filenames are structured.
Reference
---------
@@ -433,4 +440,4 @@ Link classes
.. attribute:: path
- Path to the object in the external file
\ No newline at end of file
+ Path to the object in the external file
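
For context, an external link is created by assigning an ``h5py.ExternalLink``
to a group member (a brief sketch with made-up filenames)::

    import h5py

    with h5py.File("main.h5", "w") as f:
        # resolves into other.h5 whenever f["alias"] is accessed
        f["alias"] = h5py.ExternalLink("other.h5", "/path/to/object")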
diff --git a/docs/swmr.rst b/docs/swmr.rst
index 0e5f1d0..c04bb53 100644
--- a/docs/swmr.rst
+++ b/docs/swmr.rst
@@ -5,21 +5,6 @@ Single Writer Multiple Reader (SWMR)
Starting with version 2.5.0, h5py includes support for the HDF5 SWMR features.
-The SWMR feature is not available in the current release (1.8 series) of HDF5
-library. It is planned to be released for production use in version 1.10. Until
-then it is available as an experimental prototype form from development snapshot
-version 1.9.178 on the
-`HDF Group ftp server <ftp://ftp.hdfgroup.uiuc.edu/pub/outgoing/SWMR/>`_ or the
-`HDF Group svn repository <http://svn.hdfgroup.uiuc.edu/hdf5/branches/revise_chunks>`_.
-
-.. Warning:: The SWMR feature is currently in prototype form and available for
- experimenting and testing. Please do not consider this a production
- quality feature until the HDF5 library is released as 1.10.
-
-.. Warning:: FILES PRODUCED BY THE HDF5 1.9.X DEVELOPMENT SNAPSHOTS MAY NOT BE
- READABLE BY OTHER VERSIONS OF HDF5, INCLUDING THE EXISTING 1.8
- SERIES AND ALSO 1.10 WHEN IT IS RELEASED.
-
What is SWMR?
-------------
@@ -40,7 +25,8 @@ user to implement either a file polling mechanism, inotify or any other IPC
mechanism to notify when data has been written.
The SWMR functionality requires use of the latest HDF5 file format: v110. In
-practice this implies setting the libver bounding to "latest" when opening or
+practice this implies using at least HDF5 1.10 (this can be checked via
+`h5py.info`) and setting the libver bounding to "latest" when opening or
creating the file.
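
In code, the documented pattern looks roughly like this (a hedged sketch;
filenames are made up)::

    import h5py

    # Writer: use the latest file format, then switch on SWMR mode
    f = h5py.File("swmr.h5", "w", libver="latest")
    dset = f.create_dataset("data", (0,), maxshape=(None,), dtype="f8")
    f.swmr_mode = True              # readers may open the file from now on

    # Reader (separate process): open read-only with swmr=True
    r = h5py.File("swmr.h5", "r", libver="latest", swmr=True)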
diff --git a/docs/whatsnew/2.7.1.rst b/docs/whatsnew/2.7.1.rst
new file mode 100644
index 0000000..bc919f5
--- /dev/null
+++ b/docs/whatsnew/2.7.1.rst
@@ -0,0 +1,38 @@
+What's new in h5py 2.7.1
+========================
+
+2.7.1 is the first bug-fix release in the 2.7.x series.
+
+Bug fixes
+---------
+
- :issue:`903` Fixed critical issue with cyclic gc which resulted in segfaults
- :issue:`904` Avoid unaligned access, fixing h5py on sparc64
- :issue:`883` Fixed compilation issues for some library locations
- :issue:`868` Fix deadlock between phil and the import lock in py2
- :issue:`841` Improve Windows handling of filenames
- :issue:`874` Allow close to be called on a file multiple times
- :issue:`867`, :issue:`872` Warn on loaded vs compiled HDF5 version issues
- :issue:`902` Fix overflow computing size of dataset on Windows
- :issue:`912` Do not mangle capitalization of filenames in error messages
- :issue:`842` Fix longdouble on ppc64le
- :issue:`862`, :issue:`916` Fix compound structs with variable-size members
+
+Fix h5py segfaulting on some Python 3 versions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Through an intersection of `Python Issue 30484`_ and :issue:`888`, it was
+possible for the Python Garbage Collector to activate when closing ``h5py``
+objects, which, due to how dictionaries were iterated over in Python, could
+cause a segfault. :issue:`903` prevents the Garbage Collector from activating
+during closing, while `Python Issue 30484`_ has been fixed upstream (and
+backported to Python 3.3 onwards).
+
+Avoid unaligned memory access in conversion functions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some architectures (e.g. SPARC64) do not allow unaligned memory access, which
+can come up when copying packed structs. :issue:`904` (by James Clarke) uses
+``memcpy`` to avoid said unaligned memory access.
+
+.. _`Python Issue 30484`: https://bugs.python.org/issue30484
diff --git a/docs/whatsnew/index.rst b/docs/whatsnew/index.rst
index 6cf00bd..031db7c 100644
--- a/docs/whatsnew/index.rst
+++ b/docs/whatsnew/index.rst
@@ -8,6 +8,7 @@ These document the changes between minor (or major) versions of h5py.
.. toctree::
+ 2.7.1
2.7
2.6
2.5
diff --git a/h5py.egg-info/PKG-INFO b/h5py.egg-info/PKG-INFO
index a9e93a2..468d923 100644
--- a/h5py.egg-info/PKG-INFO
+++ b/h5py.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: h5py
-Version: 2.7.0
+Version: 2.7.1
Summary: Read and write HDF5 files from Python
Home-page: http://www.h5py.org
Author: Andrew Collette
@@ -35,6 +35,7 @@ Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Topic :: Scientific/Engineering
Classifier: Topic :: Database
diff --git a/h5py.egg-info/SOURCES.txt b/h5py.egg-info/SOURCES.txt
index 769f9d3..c1f484c 100644
--- a/h5py.egg-info/SOURCES.txt
+++ b/h5py.egg-info/SOURCES.txt
@@ -2,9 +2,11 @@ ANN.rst
MANIFEST.in
README.rst
api_gen.py
+pylintrc
setup.py
setup_build.py
setup_configure.py
+tox.ini
docs/Makefile
docs/build.rst
docs/conf.py
@@ -31,6 +33,7 @@ docs/whatsnew/2.3.rst
docs/whatsnew/2.4.rst
docs/whatsnew/2.5.rst
docs/whatsnew/2.6.rst
+docs/whatsnew/2.7.1.rst
docs/whatsnew/2.7.rst
docs/whatsnew/index.rst
docs_api/Makefile
@@ -137,7 +140,6 @@ h5py/tests/hl/test_dims_dimensionproxy.py
h5py/tests/hl/test_file.py
h5py/tests/hl/test_threads.py
h5py/tests/old/__init__.py
-h5py/tests/old/common.py
h5py/tests/old/test_attrs.py
h5py/tests/old/test_attrs_data.py
h5py/tests/old/test_base.py
diff --git a/h5py/__init__.py b/h5py/__init__.py
index 0f5acc4..08e95b4 100644
--- a/h5py/__init__.py
+++ b/h5py/__init__.py
@@ -14,6 +14,8 @@
from __future__ import absolute_import
+from warnings import warn as _warn
+
# --- Library setup -----------------------------------------------------------
@@ -59,6 +61,14 @@ from .version import version as __version__
from .tests import run_tests
+if version.hdf5_version_tuple != version.hdf5_built_version_tuple:
+ _warn(("h5py is running against HDF5 {0} when it was built against {1}, "
+ "this may cause problems").format(
+ '{0}.{1}.{2}'.format(*version.hdf5_version_tuple),
+ '{0}.{1}.{2}'.format(*version.hdf5_built_version_tuple)
+ ))
+
+
def enable_ipython_completer():
""" Call this from an interactive IPython session to enable tab-completion
of group and attribute names.
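
The two tuples compared by this new warning are public, so the same check can
be reproduced by hand (a small sketch)::

    import h5py

    print(h5py.version.hdf5_version_tuple)        # HDF5 loaded at runtime
    print(h5py.version.hdf5_built_version_tuple)  # HDF5 h5py was built against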
diff --git a/h5py/_conv.pyx b/h5py/_conv.pyx
index b60bcc7..6b8faca 100644
--- a/h5py/_conv.pyx
+++ b/h5py/_conv.pyx
@@ -162,30 +162,35 @@ cdef int conv_vlen2str(void* ipt, void* opt, void* bkg, void* priv) except -1:
cdef char** buf_cstring = <char**>ipt
cdef PyObject* temp_obj = NULL
cdef conv_size_t *sizes = <conv_size_t*>priv
+ cdef PyObject* bkg_obj0
+ cdef char* buf_cstring0
+
+ memcpy(&bkg_obj0, bkg_obj, sizeof(bkg_obj0))
+ memcpy(&buf_cstring0, buf_cstring, sizeof(buf_cstring0))
# When reading we identify H5T_CSET_ASCII as a byte string and
# H5T_CSET_UTF8 as a utf8-encoded unicode string
if sizes.cset == H5T_CSET_ASCII:
- if buf_cstring[0] == NULL:
+ if buf_cstring0 == NULL:
temp_obj = PyBytes_FromString("")
else:
- temp_obj = PyBytes_FromString(buf_cstring[0])
+ temp_obj = PyBytes_FromString(buf_cstring0)
elif sizes.cset == H5T_CSET_UTF8:
- if buf_cstring[0] == NULL:
+ if buf_cstring0 == NULL:
temp_obj = PyUnicode_DecodeUTF8("", 0, NULL)
else:
- temp_obj = PyUnicode_DecodeUTF8(buf_cstring[0], strlen(buf_cstring[0]), NULL)
+ temp_obj = PyUnicode_DecodeUTF8(buf_cstring0, strlen(buf_cstring0), NULL)
# Since all data conversions are by definition in-place, it
# is our responsibility to free the memory used by the vlens.
- free(buf_cstring[0])
+ free(buf_cstring0)
# HDF5 will eventuallly overwrite this target location, so we
# make sure to decref the object there.
- Py_XDECREF(bkg_obj[0])
+ Py_XDECREF(bkg_obj0)
# Write the new string object to the buffer in-place
- buf_obj[0] = temp_obj
+ memcpy(buf_obj, &temp_obj, sizeof(temp_obj));
return 0
@@ -201,16 +206,21 @@ cdef int conv_str2vlen(void* ipt, void* opt, void* bkg, void* priv) except -1:
cdef char* temp_string = NULL
cdef size_t temp_string_len = 0 # Not including null term
+ cdef PyObject* buf_obj0
+ cdef char* buf_cstring0
+
+ memcpy(&buf_obj0, buf_obj, sizeof(buf_obj0))
+
try:
- if buf_obj[0] == NULL or buf_obj[0] == Py_None:
+ if buf_obj0 == NULL or buf_obj0 == Py_None:
temp_string = ""
temp_string_len = 0
else:
- if PyBytes_CheckExact(buf_obj[0]):
+ if PyBytes_CheckExact(buf_obj0):
# Input is a byte string. If we're using CSET_UTF8, make sure
# it's valid UTF-8. Otherwise just store it.
- temp_object = buf_obj[0]
+ temp_object = buf_obj0
Py_INCREF(temp_object)
if sizes.cset == H5T_CSET_UTF8:
try:
@@ -223,8 +233,8 @@ cdef int conv_str2vlen(void* ipt, void* opt, void* bkg, void* priv) except -1:
# We are given a Unicode object. Encode it to utf-8 regardless of
# the HDF5 character set.
- elif PyUnicode_CheckExact(buf_obj[0]):
- temp_object = buf_obj[0]
+ elif PyUnicode_CheckExact(buf_obj0):
+ temp_object = buf_obj0
Py_INCREF(temp_object)
temp_encoded = PyUnicode_AsUTF8String(temp_object)
temp_string = PyBytes_AsString(temp_encoded)
@@ -232,11 +242,11 @@ cdef int conv_str2vlen(void* ipt, void* opt, void* bkg, void* priv) except -1:
else:
if sizes.cset == H5T_CSET_ASCII:
- temp_object = PyObject_Str(buf_obj[0])
+ temp_object = PyObject_Str(buf_obj0)
temp_string = PyBytes_AsString(temp_object)
temp_string_len = PyBytes_Size(temp_object)
elif sizes.cset == H5T_CSET_UTF8:
- temp_object = PyObject_Str(buf_obj[0])
+ temp_object = PyObject_Str(buf_obj0)
Py_INCREF(temp_object)
temp_encoded = PyUnicode_AsUTF8String(temp_object)
Py_INCREF(temp_encoded)
@@ -248,8 +258,9 @@ cdef int conv_str2vlen(void* ipt, void* opt, void* bkg, void* priv) except -1:
if strlen(temp_string) != temp_string_len:
raise ValueError("VLEN strings do not support embedded NULLs")
- buf_cstring[0] = <char*>malloc(temp_string_len+1)
- memcpy(buf_cstring[0], temp_string, temp_string_len+1)
+ buf_cstring0 = <char*>malloc(temp_string_len+1)
+ memcpy(buf_cstring0, temp_string, temp_string_len+1)
+ memcpy(buf_cstring, &buf_cstring0, sizeof(buf_cstring0));
return 0
finally:
@@ -294,9 +305,12 @@ cdef int conv_vlen2fixed(void* ipt, void* opt, void* bkg, void* priv) except -1:
cdef char* temp_string = NULL
cdef size_t temp_string_len = 0 # Without null term
cdef conv_size_t *sizes = <conv_size_t*>priv
+ cdef char* buf_vlen0
- if buf_vlen[0] != NULL:
- temp_string = buf_vlen[0]
+ memcpy(&buf_vlen0, buf_vlen, sizeof(buf_vlen0));
+
+ if buf_vlen0 != NULL:
+ temp_string = buf_vlen0
temp_string_len = strlen(temp_string)
if temp_string_len <= sizes[0].dst_size:
@@ -322,7 +336,7 @@ cdef int conv_fixed2vlen(void* ipt, void* opt, void* bkg, void* priv) except -1:
memcpy(temp_string, buf_fixed, sizes[0].src_size)
temp_string[sizes[0].src_size] = c'\0'
- buf_vlen[0] = temp_string
+ memcpy(buf_vlen, &temp_string, sizeof(temp_string));
return 0
@@ -337,14 +351,14 @@ cdef int conv_objref2pyref(void* ipt, void* opt, void* bkg, void* priv) except -
cdef Reference ref = Reference()
cdef PyObject* ref_ptr = NULL
- ref.ref.obj_ref = buf_ref[0]
+ memcpy(&ref.ref.obj_ref, buf_ref, sizeof(ref.ref.obj_ref))
ref.typecode = H5R_OBJECT
ref_ptr = <PyObject*>ref
Py_INCREF(ref_ptr) # because Cython discards its reference when the
# function exits
- buf_obj[0] = ref_ptr
+ memcpy(buf_obj, &ref_ptr, sizeof(ref_ptr))
return 0
@@ -356,12 +370,16 @@ cdef int conv_pyref2objref(void* ipt, void* opt, void* bkg, void* priv) except -
cdef object obj
cdef Reference ref
- if buf_obj[0] != NULL and buf_obj[0] != Py_None:
- obj = <object>(buf_obj[0])
+ cdef PyObject* buf_obj0
+
+ memcpy(&buf_obj0, buf_obj, sizeof(buf_obj0));
+
+ if buf_obj0 != NULL and buf_obj0 != Py_None:
+ obj = <object>(buf_obj0)
if not isinstance(obj, Reference):
raise TypeError("Can't convert incompatible object to HDF5 object reference")
- ref = <Reference>(buf_obj[0])
- buf_ref[0] = ref.ref.obj_ref
+ ref = <Reference>(buf_obj0)
+ memcpy(buf_ref, &ref.ref.obj_ref, sizeof(ref.ref.obj_ref))
else:
memset(buf_ref, c'\0', sizeof(hobj_ref_t))
@@ -376,6 +394,9 @@ cdef int conv_regref2pyref(void* ipt, void* opt, void* bkg, void* priv) except -
cdef RegionReference ref = RegionReference()
cdef PyObject* ref_ptr = NULL
+ cdef PyObject* bkg_obj0
+
+ memcpy(&bkg_obj0, bkg_obj, sizeof(bkg_obj0));
memcpy(ref.ref.reg_ref, buf_ref, sizeof(hdset_reg_ref_t))
ref.typecode = H5R_DATASET_REGION
@@ -384,8 +405,8 @@ cdef int conv_regref2pyref(void* ipt, void* opt, void* bkg, void* priv) except -
Py_INCREF(ref_ptr) # because Cython discards its reference when the
# function exits
- Py_XDECREF(bkg_obj[0])
- buf_obj[0] = ref_ptr
+ Py_XDECREF(bkg_obj0)
+ memcpy(buf_obj, &ref_ptr, sizeof(ref_ptr))
return 0
@@ -397,11 +418,15 @@ cdef int conv_pyref2regref(void* ipt, void* opt, void* bkg, void* priv) except -
cdef object obj
cdef RegionReference ref
- if buf_obj[0] != NULL and buf_obj[0] != Py_None:
- obj = <object>(buf_obj[0])
+ cdef PyObject* buf_obj0
+
+ memcpy(&buf_obj0, buf_obj, sizeof(buf_obj0));
+
+ if buf_obj0 != NULL and buf_obj0 != Py_None:
+ obj = <object>(buf_obj0)
if not isinstance(obj, RegionReference):
raise TypeError("Can't convert incompatible object to HDF5 region reference")
- ref = <RegionReference>(buf_obj[0])
+ ref = <RegionReference>(buf_obj0)
memcpy(buf_ref, ref.ref.reg_ref, sizeof(hdset_reg_ref_t))
else:
memset(buf_ref, c'\0', sizeof(hdset_reg_ref_t))
@@ -657,22 +682,27 @@ cdef int conv_vlen2ndarray(void* ipt, void* opt, np.dtype elem_dtype,
cdef np.npy_intp dims[1]
cdef void* data
cdef np.ndarray ndarray
+ cdef PyObject* ndarray_obj
+ cdef vlen_t in_vlen0
- dims[0] = in_vlen[0].len
- data = in_vlen[0].ptr
+ memcpy(&in_vlen0, in_vlen, sizeof(in_vlen0))
+
+ dims[0] = in_vlen0.len
+ data = in_vlen0.ptr
if outtype.get_size() > intype.get_size():
- data = realloc(data, outtype.get_size() * in_vlen[0].len)
- H5Tconvert(intype.id, outtype.id, in_vlen[0].len, data, NULL, H5P_DEFAULT)
+ data = realloc(data, outtype.get_size() * in_vlen0.len)
+ H5Tconvert(intype.id, outtype.id, in_vlen0.len, data, NULL, H5P_DEFAULT)
Py_INCREF(<PyObject*>elem_dtype)
ndarray = PyArray_NewFromDescr(&PyArray_Type, elem_dtype, 1,
dims, NULL, data, flags, <object>NULL)
ndarray.flags |= np.NPY_OWNDATA
- Py_INCREF(<PyObject*>ndarray)
+ ndarray_obj = <PyObject*>ndarray
+ Py_INCREF(ndarray_obj)
# Write the new object to the buffer in-place
- in_vlen[0].ptr = NULL
- buf_obj[0] = <PyObject*>ndarray
+ in_vlen0.ptr = NULL
+ memcpy(buf_obj, &ndarray_obj, sizeof(ndarray_obj))
return 0
@@ -687,6 +717,7 @@ cdef herr_t ndarray2vlen(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata,
cdef np.dtype dt
cdef int i
cdef PyObject **pdata = <PyObject **> buf_i
+ cdef PyObject *pdata_elem
cdef char* buf = <char*>buf_i
@@ -697,9 +728,10 @@ cdef herr_t ndarray2vlen(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata,
return -2
supertype = typewrap(H5Tget_super(dst_id))
for i from 0 <= i < nl:
- if supertype != py_create((<np.ndarray> pdata[i]).dtype, 1):
+ memcpy(&pdata_elem, pdata+i, sizeof(pdata_elem))
+ if supertype != py_create((<np.ndarray> pdata_elem).dtype, 1):
return -2
- if (<np.ndarray> pdata[i]).ndim != 1:
+ if (<np.ndarray> pdata_elem).ndim != 1:
return -2
elif command == H5T_CONV_FREE:
@@ -709,7 +741,8 @@ cdef herr_t ndarray2vlen(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata,
elif command == H5T_CONV_CONV:
# need to pass element dtype to converter
- supertype = py_create((<np.ndarray> pdata[0]).dtype)
+ memcpy(&pdata_elem, pdata, sizeof(pdata_elem))
+ supertype = py_create((<np.ndarray> pdata_elem).dtype)
outtype = typewrap(H5Tget_super(dst_id))
if buf_stride == 0:
@@ -751,8 +784,13 @@ cdef int conv_ndarray2vlen(void* ipt, void* opt,
cdef int flags = np.NPY_WRITEABLE | np.NPY_C_CONTIGUOUS
cdef np.npy_intp dims[1]
cdef void* data
- cdef np.ndarray ndarray = <np.ndarray> buf_obj[0]
- cdef size_t len = ndarray.shape[0]
+ cdef np.ndarray ndarray
+ cdef size_t len
+ cdef PyObject* buf_obj0
+
+ memcpy(&buf_obj0, buf_obj, sizeof(buf_obj0))
+ ndarray = <np.ndarray> buf_obj0
+ len = ndarray.shape[0]
if outtype.get_size() > intype.get_size():
data = malloc(outtype.get_size() * len)
@@ -761,8 +799,8 @@ cdef int conv_ndarray2vlen(void* ipt, void* opt,
memcpy(data, ndarray.data, intype.get_size() * len)
H5Tconvert(intype.id, outtype.id, len, data, NULL, H5P_DEFAULT)
- in_vlen[0].len = len
- in_vlen[0].ptr = data
+ memcpy(&in_vlen[0].len, &len, sizeof(len))
+ memcpy(&in_vlen[0].ptr, &data, sizeof(data))
return 0
diff --git a/h5py/_errors.pyx b/h5py/_errors.pyx
index b5c6aea..e98ebfe 100644
--- a/h5py/_errors.pyx
+++ b/h5py/_errors.pyx
@@ -120,7 +120,7 @@ cdef int set_exception() except -1:
if desc_bottom is NULL:
raise RuntimeError("Failed to extract bottom-level error description")
- msg = ("%s (%s)" % (desc.decode('utf-8').capitalize(), desc_bottom.decode('utf-8').capitalize())).encode('utf-8')
+ msg = ("%s (%s)" % (desc.decode('utf-8').capitalize(), desc_bottom.decode('utf-8'))).encode('utf-8')
# Finally, set the exception. We do this with the Python C function
# so that the traceback doesn't point here.
diff --git a/h5py/_hl/attrs.py b/h5py/_hl/attrs.py
index b15b4d0..dbff313 100644
--- a/h5py/_hl/attrs.py
+++ b/h5py/_hl/attrs.py
@@ -9,7 +9,7 @@
"""
Implements high-level operations for attributes.
-
+
Provides the AttributeManager class, available on high-level objects
as <obj>.attrs.
"""
@@ -17,6 +17,8 @@
from __future__ import absolute_import
import numpy
+import uuid
+
from .. import h5s, h5t, h5a
from . import base
@@ -62,7 +64,7 @@ class AttributeManager(base.MutableMappingHDF5, base.CommonStateObject):
dtype = readtime_dtype(attr.dtype, [])
shape = attr.shape
-
+
# Do this first, as we'll be fiddling with the dtype for top-level
# array types
htype = h5t.py_create(dtype)
@@ -74,7 +76,7 @@ class AttributeManager(base.MutableMappingHDF5, base.CommonStateObject):
subdtype, subshape = dtype.subdtype
shape = attr.shape + subshape # (5, 3)
dtype = subdtype # 'f'
-
+
arr = numpy.ndarray(shape, dtype=dtype, order='C')
attr.read(arr, mtype=htype)
@@ -112,21 +114,19 @@ class AttributeManager(base.MutableMappingHDF5, base.CommonStateObject):
are given.
"""
- import uuid
-
with phil:
-
+
# First, make sure we have a NumPy array. We leave the data
# type conversion for HDF5 to perform.
if not isinstance(data, Empty):
data = numpy.asarray(data, order='C')
-
+
if shape is None:
shape = data.shape
-
+
use_htype = None # If a committed type is given, we must use it
# in the call to h5a.create.
-
+
if isinstance(dtype, Datatype):
use_htype = dtype.id
dtype = dtype.dtype
@@ -134,16 +134,16 @@ class AttributeManager(base.MutableMappingHDF5, base.CommonStateObject):
dtype = data.dtype
else:
dtype = numpy.dtype(dtype) # In case a string, e.g. 'i8' is passed
-
+
original_dtype = dtype # We'll need this for top-level array types
# Where a top-level array type is requested, we have to do some
# fiddling around to present the data as a smaller array of
- # subarrays.
+ # subarrays.
if dtype.subdtype is not None:
-
+
subdtype, subshape = dtype.subdtype
-
+
# Make sure the subshape matches the last N axes' sizes.
if shape[-len(subshape):] != subshape:
raise ValueError("Array dtype shape %s is incompatible with data shape %s" % (subshape, shape))
@@ -151,11 +151,11 @@ class AttributeManager(base.MutableMappingHDF5, base.CommonStateObject):
# New "advertised" shape and dtype
shape = shape[0:len(shape)-len(subshape)]
dtype = subdtype
-
+
# Not an array type; make sure to check the number of elements
# is compatible, and reshape if needed.
else:
-
+
if shape is not None and numpy.product(shape) != numpy.product(data.shape):
raise ValueError("Shape of new attribute conflicts with shape of data")
@@ -165,7 +165,7 @@ class AttributeManager(base.MutableMappingHDF5, base.CommonStateObject):
# We need this to handle special string types.
if not isinstance(data, Empty):
data = numpy.asarray(data, dtype=dtype)
-
+
# Make HDF5 datatype and dataspace for the H5A calls
if use_htype is None:
htype = h5t.py_create(original_dtype, logical=True)
@@ -173,7 +173,7 @@ class AttributeManager(base.MutableMappingHDF5, base.CommonStateObject):
else:
htype = use_htype
htype2 = None
-
+
if isinstance(data, Empty):
space = h5s.create(h5s.NULL)
else:
@@ -181,7 +181,7 @@ class AttributeManager(base.MutableMappingHDF5, base.CommonStateObject):
# This mess exists because you can't overwrite attributes in HDF5.
# So we write to a temporary attribute first, and then rename.
-
+
tempname = uuid.uuid4().hex
try:
@@ -206,7 +206,7 @@ class AttributeManager(base.MutableMappingHDF5, base.CommonStateObject):
attr.close()
h5a.delete(self._id, self._e(tempname))
raise
-
+
def modify(self, name, value):
""" Change the value of an attribute while preserving its type.
@@ -242,7 +242,7 @@ class AttributeManager(base.MutableMappingHDF5, base.CommonStateObject):
def __iter__(self):
""" Iterate over the names of attributes. """
with phil:
-
+
attrlist = []
def iter_cb(name, *args):
""" Callback to gather attribute names """
diff --git a/h5py/_hl/base.py b/h5py/_hl/base.py
index d88ff7e..f2a8e9b 100644
--- a/h5py/_hl/base.py
+++ b/h5py/_hl/base.py
@@ -16,16 +16,15 @@ from __future__ import absolute_import
import posixpath
import os
import six
-from collections import (Mapping, MutableMapping, KeysView,
+from collections import (Mapping, MutableMapping, KeysView,
ValuesView, ItemsView)
-from .compat import fspath
-from .compat import fsencode
+from .compat import fspath, filename_encode
from .. import h5d, h5i, h5r, h5p, h5f, h5t, h5s
# The high-level interface is serialized; every public API function & method
-# is wrapped in a lock. We re-use the low-level lock because (1) it's fast,
+# is wrapped in a lock. We re-use the low-level lock because (1) it's fast,
# and (2) it eliminates the possibility of deadlocks due to out-of-order
# lock acquisition.
from .._objects import phil, with_phil
@@ -37,7 +36,7 @@ def is_hdf5(fname):
fname = os.path.abspath(fspath(fname))
if os.path.isfile(fname):
- return h5f.is_hdf5(fsencode(fname))
+ return h5f.is_hdf5(filename_encode(fname))
return False
@@ -180,13 +179,13 @@ class _RegionProxy(object):
def __init__(self, obj):
self.id = obj.id
- @with_phil
def __getitem__(self, args):
if not isinstance(self.id, h5d.DatasetID):
raise TypeError("Region references can only be made to datasets")
from . import selections
- selection = selections.select(self.id.shape, args, dsid=self.id)
- return h5r.create(self.id, b'.', h5r.DATASET_REGION, selection.id)
+ with phil:
+ selection = selections.select(self.id.shape, args, dsid=self.id)
+ return h5r.create(self.id, b'.', h5r.DATASET_REGION, selection.id)
def shape(self, ref):
""" Get the shape of the target dataspace referred to by *ref*. """
@@ -197,8 +196,8 @@ class _RegionProxy(object):
def selection(self, ref):
""" Get the shape of the target dataspace selection referred to by *ref*
"""
+ from . import selections
with phil:
- from . import selections
sid = h5r.get_region(ref, self.id)
return selections.guess_shape(sid)
@@ -210,11 +209,11 @@ class HLObject(CommonStateObject):
"""
@property
- @with_phil
def file(self):
""" Return a File instance associated with this object """
from . import files
- return files.File(self.id)
+ with phil:
+ return files.File(self.id)
@property
@with_phil
@@ -261,11 +260,11 @@ class HLObject(CommonStateObject):
return _RegionProxy(self)
@property
- @with_phil
def attrs(self):
""" Attributes attached to this object """
from . import attrs
- return attrs.AttributeManager(self)
+ with phil:
+ return attrs.AttributeManager(self)
@with_phil
def __init__(self, oid):
@@ -306,11 +305,11 @@ class ValuesViewHDF5(ValuesView):
"""
Wraps e.g. a Group or AttributeManager to provide a value view.
-
+
Note that __contains__ will have poor performance as it has
to scan all the links or attributes.
"""
-
+
def __contains__(self, value):
with phil:
for key in self._mapping:
@@ -329,7 +328,7 @@ class ItemsViewHDF5(ItemsView):
"""
Wraps e.g. a Group or AttributeManager to provide an items view.
"""
-
+
def __contains__(self, item):
with phil:
key, val = item
@@ -348,7 +347,7 @@ class MappingHDF5(Mapping):
"""
Wraps a Group, AttributeManager or DimensionManager object to provide
an immutable mapping interface.
-
+
We don't inherit directly from MutableMapping because certain
subclasses, for example DimensionManager, are read-only.
"""
diff --git a/h5py/_hl/compat.py b/h5py/_hl/compat.py
index fb7d5d6..6b60392 100644
--- a/h5py/_hl/compat.py
+++ b/h5py/_hl/compat.py
@@ -4,6 +4,8 @@ Compatibility module for high-level h5py
import sys
import six
+WINDOWS_ENCODING = "mbcs"
+
try:
from os import fspath
@@ -96,3 +98,37 @@ try:
from os import fsdecode
except ImportError:
fsdecode = _fsdecode
+
+
+def filename_encode(filename):
+ """
+ Encode filename for use in the HDF5 library.
+
+ Due to how HDF5 handles filenames on different systems, this should be
+ called on any filenames passed to the HDF5 library. See the documentation on
+ filenames in h5py for more information.
+ """
+ filename = fspath(filename)
+ if sys.platform == "win32":
+ if isinstance(filename, six.text_type):
+ return filename.encode(WINDOWS_ENCODING, "strict")
+ return filename
+ return fsencode(filename)
+
+
+def filename_decode(filename):
+ """
+ Decode filename used by HDF5 library.
+
+ Due to how HDF5 handles filenames on different systems, this should be
+ called on any filenames passed from the HDF5 library. See the documentation
+ on filenames in h5py for more information.
+ """
+ if sys.platform == "win32":
+ if isinstance(filename, six.binary_type):
+ return filename.decode(WINDOWS_ENCODING, "strict")
+ elif isinstance(filename, six.text_type):
+ return filename
+ else:
+ raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
+ return fsdecode(filename)
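
These helpers are internal, but on non-Windows platforms they defer to
``os.fsencode``/``os.fsdecode``, so a round-trip sketch looks like::

    from h5py._hl.compat import filename_encode, filename_decode

    raw = filename_encode(u"data.h5")          # bytes for the HDF5 C library
    assert filename_decode(raw) == u"data.h5"  # back to the text type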
diff --git a/h5py/_hl/dataset.py b/h5py/_hl/dataset.py
index ee15759..bff42b3 100644
--- a/h5py/_hl/dataset.py
+++ b/h5py/_hl/dataset.py
@@ -16,6 +16,8 @@ from __future__ import absolute_import
import posixpath as pp
import sys
+from threading import local
+
import six
from six.moves import xrange # pylint: disable=redefined-builtin
@@ -31,6 +33,7 @@ from .datatype import Datatype
_LEGACY_GZIP_COMPRESSION_VALS = frozenset(range(10))
MPI = h5.get_config().mpi
+
def readtime_dtype(basetype, names):
""" Make a NumPy dtype appropriate for reading """
@@ -147,7 +150,7 @@ class AstypeContext(object):
"""
Context manager which allows changing the type read from a dataset.
"""
-
+
def __init__(self, dset, dtype):
self._dset = dset
self._dtype = numpy.dtype(dtype)
@@ -160,6 +163,7 @@ class AstypeContext(object):
# pylint: disable=protected-access
self._dset._local.astype = None
+
if MPI:
class CollectiveContext(object):
@@ -178,12 +182,13 @@ if MPI:
# pylint: disable=protected-access
self._dset._dxpl.set_dxpl_mpio(h5fd.MPIO_INDEPENDENT)
+
class Dataset(HLObject):
"""
Represents an HDF5 dataset
"""
-
+
def astype(self, dtype):
""" Get a context manager allowing you to perform reads to a
different destination type, e.g.:
@@ -200,13 +205,12 @@ class Dataset(HLObject):
""" Context manager for MPI collective reads & writes """
return CollectiveContext(self)
-
@property
- @with_phil
def dims(self):
""" Access dimension scales attached to this dataset. """
from .dims import DimensionManager
- return DimensionManager(self)
+ with phil:
+ return DimensionManager(self)
@property
@with_phil
@@ -229,7 +233,7 @@ class Dataset(HLObject):
@with_phil
def size(self):
"""Numpy-style attribute giving the total dataset size"""
- return numpy.prod(self.shape)
+ return numpy.prod(self.shape, dtype=numpy.intp)
@property
@with_phil
@@ -314,8 +318,6 @@ class Dataset(HLObject):
def __init__(self, bind):
""" Create a new Dataset object by binding to a low-level DatasetID.
"""
- from threading import local
-
if not isinstance(bind, h5d.DatasetID):
raise ValueError("%s is not a DatasetID" % bind)
HLObject.__init__(self, bind)
@@ -453,7 +455,7 @@ class Dataset(HLObject):
# These are the only access methods NumPy allows for such objects
if args == (Ellipsis,) or args == tuple():
return numpy.empty(self.shape, dtype=new_dtype)
-
+
# === Scalar dataspaces =================
if self.shape == ():
@@ -582,7 +584,7 @@ class Dataset(HLObject):
if len(mismatch) != 0:
mismatch = ", ".join('"%s"'%x for x in mismatch)
raise ValueError("Illegal slicing argument (fields %s not in dataset type)" % mismatch)
-
+
# Write non-compound source into a single dataset field
if len(names) == 1 and val.dtype.fields is None:
subtype = h5t.py_create(val.dtype)
@@ -697,39 +699,37 @@ class Dataset(HLObject):
@with_phil
def __repr__(self):
if not self:
- r = six.u('<Closed HDF5 dataset>')
+ r = u'<Closed HDF5 dataset>'
else:
if self.name is None:
- namestr = six.u('("anonymous")')
+ namestr = u'("anonymous")'
else:
name = pp.basename(pp.normpath(self.name))
- namestr = six.u('"%s"') % (
- name if name != six.u('') else six.u('/'))
- r = six.u('<HDF5 dataset %s: shape %s, type "%s">') % \
- (namestr, self.shape, self.dtype.str)
+ namestr = u'"%s"' % (name if name != u'' else u'/')
+ r = u'<HDF5 dataset %s: shape %s, type "%s">' % (
+ namestr, self.shape, self.dtype.str
+ )
if six.PY2:
return r.encode('utf8')
return r
-
+
if hasattr(h5d.DatasetID, "refresh"):
@with_phil
def refresh(self):
""" Refresh the dataset metadata by reloading from the file.
-
+
This is part of the SWMR features and only exists when the HDF5
library version is >= 1.9.178
"""
self._id.refresh()
-
+
if hasattr(h5d.DatasetID, "flush"):
@with_phil
def flush(self):
""" Flush the dataset data and metadata to the file.
If the dataset is chunked, raw data chunks are written to the file.
-
- This is part of the SWMR features and only exist when the HDF5
+
+ This is part of the SWMR features and only exists when the HDF5
library version is >= 1.9.178
"""
self._id.flush()
-
-
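
When SWMR support is present, ``flush`` and ``refresh`` pair up across the
writer and the reader (a brief sketch; ``dset`` is assumed to be an open
chunked dataset)::

    # writer process: extend, fill, and push chunks to the file
    dset.resize((dset.shape[0] + 1,))
    dset[-1] = 42.0
    dset.flush()

    # reader process: reload metadata so the new data becomes visible
    dset.refresh()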
diff --git a/h5py/_hl/files.py b/h5py/_hl/files.py
index 55b7aec..6e17e0c 100644
--- a/h5py/_hl/files.py
+++ b/h5py/_hl/files.py
@@ -16,9 +16,7 @@ from __future__ import absolute_import
import sys
import os
-from .compat import fspath
-from .compat import fsencode
-from .compat import fsdecode
+from .compat import filename_decode, filename_encode
import six
@@ -146,19 +144,19 @@ class File(Group):
"""
@property
- @with_phil
def attrs(self):
""" Attributes attached to this object """
# hdf5 complains that a file identifier is an invalid location for an
# attribute. Instead of self, pass the root group to AttributeManager:
from . import attrs
- return attrs.AttributeManager(self['/'])
+ with phil:
+ return attrs.AttributeManager(self['/'])
@property
@with_phil
def filename(self):
"""File name on disk"""
- return fsdecode(h5f.get_name(self.fid))
+ return filename_decode(h5f.get_name(self.fid))
@property
@with_phil
@@ -261,12 +259,12 @@ class File(Group):
if swmr and not swmr_support:
raise ValueError("The SWMR feature is not available in this version of the HDF5 library")
- with phil:
- if isinstance(name, _objects.ObjectID):
+ if isinstance(name, _objects.ObjectID):
+ with phil:
fid = h5i.get_file_id(name)
- else:
- name = fsencode(fspath(name))
-
+ else:
+ name = filename_encode(name)
+ with phil:
fapl = make_fapl(driver, libver, **kwds)
fid = make_fid(name, mode, userblock_size, fapl, swmr=swmr)
@@ -275,31 +273,33 @@ class File(Group):
if swmr and mode == 'r':
self._swmr_mode = True
- Group.__init__(self, fid)
+ Group.__init__(self, fid)
def close(self):
""" Close the file. All open objects become invalid """
with phil:
- # We have to explicitly murder all open objects related to the file
+ # Check that the file is still open, otherwise skip
+ if self.id.valid:
+ # We have to explicitly murder all open objects related to the file
- # Close file-resident objects first, then the files.
- # Otherwise we get errors in MPI mode.
- id_list = h5f.get_obj_ids(self.id, ~h5f.OBJ_FILE)
- file_list = h5f.get_obj_ids(self.id, h5f.OBJ_FILE)
+ # Close file-resident objects first, then the files.
+ # Otherwise we get errors in MPI mode.
+ id_list = h5f.get_obj_ids(self.id, ~h5f.OBJ_FILE)
+ file_list = h5f.get_obj_ids(self.id, h5f.OBJ_FILE)
- id_list = [x for x in id_list if h5i.get_file_id(x).id == self.id.id]
- file_list = [x for x in file_list if h5i.get_file_id(x).id == self.id.id]
+ id_list = [x for x in id_list if h5i.get_file_id(x).id == self.id.id]
+ file_list = [x for x in file_list if h5i.get_file_id(x).id == self.id.id]
- for id_ in id_list:
- while id_.valid:
- h5i.dec_ref(id_)
+ for id_ in id_list:
+ while id_.valid:
+ h5i.dec_ref(id_)
- for id_ in file_list:
- while id_.valid:
- h5i.dec_ref(id_)
+ for id_ in file_list:
+ while id_.valid:
+ h5i.dec_ref(id_)
- self.id.close()
- _objects.nonlocal_close()
+ self.id.close()
+ _objects.nonlocal_close()
def flush(self):
""" Tell the HDF5 library to flush its buffers.
@@ -319,14 +319,14 @@ class File(Group):
@with_phil
def __repr__(self):
if not self.id:
- r = six.u('<Closed HDF5 file>')
+ r = u'<Closed HDF5 file>'
else:
# Filename has to be forced to Unicode if it comes back bytes
# Mode is always a "native" string
filename = self.filename
if isinstance(filename, bytes): # Can't decode fname
filename = filename.decode('utf8', 'replace')
- r = six.u('<HDF5 file "%s" (mode %s)>') % (os.path.basename(filename),
+ r = u'<HDF5 file "%s" (mode %s)>' % (os.path.basename(filename),
self.mode)
if six.PY2:
diff --git a/h5py/_hl/group.py b/h5py/_hl/group.py
index f919ef0..4328d7c 100644
--- a/h5py/_hl/group.py
+++ b/h5py/_hl/group.py
@@ -17,9 +17,7 @@ import posixpath as pp
import six
import numpy
-from .compat import fsdecode
-from .compat import fsencode
-from .compat import fspath
+from .compat import filename_decode, filename_encode
from .. import h5g, h5i, h5o, h5r, h5t, h5l, h5p
from . import base
@@ -232,20 +230,21 @@ class Group(HLObject, MutableMappingHDF5):
return SoftLink
linkbytes = self.id.links.get_val(self._e(name))
return SoftLink(self._d(linkbytes))
-
+
elif typecode == h5l.TYPE_EXTERNAL:
if getclass:
return ExternalLink
filebytes, linkbytes = self.id.links.get_val(self._e(name))
- return ExternalLink(fsdecode(filebytes), self._d(linkbytes))
-
+ return ExternalLink(
+ filename_decode(filebytes), self._d(linkbytes)
+ )
+
elif typecode == h5l.TYPE_HARD:
return HardLink if getclass else HardLink()
-
+
else:
raise TypeError("Unknown link type")
- @with_phil
def __setitem__(self, name, obj):
""" Add an object to the group. The name must not already be in use.
@@ -270,26 +269,33 @@ class Group(HLObject, MutableMappingHDF5):
values are stored as scalar datasets. Raise ValueError if we
can't understand the resulting array dtype.
"""
- name, lcpl = self._e(name, lcpl=True)
+ do_link = False
+ with phil:
+ name, lcpl = self._e(name, lcpl=True)
- if isinstance(obj, HLObject):
- h5o.link(obj.id, self.id, name, lcpl=lcpl, lapl=self._lapl)
+ if isinstance(obj, HLObject):
+ h5o.link(obj.id, self.id, name, lcpl=lcpl, lapl=self._lapl)
- elif isinstance(obj, SoftLink):
- self.id.links.create_soft(name, self._e(obj.path),
- lcpl=lcpl, lapl=self._lapl)
+ elif isinstance(obj, SoftLink):
+ self.id.links.create_soft(name, self._e(obj.path),
+ lcpl=lcpl, lapl=self._lapl)
- elif isinstance(obj, ExternalLink):
- self.id.links.create_external(name, fsencode(obj.filename),
- self._e(obj.path), lcpl=lcpl, lapl=self._lapl)
+ elif isinstance(obj, ExternalLink):
+ do_link = True
- elif isinstance(obj, numpy.dtype):
- htype = h5t.py_create(obj, logical=True)
- htype.commit(self.id, name, lcpl=lcpl)
+ elif isinstance(obj, numpy.dtype):
+ htype = h5t.py_create(obj, logical=True)
+ htype.commit(self.id, name, lcpl=lcpl)
- else:
- ds = self.create_dataset(None, data=obj, dtype=base.guess_dtype(obj))
- h5o.link(ds.id, self.id, name, lcpl=lcpl)
+ else:
+ ds = self.create_dataset(None, data=obj, dtype=base.guess_dtype(obj))
+ h5o.link(ds.id, self.id, name, lcpl=lcpl)
+
+ if do_link:
+ fn = filename_encode(obj.filename)
+ with phil:
+ self.id.links.create_external(name, fn, self._e(obj.path),
+ lcpl=lcpl, lapl=self._lapl)
@with_phil
def __delitem__(self, name):
@@ -465,12 +471,12 @@ class Group(HLObject, MutableMappingHDF5):
@with_phil
def __repr__(self):
if not self:
- r = six.u("<Closed HDF5 group>")
+ r = u"<Closed HDF5 group>"
else:
namestr = (
- six.u('"%s"') % self.name
- ) if self.name is not None else six.u("(anonymous)")
- r = six.u('<HDF5 group %s (%d members)>') % (namestr, len(self))
+ u'"%s"' % self.name
+ ) if self.name is not None else u"(anonymous)"
+ r = u'<HDF5 group %s (%d members)>' % (namestr, len(self))
if six.PY2:
return r.encode('utf8')
@@ -525,7 +531,7 @@ class ExternalLink(object):
return self._filename
def __init__(self, filename, path):
- self._filename = fspath(filename)
+ self._filename = filename_decode(filename_encode(filename))
self._path = str(path)
def __repr__(self):
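
For context, ``Group.__setitem__`` covers all of the branches seen above (a
short sketch with made-up names)::

    import numpy as np
    import h5py

    with h5py.File("demo.h5", "w") as f:
        grp = f.create_group("grp")
        grp["array"] = np.arange(4)                   # stored as a dataset
        grp["soft"] = h5py.SoftLink("/grp/array")     # soft link
        grp["ext"] = h5py.ExternalLink("o.h5", "/x")  # external link
        grp["dtype"] = np.dtype("f8")                 # committed named type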
diff --git a/h5py/_objects.pyx b/h5py/_objects.pyx
index 6f4f9fd..cdb9cf2 100644
--- a/h5py/_objects.pyx
+++ b/h5py/_objects.pyx
@@ -88,6 +88,7 @@ def with_phil(func):
#
# See also __cinit__ and __dealloc__ for class ObjectID.
+import gc
import weakref
import warnings
@@ -114,9 +115,23 @@ def nonlocal_close():
""" Find dead ObjectIDs and set their integer identifiers to 0.
"""
cdef ObjectID obj
+ cdef list reg_ids
+
+ # create a cached list of ids whilst the gc is disabled to avoid hitting
+ # the cyclic gc while iterating through the registry dict
+ gc.disable()
+ try:
+ reg_ids = list(registry)
+ finally:
+ gc.enable()
+
+ for python_id in reg_ids:
+ ref = registry.get(python_id)
+
+ # registry dict has changed underneath us, skip to next item
+ if ref is None:
+ continue
- # list() needed because the registry can be mutated concurrently
- for python_id, ref in list(registry.items()):
obj = ref()
# Object died while walking the registry list, presumably because
diff --git a/h5py/h5.pyx b/h5py/h5.pyx
index bb4ecdb..562d607 100644
--- a/h5py/h5.pyx
+++ b/h5py/h5.pyx
@@ -19,6 +19,8 @@ ITER_NATIVE = H5_ITER_NATIVE # No particular order, whatever is fastest
INDEX_NAME = H5_INDEX_NAME # Index on names
INDEX_CRT_ORDER = H5_INDEX_CRT_ORDER # Index on creation order
+HDF5_VERSION_COMPILED_AGAINST = HDF5_VERSION
+
class ByteStringContext(object):
def __init__(self):
diff --git a/h5py/h5t.pyx b/h5py/h5t.pyx
index 20755e4..cc2344e 100644
--- a/h5py/h5t.pyx
+++ b/h5py/h5t.pyx
@@ -14,7 +14,6 @@
subclasses which represent things like integer/float/compound identifiers.
The majority of the H5T API is presented as methods on these identifiers.
"""
-
# Pyrex compile-time imports
include "config.pxi"
from _objects cimport pdefault
@@ -952,21 +951,6 @@ cdef class TypeFloatID(TypeAtomicID):
cdef object py_dtype(self):
# Translation function for floating-point types
- if MACHINE == 'ppc64le':
- size = self.get_size() # int giving number of bytes
- order = _order_map[self.get_order()] # string with '<' or '>'
-
- if size == 2 and not hasattr(np, 'float16'):
- # This build doesn't have float16; promote to float32
- return dtype(order+"f4")
-
- if size > 8:
- # The native NumPy longdouble is used for 96 and 128-bit floats
- return dtype(order + "f" + str(np.longdouble(1).dtype.itemsize))
-
- return dtype( _order_map[self.get_order()] + "f" + \
- str(self.get_size()) )
-
order = _order_map[self.get_order()] # string with '<' or '>'
s_offset, e_offset, e_size, m_offset, m_size = self.get_fields()
@@ -978,14 +962,20 @@ cdef class TypeFloatID(TypeAtomicID):
maxexp = finfo.maxexp
minexp = finfo.minexp
# workaround for numpy's buggy finfo on float128 on ppc64 archs
- if size == 16 and MACHINE.startswith('ppc64'):
+ if size == 16 and MACHINE == 'ppc64':
+ # values reported by hdf5
nmant = 116
maxexp = 1024
minexp = -1022
+ elif size == 16 and MACHINE == 'ppc64le':
+ # values reported by hdf5
+ nmant = 52
+ maxexp = 1024
+ minexp = -1022
elif nmant == 63 and finfo.nexp == 15:
# This is an 80-bit float, correct mantissa size
nmant += 1
- if (m_size <= nmant and
+ if (size >= self.get_size() and m_size <= nmant and
(2**e_size - e_bias - 1) <= maxexp and (1 - e_bias) >= minexp):
break
else:
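
The loop this hunk patches accepts the first native NumPy float whose mantissa and exponent capacity covers the on-disk fields; the limits it compares against come straight from numpy.finfo. For IEEE double precision, for example:

    import numpy as np

    finfo = np.finfo(np.float64)
    # 52-bit mantissa, exponents down to -1021 and up to 1024
    print(finfo.nmant, finfo.maxexp, finfo.minexp)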
@@ -1464,57 +1454,54 @@ cdef TypeCompoundID _c_compound(dtype dt, int logical, int aligned):
# Compound datatypes
cdef hid_t tid
- cdef TypeID type_tmp
- cdef dtype dt_tmp
- cdef size_t offset
- cdef size_t offset_step = 0
+ cdef TypeID member_type
+ cdef dtype member_dt
+ cdef size_t member_offset = 0
- cdef tuple names = dt.names
- cdef dict fields = {}
- cdef list offsets
+ cdef dict offsets = {}
+ cdef list fields = []
# The challenge with correctly converting a numpy/h5py dtype to a HDF5 type
# which is composed of subtypes has three aspects we must consider
- # 1. numpy/h5py dtypes do not always have the same size and HDF5, even when
+ # 1. numpy/h5py dtypes do not always have the same size as HDF5, even when
# equivalent (can result in overlapping elements if not careful)
# 2. For correct round-tripping of aligned dtypes, we need to consider how
- # much padding we need
+ # much padding we need by looking at the field offsets
# 3. There is no requirement that the offsets be monotonically increasing
+ # (so we start by sorting the names as a function of increasing offset)
#
# The code below tries to cover these aspects
+ # Get offsets for each compound member
for name, field in dt.fields.items():
- dt_tmp = field[0]
- offset = field[1]
- fields[offset] = {
- "name": name.encode('utf8') if isinstance(name, unicode) else name,
- "dtype": dtype(dt_tmp),
- "size": py_create(dt_tmp, logical=logical).get_size(),
- }
-
- offsets = list(sorted(fields))
- # Set initial size to itemsize or last offset plus itemsize, whichever is
- # bigger
- tid = H5Tcreate(H5T_COMPOUND,
- max(dt.itemsize, offsets[-1] + fields[offsets[-1]]["size"])
- )
-
- for i, offset in enumerate(offsets):
- dt_tmp = fields[offset]["dtype"]
- type_tmp = py_create(dt_tmp, logical=logical, aligned=aligned)
- if aligned and type_tmp.get_size() > dt_tmp.itemsize:
+ offsets[name] = field[1]
+
+ # Build list of names, offsets, and types, sorted by increasing offset
+ # (i.e. the position of the member in the struct)
+ for name in sorted(dt.names, key=offsets.__getitem__):
+ field = dt.fields[name]
+ name = name.encode('utf8') if isinstance(name, unicode) else name
+
+ # Get HDF5 data types and set the offset for each member
+ member_dt = field[0]
+ member_offset = max(member_offset, field[1])
+ member_type = py_create(member_dt, logical=logical, aligned=aligned)
+ if aligned and (member_offset > field[1]
+ or member_dt.itemsize != member_type.get_size()):
raise TypeError("Enforced alignment not compatible with HDF5 type")
- # Increase size if initial too small, which can happen if there are out
- # of order fields (as determined by offsets)
- if H5Tget_size(tid) < (offset + offset_step + type_tmp.get_size()):
- H5Tset_size(tid, offset + offset_step + type_tmp.get_size())
- H5Tinsert(tid, fields[offset]["name"], offset + offset_step, type_tmp.id)
-
- if (i + 1 < len(offsets)) and fields[offset]["size"] > offsets[i + 1]:
- if aligned:
- raise TypeError("dtype results in overlapping fields")
- else:
- offset_step += fields[offset]["size"] - offsets[i + 1]
+ fields.append((name, member_offset, member_type))
+
+ # Update member offset based on the HDF5 type size
+ member_offset += member_type.get_size()
+
+ member_offset = max(member_offset, dt.itemsize)
+ if aligned and member_offset > dt.itemsize:
+ raise TypeError("Enforced alignment not compatible with HDF5 type")
+
+ # Create compound with the necessary size, and insert its members
+ tid = H5Tcreate(H5T_COMPOUND, member_offset)
+ for (name, member_offset, member_type) in fields:
+ H5Tinsert(tid, name, member_offset, member_type.id)
return TypeCompoundID(tid)
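
Because NumPy places no monotonicity requirement on field offsets, the rewritten _c_compound sorts members by struct offset before inserting them. A pure-Python sketch of that ordering step (illustrative only):

    import numpy as np

    def members_by_offset(dt):
        # dt.fields maps name -> (subdtype, byte offset); sort the names
        # by offset so insertion proceeds front-to-back in the struct.
        offsets = {name: field[1] for name, field in dt.fields.items()}
        return [(name, offsets[name], dt.fields[name][0])
                for name in sorted(dt.names, key=offsets.__getitem__)]

    # Fields declared out of offset order:
    dt = np.dtype({'names': ['b', 'a'],
                   'formats': ['i2', 'f8'],
                   'offsets': [8, 0]})
    print(members_by_offset(dt))   # 'a' (offset 0) comes before 'b'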
diff --git a/h5py/tests/common.py b/h5py/tests/common.py
index 1c2ad53..a93e40c 100644
--- a/h5py/tests/common.py
+++ b/h5py/tests/common.py
@@ -20,16 +20,13 @@ from six import unichr
import numpy as np
import h5py
-if sys.version_info >= (2, 7) or sys.version_info >= (3, 2):
- import unittest as ut
-else:
+if sys.version_info[:2] == (2, 6):
try:
import unittest2 as ut
except ImportError:
- raise ImportError(
- 'unittest2 is required to run the test suite with python-%d.%d'
- % (sys.version_info[:2])
- )
+ raise ImportError( "unittest2 is required to run tests with Python 2.6")
+else:
+ import unittest as ut
# Check if non-ascii filenames are supported
@@ -53,7 +50,7 @@ class TestCase(ut.TestCase):
"""
Base class for unit tests.
"""
-
+
@classmethod
def setUpClass(cls):
cls.tempdir = tempfile.mkdtemp(prefix='h5py-test_')
@@ -66,10 +63,10 @@ class TestCase(ut.TestCase):
if dir is None:
dir = self.tempdir
return tempfile.mktemp(suffix, prefix, dir=self.tempdir)
-
+
def setUp(self):
self.f = h5py.File(self.mktemp(), 'w')
-
+
def tearDown(self):
try:
if self.f:
@@ -77,24 +74,22 @@ class TestCase(ut.TestCase):
except:
pass
- if not hasattr(ut.TestCase, 'assertSameElements'):
- # shim until this is ported into unittest2
- def assertSameElements(self, a, b):
- for x in a:
- match = False
- for y in b:
- if x == y:
- match = True
- if not match:
- raise AssertionError("Item '%s' appears in a but not b" % x)
-
- for x in b:
- match = False
- for y in a:
- if x == y:
- match = True
- if not match:
- raise AssertionError("Item '%s' appears in b but not a" % x)
+ def assertSameElements(self, a, b):
+ for x in a:
+ match = False
+ for y in b:
+ if x == y:
+ match = True
+ if not match:
+ raise AssertionError("Item '%s' appears in a but not b" % x)
+
+ for x in b:
+ match = False
+ for y in a:
+ if x == y:
+ match = True
+ if not match:
+ raise AssertionError("Item '%s' appears in b but not a" % x)
def assertArrayEqual(self, dset, arr, message=None, precision=None):
""" Make sure dset and arr have the same shape, dtype and contents, to
@@ -128,7 +123,7 @@ class TestCase(ut.TestCase):
dset.dtype == arr.dtype,
"Dtype mismatch (%s vs %s)%s" % (dset.dtype, arr.dtype, message)
)
-
+
if arr.dtype.names is not None:
for n in arr.dtype.names:
message = '[FIELD %s] %s' % (n, message)
@@ -146,10 +141,10 @@ class TestCase(ut.TestCase):
def assertNumpyBehavior(self, dset, arr, s):
""" Apply slicing arguments "s" to both dset and arr.
-
+
Succeeds if the results of the slicing are identical, or the
exception raised is of the same type for both.
-
+
"arr" must be a Numpy array; "dset" may be a NumPy array or dataset.
"""
exc = None
@@ -157,7 +152,7 @@ class TestCase(ut.TestCase):
arr_result = arr[s]
except Exception as e:
exc = type(e)
-
+
if exc is None:
self.assertArrayEqual(dset[s], arr_result)
else:
diff --git a/h5py/tests/hl/test_dataset_getitem.py b/h5py/tests/hl/test_dataset_getitem.py
index 127e65e..9906f3d 100644
--- a/h5py/tests/hl/test_dataset_getitem.py
+++ b/h5py/tests/hl/test_dataset_getitem.py
@@ -41,6 +41,7 @@
"""
from __future__ import absolute_import
+import sys
import numpy as np
import h5py
@@ -452,18 +453,27 @@ class Test2DZeroFloat(TestCase):
TestCase.setUp(self)
self.data = np.ones((0,3), dtype='f')
self.dset = self.f.create_dataset('x', data=self.data)
-
+
def test_ndim(self):
""" Verify number of dimensions """
self.assertEquals(self.dset.ndim, 2)
-
+
def test_shape(self):
""" Verify shape """
self.assertEquals(self.dset.shape, (0, 3))
-
+
@ut.expectedFailure
def test_indexlist(self):
""" see issue #473 """
self.assertNumpyBehavior(self.dset, self.data, np.s_[:,[0,1,2]])
-
+
+class TestVeryLargeArray(TestCase):
+
+ def setUp(self):
+ TestCase.setUp(self)
+ self.dset = self.f.create_dataset('x', shape=(2**15, 2**16))
+
+ @ut.skipIf(sys.maxsize < 2**31, 'Maximum integer size >= 2**31 required')
+ def test_size(self):
+ self.assertEqual(self.dset.size, 2**31)
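+
+The skip guard reflects simple arithmetic: the dataset holds 2**15 * 2**16 = 2**31 elements, one more than the largest 32-bit signed integer, so the size computation only works where sys.maxsize is at least 2**31.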
diff --git a/h5py/tests/hl/test_datatype.py b/h5py/tests/hl/test_datatype.py
index d8a9bea..1518a25 100644
--- a/h5py/tests/hl/test_datatype.py
+++ b/h5py/tests/hl/test_datatype.py
@@ -4,6 +4,7 @@
from __future__ import absolute_import
+from itertools import count
import numpy as np
import h5py
@@ -14,7 +15,14 @@ class TestVlen(TestCase):
"""
Check that storage of vlen strings is carried out correctly.
"""
-
+ def assertVlenArrayEqual(self, dset, arr, message=None, precision=None):
+ self.assert_(
+ dset.shape == arr.shape,
+ "Shape mismatch (%s vs %s)%s" % (dset.shape, arr.shape, message)
+ )
+ for (i, d, a) in zip(count(), dset, arr):
+ self.assertArrayEqual(d, a, message, precision)
+
def test_compound(self):
fields = []
@@ -24,7 +32,75 @@ class TestVlen(TestCase):
self.f['mytype'] = np.dtype(dt)
dt_out = self.f['mytype'].dtype.fields['field_1'][0]
self.assertEqual(h5py.check_dtype(vlen=dt_out), str)
-
+
+ def test_compound_vlen_bool(self):
+ vidt = h5py.special_dtype(vlen=np.uint8)
+ def a(items):
+ return np.array(items, dtype=np.uint8)
+
+ f = self.f
+
+ dt_vb = np.dtype([
+ ('foo', vidt),
+ ('logical', np.bool)])
+ vb = f.create_dataset('dt_vb', shape=(4,), dtype=dt_vb)
+ data = np.array([(a([1,2,3]), True),
+ (a([1 ]), False),
+ (a([1,5 ]), True),
+ (a([], ), False),],
+ dtype=dt_vb)
+ vb[:] = data
+ actual = f['dt_vb'][:]
+ self.assertVlenArrayEqual(data['foo'], actual['foo'])
+ self.assertArrayEqual(data['logical'], actual['logical'])
+
+ dt_vv = np.dtype([
+ ('foo', vidt),
+ ('bar', vidt)])
+ f.create_dataset('dt_vv', shape=(4,), dtype=dt_vv)
+
+ dt_vvb = np.dtype([
+ ('foo', vidt),
+ ('bar', vidt),
+ ('logical', np.bool)])
+ vvb = f.create_dataset('dt_vvb', shape=(2,), dtype=dt_vvb)
+
+ dt_bvv = np.dtype([
+ ('logical', np.bool),
+ ('foo', vidt),
+ ('bar', vidt)])
+ bvv = f.create_dataset('dt_bvv', shape=(2,), dtype=dt_bvv)
+ data = np.array([(True, a([1,2,3]), a([1,2]) ),
+ (False, a([]), a([2,4,6])),],
+                        dtype=dt_bvv)
+ bvv[:] = data
+ actual = bvv[:]
+ self.assertVlenArrayEqual(data['foo'], actual['foo'])
+ self.assertVlenArrayEqual(data['bar'], actual['bar'])
+ self.assertArrayEqual(data['logical'], actual['logical'])
+
+ def test_compound_vlen_enum(self):
+ eidt = h5py.special_dtype(enum=(np.uint8, {'OFF': 0, 'ON': 1}))
+ vidt = h5py.special_dtype(vlen=np.uint8)
+ def a(items):
+ return np.array(items, dtype=np.uint8)
+
+ f = self.f
+
+ dt_vve = np.dtype([
+ ('foo', vidt),
+ ('bar', vidt),
+ ('switch', eidt)])
+ vve = f.create_dataset('dt_vve', shape=(2,), dtype=dt_vve)
+ data = np.array([(a([1,2,3]), a([1,2]), 1),
+ (a([]), a([2,4,6]), 0),],
+ dtype=dt_vve)
+ vve[:] = data
+ actual = vve[:]
+ self.assertVlenArrayEqual(data['foo'], actual['foo'])
+ self.assertVlenArrayEqual(data['bar'], actual['bar'])
+ self.assertArrayEqual(data['switch'], actual['switch'])
+
def test_vlen_enum(self):
fname = self.mktemp()
arr1 = [[1],[1,2]]
@@ -51,6 +127,30 @@ class TestOffsets(TestCase):
correctly.
"""
+ def test_compound_vlen(self):
+ vidt = h5py.special_dtype(vlen=np.uint8)
+ eidt = h5py.special_dtype(enum=(np.uint8, {'OFF': 0, 'ON': 1}))
+
+ for np_align in (False, True):
+ dt = np.dtype([
+ ('a', eidt),
+ ('foo', vidt),
+ ('bar', vidt),
+ ('switch', eidt)], align=np_align)
+ np_offsets = [dt.fields[i][1] for i in dt.names]
+
+ for logical in (False, True):
+ if logical and np_align:
+ # Vlen types have different size in the numpy struct
+ self.assertRaises(TypeError, h5py.h5t.py_create, dt,
+ logical=logical)
+ else:
+ ht = h5py.h5t.py_create(dt, logical=logical)
+ offsets = [ht.get_member_offset(i)
+ for i in range(ht.get_nmembers())]
+ if np_align:
+ self.assertEqual(np_offsets, offsets)
+
def test_aligned_offsets(self):
dt = np.dtype('i2,i8', align=True)
ht = h5py.h5t.py_create(dt)
@@ -65,7 +165,8 @@ class TestOffsets(TestCase):
dt = np.dtype('i2,f8', align=True)
data = np.empty(10, dtype=dt)
- data['f0'] = np.array(np.random.randint(-100, 100, size=data.size), dtype='i2')
+ data['f0'] = np.array(np.random.randint(-100, 100, size=data.size),
+ dtype='i2')
data['f1'] = np.random.rand(data.size)
fname = self.mktemp()
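
The new offset tests exercise compound types with vlen members under both NumPy alignments. A minimal stand-alone round-trip of such a type (dataset and file names are illustrative):

    import numpy as np
    import h5py

    vidt = h5py.special_dtype(vlen=np.uint8)
    dt = np.dtype([('tag', np.uint8), ('payload', vidt)])

    with h5py.File('vlen_demo.h5', 'w') as f:
        ds = f.create_dataset('d', shape=(1,), dtype=dt)
        ds[:] = np.array([(1, np.array([1, 2, 3], dtype=np.uint8))],
                         dtype=dt)
        print(ds[0])   # (1, array([1, 2, 3], dtype=uint8))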
diff --git a/h5py/tests/old/common.py b/h5py/tests/old/common.py
deleted file mode 100644
index 04e2d37..0000000
--- a/h5py/tests/old/common.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# This file is part of h5py, a Python interface to the HDF5 library.
-#
-# http://www.h5py.org
-#
-# Copyright 2008-2013 Andrew Collette and contributors
-#
-# License: Standard 3-clause BSD; see "license.txt" for full license terms
-# and contributor agreement.
-
-from __future__ import absolute_import
-
-import sys
-
-from six import unichr, PY3
-
-if sys.version_info >= (2, 7) or sys.version_info >= (3, 2):
- import unittest as ut
-else:
- try:
- import unittest2 as ut
- except ImportError:
- raise ImportError(
- 'unittest2 is required to run the test suite with python-%d.%d'
- % (sys.version_info[:2])
- )
-
-import shutil
-import tempfile
-import numpy as np
-import os
-
-
-class TestCase(ut.TestCase):
-
- @classmethod
- def setUpClass(cls):
- cls.tempdir = tempfile.mkdtemp(prefix='h5py-test_')
-
- @classmethod
- def tearDownClass(cls):
- shutil.rmtree(cls.tempdir)
-
- def mktemp(self, suffix='.hdf5', prefix='', dir=None):
- if dir is None:
- dir = self.tempdir
- return tempfile.mktemp(suffix, prefix, dir=self.tempdir)
-
- if not hasattr(ut.TestCase, 'assertSameElements'):
- # shim until this is ported into unittest2
- def assertSameElements(self, a, b):
- for x in a:
- match = False
- for y in b:
- if x == y:
- match = True
- if not match:
- raise AssertionError("Item '%s' appears in a but not b" % x)
-
- for x in b:
- match = False
- for y in a:
- if x == y:
- match = True
- if not match:
- raise AssertionError("Item '%s' appears in b but not a" % x)
-
- def assertArrayEqual(self, dset, arr, message=None, precision=None):
- """ Make sure dset and arr have the same shape, dtype and contents, to
- within the given precision.
-
- Note that dset may be a NumPy array or an HDF5 dataset.
- """
- if precision is None:
- precision = 1e-5
- if message is None:
- message = ''
- else:
- message = ' (%s)' % message
-
- if np.isscalar(dset) or np.isscalar(arr):
- self.assert_(
- np.isscalar(dset) and np.isscalar(arr),
- 'Scalar/array mismatch ("%r" vs "%r")%s' % (dset, arr, message)
- )
- self.assert_(
- dset - arr < precision,
- "Scalars differ by more than %.3f%s" % (precision, message)
- )
- return
-
- self.assert_(
- dset.shape == arr.shape,
- "Shape mismatch (%s vs %s)%s" % (dset.shape, arr.shape, message)
- )
- self.assert_(
- dset.dtype == arr.dtype,
- "Dtype mismatch (%s vs %s)%s" % (dset.dtype, arr.dtype, message)
- )
- self.assert_(
- np.all(np.abs(dset[...] - arr[...]) < precision),
- "Arrays differ by more than %.3f%s" % (precision, message)
- )
-
-# Check if non-ascii filenames are supported
-# Evidently this is the most reliable way to check
-# See also h5py issue #263 and ipython #466
-# To test for this, run the testsuite with LC_ALL=C
-try:
- testfile, fname = tempfile.mkstemp(unichr(0x03b7))
-except UnicodeError:
- unicode_filenames = False
-else:
- unicode_filenames = True
- os.close(testfile)
- os.unlink(fname)
- del fname
- del testfile
diff --git a/h5py/tests/old/test_attrs.py b/h5py/tests/old/test_attrs.py
index d67df4b..bb597b6 100644
--- a/h5py/tests/old/test_attrs.py
+++ b/h5py/tests/old/test_attrs.py
@@ -22,7 +22,7 @@ import six
import numpy as np
import collections
-from .common import TestCase, ut
+from ..common import TestCase, ut
from h5py.highlevel import File
from h5py import h5a, h5t
@@ -116,7 +116,7 @@ class TestUnicode(BaseAttrs):
def test_unicode(self):
""" Access via Unicode string with non-ascii characters """
- name = six.u("Omega") + six.unichr(0x03A9)
+ name = u"Omega" + six.unichr(0x03A9)
self.f.attrs[name] = 42
out = self.f.attrs[name]
self.assertEqual(out, 42)
diff --git a/h5py/tests/old/test_attrs_data.py b/h5py/tests/old/test_attrs_data.py
index 5330e18..9d3e565 100644
--- a/h5py/tests/old/test_attrs_data.py
+++ b/h5py/tests/old/test_attrs_data.py
@@ -19,7 +19,7 @@ import six
import numpy as np
-from .common import TestCase, ut
+from ..common import TestCase, ut
import h5py
from h5py import h5a, h5s, h5t
@@ -178,9 +178,9 @@ class TestTypes(BaseAttrs):
def test_unicode_scalar(self):
""" Storage of variable-length unicode strings (auto-creation) """
- self.f.attrs['x'] = six.u("Hello") + six.unichr(0x2340) + six.u("!!")
+ self.f.attrs['x'] = u"Hello" + six.unichr(0x2340) + u"!!"
out = self.f.attrs['x']
- self.assertEqual(out, six.u("Hello") + six.unichr(0x2340) + six.u("!!"))
+ self.assertEqual(out, u"Hello" + six.unichr(0x2340) + u"!!")
self.assertEqual(type(out), six.text_type)
aid = h5py.h5a.open(self.f.id, b"x")
@@ -223,7 +223,7 @@ class TestEmpty(BaseAttrs):
def test_items(self):
items = list(self.f.attrs.items())
self.assertEqual(
- [(six.u("x"), self.empty_obj)], items
+ [(u"x", self.empty_obj)], items
)
def test_itervalues(self):
@@ -235,7 +235,7 @@ class TestEmpty(BaseAttrs):
def test_iteritems(self):
items = list(six.iteritems(self.f.attrs))
self.assertEqual(
- [(six.u("x"), self.empty_obj)], items
+ [(u"x", self.empty_obj)], items
)
diff --git a/h5py/tests/old/test_base.py b/h5py/tests/old/test_base.py
index 08f5a7a..9f1b8a7 100644
--- a/h5py/tests/old/test_base.py
+++ b/h5py/tests/old/test_base.py
@@ -18,7 +18,7 @@ from __future__ import absolute_import
import six
from h5py import File
-from .common import ut, TestCase, unicode_filenames
+from ..common import ut, TestCase, UNICODE_FILENAMES
import numpy as np
import os
@@ -74,10 +74,10 @@ class TestRepr(BaseTest):
typ = self.f['type']
self._check_type(typ)
- @ut.skipIf(not unicode_filenames, "Filesystem unicode support required")
+ @ut.skipIf(not UNICODE_FILENAMES, "Filesystem unicode support required")
def test_file(self):
""" File object repr() with unicode """
- fname = tempfile.mktemp(self.USTRING+six.u('.hdf5'))
+ fname = tempfile.mktemp(self.USTRING+u'.hdf5')
try:
with File(fname,'w') as f:
self._check_type(f)
diff --git a/h5py/tests/old/test_dataset.py b/h5py/tests/old/test_dataset.py
index 05726fc..193b22e 100644
--- a/h5py/tests/old/test_dataset.py
+++ b/h5py/tests/old/test_dataset.py
@@ -24,7 +24,7 @@ import six
import numpy as np
-from .common import ut, TestCase
+from ..common import ut, TestCase
from h5py.highlevel import File, Group, Dataset
from h5py._hl.base import is_empty_dataspace
from h5py import h5t
@@ -546,7 +546,7 @@ class TestAutoCreate(BaseDataset):
def test_vlen_unicode(self):
""" Assignment of a unicode string produces a vlen unicode dataset """
- self.f['x'] = six.u("Hello there") + six.unichr(0x2034)
+ self.f['x'] = u"Hello there" + six.unichr(0x2034)
ds = self.f['x']
tid = ds.id.get_type()
self.assertEqual(type(tid), h5py.h5t.TypeStringID)
@@ -728,7 +728,7 @@ class TestStrings(BaseDataset):
"""
dt = h5py.special_dtype(vlen=six.text_type)
ds = self.f.create_dataset('x', (100,), dtype=dt)
- data = six.u("Hello") + six.unichr(0x2034)
+ data = u"Hello" + six.unichr(0x2034)
ds[0] = data
out = ds[0]
self.assertEqual(type(out), six.text_type)
@@ -760,7 +760,7 @@ class TestStrings(BaseDataset):
"""
dt = h5py.special_dtype(vlen=six.text_type)
ds = self.f.create_dataset('x', (100,), dtype=dt)
- data = six.u("Hello there") + six.unichr(0x2034)
+ data = u"Hello there" + six.unichr(0x2034)
ds[0] = data.encode('utf8')
out = ds[0]
self.assertEqual(type(out), six.text_type)
diff --git a/h5py/tests/old/test_datatype.py b/h5py/tests/old/test_datatype.py
index 84e015c..c2f71b9 100644
--- a/h5py/tests/old/test_datatype.py
+++ b/h5py/tests/old/test_datatype.py
@@ -19,7 +19,7 @@ import six
import numpy as np
-from .common import ut, TestCase
+from ..common import ut, TestCase
from h5py import File
from h5py._hl.datatype import Datatype
diff --git a/h5py/tests/old/test_dimension_scales.py b/h5py/tests/old/test_dimension_scales.py
index 309a9d6..4c3d8b3 100644
--- a/h5py/tests/old/test_dimension_scales.py
+++ b/h5py/tests/old/test_dimension_scales.py
@@ -13,7 +13,7 @@ import sys
import numpy as np
-from .common import ut, TestCase
+from ..common import ut, TestCase
from h5py.highlevel import File, Group, Dataset
import h5py
diff --git a/h5py/tests/old/test_file.py b/h5py/tests/old/test_file.py
index 698b6ff..48edcb8 100644
--- a/h5py/tests/old/test_file.py
+++ b/h5py/tests/old/test_file.py
@@ -21,8 +21,7 @@ import tempfile
import six
-from .common import ut, TestCase, unicode_filenames
-from ..common import closed_tempfile
+from ..common import ut, TestCase, UNICODE_FILENAMES, closed_tempfile
from h5py.highlevel import File
import h5py
@@ -401,13 +400,13 @@ class TestContextManager(TestCase):
self.assertTrue(fid)
self.assertTrue(not fid)
+@ut.skipIf(not UNICODE_FILENAMES, "Filesystem unicode support required")
class TestUnicode(TestCase):
"""
Feature: Unicode filenames are supported
"""
- @ut.skipIf(not unicode_filenames, "Filesystem unicode support required")
def test_unicode(self):
""" Unicode filenames can be used, and retrieved properly via .filename
"""
@@ -419,6 +418,14 @@ class TestUnicode(TestCase):
finally:
fid.close()
+ def test_unicode_hdf5_python_consistent(self):
+ """ Unicode filenames can be used, and seen correctly from python
+ """
+ fname = self.mktemp(prefix = six.unichr(0x201a))
+ with File(fname, 'w') as f:
+ self.assertTrue(os.path.exists(fname))
+
+
class TestFileProperty(TestCase):
"""
@@ -477,6 +484,24 @@ class TestClose(TestCase):
with self.assertRaises(ValueError):
fid.create_group('foo')
+ def test_close_multiple_default_driver(self):
+ fname = self.mktemp()
+ f = h5py.File(fname, 'w')
+ f.create_group("test")
+ f.close()
+ f.close()
+
+ @ut.skipUnless(mpi, "Parallel HDF5 is required for MPIO driver test")
+ def test_close_multiple_mpio_driver(self):
+ """ MPIO driver and options """
+ from mpi4py import MPI
+
+ fname = self.mktemp()
+ f = File(fname, 'w', driver='mpio', comm=MPI.COMM_WORLD)
+ f.create_group("test")
+ f.close()
+ f.close()
+
class TestFlush(TestCase):
"""
diff --git a/h5py/tests/old/test_file_image.py b/h5py/tests/old/test_file_image.py
index 93b6e9e..dfce2fd 100644
--- a/h5py/tests/old/test_file_image.py
+++ b/h5py/tests/old/test_file_image.py
@@ -3,7 +3,7 @@ from __future__ import absolute_import
import h5py
from h5py import h5f, h5p
-from .common import ut, TestCase
+from ..common import ut, TestCase
@ut.skipUnless(h5py.version.hdf5_version_tuple >= (1, 8, 9), 'file image operations require HDF5 >= 1.8.9')
class TestFileImage(TestCase):
diff --git a/h5py/tests/old/test_group.py b/h5py/tests/old/test_group.py
index bfe19c1..4aae44e 100644
--- a/h5py/tests/old/test_group.py
+++ b/h5py/tests/old/test_group.py
@@ -28,17 +28,17 @@ from tempfile import mkdtemp
import six
-from .common import ut, TestCase
+from ..common import ut, TestCase
import h5py
from h5py.highlevel import File, Group, SoftLink, HardLink, ExternalLink
from h5py.highlevel import Dataset, Datatype
from h5py import h5t
-from h5py._hl.compat import fsencode
+from h5py._hl.compat import filename_encode
# If we can't encode unicode filenames, there's not much point failing tests
# which must fail
try:
- fsencode(u"α")
+ filename_encode(u"α")
except UnicodeEncodeError:
NO_FS_UNICODE = True
else:
@@ -91,7 +91,7 @@ class TestCreate(BaseGroup):
def test_unicode(self):
""" Unicode names are correctly stored """
- name = six.u("/Name") + six.unichr(0x4500)
+ name = u"/Name" + six.unichr(0x4500)
group = self.f.create_group(name)
self.assertEqual(group.name, name)
self.assertEqual(group.id.links.get_info(name.encode('utf8')).cset, h5t.CSET_UTF8)
@@ -99,7 +99,7 @@ class TestCreate(BaseGroup):
def test_unicode_default(self):
""" Unicode names convertible to ASCII are stored as ASCII (issue 239)
"""
- name = six.u("/Hello, this is a name")
+ name = u"/Hello, this is a name"
group = self.f.create_group(name)
self.assertEqual(group.name, name)
self.assertEqual(group.id.links.get_info(name.encode('utf8')).cset, h5t.CSET_ASCII)
@@ -311,33 +311,33 @@ class TestContains(BaseGroup):
""" "in" builtin works for containership (byte and Unicode) """
self.f.create_group('a')
self.assertIn(b'a', self.f)
- self.assertIn(six.u('a'), self.f)
+ self.assertIn(u'a', self.f)
self.assertIn(b'/a', self.f)
- self.assertIn(six.u('/a'), self.f)
+ self.assertIn(u'/a', self.f)
self.assertNotIn(b'mongoose', self.f)
- self.assertNotIn(six.u('mongoose'), self.f)
+ self.assertNotIn(u'mongoose', self.f)
def test_exc(self):
""" "in" on closed group returns False (see also issue 174) """
self.f.create_group('a')
self.f.close()
self.assertFalse(b'a' in self.f)
- self.assertFalse(six.u('a') in self.f)
+ self.assertFalse(u'a' in self.f)
def test_empty(self):
""" Empty strings work properly and aren't contained """
- self.assertNotIn(six.u(''), self.f)
+ self.assertNotIn(u'', self.f)
self.assertNotIn(b'', self.f)
def test_dot(self):
""" Current group "." is always contained """
self.assertIn(b'.', self.f)
- self.assertIn(six.u('.'), self.f)
+ self.assertIn(u'.', self.f)
def test_root(self):
""" Root group (by itself) is contained """
self.assertIn(b'/', self.f)
- self.assertIn(six.u('/'), self.f)
+ self.assertIn(u'/', self.f)
def test_trailing_slash(self):
""" Trailing slashes are unconditionally ignored """
diff --git a/h5py/tests/old/test_h5.py b/h5py/tests/old/test_h5.py
index bbbb88d..265ab38 100644
--- a/h5py/tests/old/test_h5.py
+++ b/h5py/tests/old/test_h5.py
@@ -9,18 +9,15 @@
from __future__ import absolute_import
-try:
- import unittest2 as ut
-except ImportError:
- import unittest as ut
-
from h5py import h5
+from ..common import TestCase
+
def fixnames():
cfg = h5.get_config()
cfg.complex_names = ('r','i')
-class TestH5(ut.TestCase):
+class TestH5(TestCase):
def test_config(self):
cfg = h5.get_config()
diff --git a/h5py/tests/old/test_h5d_direct_chunk_write.py b/h5py/tests/old/test_h5d_direct_chunk_write.py
index 8407f9e..a1b3b77 100644
--- a/h5py/tests/old/test_h5d_direct_chunk_write.py
+++ b/h5py/tests/old/test_h5d_direct_chunk_write.py
@@ -3,7 +3,7 @@ from __future__ import absolute_import
import h5py
import numpy
-from .common import ut, TestCase
+from ..common import ut, TestCase
@ut.skipUnless(h5py.version.hdf5_version_tuple >= (1, 8, 11), 'Direct Chunk Writing requires HDF5 >= 1.8.11')
diff --git a/h5py/tests/old/test_h5f.py b/h5py/tests/old/test_h5f.py
index 5dbbe02..000067b 100644
--- a/h5py/tests/old/test_h5f.py
+++ b/h5py/tests/old/test_h5f.py
@@ -9,18 +9,15 @@
from __future__ import absolute_import
-try:
- import unittest2 as ut
-except ImportError:
- import unittest as ut
-
import tempfile
import shutil
import os
from h5py import File
+from ..common import TestCase
+
-class TestFileID(ut.TestCase):
+class TestFileID(TestCase):
def test_descriptor_core(self):
with File('TestFileID.test_descriptor_core', driver='core', backing_store=False) as f:
with self.assertRaises(NotImplementedError):
@@ -38,7 +35,7 @@ class TestFileID(ut.TestCase):
shutil.rmtree(dn_tmp)
-class TestCacheConfig(ut.TestCase):
+class TestCacheConfig(TestCase):
def test_simple_gets(self):
dn_tmp = tempfile.mkdtemp('h5py.lowtest.test_h5f.TestFileID.TestCacheConfig.test_simple_gets')
fn_h5 = os.path.join(dn_tmp, 'test.h5')
diff --git a/h5py/tests/old/test_h5p.py b/h5py/tests/old/test_h5p.py
index 468ca54..6c10232 100644
--- a/h5py/tests/old/test_h5p.py
+++ b/h5py/tests/old/test_h5p.py
@@ -16,7 +16,9 @@ except ImportError:
from h5py import h5p, h5f
-class TestLibver(ut.TestCase):
+from ..common import TestCase
+
+class TestLibver(TestCase):
"""
Feature: Setting/getting lib ver bounds
@@ -30,7 +32,7 @@ class TestLibver(ut.TestCase):
plist.get_libver_bounds())
-class TestDA(ut.TestCase):
+class TestDA(TestCase):
'''
Feature: setting/getting chunk cache size on a dataset access property list
'''
@@ -45,7 +47,7 @@ class TestDA(ut.TestCase):
self.assertEqual((nslots, nbytes, w0),
dalist.get_chunk_cache())
-class TestFA(ut.TestCase):
+class TestFA(TestCase):
'''
Feature: setting/getting mdc config on a file access property list
'''
@@ -67,7 +69,7 @@ class TestFA(ut.TestCase):
falist.get_alignment())
-class TestPL(ut.TestCase):
+class TestPL(TestCase):
def test_obj_track_times(self):
"""
tests if the object track times set/get
diff --git a/h5py/tests/old/test_h5t.py b/h5py/tests/old/test_h5t.py
index 08b2503..1223047 100644
--- a/h5py/tests/old/test_h5t.py
+++ b/h5py/tests/old/test_h5t.py
@@ -67,8 +67,6 @@ class TestTypeFloatID(TestCase):
def test_custom_float_promotion(self):
"""Custom floats are correctly promoted to standard floats on read."""
- if h5t.MACHINE == 'ppc64le':
- return
test_filename = self.mktemp()
dataset = 'DS1'
diff --git a/h5py/tests/old/test_objects.py b/h5py/tests/old/test_objects.py
index bd80517..b338ceb 100644
--- a/h5py/tests/old/test_objects.py
+++ b/h5py/tests/old/test_objects.py
@@ -9,14 +9,11 @@
from __future__ import absolute_import
-try:
- import unittest2 as ut
-except ImportError:
- import unittest as ut
-
from h5py import _objects as o
-class TestObjects(ut.TestCase):
+from ..common import TestCase
+
+class TestObjects(TestCase):
def test_invalid(self):
# Check for segfault on close
diff --git a/h5py/tests/old/test_selections.py b/h5py/tests/old/test_selections.py
index 4a9dc21..c58be1d 100644
--- a/h5py/tests/old/test_selections.py
+++ b/h5py/tests/old/test_selections.py
@@ -17,7 +17,7 @@ import numpy as np
import h5py
import h5py._hl.selections2 as sel
-from .common import TestCase, ut
+from ..common import TestCase, ut
class TestTypeGeneration(TestCase):
diff --git a/h5py/tests/old/test_slicing.py b/h5py/tests/old/test_slicing.py
index d36a54e..93e7424 100644
--- a/h5py/tests/old/test_slicing.py
+++ b/h5py/tests/old/test_slicing.py
@@ -22,7 +22,7 @@ import six
import numpy as np
-from .common import ut, TestCase
+from ..common import ut, TestCase
import h5py
from h5py import h5s, h5t, h5d
@@ -284,15 +284,15 @@ class TestFieldNames(BaseSlicing):
if six.PY2:
# Byte strings are only allowed for field names on Py2
self.assertArrayEqual(self.dset[b'a'], self.data['a'])
- self.assertArrayEqual(self.dset[six.u('a')], self.data['a'])
+ self.assertArrayEqual(self.dset[u'a'], self.data['a'])
def test_unicode_names(self):
""" Unicode field names for for read and write """
- self.assertArrayEqual(self.dset[six.u('a')], self.data['a'])
- self.dset[six.u('a')] = 42
+ self.assertArrayEqual(self.dset[u'a'], self.data['a'])
+ self.dset[u'a'] = 42
data = self.data.copy()
data['a'] = 42
- self.assertArrayEqual(self.dset[six.u('a')], data['a'])
+ self.assertArrayEqual(self.dset[u'a'], data['a'])
def test_write(self):
""" Test write with field selections """
diff --git a/h5py/version.py b/h5py/version.py
index f5567a1..86d5ca2 100644
--- a/h5py/version.py
+++ b/h5py/version.py
@@ -22,7 +22,9 @@ import numpy
# needed for our use case
_H5PY_VERSION_CLS = namedtuple("_H5PY_VERSION_CLS", "major minor bugfix pre post dev")
-version_tuple = _H5PY_VERSION_CLS(2, 7, 0, None, None, None)
+hdf5_built_version_tuple = _h5.HDF5_VERSION_COMPILED_AGAINST
+
+version_tuple = _H5PY_VERSION_CLS(2, 7, 1, None, None, None)
version = "{0.major:d}.{0.minor:d}.{0.bugfix:d}".format(version_tuple)
if version_tuple.pre is not None:
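
With hdf5_built_version_tuple exported, code can compare the HDF5 version h5py was compiled against with the one loaded at runtime, e.g.:

    import h5py

    # HDF5 version the extension modules were compiled against
    print(h5py.version.hdf5_built_version_tuple)
    # HDF5 version actually loaded at runtime
    print(h5py.version.hdf5_version_tuple)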
diff --git a/pylintrc b/pylintrc
new file mode 100644
index 0000000..045df2f
--- /dev/null
+++ b/pylintrc
@@ -0,0 +1,377 @@
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=tests
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loaded into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=numpy,h5py
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once). You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use "--disable=all --enable=classes
+# --disable=W"
+#
+# | Checkers | Broken import checks | Other random garbage
+disable=format,design,similarities,cyclic-import,import-error,broad-except,no-self-use,no-name-in-module,invalid-name,abstract-method,star-args,import-self,no-init,locally-disabled,unidiomatic-typecheck
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time. See also the "--disable" option for examples.
+#
+# | Some format checks which are OK
+enable=bad-indentation,mixed-indentation,unnecessary-semicolon,superfluous-parens
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html. You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=no
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors warning, statement which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (RP0004).
+comment=no
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=map,filter
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Regular expression matching correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for method names
+method-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Naming hint for module names
+module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Naming hint for inline iteration names
+inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
+
+# Regular expression matching correct constant names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Naming hint for constant names
+const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression matching correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for variable names
+variable-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for argument names
+argument-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Naming hint for class attribute names
+class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Regular expression matching correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Naming hint for class names
+class-name-hint=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression matching correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for function names
+function-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for attribute names
+attr-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=__.*__
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+# List of optional constructs for which whitespace checking is disabled
+no-space-check=trailing-comma,dict-separator
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make it work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis)
+ignored-modules=
+
+# List of classes names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set).
+ignored-classes=SQLObject
+
+# When zope mode is activated, add a predefined set of Zope acquired attributes
+# to generated-members.
+zope=no
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E0201 when accessed. Python regular
+# expressions are accepted.
+generated-members=REQUEST,acl_users,aq_parent
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_$|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used for
+# instance to not check methods defined in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=stringprep,optparse
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
diff --git a/setup.cfg b/setup.cfg
index 861a9f5..8bfd5a1 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,5 +1,4 @@
[egg_info]
tag_build =
tag_date = 0
-tag_svn_revision = 0
diff --git a/setup.py b/setup.py
index b4dc5b7..c56f4b5 100755
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,7 @@ import os.path as op
import setup_build, setup_configure
-VERSION = '2.7.0'
+VERSION = '2.7.1'
NUMPY_DEP = 'numpy>=1.7'
@@ -36,7 +36,8 @@ SETUP_REQUIRES = RUN_REQUIRES + [NUMPY_DEP, 'Cython>=0.19', 'pkgconfig']
# Needed to avoid trying to install numpy/cython on pythons which the latest
# versions don't support
-if "sdist" in sys.argv and "bdist_wheel" not in sys.argv and "install" not in sys.argv:
+if ("sdist" in sys.argv and "bdist_wheel" not in sys.argv and
+ "install" not in sys.argv) or "check" in sys.argv:
use_setup_requires = False
else:
use_setup_requires = True
@@ -113,6 +114,7 @@ Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
+Programming Language :: Python :: 3.6
Programming Language :: Python :: Implementation :: CPython
Topic :: Scientific/Engineering
Topic :: Database
diff --git a/setup_build.py b/setup_build.py
index ffe19b7..34d456e 100644
--- a/setup_build.py
+++ b/setup_build.py
@@ -49,8 +49,8 @@ if sys.platform.startswith('win'):
('H5_BUILT_AS_DYNAMIC_LIB', None)
])
else:
- COMPILER_SETTINGS['include_dirs'].extend(['/opt/local/include''/usr/local/include'])
- COMPILER_SETTINGS['library_dirs'].extend(['/opt/local/include''/usr/local/include'])
+ COMPILER_SETTINGS['include_dirs'].extend(['/opt/local/include', '/usr/local/include'])
+ COMPILER_SETTINGS['library_dirs'].extend(['/opt/local/include', '/usr/local/include'])
class h5py_build_ext(build_ext):
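
The bug fixed here is Python's implicit adjacent-string concatenation: without the comma, the two directories collapse into one nonexistent search path.

    # Without the comma: a single bogus entry
    print(['/opt/local/include' '/usr/local/include'])
    # -> ['/opt/local/include/usr/local/include']

    # With the comma: two search paths, as intended
    print(['/opt/local/include', '/usr/local/include'])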
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..9dbbe8f
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,93 @@
+[tox]
+envlist = {py26,py27,py33,py34,py35,py36,pypy}-{test}-{deps,mindeps},docs,check-manifest,checkreadme
+
+[testenv]
+deps =
+ deps: numpy>=1.7
+ deps: cython>=0.19
+ mindeps: numpy==1.7
+ mindeps: cython==0.19
+commands =
+ test: python {toxinidir}/ci/fix_paths.py {envsitepackagesdir}
+ test: python -c "from sys import exit; import h5py; exit(0) if h5py.run_tests().wasSuccessful() else exit(1)"
+changedir =
+ test: {toxworkdir}
+passenv =
+ HDF5_DIR
+ TOXPYTHON
+basepython =
+ pypy: {env:TOXPYTHON:pypy}
+ py26: {env:TOXPYTHON:python2.6}
+ py27: {env:TOXPYTHON:python2.7}
+ py33: {env:TOXPYTHON:python3.3}
+ py34: {env:TOXPYTHON:python3.4}
+ py35: {env:TOXPYTHON:python3.5}
+ py36: {env:TOXPYTHON:python3.6}
+
+[testenv:py26-test-deps]
+deps =
+ unittest2
+ numpy>=1.7,<1.11
+ cython>=0.19
+
+[testenv:py33-test-deps]
+deps =
+ numpy>=1.7,<1.12
+ cython>=0.19
+
+[testenv:py26-test-mindeps]
+deps =
+ unittest2
+ numpy==1.7
+ cython==0.19
+
+[testenv:py33-test-mindeps]
+deps =
+ numpy==1.7
+ cython==0.19
+
+[testenv:py34-test-mindeps]
+deps =
+ numpy==1.9
+ cython==0.19
+
+[testenv:py35-test-mindeps]
+deps =
+ numpy==1.10.0.post2
+ cython==0.19
+
+[testenv:py36-test-mindeps]
+deps =
+ numpy==1.12
+ cython==0.19
+
+[testenv:py34-test-mpi4py]
+deps =
+ numpy==1.9
+ cython==0.19
+ mpi4py>=1.3.1
+
+[testenv:docs]
+skip_install=True
+basepython = {env:TOXPYTHON:python}
+changedir=docs
+deps=
+ sphinx
+commands=
+ sphinx-build -W -b html -d {envtmpdir}/doctrees . {envtmpdir}/html
+
+[testenv:check-manifest]
+skip_install=True
+basepython = {env:TOXPYTHON:python}
+deps=check-manifest
+setenv =
+ CHECK_MANIFEST=true
+commands=
+ check-manifest
+
+[testenv:checkreadme]
+skip_install=True
+basepython = {env:TOXPYTHON:python}
+deps=readme_renderer
+commands=
+ python setup.py check -s -r
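
The new configuration is driven per-environment; typical invocations are "tox -e py35-test-deps" to test against Python 3.5 with current dependencies, or "tox -e docs,check-manifest" to build the documentation and verify the manifest (environment names follow the envlist above).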