[h5py] 01/03: Imported Upstream version 2.5.0

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Thu Apr 16 23:19:21 UTC 2015


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/experimental
in repository h5py.

commit 2b6b9bad3fbab2ee79d484686370b18c668925b4
Author: Ghislain Antony Vaillant <ghisvail at gmail.com>
Date:   Fri Apr 17 00:15:09 2015 +0100

    Imported Upstream version 2.5.0
---
 .gitignore                                |  22 ++
 .travis.yml                               |  23 ++
 ANN.rst                                   |  60 ++--
 MANIFEST.in                               |   3 +
 PKG-INFO                                  |  38 ---
 README.rst                                |   1 +
 api_gen.py                                |  17 +-
 docs/Makefile                             | 177 ++++++++++++
 docs/build.rst                            | 160 +++++++++++
 docs/conf.py                              | 260 +++++++++++++++++
 docs/config.rst                           |  65 +++++
 docs/contributing.rst                     | 338 +++++++++++++++++++++++
 docs/faq.rst                              | 209 ++++++++++++++
 docs/high/attr.rst                        | 122 ++++++++
 docs/high/dataset.rst                     | 445 ++++++++++++++++++++++++++++++
 docs/high/dims.rst                        |  92 ++++++
 docs/high/file.rst                        | 183 ++++++++++++
 docs/high/group.rst                       | 436 +++++++++++++++++++++++++++++
 docs/high/index.rst                       |  16 ++
 docs/index.rst                            |  93 +++++++
 docs/licenses.rst                         | 232 ++++++++++++++++
 docs/low.rst                              |   4 +
 docs/mpi.rst                              | 149 ++++++++++
 docs/quick.rst                            | 146 ++++++++++
 docs/refs.rst                             | 129 +++++++++
 docs/special.rst                          | 128 +++++++++
 docs/strings.rst                          | 162 +++++++++++
 docs/swmr.rst                             | 168 +++++++++++
 docs/whatsnew/2.0.rst                     | 177 ++++++++++++
 docs/whatsnew/2.1.rst                     |  61 ++++
 docs/whatsnew/2.2.rst                     | 101 +++++++
 docs/whatsnew/2.3.rst                     |  85 ++++++
 docs/whatsnew/2.4.rst                     |  47 ++++
 docs/whatsnew/index.rst                   |  15 +
 docs_api/Makefile                         | 177 ++++++++++++
 docs_api/automod.py                       | 258 +++++++++++++++++
 docs_api/conf.py                          | 260 +++++++++++++++++
 docs_api/h5.rst                           |  44 +++
 docs_api/h5a.rst                          |  30 ++
 docs_api/h5d.rst                          |  47 ++++
 docs_api/h5f.rst                          |  87 ++++++
 docs_api/h5fd.rst                         |  61 ++++
 docs_api/h5g.rst                          |  63 +++++
 docs_api/h5i.rst                          |  26 ++
 docs_api/h5l.rst                          |  18 ++
 docs_api/h5o.rst                          |  57 ++++
 docs_api/h5p.rst                          |  95 +++++++
 docs_api/h5r.rst                          |  36 +++
 docs_api/h5s.rst                          |  62 +++++
 docs_api/h5t.rst                          | 234 ++++++++++++++++
 docs_api/h5z.rst                          |  65 +++++
 docs_api/index.rst                        |  41 +++
 docs_api/objects.rst                      |   6 +
 examples/swmr_inotify_example.py          |  85 ++++++
 examples/swmr_multiprocess.py             | 116 ++++++++
 h5py.egg-info/PKG-INFO                    |  38 ---
 h5py.egg-info/SOURCES.txt                 | 121 --------
 h5py.egg-info/dependency_links.txt        |   1 -
 h5py.egg-info/requires.txt                |   2 -
 h5py.egg-info/top_level.txt               |   1 -
 h5py/__init__.py                          |  34 ++-
 h5py/_conv.pyx                            |  70 ++---
 h5py/_errors.pyx                          |   6 +-
 h5py/_hdf5.pxd                            | 346 -----------------------
 h5py/_hl/__init__.py                      |   3 +
 h5py/_hl/attrs.py                         | 137 ++++++---
 h5py/_hl/base.py                          | 107 ++++---
 h5py/_hl/dataset.py                       |  90 +++---
 h5py/_hl/datatype.py                      |   2 +
 h5py/_hl/dims.py                          |   6 +-
 h5py/_hl/files.py                         |  68 +++--
 h5py/_hl/filters.py                       |  10 +-
 h5py/_hl/group.py                         |  22 +-
 h5py/_hl/selections.py                    |  28 +-
 h5py/_hl/selections2.py                   |   4 +-
 h5py/_proxy.pyx                           |   4 +-
 h5py/api_functions.txt                    |  19 +-
 h5py/api_types_hdf5.pxd                   |   2 +
 h5py/h5.pyx                               |   5 +
 h5py/h5a.pyx                              |  23 +-
 h5py/h5d.pyx                              |  40 +++
 h5py/h5f.pyx                              |  37 +++
 h5py/h5l.pyx                              |   4 +-
 h5py/h5o.pyx                              |   4 +-
 h5py/h5p.pyx                              |   4 +-
 h5py/h5s.pyx                              |   2 +-
 h5py/highlevel.py                         |  16 +-
 h5py/ipy_completer.py                     |   2 +
 h5py/tests/__init__.py                    |   2 +-
 h5py/tests/common.py                      |  15 +-
 h5py/tests/hl/__init__.py                 |  11 +-
 h5py/tests/hl/test_attribute_create.py    |  47 ++++
 h5py/tests/hl/test_dataset_getitem.py     |   3 +-
 h5py/tests/hl/test_dataset_swmr.py        | 159 +++++++++++
 h5py/tests/hl/test_dims_dimensionproxy.py |   4 +-
 h5py/tests/hl/test_file.py                |   4 +-
 h5py/tests/old/__init__.py                |   5 +-
 h5py/tests/old/common.py                  |  10 +-
 h5py/tests/old/test_attrs.py              |  10 +-
 h5py/tests/old/test_attrs_data.py         |  16 +-
 h5py/tests/old/test_base.py               |  14 +-
 h5py/tests/old/test_dataset.py            | 103 ++++++-
 h5py/tests/old/test_datatype.py           |   8 +-
 h5py/tests/old/test_dimension_scales.py   |   2 +
 h5py/tests/old/test_file.py               |  22 +-
 h5py/tests/old/test_group.py              | 126 +++++++--
 h5py/tests/old/test_h5.py                 |   2 +
 h5py/tests/old/test_h5f.py                |   2 +
 h5py/tests/old/test_h5p.py                |   2 +
 h5py/tests/old/test_h5t.py                |   2 +
 h5py/tests/old/test_objects.py            |   2 +
 h5py/tests/old/test_selections.py         |   4 +-
 h5py/tests/old/test_slicing.py            |  16 +-
 h5py/utils.pyx                            |  29 --
 h5py/version.py                           |   4 +-
 lzf/README.txt                            |  20 +-
 lzf/lzf_filter.c                          |  52 ++--
 other/garbage.py                          |  29 ++
 other/iterate_deadlock.py                 |  36 +++
 other/vlen_leak.py                        |  90 ++++++
 pavement.py                               |  35 +++
 setup.cfg                                 |   5 -
 setup.py                                  |  15 +-
 setup_build.py                            |  44 +++
 setup_configure.py                        |   7 +
 tox.ini                                   |  12 +
 126 files changed, 7471 insertions(+), 988 deletions(-)

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..4bc92a9
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,22 @@
+h5py/h5*.c
+h5py/utils.c
+h5py/_conv.c
+h5py/_proxy.c
+h5py/_objects.c
+h5py/_errors.c
+h5py/config.pxi
+h5py/_hdf5.*
+h5py/*.dll
+*.hdf5
+*.pkl
+h5py/defs.*
+build/
+*.pyc
+dist/
+MANIFEST
+.DS_Store
+/docs/_build
+/docs_api/_build
+/.tox
+/h5py.egg-info
+/*.egg
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..f5d6d1c
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,23 @@
+language: python
+
+notifications:
+  email: false
+
+python:
+  - "2.6"
+  - "2.7"
+  - "3.2"
+  - "3.3"
+  - "3.4"
+
+before_install:
+    - sudo apt-get update -qq
+    - sudo apt-get install -qq libhdf5-serial-dev
+    - if [[ $TRAVIS_PYTHON_VERSION == "2.6" ]]; then pip install unittest2; fi
+    - pip install numpy
+    - pip install --install-option="--no-cython-compile" cython
+
+install: 
+    - python setup.py build -f
+
+script: "python setup.py test"
diff --git a/ANN.rst b/ANN.rst
index a15d730..8f12ab2 100644
--- a/ANN.rst
+++ b/ANN.rst
@@ -1,7 +1,22 @@
-Announcing HDF5 for Python (h5py) 2.4.0
+Announcing HDF5 for Python (h5py) 2.5.0
 ========================================
 
-The h5py team is happy to announce the availability of h5py 2.4.0 (final).
+The h5py team is happy to announce the availability of h5py 2.5.0.
+
+This release introduces experimental support for the highly anticipated
+"Single Writer Multiple Reader" (SWMR) feature in the upcoming HDF5 1.10
+release.  SWMR allows sharing of a single HDF5 file between multiple processes
+without the complexity of MPI or multiprocessing-based solutions.  
+
+This is an experimental feature that should NOT be used in production code.
+We are interested in getting feedback from the broader community with respect
+to performance and the API design.
+
+For more details, check out the h5py user guide:
+http://docs.h5py.org/en/latest/swmr.html
+
+SWMR support was contributed by Ulrik Pedersen.
+
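+A minimal sketch of the intended usage (assumes an h5py/HDF5 build with SWMR
+enabled; the file and dataset names are illustrative)::
+
+    import h5py
+    import numpy as np
+
+    # Writer: create datasets first, then switch the file into SWMR mode
+    f = h5py.File("swmr_demo.h5", 'w', libver='latest')
+    dset = f.create_dataset("data", (0,), maxshape=(None,), dtype='f8')
+    f.swmr_mode = True
+    dset.resize((10,))
+    dset[:] = np.arange(10.0)
+    dset.flush()          # make the new data visible to readers
+
+    # Reader (separate process): open with swmr=True, refresh to see updates
+    r = h5py.File("swmr_demo.h5", 'r', libver='latest', swmr=True)
+    rdset = r["data"]
+    rdset.id.refresh()    # pick up the writer's latest flushed data
+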
 
 What's h5py?
 ------------
@@ -18,43 +33,30 @@ Documentation is at:
 
 http://docs.h5py.org
 
+
 Changes
 -------
 
-This release incorporates a total re-write of the identifier management
-system in h5py.  As part of this refactoring, the entire API is also now
-protected by threading locks.  User-visible changes include:
-
-* Files are now automatically closed when all objects within them
-  are unreachable. Previously, if File.close() was not explicitly called,
-  files would remain open and "leaks" were possible if the File object
-  was lost.
-
-* The entire API is now believed to be thread-safe (feedback welcome!).
-
-* External links now work if the target file is already open.  Previously
-  this was not possible because of a mismatch in the file close strengths.
-  
-* The options to setup.py have changed; a new top-level "configure"
-  command handles options like --hdf5=/path/to/hdf5 and --mpi.  Setup.py 
-  now works correctly under Python 3 when these options are used.
+* Experimental SWMR support
+* Group and AttributeManager classes now inherit from the appropriate ABCs
+* Fixed an issue with 64-bit float VLENS
+* Cython warning cleanups related to "const"
+* Entire code base ported to "six"; 2to3 removed from setup.py
   
-* Cython (0.17+) is now required when building from source.
-  
-* The minimum NumPy version is now 1.6.1.
 
-* Various other enhancements and bug fixes
-  
 Acknowledgements
 ----------------
 
 This release incorporates changes from, among others:
 
-* Matthieu Brucher
-* Laurence Hole
-* John Tyree
-* Pierre de Buyl
-* Matthew Brett
+* Ulrik Pedersen
+* James Tocknell
+* Will Parkin
+* Antony Lee
+* Peter H. Li
+* Peter Colberg
+* Ghislain Antony Vaillant
+
 
 Where to get it
 ---------------
diff --git a/MANIFEST.in b/MANIFEST.in
index d851840..d7a23d2 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -12,3 +12,6 @@ include setup_build.py
 include setup_configure.py
 include ANN.rst
 include README.rst
+recursive-include docs *
+recursive-include docs_api *
+recursive-exclude * .DS_Store
diff --git a/PKG-INFO b/PKG-INFO
deleted file mode 100644
index 0ed11d1..0000000
--- a/PKG-INFO
+++ /dev/null
@@ -1,38 +0,0 @@
-Metadata-Version: 1.1
-Name: h5py
-Version: 2.4.0
-Summary: Read and write HDF5 files from Python
-Home-page: http://www.h5py.org
-Author: Andrew Collette
-Author-email: andrew dot collette at gmail dot com
-License: UNKNOWN
-Download-URL: https://pypi.python.org/pypi/h5py
-Description: 
-        The h5py package provides both a high- and low-level interface to the HDF5
-        library from Python. The low-level interface is intended to be a complete
-        wrapping of the HDF5 API, while the high-level component supports  access to
-        HDF5 files, datasets and groups using established Python and NumPy concepts.
-        
-        A strong emphasis on automatic conversion between Python (Numpy) datatypes and
-        data structures and their HDF5 equivalents vastly simplifies the process of
-        reading and writing data from Python.
-        
-        Supports HDF5 versions 1.8.4 and higher.  On Windows, HDF5 is included with
-        the installer.
-        
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: Intended Audience :: Information Technology
-Classifier: Intended Audience :: Science/Research
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Programming Language :: Python
-Classifier: Topic :: Scientific/Engineering
-Classifier: Topic :: Database
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Classifier: Operating System :: Unix
-Classifier: Operating System :: POSIX :: Linux
-Classifier: Operating System :: MacOS :: MacOS X
-Classifier: Operating System :: Microsoft :: Windows
-Requires: numpy (>=1.6.1)
-Requires: Cython (>=0.17)
diff --git a/README.rst b/README.rst
index 7bc1303..50a3b80 100644
--- a/README.rst
+++ b/README.rst
@@ -20,6 +20,7 @@ You need, at a minimum:
 
 * Python 2.6, 2.7, 3.2, 3.3, or 3.4
 * NumPy 1.6.1 or later
+* The "six" package for Python 2/3 compatibility
 
 To build on UNIX:
 
diff --git a/api_gen.py b/api_gen.py
index fbf0d08..e379f9c 100644
--- a/api_gen.py
+++ b/api_gen.py
@@ -56,7 +56,7 @@ class Line(object):
         
     PATTERN = re.compile("""(?P<mpi>(MPI)[ ]+)?
                             (?P<error>(ERROR)[ ]+)?
-                            (?P<version>([0-9]\.[0-9]\.[0-9]))?
+                            (?P<version>([0-9]+\.[0-9]+\.[0-9]+))?
                             ([ ]+)?
                             (?P<code>(unsigned[ ]+)?[a-zA-Z_]+[a-zA-Z0-9_]*\**)[ ]+
                             (?P<fname>[a-zA-Z_]+[a-zA-Z0-9_]*)[ ]*
@@ -64,11 +64,11 @@ class Line(object):
                             """, re.VERBOSE)
 
     SIG_PATTERN = re.compile("""
-                            (unsigned[ ]+)?
-                            (?:[a-zA-Z_]+[a-zA-Z0-9_]*\**)
-                            [ ]+[ *]*
-                            (?P<param>[a-zA-Z_]+[a-zA-Z0-9_]*)
-                            """, re.VERBOSE)
+                             (?:unsigned[ ]+)?
+                             (?:[a-zA-Z_]+[a-zA-Z0-9_]*\**)
+                             [ ]+[ *]*
+                             (?P<param>[a-zA-Z_]+[a-zA-Z0-9_]*)
+                             """, re.VERBOSE)
                             
     def __init__(self, text):
         """ Break the line into pieces and populate object attributes.
@@ -91,10 +91,11 @@ class Line(object):
         self.fname = parts['fname']
         self.sig = parts['sig']
 
-        self.args = self.SIG_PATTERN.findall(self.sig)
+        sig_const_stripped = self.sig.replace('const', '')
+        self.args = self.SIG_PATTERN.findall(sig_const_stripped)
         if self.args is None:
             raise ValueError("Invalid function signature: {0}".format(self.sig))
-        self.args = ", ".join(x[1] for x in self.args)
+        self.args = ", ".join(self.args)
 
 
 raw_preamble = """\
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..e728e9b
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,177 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  xml        to make Docutils-native XML files"
+	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	rm -rf $(BUILDDIR)/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/h5py.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/h5py.qhc"
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/h5py"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/h5py"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through platex and dvipdfmx..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo
+	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+	@echo "Run \`make' in that directory to run these through makeinfo" \
+	      "(use \`make info' here to do that automatically)."
+
+info:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo "Running Texinfo files through makeinfo..."
+	make -C $(BUILDDIR)/texinfo info
+	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+	@echo
+	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
+
+xml:
+	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+	@echo
+	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+	@echo
+	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/docs/build.rst b/docs/build.rst
new file mode 100644
index 0000000..f626b45
--- /dev/null
+++ b/docs/build.rst
@@ -0,0 +1,160 @@
+.. _install:
+
+Installation
+============
+
+
+For Python beginners
+--------------------
+
+It can be a pain to install NumPy, HDF5, h5py, Cython and other dependencies.
+If you're just starting out, by far the easiest approach is to install h5py via
+your package manager (``apt-get`` or similar), or by using one of the major
+science-oriented Python distributions:
+
+* `Anaconda <http://continuum.io/downloads>`_ (Linux, Mac, Windows)
+* `PythonXY <https://code.google.com/p/pythonxy/>`_ (Windows)
+
+
+Installing on Windows
+---------------------
+
+You will need:
+
+  * Python 2.6, 2.7, 3.2, 3.3 or 3.4 (from Python.org)
+  * NumPy 1.6.1 or newer
+  * The "six" Python 2/3 compatibility package
+
+Download the installer from http://www.h5py.org and run it.  HDF5 is
+included.
+
+.. note::
+
+    If you are using Anaconda, PythonXY or another non-Python.org
+    distribution, you should instead install h5py via your distribution's
+    own package manager.
+
+
+Installing on Linux and Mac OS X
+--------------------------------
+
+System dependencies
+~~~~~~~~~~~~~~~~~~~
+
+You will need:
+
+* Python 2.6, 2.7, 3.2, 3.3, or 3.4 with development headers (``python-dev`` or similar)
+* HDF5 1.8.4 or newer, shared library version with development headers (``libhdf5-dev`` or similar)
+
+On Mac OS X, `homebrew <http://brew.sh>`_ is a reliable way of getting
+Python, HDF5 and other dependencies set up.  It is also safe to use h5py
+with the OS X system Python.
+
+Install with pip
+~~~~~~~~~~~~~~~~
+
+Simply run::
+
+    $ pip install h5py
+    
+All dependencies are installed automatically.
+
+Via setup.py
+~~~~~~~~~~~~
+
+You will need:
+
+* The h5py tarball from http://www.h5py.org.
+* NumPy 1.6.1 or newer
+* `Cython <http://cython.org>`_ 0.17 or newer
+
+::
+
+    $ tar xzf h5py-X.Y.Z.tar.gz
+    $ cd h5py
+    $ python setup.py install
+
+
+Running the test suite
+----------------------
+
+With the tarball version of h5py::
+
+    $ python setup.py build
+    $ python setup.py test
+
+After installing h5py::
+
+    >>> import h5py
+    >>> h5py.run_tests()
+
+
+Custom installation
+-------------------
+
+You can specify build options for h5py with the ``configure`` option to
+setup.py.  Options may be given together or separately::
+
+    $ python setup.py configure --hdf5=/path/to/hdf5
+    $ python setup.py configure --hdf5-version=X.Y.Z
+    $ python setup.py configure --mpi
+    
+Note the ``--hdf5-version`` option is generally not needed, as h5py 
+auto-detects the installed version of HDF5 (even for custom locations).
+
+Once set, build options apply to all future builds in the source directory.
+You can reset to the defaults with the ``--reset`` option::
+
+    $ python setup.py configure --reset
+
+You can also configure h5py using environment variables.  This is handy
+when installing via ``pip``, as you don't have direct access to setup.py::
+
+    $ HDF5_DIR=/path/to/hdf5 pip install h5py
+    $ HDF5_VERSION=X.Y.Z pip install h5py
+    
+Here's a list of all the configure options currently supported:
+
+======================= =========================== ===========================
+Option                  Via setup.py                Via environment variable
+======================= =========================== ===========================
+Custom path to HDF5     ``--hdf5=/path/to/hdf5``    ``HDF5_DIR=/path/to/hdf5``
+Force HDF5 version      ``--hdf5-version=X.Y.Z``    ``HDF5_VERSION=X.Y.Z``
+Enable MPI mode         ``--mpi``                   (none)
+======================= =========================== ===========================
+
+
+Building against Parallel HDF5
+------------------------------
+
+If you just want to build with ``mpicc``, and don't care about using Parallel
+HDF5 features in h5py itself::
+
+    $ export CC=mpicc
+    $ python setup.py install
+
+If you want access to the full Parallel HDF5 feature set in h5py
+(:ref:`parallel`), you will have to build in MPI mode.  Right now this must
+be done with command-line options from the h5py tarball.
+
+**You will need a shared-library build of Parallel HDF5 (i.e. built with
+./configure --enable-shared --enable-parallel).**
+
+To build in MPI mode, use the ``--mpi`` option to ``setup.py configure``::
+
+    $ export CC=mpicc
+    $ python setup.py configure --mpi
+    $ python setup.py build
+
+See also :ref:`parallel`.
+
+
+Help! It didn't work!
+---------------------
+
+You may wish to check the :ref:`faq` first for common installation problems.
+
+Then, feel free to ask the discussion group
+`at Google Groups <http://groups.google.com/group/h5py>`_. There's
+only one discussion group for h5py, so you're likely to get help directly
+from the maintainers.
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..e95bf26
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,260 @@
+# -*- coding: utf-8 -*-
+#
+# h5py documentation build configuration file, created by
+# sphinx-quickstart on Fri Jan 31 11:23:59 2014.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = ['sphinx.ext.intersphinx']
+
+intersphinx_mapping = {'low': ('http://api.h5py.org', None)}
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'h5py'
+copyright = u'2014, Andrew Collette and contributors'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '2.5'
+# The full version, including alpha/beta/rc tags.
+release = '2.5.0'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'h5pydoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+  ('index', 'h5py.tex', u'h5py Documentation',
+   u'Andrew Collette and contributors', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    ('index', 'h5py', u'h5py Documentation',
+     [u'Andrew Collette and contributors'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+  ('index', 'h5py', u'h5py Documentation',
+   u'Andrew Collette and contributors', 'h5py', 'One line description of project.',
+   'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
diff --git a/docs/config.rst b/docs/config.rst
new file mode 100644
index 0000000..02f9d37
--- /dev/null
+++ b/docs/config.rst
@@ -0,0 +1,65 @@
+Configuring h5py
+================
+
+Library configuration
+---------------------
+
+A few library options are available to change the behavior of the library.
+You can get a reference to the global library configuration object via the
+function ``h5py.get_config()``.  This object supports the following attributes:
+
+    **complex_names**
+        Set to a 2-tuple of strings (real, imag) to control how complex numbers
+        are saved.  The default is ('r','i').
+
+    **bool_names**
+        Booleans are saved as HDF5 enums.  Set this to a 2-tuple of strings
+        (false, true) to control the names used in the enum.  The default
+        is ("FALSE", "TRUE").
+
+
+IPython
+-------
+
+H5py ships with a custom ipython completer, which provides object introspection
+and tab completion for h5py objects in an ipython session. For example, if a
+file contains 3 groups, "foo", "bar", and "baz"::
+
+   In [4]: f['b<TAB>
+   bar   baz
+
+   In [4]: f['f<TAB>
+   # Completes to:
+   In [4]: f['foo'
+
+   In [4]: f['foo'].<TAB>
+   f['foo'].attrs            f['foo'].items            f['foo'].ref
+   f['foo'].copy             f['foo'].iteritems        f['foo'].require_dataset
+   f['foo'].create_dataset   f['foo'].iterkeys         f['foo'].require_group
+   f['foo'].create_group     f['foo'].itervalues       f['foo'].values
+   f['foo'].file             f['foo'].keys             f['foo'].visit
+   f['foo'].get              f['foo'].name             f['foo'].visititems
+   f['foo'].id               f['foo'].parent
+
+The easiest way to enable the custom completer is to do the following in an
+IPython session::
+
+   In  [1]: import h5py
+
+   In [2]: h5py.enable_ipython_completer()
+
+It is also possible to configure IPython to enable the completer every time you
+start a new session. For >=ipython-0.11, "h5py.ipy_completer" just needs to be
+added to the list of extensions in your ipython config file, for example
+:file:`~/.config/ipython/profile_default/ipython_config.py` (if this file does
+not exist, you can create it by invoking `ipython profile create`)::
+
+   c = get_config()
+   c.InteractiveShellApp.extensions = ['h5py.ipy_completer']
+
+For <ipython-0.11, the completer can be enabled by adding the following lines
+to the :func:`main` in :file:`.ipython/ipy_user_conf.py`::
+
+   def main():
+       ip.ex('from h5py import ipy_completer')
+       ip.ex('ipy_completer.load_ipython_extension()')
diff --git a/docs/contributing.rst b/docs/contributing.rst
new file mode 100644
index 0000000..4898464
--- /dev/null
+++ b/docs/contributing.rst
@@ -0,0 +1,338 @@
+Bug Reports & Contributions
+===========================
+
+Contributions and bug reports are welcome from anyone!  Some of the best
+features in h5py, including thread support, dimension scales, and the
+scale-offset filter, came from user code contributions.
+
+Since we use GitHub, the workflow will be familiar to many people.
+If you have questions about the process or about the details of implementing
+your feature, always feel free to ask on the Google Groups list, either
+by emailing:
+
+     h5py at googlegroups.com
+
+or via the web interface at:
+
+    https://groups.google.com/forum/#!forum/h5py
+
+Anyone can post to this list. Your first message will be approved by a
+moderator, so don't worry if there's a brief delay.
+
+This guide is divided into three sections.  The first describes how to file
+a bug report.
+
+The second describes the mechanics of
+how to submit a contribution to the h5py project; for example, how to 
+create a pull request, which branch to base your work on, etc.
+We assume you're familiar with Git, the version control system used by h5py.
+If not, `here's a great place to start <http://git-scm.com/book>`_.
+
+Finally, we describe the various subsystems inside h5py, and give
+technical guidance as to how to implement your changes.  
+
+
+How to File a Bug Report
+------------------------
+
+Bug reports are always welcome!  The issue tracker is at:
+
+    http://github.com/h5py/h5py/issues
+
+
+If you're unsure whether you've found a bug
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Always feel free to ask on the mailing list (h5py at Google Groups). 
+Discussions there are seen by lots of people and are archived by Google.
+Even if the issue you're having turns out not to be a bug in the end, other
+people can benefit from a record of the conversation.
+
+By the way, nobody will get mad if you file a bug and it turns out to be
+something else.  That's just how software development goes.
+
+
+What to include
+~~~~~~~~~~~~~~~
+
+When filing a bug, there are two things you should include.  The first is
+the output of ``h5py.version.info``::
+
+    >>> import h5py
+    >>> print h5py.version.info
+
+The second is a detailed explanation of what went wrong.  Unless the bug
+is really trivial, **include code if you can**, either via GitHub's
+inline markup::
+
+    ```
+        import h5py
+        h5py.explode()    # Destroyed my computer!
+    ```
+
+or by uploading a code sample to `Github Gist <http://gist.github.com>`_.
+
+How to Get Your Code into h5py
+------------------------------
+
+This section describes how to contribute changes to the h5py code base.
+Before you start, be sure to read the h5py license and contributor
+agreement in "license.txt".  You can find this in the source distribution,
+or view it online at the main h5py repository at GitHub.
+
+The basic workflow is to clone h5py with git, make your changes in a topic
+branch, and then create a pull request at GitHub asking to merge the changes
+into the main h5py project.
+
+Here are some tips to getting your pull requests accepted:
+
+1. Let people know you're working on something.  This could mean posting a
+   comment in an open issue, or sending an email to the mailing list.  There's
+   nothing wrong with just opening a pull request, but it might save you time
+   if you ask for advice first.
+2. Keep your changes focused.  If you're fixing multiple issues, file multiple
+   pull requests.  Try to keep the amount of reformatting clutter small so
+   the maintainers can easily see what you've changed in a diff.
+3. Unit tests are mandatory for new features.  This doesn't mean hundreds
+   (or even dozens) of tests!  Just enough to make sure the feature works as
+   advertised.  The maintainers will let you know if more are needed.
+
+
+Clone the h5py repository
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The best way to do this is by signing in to GitHub and cloning the
+h5py project directly.  You'll end up with a new repository under your
+account; for example, if your username is ``yourname``, the repository 
+would be at http://github.com/yourname/h5py.
+
+Then, clone your new copy of h5py to your local machine::
+
+    $ git clone http://github.com/yourname/h5py
+
+
+Create a topic branch for your feature
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you're fixing a bug, you'll want to check out a branch against the
+appropriate stable branch.  For example, to fix a bug you found in version
+2.1.3, you'll want to check out against branch "2.1"::
+
+    $ git checkout -b bugfix 2.1
+
+If you're contributing a new feature, it's appropriate to develop against the
+"master" branch, so you would instead do::
+
+    $ git checkout -b newfeature master
+
+The exact name of the branch can be anything you want.  For bug fixes, one
+approach is to put the issue number in the branch name.
+
+
+Implement the feature!
+~~~~~~~~~~~~~~~~~~~~~~
+
+You can implement the feature as a number of small changes, or as one big
+commit; there's no project policy.  Double-check to make sure you've
+included all your files; run ``git status`` and check the output.
+
+
+Push your changes back and open a pull request
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Push your topic branch back up to your GitHub clone::
+
+    $ git push origin newfeature
+
+Then, `create a pull request <https://help.github.com/articles/creating-a-pull-request>`_ based on your topic branch. 
+
+
+Work with the maintainers
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Your pull request might be accepted right away.  More commonly, the maintainers
+will post comments asking you to fix minor things, like add a few tests, clean
+up the style to be PEP-8 compliant, etc.
+
+The pull request page also shows whether the project builds correctly,
+using Travis CI. Check to see if the build succeeded (takes about 5 minutes),
+and if not, try to modify your changes to make it work.
+
+When making changes after creating your pull request, just add commits to
+your topic branch and push them to your GitHub repository.  Don't try to
+rebase or open a new pull request!  We don't mind having a few extra
+commits in the history, and it's helpful to keep all the history together
+in one place.
+
+
+How to Modify h5py
+------------------
+
+This section is a little more involved, and provides tips on how to modify
+h5py.  The h5py package is built in layers.  Starting from the bottom, they
+are:
+
+1. The HDF5 C API (provided by libhdf5)
+2. Auto-generated Cython wrappers for the C API (``api_gen.py``)
+3. Low-level interface, written in Cython, using the wrappers from (2)
+4. High-level interface, written in Python, with things like ``h5py.File``.
+5. Unit test code
+
+Rather than talk about the layers in an abstract way, the parts below are
+guides to adding specific functionality to various parts of h5py.
+Most sections span at least two or three of these layers.
+
+
+Adding a function from the HDF5 C API
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This is one of the most common contributed changes.  The example below shows
+how one would add the function ``H5Dget_storage_size``,
+which determines the space on disk used by an HDF5 dataset.  This function
+is already partially wrapped in h5py, so you can see how it works.
+
+It's recommended that
+you follow along, if not by actually adding the feature then by at least
+opening the various files as we work through the example.
+
+First, get ahold of
+the function signature; the easiest place for this is at the `online
+HDF5 Reference Manual <http://www.hdfgroup.org/HDF5/doc/RM/RM_H5Front.html>`_.
+Then, add the function's C signature to the file ``api_functions.txt``::
+
+  hsize_t   H5Dget_storage_size(hid_t dset_id)
+
+This particular signature uses types (``hsize_t``, ``hid_t``) which are already
+defined elsewhere.  But if
+the function you're adding needs a struct or enum definition, you can
+add it using Cython code to the file ``api_types_hdf5.pxd``.
+
+The next step is to add a Cython function or method which calls the function
+you added.  The h5py modules follow the naming convention
+of the C API; functions starting with ``H5D`` are wrapped in ``h5d.pyx``.
+
+Opening ``h5d.pyx``, we notice that since this function takes a dataset
+identifier as the first argument, it belongs as a method on the DatasetID
+object.  We write a wrapper method::
+
+    def get_storage_size(self):
+        """ () => LONG storage_size
+
+            Determine the amount of file space required for a dataset.  Note
+            this only counts the space which has actually been allocated; it
+            may even be zero.
+        """
+        return H5Dget_storage_size(self.id)
+
+The first line of the docstring gives the method signature.
+This is necessary because Cython will use a "generic" signature like
+``method(*args, **kwds)`` when the file is compiled.  The h5py documentation
+system will extract the first line and use it as the signature.
+
+Next, we decide whether we want to add access to this function to the 
+high-level interface.  That means users of the top-level ``h5py.Dataset``
+object will be able to see how much space on disk their files use.  The
+high-level interface is implemented in the subpackage ``h5py._hl``, and
+the Dataset object is in module ``dataset.py``.  Opening it up, we add
+a property on the ``Dataset`` object::
+
+    @property
+    def storagesize(self):
+        """ Size (in bytes) of this dataset on disk. """
+        return self.id.get_storage_size()
+
+You'll see that the low-level ``DatasetID`` object is available on the
+high-level ``Dataset`` object as ``obj.id``.  This is true of all the
+high-level objects, like ``File`` and ``Group`` as well.
+
+Finally (and don't skip this step), we write **unit tests** for this feature.
+Since the feature is ultimately exposed at the high-level interface, it's OK
+to write tests for the ``Dataset.storagesize`` property only.  Unit tests for
+the high-level interface are located in the "tests" subfolder, right near
+``dataset.py``.  
+
+It looks like the right file is ``test_dataset.py``. Unit tests are
+implemented as methods on custom ``unittest.TestCase`` subclasses;
+each new feature should be tested by its own new class.  In the
+``test_dataset`` module, we see there's already a subclass called
+``BaseDataset``, which implements some simple set-up and cleanup methods and
+provides a ``h5py.File`` object as ``obj.f``.  We'll base our test class on
+that::
+
+    class TestStorageSize(BaseDataset):
+
+        """
+            Feature: Dataset.storagesize indicates how much space is used.
+        """
+
+        def test_empty(self):
+            """ Empty datasets take no space on disk """
+            dset = self.f.create_dataset("x", (100,100))
+            self.assertEqual(dset.storagesize, 0)
+
+        def test_data(self):
+            """ Storage size is correct for non-empty datasets """
+            dset = self.f.create_dataset("x", (100,), dtype='uint8')
+            dset[...] = 42
+            self.assertEqual(dset.storagesize, 100)
+
+This set of tests would be adequate to get a pull request approved.  We don't
+test every combination under the sun (different ranks, datasets with more
+than 2**32 elements, datasets with the string "kumquat" in the name...), but
+the basic, commonly encountered set of conditions.
+
+To build and test our changes, we have to do a few things.  First of all, 
+run the file ``api_gen.py`` to re-generate the Cython wrappers from
+``api_functions.txt``::
+
+    $ python api_gen.py
+
+Then build the project, which recompiles ``h5d.pyx``::
+
+    $ python setup.py build
+
+Finally, run the test suite, which includes the two methods we just wrote::
+
+    $ python setup.py test
+
+If the tests pass, the feature is ready for a pull request.
+
+
+Adding a function only available in certain versions of HDF5
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+At the moment, h5py must be backwards-compatible all the way back to
+HDF5 1.8.4.  Starting with h5py 2.2.0, it's possible to conditionally
+include functions which only appear in newer versions of HDF5.  It's also
+possible to mark functions which require Parallel HDF5.  For example, the
+function ``H5Fset_mpi_atomicity`` was introduced in HDF5 1.8.9 and requires
+Parallel HDF5.  Specifiers before the signature in ``api_functions.txt``
+communicate this::
+
+  MPI 1.8.9 herr_t H5Fset_mpi_atomicity(hid_t file_id, hbool_t flag)
+
+You can specify "MPI", a version number in "X.Y.Z" format, both, or neither.
+
+In the Cython code, these show up as "preprocessor" defines ``MPI`` and
+``HDF5_VERSION``.  So the low-level implementation (as a method on
+``h5py.h5f.FileID``) looks like this::
+
+    IF MPI and HDF5_VERSION >= (1, 8, 9):
+
+        def set_mpi_atomicity(self, bint atomicity):
+            """ (BOOL atomicity)
+
+            For MPI-IO driver, set to atomic (True), which guarantees sequential 
+            I/O semantics, or non-atomic (False), which improves performance.
+
+            Default is False.
+
+            Feature requires: 1.8.9 and Parallel HDF5
+            """
+            H5Fset_mpi_atomicity(self.id, <hbool_t>atomicity)
+
+High-level code can check the version of the HDF5 library, or check to see if
+the method is present on ``FileID`` objects.
+
+
diff --git a/docs/faq.rst b/docs/faq.rst
new file mode 100644
index 0000000..58b75fc
--- /dev/null
+++ b/docs/faq.rst
@@ -0,0 +1,209 @@
+.. _faq:
+
+FAQ
+===
+
+
+What datatypes are supported?
+-----------------------------
+
+Below is a complete list of types for which h5py supports reading, writing and
+creating datasets. Each type is mapped to a native NumPy type.
+
+Fully supported types:
+
+=========================           ============================================    ======================
+Type                                Precisions                                      Notes
+=========================           ============================================    ======================
+Integer                             1, 2, 4 or 8 byte, BE/LE, signed/unsigned
+Float                               2, 4, 8, 12, 16 byte, BE/LE
+Complex                             8 or 16 byte, BE/LE                             Stored as HDF5 struct
+Compound                            Arbitrary names and offsets
+Strings (fixed-length)              Any length
+Strings (variable-length)           Any length, ASCII or Unicode
+Opaque (kind 'V')                   Any length
+Boolean                             NumPy 1-byte bool                               Stored as HDF5 enum
+Array                               Any supported type
+Enumeration                         Any NumPy integer type                          Read/write as integers
+References                          Region and object
+=========================           ============================================    ======================
+
+Unsupported types:
+
+=========================           ============================================
+Type                                Status                                 
+=========================           ============================================
+HDF5 VLEN (non-string)              Pull requests welcome
+HDF5 "time" type
+NumPy "U" strings                   No HDF5 equivalent
+NumPy generic "O"                   Not planned
+=========================           ============================================
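+
+As a quick sketch, a few of the fully supported types in use (the file and
+dataset names are illustrative)::
+
+    import h5py
+    import numpy as np
+
+    with h5py.File("types_demo.h5", 'w') as f:
+        f.create_dataset("ints", (10,), dtype='<i4')     # little-endian int32
+        f.create_dataset("fixed", (10,), dtype='S10')    # fixed-length strings
+        comp = np.dtype([('x', 'f8'), ('y', 'f8')])
+        f.create_dataset("points", (10,), dtype=comp)    # compound type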
+
+
+What compression/processing filters are supported?
+--------------------------------------------------
+
+=================================== =========================================== ============================
+Filter                              Function                                    Availability
+=================================== =========================================== ============================
+DEFLATE/GZIP                        Standard HDF5 compression                   All platforms
+SHUFFLE                             Increase compression ratio                  All platforms
+FLETCHER32                          Error detection                             All platforms
+Scale-offset                        Integer/float scaling and truncation        All platforms
+SZIP                                Fast, patented compression for int/float    * UNIX: if supplied with HDF5.
+                                                                                * Windows: read-only
+`LZF <http://alfven.org/lzf>`_      Very fast compression, all types            Ships with h5py, C source
+                                                                                available
+=================================== =========================================== ============================
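+
+For instance, a minimal sketch of enabling several of these filters at
+dataset creation time (file name and parameters are illustrative)::
+
+    import h5py
+    import numpy as np
+
+    with h5py.File("filters_demo.h5", 'w') as f:
+        f.create_dataset("compressed", data=np.arange(1000, dtype='f4'),
+                         compression='gzip', compression_opts=4,
+                         shuffle=True, fletcher32=True)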
+
+
+What file drivers are available?
+--------------------------------
+
+A number of different HDF5 "drivers", which provide different modes of access
+to the filesystem, are accessible in h5py via the high-level interface. The 
+currently supported drivers are:
+
+=================================== =========================================== ============================
+Driver                              Purpose                                     Notes
+=================================== =========================================== ============================
+sec2                                Standard optimized driver                   Default on UNIX/Windows
+stdio                               Buffered I/O using stdio.h
+core                                In-memory file (optionally backed to disk)
+family                              Multi-file driver
+mpio                                Parallel HDF5 file access
+=================================== =========================================== ============================
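+
+As a sketch, a driver is selected with the ``driver`` keyword when opening a
+file (the file name here is illustrative)::
+
+    import h5py
+
+    # In-memory file via the "core" driver; discarded on close
+    f = h5py.File("never_written.h5", 'w', driver='core', backing_store=False)
+    f.create_dataset("x", data=[1, 2, 3])
+    f.close()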
+
+
+What's the difference between h5py and PyTables?
+------------------------------------------------
+
+The two projects have different design goals. PyTables presents a database-like
+approach to data storage, providing features like indexing and fast "in-kernel"
+queries on dataset contents. It also has a custom system to represent data types.
+
+In contrast, h5py is an attempt to map the HDF5 feature set to NumPy as closely
+as possible. For example, the high-level type system uses NumPy dtype objects
+exclusively, and method and attribute naming follows Python and NumPy
+conventions for dictionary and array access (i.e. ".dtype" and ".shape"
+attributes for datasets, ``group[name]`` indexing syntax for groups, etc).
+
+Underneath the "high-level" interface to h5py (i.e. NumPy-array-like objects;
+what you'll typically be using) is a large Cython layer which calls into C.
+This "low-level" interface provides access to nearly all of the HDF5 C API.
+This layer is object-oriented with respect to HDF5 identifiers, supports
+reference counting, automatic translation between NumPy and HDF5 type objects,
+translation between the HDF5 error stack and Python exceptions, and more.
+
+This greatly simplifies the design of the complicated high-level interface by
+relying on the "Pythonicity" of the C API wrapping.
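+
+For instance, every high-level object exposes its low-level identifier via
+the ``.id`` attribute (a small sketch; output omitted)::
+
+    >>> f = h5py.File('foo.hdf5', 'w')
+    >>> fid = f.id                # low-level h5py.h5f.FileID instance
+    >>> fid.get_filesize()        # thin wrapper around the C call H5Fget_filesize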
+
+There's also a PyTables perspective on this question at the 
+`PyTables FAQ <http://www.pytables.org/moin/FAQ#HowdoesPyTablescomparewiththeh5pyproject.3F>`_.
+
+
+Does h5py support Parallel HDF5?
+--------------------------------
+
+Starting with version 2.2, h5py supports Parallel HDF5 on UNIX platforms.
+``mpi4py`` is required, as well as an MPIO-enabled build of HDF5.
+Check out :ref:`parallel` for details.
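+
+A minimal sketch of collective file access (assuming an MPI-enabled build of
+HDF5 and a working ``mpi4py``; run with e.g. ``mpiexec -n 4 python demo.py``)::
+
+    from mpi4py import MPI
+    import h5py
+
+    rank = MPI.COMM_WORLD.rank   # process ID
+
+    # Every process opens the same file collectively via the "mpio" driver
+    f = h5py.File('parallel_test.hdf5', 'w', driver='mpio', comm=MPI.COMM_WORLD)
+    dset = f.create_dataset('test', (4,), dtype='i')
+    dset[rank] = rank            # each process writes its own element
+    f.close()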
+
+
+Variable-length (VLEN) data
+---------------------------
+
+Variable-length byte and unicode strings are supported by h5py. However, generic
+(non-string) VLEN data cannot yet be processed. Please note that since strings
+in HDF5 are encoded as ASCII or UTF-8, NUL bytes are not allowed in strings.
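+
+For example, variable-length strings use a special dtype created with
+``h5py.special_dtype`` (a short sketch)::
+
+    >>> dt = h5py.special_dtype(vlen=bytes)    # variable-length byte strings
+    >>> dset = f.create_dataset("vlen_data", (10,), dtype=dt)
+    >>> dset[0] = b"a string of any length"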
+
+
+Enumerated types
+----------------
+HDF5 enumerated types are supported.  As NumPy has no native enum type, they
+are treated on the Python side as integers with a small amount of metadata
+attached to the dtype.
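+
+For example, an enum dtype can be created and inspected with
+``h5py.special_dtype`` and ``h5py.check_dtype`` (a short sketch)::
+
+    >>> dt = h5py.special_dtype(enum=('i', {"RED": 0, "GREEN": 1, "BLUE": 42}))
+    >>> h5py.check_dtype(enum=dt)       # recover the name -> value mapping
+    {'BLUE': 42, 'GREEN': 1, 'RED': 0}
+    >>> dset = f.create_dataset("enum_data", (100,), dtype=dt)
+    >>> dset[0] = 42                    # read/write as plain integers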
+
+NumPy object types
+------------------
+Storage of generic objects (NumPy dtype "O") is not implemented and not
+planned to be implemented, as the design goal for h5py is to expose the HDF5
+feature set, not add to it.  However, objects pickled to the "plain-text"
+protocol (protocol 0) can be stored in HDF5 as strings.
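+
+A sketch of this round trip (Python 2 shown, where a protocol-0 pickle is a
+plain byte string)::
+
+    >>> import pickle
+    >>> f.attrs["saved"] = pickle.dumps([1, 2, 'three'], protocol=0)
+    >>> pickle.loads(f.attrs["saved"])
+    [1, 2, 'three']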
+
+Appending data to a dataset
+---------------------------
+
+The short response is that h5py is NumPy-like, not database-like. Unlike the
+HDF5 packet-table interface (and PyTables), there is no concept of appending
+rows. Rather, you can expand the shape of the dataset to fit your needs. For
+example, if I have a series of time traces 1024 points long, I can create an 
+extendable dataset to store them:
+
+    >>> dset = myfile.create_dataset("MyDataset", (10, 1024), maxshape=(None, 1024))
+    >>> dset.shape
+    (10, 1024)
+
+The keyword argument "maxshape" tells HDF5 that the first dimension of the 
+dataset can be expanded to any size, while the second dimension is limited to a
+maximum size of 1024. We create the dataset with room for an initial ensemble
+of 10 time traces. If we later want to store 10 more time traces, the dataset
+can be expanded along the first axis:
+
+    >>> dset.resize(20, axis=0)   # or dset.resize((20,1024))
+    >>> dset.shape
+    (20, 1024)
+
+Each axis can be resized up to the maximum values in "maxshape". Things to note:
+
+* Unlike NumPy arrays, when you resize a dataset the indices of existing data
+  do not change; each axis grows or shrinks independently
+* The dataset rank (number of dimensions) is fixed when it is created
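+
+Putting this together, appending one more trace might look like this (a
+sketch; ``new_trace`` stands in for a hypothetical 1024-point array)::
+
+    >>> new_trace = np.zeros(1024)      # hypothetical new data
+    >>> n = dset.shape[0]
+    >>> dset.resize(n + 1, axis=0)      # grow by one row
+    >>> dset[n, :] = new_trace          # write into the new row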
+
+Unicode
+-------
+As of h5py 2.0.0, Unicode is supported for file names as well as for objects 
+in the file. When object names are read, they are returned as Unicode by default.
+
+However, HDF5 has no predefined datatype to represent fixed-width UTF-16 or
+UTF-32 (NumPy format) strings. Therefore, the NumPy 'U' datatype is not supported.
+
+Development
+-----------
+
+Building from Git
+~~~~~~~~~~~~~~~~~
+
+We moved to GitHub in December of 2012 (http://github.com/h5py/h5py).
+
+We use the following conventions for branches and tags:
+
+* master: integration branch for the next minor (or major) version
+* 2.0, 2.1, 2.2, etc: bugfix branches for released versions
+* tags 2.0.0, 2.0.1, etc: Released bugfix versions
+
+To build from a Git checkout:
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Clone the project::
+
+    $ git clone https://github.com/h5py/h5py.git
+    $ cd h5py
+
+(Optional) Choose which branch to build from (e.g. a stable branch)::
+
+    $ git checkout 2.1
+
+Build the project. If given, /path/to/hdf5 should point to a directory
+containing a compiled, shared-library build of HDF5 (containing things like "include" and "lib")::
+
+    $ python setup.py build [--hdf5=/path/to/hdf5]
+
+(Optional) Run the unit tests::
+
+    $ python setup.py test
+
+Report any failing tests to the mailing list (h5py at googlegroups), or file a bug report at GitHub.
+
+
+
diff --git a/docs/high/attr.rst b/docs/high/attr.rst
new file mode 100644
index 0000000..98f1fcb
--- /dev/null
+++ b/docs/high/attr.rst
@@ -0,0 +1,122 @@
+.. _attributes:
+
+
+HDF5 Attributes
+===============
+
+Attributes are a critical part of what makes HDF5 a "self-describing"
+format.  They are small named pieces of data attached directly to
+:class:`Group` and :class:`Dataset` objects.  This is the official way to
+store metadata in HDF5.
+
+Each Group or Dataset has a small proxy object attached to it, at
+``<obj>.attrs``.  Attributes have the following properties:
+
+- They may be created from any scalar or NumPy array
+- Each attribute should be small (generally < 64k)
+- There is no partial I/O (i.e. slicing); the entire attribute must be read.
+
+The ``.attrs`` proxy objects are of class :class:`AttributeManager`, below.
+This class supports a dictionary-style interface.
+
+Reference
+---------
+
+.. class:: AttributeManager(parent)
+
+    AttributeManager objects are created directly by h5py.  You should
+    access instances by ``group.attrs`` or ``dataset.attrs``, not by manually
+    creating them.
+
+    .. method:: __iter__()
+
+        Get an iterator over attribute names.
+
+    .. method:: __contains__(name)
+
+        Determine if attribute `name` is attached to this object.
+
+    .. method:: __getitem__(name)
+
+        Retrieve an attribute.
+
+    .. method:: __setitem__(name, val)
+
+        Create an attribute, overwriting any existing attribute.  The type
+        and shape of the attribute are determined automatically by h5py.
+
+    .. method:: __delitem__(name)
+
+        Delete an attribute.  KeyError if it doesn't exist.
+
+    .. method:: keys()
+
+        Get the names of all attributes attached to this object.  On Py2, this
+        is a list.  On Py3, it's a set-like object.
+
+    .. method:: values()
+
+        Get the values of all attributes attached to this object.  On Py2, this
+        is a list.  On Py3, it's a collection or bag-like object.
+
+    .. method:: items()
+
+        Get ``(name, value)`` tuples for all attributes attached to this object.
+        On Py2, this is a list of tuples.  On Py3, it's a collection or
+        set-like object.
+
+    .. method:: iterkeys()
+
+        (Py2 only) Get an iterator over attribute names.
+
+    .. method:: itervalues()
+
+        (Py2 only) Get an iterator over attribute values.
+
+    .. method:: iteritems()
+
+        (Py2 only) Get an iterator over ``(name, value)`` pairs.
+
+    .. method:: get(name, default=None)
+
+        Retrieve `name`, or `default` if no such attribute exists.
+
+    .. method:: create(name, data, shape=None, dtype=None)
+
+        Create a new attribute, with control over the shape and type.  Any
+        existing attribute will be overwritten.
+
+        :param name:    Name of the new attribute
+        :type name:     String
+
+        :param data:    Value of the attribute; will be put through
+                        ``numpy.array(data)``.
+
+        :param shape:   Shape of the attribute.  Overrides ``data.shape`` if
+                        both are given, in which case the total number of
+                        points must be unchanged.
+        :type shape:    Tuple
+
+        :param dtype:   Data type for the attribute.  Overrides ``data.dtype``
+                        if both are given.
+        :type dtype:    NumPy dtype
+
+
+    .. method:: modify(name, value)
+
+        Change the value of an attribute while preserving its type and shape.
+        Unlike :meth:`AttributeManager.__setitem__`, if the attribute already
+        exists, only its value will be changed.  This can be useful for
+        interacting with externally generated files, where the type and shape
+        must not be altered.
+
+        If the attribute doesn't exist, it will be created with a default
+        shape and type.
+
+        :param name:    Name of attribute to modify.
+        :type name:     String
+
+        :param value:   New value.  Will be put through ``numpy.array(value)``.
+
+
+
diff --git a/docs/high/dataset.rst b/docs/high/dataset.rst
new file mode 100644
index 0000000..c5cfc25
--- /dev/null
+++ b/docs/high/dataset.rst
@@ -0,0 +1,445 @@
+.. _dataset:
+
+
+HDF5 Datasets
+=============
+
+Datasets are very similar to NumPy arrays.  They are homogeneous collections of
+data elements, with an immutable datatype and (hyper)rectangular shape.
+Unlike NumPy arrays, they support a variety of transparent storage features
+such as compression, error-detection, and chunked I/O.
+
+They are represented in h5py by a thin proxy class which supports familiar
+NumPy operations like slicing, along with a variety of descriptive attributes:
+
+  - **shape** attribute
+  - **size** attribute
+  - **dtype** attribute
+
+
+.. _dataset_create:
+
+Creating datasets
+-----------------
+
+New datasets are created using either :meth:`Group.create_dataset` or
+:meth:`Group.require_dataset`.  Existing datasets should be retrieved using
+the group indexing syntax (``dset = group["name"]``).
+
+To make an empty dataset, all you have to do is specify a name, shape, and
+optionally the data type (defaults to ``'f'``)::
+
+    >>> dset = f.create_dataset("default", (100,))
+    >>> dset = f.create_dataset("ints", (100,), dtype='i8')
+
+You may initialize the dataset to an existing NumPy array::
+
+    >>> arr = np.arange(100)
+    >>> dset = f.create_dataset("init", data=arr)
+
+Keywords ``shape`` and ``dtype`` may be specified along with ``data``; if so,
+they will override ``data.shape`` and ``data.dtype``.  It's required that
+(1) the total number of points in ``shape`` match the total number of points
+in ``data.shape``, and that (2) it's possible to cast ``data.dtype`` to
+the requested ``dtype``.
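+
+For example (a sketch), reshaping and casting the same 100 points::
+
+    >>> arr = np.arange(100, dtype='int64')
+    >>> dset = f.create_dataset("converted", data=arr, shape=(10, 10), dtype='int32')
+    >>> dset.shape, dset.dtype
+    ((10, 10), dtype('int32'))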
+
+
+.. _dataset_chunks:
+
+Chunked storage
+---------------
+
+An HDF5 dataset created with the default settings will be `contiguous`; in
+other words, laid out on disk in traditional C order.  Datasets may also be
+created using HDF5's `chunked` storage layout.  This means the dataset is
+divided up into regularly-sized pieces which are stored haphazardly on disk,
+and indexed using a B-tree.
+
+Chunked storage makes it possible to resize datasets, and because the data
+is stored in fixed-size chunks, to use compression filters.
+
+To enable chunked storage, set the keyword ``chunks`` to a tuple indicating
+the chunk shape::
+
+    >>> dset = f.create_dataset("chunked", (1000, 1000), chunks=(100, 100))
+
+Data will be read and written in blocks with shape (100,100); for example,
+the data in ``dset[0:100,0:100]`` will be stored together in the file, as will
+the data points in range ``dset[400:500, 100:200]``.
+
+Chunking has performance implications.  It's recommended to keep the total
+size of your chunks between 10 KiB and 1 MiB, larger for larger datasets.
+Also keep in mind that when any element in a chunk is accessed, the entire
+chunk is read from disk.
+
+Since picking a chunk shape can be confusing, you can have h5py guess a chunk
+shape for you::
+
+    >>> dset = f.create_dataset("autochunk", (1000, 1000), chunks=True)
+
+Auto-chunking is also enabled when using compression or ``maxshape``, etc.,
+if a chunk shape is not manually specified.
+
+
+.. _dataset_resize:
+
+Resizable datasets
+------------------
+
+In HDF5, datasets can be resized, once created, up to a maximum size, by
+calling :meth:`Dataset.resize`.  You specify this maximum size when creating
+the dataset, via the keyword ``maxshape``::
+
+    >>> dset = f.create_dataset("resizable", (10,10), maxshape=(500, 20))
+
+Any (or all) axes may also be marked as "unlimited", in which case they may 
+be increased up to the HDF5 per-axis limit of 2**64 elements.  Indicate these
+axes using ``None``::
+
+    >>> dset = f.create_dataset("unlimited", (10, 10), maxshape=(None, 10))
+
+.. note:: Resizing an array with existing data works differently than in NumPy; if
+    any axis shrinks, the data in the missing region is discarded.  Data does
+    not "rearrange" itself as it does when resizing a NumPy array.
+
+
+.. _dataset_compression:
+
+Filter pipeline
+---------------
+
+Chunked data may be transformed by the HDF5 `filter pipeline`.  The most
+common use is applying transparent compression.  Data is compressed on the
+way to disk, and automatically decompressed when read.  Once the dataset
+is created with a particular compression filter applied, data may be read
+and written as normal with no special steps required.
+
+Enable compression with the ``compression`` keyword to
+:meth:`Group.create_dataset`::
+
+    >>> dset = f.create_dataset("zipped", (100, 100), compression="gzip")
+
+Options for each filter may be specified with ``compression_opts``::
+
+    >>> dset = f.create_dataset("zipped_max", (100, 100), compression="gzip", compression_opts=9)
+
+Lossless compression filters
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+GZIP filter (``"gzip"``)
+    Available with every installation of HDF5, so it's best where portability is
+    required.  Good compression, moderate speed.  ``compression_opts`` sets the
+    compression level and may be an integer from 0 to 9, default is 4.
+
+
+LZF filter (``"lzf"``)
+    Available with every installation of h5py (C source code also available).
+    Low to moderate compression, very fast.  No options.
+
+
+SZIP filter (``"szip"``)
+    Patent-encumbered filter used in the NASA community.  Not available with all
+    installations of HDF5 due to legal reasons.  Consult the HDF5 docs for filter
+    options.
+
+Custom compression filters
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In addition to the compression filters listed above, compression filters can be
+dynamically loaded by the underlying HDF5 library. This is done by passing a
+filter number to :meth:`Group.create_dataset` as the ``compression`` parameter.
+The ``compression_opts`` parameter will then be passed to this filter.
+
+.. note:: The underlying implementation of the compression filter will have the
+    ``H5Z_FLAG_OPTIONAL`` flag set. This indicates that if the compression
+    filter doesn't compress a block while writing, no error will be thrown. The
+    filter will then be skipped when subsequently reading the block.
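+
+A sketch of what this looks like; the ID 32768 below is a hypothetical
+placeholder, not a real registered filter (third-party filter IDs are
+assigned by The HDF Group)::
+
+    >>> # 32768 is a stand-in; substitute the ID of a filter actually
+    >>> # loadable by your HDF5 installation
+    >>> dset = f.create_dataset("custom", (100, 100), compression=32768,
+    ...                         compression_opts=(1, 2))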
+
+
+.. _dataset_scaleoffset:
+
+Scale-Offset filter
+~~~~~~~~~~~~~~~~~~~
+
+Filters enabled with the ``compression`` keyword are *lossless*; what comes
+out of the dataset is exactly what you put in.  HDF5 also includes a lossy
+filter which trades precision for storage space.
+
+It works with integer and floating-point data only.  Enable the scale-offset
+filter by setting the :meth:`Group.create_dataset` keyword ``scaleoffset``
+to an integer.
+
+For integer data, this specifies the number of bits to retain.  Set to 0 to
+have HDF5 automatically compute the number of bits required for lossless
+compression of the chunk.  For floating-point data, it indicates the number
+of digits after the decimal point to retain.
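+
+For example (a sketch)::
+
+    >>> # Keep 3 digits after the decimal point for float data
+    >>> dset = f.create_dataset("sofloat", (100,), dtype='float64', scaleoffset=3)
+    >>> # Let HDF5 choose the bit count for lossless integer packing
+    >>> dset2 = f.create_dataset("soint", (100,), dtype='int32', scaleoffset=0)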
+
+
+.. _dataset_shuffle:
+
+Shuffle filter
+~~~~~~~~~~~~~~
+
+Block-oriented compressors like GZIP or LZF work better when presented with
+runs of similar values.  Enabling the shuffle filter rearranges the bytes in
+the chunk and may improve compression ratio.  No significant speed penalty,
+lossless.
+
+Enable by setting :meth:`Group.create_dataset` keyword ``shuffle`` to True.
+
+
+.. _dataset_fletcher32:
+
+Fletcher32 filter
+~~~~~~~~~~~~~~~~~
+
+Adds a checksum to each chunk to detect data corruption.  Attempts to read
+corrupted chunks will fail with an error.  No significant speed penalty.
+Obviously shouldn't be used with lossy compression filters.
+
+Enable by setting :meth:`Group.create_dataset` keyword ``fletcher32`` to True.
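+
+Shuffle and Fletcher32 are set per-dataset and stack with (lossless)
+compression; for example (a sketch)::
+
+    >>> dset = f.create_dataset("checked", (1000, 1000), compression="gzip",
+    ...                         shuffle=True, fletcher32=True)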
+
+
+.. _dataset_slicing:
+
+Reading & writing data
+----------------------
+
+HDF5 datasets re-use the NumPy slicing syntax to read and write to the file.
+Slice specifications are translated directly to HDF5 "hyperslab"
+selections, and are a fast and efficient way to access data in the file. The
+following slicing arguments are recognized:
+
+    * Indices: anything that can be converted to a Python long
+    * Slices (i.e. ``[:]`` or ``[0:10]``)
+    * Field names, in the case of compound data
+    * At most one ``Ellipsis`` (``...``) object
+
+Here are a few examples (output omitted):
+
+    >>> dset = f.create_dataset("MyDataset", (10,10,10), 'f')
+    >>> dset[0,0,0]
+    >>> dset[0,2:10,1:9:3]
+    >>> dset[:,::2,5]
+    >>> dset[0]
+    >>> dset[1,5]
+    >>> dset[0,...]
+    >>> dset[...,6]
+
+For compound data, you can specify multiple field names alongside the
+numeric slices:
+
+    >>> dset["FieldA"]
+    >>> dset[0,:,4:5, "FieldA", "FieldB"]
+    >>> dset[0, ..., "FieldC"]
+
+To retrieve the contents of a `scalar` dataset, you can use the same
+syntax as in NumPy:  ``result = dset[()]``.  In other words, index into
+the dataset using an empty tuple.
+
+For simple slicing, broadcasting is supported:
+
+    >>> dset[0,:,:] = np.arange(10)  # Broadcasts to (10,10)
+
+Broadcasting is implemented using repeated hyperslab selections, and is
+safe to use with very large target selections.  It is supported for the above
+"simple" (integer, slice and ellipsis) slicing only.
+
+
+.. _dataset_fancy:
+
+Fancy indexing
+--------------
+
+A subset of the NumPy fancy-indexing syntax is supported.  Use this with
+caution, as the underlying HDF5 mechanisms may have different performance
+than you expect.
+
+For any axis, you can provide an explicit list of points you want; for a
+dataset with shape (10, 10)::
+
+    >>> dset.shape
+    (10, 10)
+    >>> result = dset[0, [1,3,8]]
+    >>> result.shape
+    (3,)
+    >>> result = dset[1:6, [5,8,9]]
+    >>> result.shape
+    (5, 3)
+
+The following restrictions exist:
+
+* List selections may not be empty
+* Selection coordinates must be given in increasing order
+* Duplicate selections are ignored
+* Very long lists (> 1000 elements) may produce poor performance
+
+NumPy boolean "mask" arrays can also be used to specify a selection.  The
+result of this operation is a 1-D array with elements arranged in the
+standard NumPy (C-style) order.  Behind the scenes, this generates a laundry
+list of points to select, so be careful when using it with large masks::
+
+    >>> arr = numpy.arange(100).reshape((10,10))
+    >>> dset = f.create_dataset("MyDataset", data=arr)
+    >>> result = dset[arr > 50]
+    >>> result.shape
+    (49,)
+
+
+.. _dataset_iter:
+
+Length and iteration
+--------------------
+
+As with NumPy arrays, the ``len()`` of a dataset is the length of the first
+axis, and iterating over a dataset iterates over the first axis.  However,
+modifications to the yielded data are not recorded in the file.  Resizing a
+dataset while iterating has undefined results.
+
+On 32-bit platforms, ``len(dataset)`` will fail if the first axis is bigger
+than 2**32. It's recommended to use :meth:`Dataset.len` for large datasets.
+
+
+Reference
+---------
+
+.. class:: Dataset(identifier)
+
+    Dataset objects are typically created via :meth:`Group.create_dataset`,
+    or by retrieving existing datasets from a file.  Call this constructor to
+    create a new Dataset bound to an existing 
+    :class:`DatasetID <low:h5py.h5d.DatasetID>` identifier.
+
+    .. method:: __getitem__(args)
+
+        NumPy-style slicing to retrieve data.  See :ref:`dataset_slicing`.
+
+    .. method:: __setitem__(args)
+
+        NumPy-style slicing to write data.  See :ref:`dataset_slicing`.
+
+    .. method:: read_direct(array, source_sel=None, dest_sel=None)
+
+        Read from an HDF5 dataset directly into a NumPy array, which can
+        avoid making an intermediate copy as happens with slicing. The
+        destination array must be C-contiguous and writable, and must have
+        a datatype to which the source data may be cast.  Data type conversion
+        will be carried out on the fly by HDF5.
+
+        `source_sel` and `dest_sel` indicate the range of points in the
+        dataset and destination array respectively.  Use the output of
+        ``numpy.s_[args]``::
+
+            >>> dset = f.create_dataset("dset", (100,), dtype='int64')
+            >>> arr = np.zeros((100,), dtype='int32')
+            >>> dset.read_direct(arr, np.s_[0:10], np.s_[50:60])
+
+    .. method:: astype(dtype)
+
+        Return a context manager allowing you to read data as a particular
+        type.  Conversion is handled by HDF5 directly, on the fly::
+
+            >>> dset = f.create_dataset("bigint", (1000,), dtype='int64')
+            >>> with dset.astype('int16'):
+            ...     out = dset[:]
+            >>> out.dtype
+            dtype('int16')
+
+    .. method:: resize(size, axis=None)
+
+        Change the shape of a dataset.  `size` may be a tuple giving the new
+        dataset shape, or an integer giving the new length of the specified
+        `axis`.
+
+        Datasets may be resized only up to :attr:`Dataset.maxshape`.
+
+    .. method:: len()
+        
+        Return the size of the first axis.
+
+    .. attribute:: shape
+
+        NumPy-style shape tuple giving dataset dimensions.
+
+    .. attribute:: dtype
+
+        NumPy dtype object giving the dataset's type.
+
+    .. attribute:: size
+
+        Integer giving the total number of elements in the dataset.
+
+    .. attribute:: maxshape
+
+        NumPy-style shape tuple indicating the maximum dimensions up to which
+        the dataset may be resized.  Axes with ``None`` are unlimited.
+
+    .. attribute:: chunks
+
+        Tuple giving the chunk shape, or None if chunked storage is not used.
+        See :ref:`dataset_chunks`.
+
+    .. attribute:: compression
+
+        String with the currently applied compression filter, or None if
+        compression is not enabled for this dataset.  See :ref:`dataset_compression`.
+
+    .. attribute:: compression_opts
+
+        Options for the compression filter.  See :ref:`dataset_compression`.
+
+    .. attribute:: scaleoffset
+
+        Setting for the HDF5 scale-offset filter (integer), or None if
+        scale-offset compression is not used for this dataset.
+        See :ref:`dataset_scaleoffset`.
+
+    .. attribute:: shuffle
+
+        Whether the shuffle filter is applied (T/F).  See :ref:`dataset_shuffle`.
+
+    .. attribute:: fletcher32
+
+        Whether Fletcher32 checksumming is enabled (T/F).  See :ref:`dataset_fletcher32`.
+
+    .. attribute:: fillvalue
+
+        Value used when reading uninitialized portions of the dataset, or None
+        if no fill value has been defined, in which case HDF5 will use a
+        type-appropriate default value.  Can't be changed after the dataset is
+        created.
+
+    .. attribute:: dims
+
+        Access to :ref:`dimension_scales`.
+
+    .. attribute:: attrs
+
+        :ref:`attributes` for this dataset.
+
+    .. attribute:: id
+
+        The dataset's low-level identifier; an instance of
+        :class:`DatasetID <low:h5py.h5d.DatasetID>`.
+
+    .. attribute:: ref
+
+        An HDF5 object reference pointing to this dataset.  See
+        :ref:`refs_object`.
+
+    .. attribute:: regionref
+
+        Proxy object for creating HDF5 region references.  See
+        :ref:`refs_region`.
+
+    .. attribute:: name
+
+        String giving the full path to this dataset.
+
+    .. attribute:: file
+
+        :class:`File` instance in which this dataset resides
+
+    .. attribute:: parent
+
+        :class:`Group` instance containing this dataset.
diff --git a/docs/high/dims.rst b/docs/high/dims.rst
new file mode 100644
index 0000000..37cbc5d
--- /dev/null
+++ b/docs/high/dims.rst
@@ -0,0 +1,92 @@
+.. _dimension_scales:
+
+HDF5 Dimension Scales
+=====================
+
+Datasets are multidimensional arrays. HDF5 provides support for labeling the
+dimensions and associating one or more "dimension scales" with each dimension. A
+dimension scale is simply another HDF5 dataset. In principle, the length of the
+multidimensional array along the dimension of interest should be equal to the
+length of the dimension scale, but HDF5 does not enforce this property.
+
+The HDF5 library provides the H5DS API for working with dimension scales. H5py
+provides low-level bindings to this API in :mod:`h5py.h5ds`. These low-level
+bindings are in turn used to provide a high-level interface through the
+``Dataset.dims`` property. Suppose we have the following data file::
+
+    f = File('foo.h5', 'w')
+    f['data'] = np.ones((4, 3, 2), 'f')
+
+HDF5 allows the dimensions of ``data`` to be labeled, for example::
+
+    f['data'].dims[0].label = 'z'
+    f['data'].dims[2].label = 'x'
+
+Note that the first dimension, which has a length of 4, has been labeled "z",
+the third dimension (in this case the fastest varying dimension), has been
+labeled "x", and the second dimension was given no label at all.
+
+We can also use HDF5 datasets as dimension scales. For example, if we have::
+
+    f['x1'] = [1, 2]
+    f['x2'] = [1, 1.1]
+    f['y1'] = [0, 1, 2]
+    f['z1'] = [0, 1, 4, 9]
+
+We are going to treat the ``x1``, ``x2``, ``y1``, and ``z1`` datasets as
+dimension scales::
+
+    f['data'].dims.create_scale(f['x1'])
+    f['data'].dims.create_scale(f['x2'], 'x2 name')
+    f['data'].dims.create_scale(f['y1'], 'y1 name')
+    f['data'].dims.create_scale(f['z1'], 'z1 name')
+
+When you create a dimension scale, you may provide a name for that scale. In
+this case, the ``x1`` scale was not given a name, but the others were. Now we
+can associate these dimension scales with the primary dataset::
+
+    f['data'].dims[0].attach_scale(f['z1'])
+    f['data'].dims[1].attach_scale(f['y1'])
+    f['data'].dims[2].attach_scale(f['x1'])
+    f['data'].dims[2].attach_scale(f['x2'])
+
+Note that two dimension scales were associated with the third dimension of
+``data``. You can also detach a dimension scale::
+
+    f['data'].dims[2].detach_scale(f['x2'])
+
+but for now, let's assume that we have both ``x1`` and ``x2`` still associated
+with the third dimension of ``data``. You can attach a dimension scale to any
+number of HDF5 datasets; you can even attach it to multiple dimensions of a
+single HDF5 dataset.
+
+Now that the dimensions of ``data`` have been labeled, and the dimension scales
+for the various axes have been specified, we have provided much more context
+with which ``data`` can be interpreted. For example, if you want to know the
+labels for the various dimensions of ``data``::
+
+    >>> [dim.label for dim in f['data'].dims]
+    ['z', '', 'x']
+
+If you want the names of the dimension scales associated with the "x" axis::
+
+    >>> f['data'].dims[2].keys()
+    ['', 'x2 name']
+
+:meth:`items` and :meth:`values` methods are also provided. The dimension
+scales themselves can also be accessed with::
+
+    f['data'].dims[2][1]
+
+or::
+
+    f['data'].dims[2]['x2 name']
+
+such that::
+
+    >>> f['data'].dims[2][1] == f['x2']
+    True
+
+though, beware that if you attempt to index the dimension scales with a string,
+the first dimension scale whose name matches the string is the one that will be
+returned. There is no guarantee that the name of the dimension scale is unique.
diff --git a/docs/high/file.rst b/docs/high/file.rst
new file mode 100644
index 0000000..81b35ba
--- /dev/null
+++ b/docs/high/file.rst
@@ -0,0 +1,183 @@
+.. _file:
+
+
+HDF5 File Objects
+=================
+
+File objects serve as your entry point into the world of HDF5.  In addition
+to the File-specific capabilities listed here, every File instance is
+also an :ref:`HDF5 group <group>` representing the `root group` of the file.
+
+.. _file_open:
+
+Opening & creating files
+------------------------
+
+HDF5 files work generally like standard Python file objects.  They support
+standard modes like r/w/a, and should be closed when they are no longer in
+use.  However, there is obviously no concept of "text" vs "binary" mode.
+
+    >>> f = h5py.File('myfile.hdf5','r')
+
+The file name may be a byte string or unicode string. Valid modes are:
+
+    ========  ================================================
+     r        Readonly, file must exist
+     r+       Read/write, file must exist
+     w        Create file, truncate if exists
+     w- or x  Create file, fail if exists
+     a        Read/write if exists, create otherwise (default)
+    ========  ================================================
+
+
+.. _file_driver:
+
+File drivers
+------------
+
+HDF5 ships with a variety of different low-level drivers, which map the logical
+HDF5 address space to different storage mechanisms.  You can specify which
+driver you want to use when the file is opened::
+
+    >>> f = h5py.File('myfile.hdf5', driver=<driver name>, <driver_kwds>)
+
+For example, the HDF5 "core" driver can be used to create a purely in-memory
+HDF5 file, optionally written out to disk when it is closed.  Here's a list
+of supported drivers and their options:
+
+    None
+        **Strongly recommended.** Use the standard HDF5 driver appropriate
+        for the current platform. On UNIX, this is the H5FD_SEC2 driver;
+        on Windows, it is H5FD_WINDOWS.
+
+    'sec2'
+        Unbuffered, optimized I/O using standard POSIX functions.
+
+    'stdio' 
+        Buffered I/O using functions from stdio.h.
+
+    'core'
+        Memory-map the entire file; all operations are performed in
+        memory and written back out when the file is closed.  Keywords:
+
+        backing_store:  If True (default), save changes to a real file
+                        when closing.  If False, the file exists purely
+                        in memory and is discarded when closed.
+
+        block_size:     Increment (in bytes) by which memory is extended.
+                        Default is 64k.
+
+    'family'
+        Store the file on disk as a series of fixed-length chunks.  Useful
+        if the file system doesn't allow large files.  Note: the filename
+        you provide *must* contain a printf-style integer format code
+        (e.g. "%d"), which will be replaced by the file sequence number.
+        Keywords:
+
+        memb_size:  Maximum file size (default is 2**31-1).
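+
+For example, a sketch of a purely in-memory file using the 'core' driver::
+
+    >>> f = h5py.File('scratch.hdf5', 'w', driver='core', backing_store=False)
+    >>> f['data'] = np.arange(10)    # lives only in memory
+    >>> f.close()                    # contents discarded; nothing hits disk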
+
+
+.. _file_version:
+
+Version Bounding
+----------------
+
+HDF5 has been evolving for many years now.  By default, the library will
+write objects in the most compatible fashion possible, so that older versions
+will still be able to read files generated by modern programs.  However, there
+can be performance advantages if you are willing to forgo a certain level
+of backwards compatibility.  By using the "libver" option to File, you can
+specify the minimum and maximum sophistication of these structures:
+
+    >>> f = h5py.File('name.hdf5', libver='earliest') # most compatible
+    >>> f = h5py.File('name.hdf5', libver='latest')   # most modern
+
+Here "latest" means that HDF5 will always use the newest version of these
+structures without particular concern for backwards compatibility.  The
+"earliest" option means that HDF5 will make a *best effort* to be backwards
+compatible.
+
+The default is "earliest".
+
+
+.. _file_userblock:
+
+User block
+----------
+
+HDF5 allows the user to insert arbitrary data at the beginning of the file,
+in a reserved space called the `user block`.  The length of the user block
+must be specified when the file is created.  It can be either zero
+(the default) or a power of two greater than or equal to 512.  You
+can specify the size of the user block when creating a new file, via the
+``userblock_size`` keyword to File; the userblock size of an open file can
+likewise be queried through the ``File.userblock_size`` property.
+
+Modifying the user block on an open file is not supported; this is a limitation
+of the HDF5 library.  However, once the file is closed you are free to read and
+write data at the start of the file, provided your modifications don't leave
+the user block region.
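+
+A sketch of creating a file with a user block and writing to it afterwards::
+
+    >>> f = h5py.File('blocked.hdf5', 'w', userblock_size=512)
+    >>> f.userblock_size
+    512
+    >>> f.close()
+    >>> with open('blocked.hdf5', 'r+b') as bf:    # ordinary file I/O
+    ...     bf.write(b'my header data')            # stays inside the 512 bytes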
+
+Reference
+---------
+
+.. note::
+    
+    Unlike Python file objects, the attribute ``File.name`` gives the
+    HDF5 name of the root group, "``/``". To access the on-disk name, use
+    :attr:`File.filename`.
+
+.. class:: File(name, mode=None, driver=None, libver=None, userblock_size=None, **kwds)
+
+    Open or create a new file.
+
+    Note that in addition to the File-specific methods and properties listed
+    below, File objects inherit the full interface of :class:`Group`.
+
+    :param name:    Name of file (`str` or `unicode`), or an instance of
+                    :class:`h5f.FileID` to bind to an existing
+                    file identifier.
+    :param mode:    Mode in which to open file; one of
+                    ("w", "r", "r+", "a", "w-").  See :ref:`file_open`.
+    :param driver:  File driver to use; see :ref:`file_driver`.
+    :param libver:  Compatibility bounds; see :ref:`file_version`.
+    :param userblock_size:  Size (in bytes) of the user block.  If nonzero,
+                    must be a power of 2 and at least 512.  See
+                    :ref:`file_userblock`.
+    :param kwds:    Driver-specific keywords; see :ref:`file_driver`.
+
+    .. method:: close()
+
+        Close this file.  All open objects will become invalid.
+
+    .. method:: flush()
+
+        Request that the HDF5 library flush its buffers to disk.
+
+    .. attribute:: id
+
+        Low-level identifier (an instance of :class:`FileID <low:h5py.h5f.FileID>`).
+
+    .. attribute:: filename
+
+        Name of this file on disk.  Generally a Unicode string; a byte string
+        will be used if HDF5 returns a non-UTF-8 encoded string.
+
+    .. attribute:: mode
+
+        String indicating if the file is open readonly ("r") or read-write
+        ("r+").  Will always be one of these two values, regardless of the
+        mode used to open the file.
+
+    .. attribute:: driver
+
+        String giving the driver used to open the file.  Refer to
+        :ref:`file_driver` for a list of drivers.
+
+    .. attribute:: libver
+
+        2-tuple with library version settings.  See :ref:`file_version`.
+
+    .. attribute:: userblock_size
+
+        Size of user block (in bytes).  Generally 0.  See :ref:`file_userblock`.
diff --git a/docs/high/group.rst b/docs/high/group.rst
new file mode 100644
index 0000000..08694a0
--- /dev/null
+++ b/docs/high/group.rst
@@ -0,0 +1,436 @@
+.. _group:
+
+
+HDF5 Groups
+===========
+
+
+Groups are the container mechanism by which HDF5 files are organized.  From
+a Python perspective, they operate somewhat like dictionaries.  In this case
+the "keys" are the names of group members, and the "values" are the members
+themselves (:class:`Group` and :class:`Dataset` objects).
+
+Group objects also contain most of the machinery which makes HDF5 useful.
+The :ref:`File object <file>` does double duty as the HDF5 *root group*, and
+serves as your entry point into the file:
+
+    >>> f = h5py.File('foo.hdf5','w')
+    >>> f.name
+    u'/'
+    >>> f.keys()
+    []
+
+Names of all objects in the file are text strings (``unicode`` on
+Py2, ``str`` on Py3).  These will be encoded with the HDF5-approved UTF-8
+encoding before being passed to the HDF5 C library.  Objects may also be
+retrieved using byte strings, which will be passed on to HDF5 as-is.
+
+
+.. _group_create:
+
+Creating groups
+---------------
+
+New groups are easy to create::
+
+    >>> grp = f.create_group("bar")
+    >>> grp.name
+    '/bar'
+    >>> subgrp = grp.create_group("baz")
+    >>> subgrp.name
+    '/bar/baz'
+
+Multiple intermediate groups can also be created implicitly::
+
+    >>> grp2 = f.create_group("/some/long/path")
+    >>> grp2.name
+    '/some/long/path'
+    >>> grp3 = f['/some/long']
+    >>> grp3.name
+    '/some/long'
+
+
+.. _group_links:
+
+Dict interface and links
+------------------------
+
+Groups implement a subset of the Python dictionary convention.  They have
+methods like ``keys()``, ``values()`` and support iteration.  Most importantly,
+they support the indexing syntax, and standard exceptions:
+
+    >>> myds = subgrp["MyDS"]
+    >>> missing = subgrp["missing"]
+    KeyError: "Name doesn't exist (Symbol table: Object not found)"
+
+Objects can be deleted from the file using the standard syntax::
+
+    >>> del subgrp["MyDS"]
+
+.. note::
+    When using h5py from Python 3, the keys(), values() and items() methods
+    will return view-like objects instead of lists.  These objects support
+    containership testing and iteration, but can't be sliced like lists.
+
+
+.. _group_hardlinks:
+
+Hard links
+~~~~~~~~~~
+
+What happens when assigning an object to a name in the group?  It depends on
+the type of object being assigned.  For NumPy arrays or other data, the default
+is to create an :ref:`HDF5 dataset <dataset>`::
+
+    >>> grp["name"] = 42
+    >>> out = grp["name"]
+    >>> out
+    <HDF5 dataset "name": shape (), type "<i8">
+    
+When the object being stored is an existing Group or Dataset, a new link is
+made to the object::
+
+    >>> grp["other name"] = out
+    >>> grp["other name"]
+    <HDF5 dataset "other name": shape (), type "<i8">
+
+Note that this is `not` a copy of the dataset!  Like hard links in a UNIX file
+system, objects in an HDF5 file can be stored in multiple groups::
+
+    >>> f["other name"] == f["name"]
+    True
+
+
+.. _group_softlinks:
+
+Soft links
+~~~~~~~~~~
+
+Also like a UNIX filesystem, HDF5 groups can contain "soft" or symbolic links,
+which contain a text path instead of a pointer to the object itself.  You
+can easily create these in h5py by using ``h5py.SoftLink``::
+
+    >>> myfile = h5py.File('foo.hdf5','w')
+    >>> group = myfile.create_group("somegroup")
+    >>> myfile["alias"] = h5py.SoftLink('/somegroup')
+
+If the target is removed, the link will "dangle":
+
+    >>> del myfile['somegroup']
+    >>> print myfile['alias']
+    KeyError: 'Component not found (Symbol table: Object not found)'
+
+
+.. _group_extlinks:
+
+External links
+~~~~~~~~~~~~~~
+
+New in HDF5 1.8, external links are "soft links plus", which allow you to
+specify the name of the file as well as the path to the desired object.  You
+can refer to objects in any file you wish.  Use similar syntax as for soft
+links:
+
+    >>> myfile = h5py.File('foo.hdf5','w')
+    >>> myfile['ext link'] = h5py.ExternalLink("otherfile.hdf5", "/path/to/resource")
+
+When the link is accessed, the file "otherfile.hdf5" is opened, and the
+object at "/path/to/resource" is returned.
+
+Since the object retrieved is in a different file, its ".file" and ".parent"
+properties will refer to objects in that file, *not* the file in which the
+link resides.
+
+.. note::
+
+    Currently, you can't access an external link if the file it points to is
+    already open.  This is related to how HDF5 manages file permissions
+    internally.
+
+
+Reference
+---------
+
+.. class:: Group(identifier)
+
+    Generally Group objects are created by opening objects in the file, or
+    by the method :meth:`Group.create_group`.  Call the constructor with
+    a :class:`GroupID <low:h5py.h5g.GroupID>` instance to create a new Group
+    bound to an existing low-level identifier.
+
+    .. method:: __iter__()
+
+        Iterate over the names of objects directly attached to the group.
+        Use :meth:`Group.visit` or :meth:`Group.visititems` for recursive
+        access to group members.
+
+    .. method:: __contains__(name)
+
+        Dict-like containership testing.  `name` may be a relative or absolute
+        path.
+
+    .. method:: __getitem__(name)
+
+        Retrieve an object.  `name` may be a relative or absolute path, or
+        an :ref:`object or region reference <refs>`. See :ref:`group_links`.
+
+    .. method:: __setitem__(name, value)
+
+        Create a new link, or automatically create a dataset.
+        See :ref:`group_links`.
+
+    .. method:: keys()
+
+        Get the names of directly attached group members.  On Py2, this is
+        a list.  On Py3, it's a set-like object.
+        Use :meth:`Group.visit` or :meth:`Group.visititems` for recursive
+        access to group members.
+
+    .. method:: values()
+
+        Get the objects contained in the group (Group and Dataset instances).
+        Broken soft or external links show up as None.  On Py2, this is a list.
+        On Py3, it's a collection or bag-like object.
+
+    .. method:: items()
+
+        Get ``(name, value)`` pairs for objects directly attached to this group.
+        Values for broken soft or external links show up as None.  On Py2,
+        this is a list.  On Py3, it's a set-like object.
+
+    .. method:: iterkeys()
+
+        (Py2 only) Get an iterator over key names.  Exactly equivalent to
+        ``iter(group)``.
+        Use :meth:`Group.visit` or :meth:`Group.visititems` for recursive
+        access to group members.
+
+    .. method:: itervalues()
+
+        (Py2 only) Get an iterator over objects attached to the group.
+        Broken soft and external links will show up as ``None``.
+
+    .. method:: iteritems()
+
+        (Py2 only) Get an iterator over ``(name, value)`` pairs for objects
+        directly attached to the group.  Broken soft and external link values
+        show up as ``None``.
+
+    .. method:: get(name, default=None, getclass=False, getlink=False)
+
+        Retrieve an item, or information about an item.  `name` and `default`
+        work like the standard Python ``dict.get``.
+
+        :param name:    Name of the object to retrieve.  May be a relative or
+                        absolute path.
+        :param default: If the object isn't found, return this instead.
+        :param getclass:    If True, return the class of object instead;
+                            :class:`Group` or :class:`Dataset`.
+        :param getlink: If True, return the type of link via a :class:`HardLink`,
+                        :class:`SoftLink` or :class:`ExternalLink` instance.
+                        If ``getclass`` is also True, returns the corresponding
+                        Link class without instantiating it.
+
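+        For example, a sketch of inspecting a soft link without following it::
+
+            >>> grp['alias'] = h5py.SoftLink('/somegroup')
+            >>> link = grp.get('alias', getlink=True)
+            >>> isinstance(link, h5py.SoftLink)
+            True
+            >>> link.path
+            '/somegroup'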
+
+    .. method:: visit(callable)
+
+        Recursively visit all objects in this group and subgroups.  You supply
+        a callable with the signature::
+
+            callable(name) -> None or return value
+
+        `name` will be the name of the object relative to the current group.
+        Return None to continue visiting until all objects are exhausted.
+        Returning anything else will immediately stop visiting and return
+        that value from ``visit``::
+
+            >>> def find_foo(name):
+            ...     """ Find first object with 'foo' anywhere in the name """
+            ...     if 'foo' in name:
+            ...         return name
+            >>> group.visit(find_foo)
+            u'some/subgroup/foo'
+
+
+    .. method:: visititems(callable)
+
+        Recursively visit all objects in this group and subgroups.  Like
+        :meth:`Group.visit`, except your callable should have the signature::
+
+            callable(name, object) -> None or return value
+
+        In this case `object` will be a :class:`Group` or :class:`Dataset`
+        instance.
+
+
+    .. method:: move(source, dest)
+
+        Move an object or link in the file.  If `source` is a hard link, this
+        effectively renames the object.  If a soft or external link, the
+        link itself is moved.
+
+        :param source:  Name of object or link to move.
+        :type source:   String
+        :param dest:    New location for object or link.
+        :type dest:   String
+
+
+    .. method:: copy(source, dest, name=None, shallow=False, expand_soft=False, expand_external=False, expand_refs=False, without_attrs=False)
+
+        Copy an object or group.  The source and destination need not be in
+        the same file.  If the source is a Group object, by default all objects
+        within that group will be copied recursively.
+
+        :param source:  What to copy.  May be a path in the file or a Group/Dataset object.
+        :param dest:    Where to copy it.  May be a path or Group object.
+        :param name:    If the destination is a Group object, use this for the
+                        name of the copied object (default is basename).
+        :param shallow: Only copy immediate members of a group.
+        :param expand_soft: Expand soft links into new objects.
+        :param expand_external: Expand external links into new objects.
+        :param expand_refs: Copy objects which are pointed to by references.
+        :param without_attrs:   Copy object(s) without copying HDF5 attributes.
+
+
+    .. method:: create_group(name)
+
+        Create and return a new group in the file.
+
+        :param name:    Name of group to create.  May be an absolute
+                        or relative path.  Provide None to create an anonymous
+                        group, to be linked into the file later.
+        :type name:     String or None
+
+        :return:        The new :class:`Group` object.
+
+
+    .. method:: require_group(name)
+
+        Open a group in the file, creating it if it doesn't exist.
+        TypeError is raised if a conflicting object already exists.
+        Parameters as in :meth:`Group.create_group`.
+
+
+    .. method:: create_dataset(name, shape=None, dtype=None, data=None, **kwds)
+
+        Create a new dataset.  Options are explained in :ref:`dataset_create`.
+
+        :param name:    Name of dataset to create.  May be an absolute
+                        or relative path.  Provide None to create an anonymous
+                        dataset, to be linked into the file later.
+
+        :param shape:   Shape of new dataset (Tuple).
+
+        :param dtype:   Data type for new dataset
+
+        :param data:    Initialize dataset to this (NumPy array).
+
+        :keyword chunks:    Chunk shape, or True to enable auto-chunking.
+
+        :keyword maxshape:  Dataset will be resizable up to this shape (Tuple).
+                            Automatically enables chunking.  Use None for the
+                            axes you want to be unlimited.
+
+        :keyword compression:   Compression strategy.  See :ref:`dataset_compression`.
+
+        :keyword compression_opts:  Parameters for compression filter.
+
+        :keyword scaleoffset:   See :ref:`dataset_scaleoffset`.
+
+        :keyword shuffle:   Enable shuffle filter (T/**F**).  See :ref:`dataset_shuffle`.
+
+        :keyword fletcher32: Enable Fletcher32 checksum (T/**F**).  See :ref:`dataset_fletcher32`.
+
+        :keyword fillvalue: This value will be used when reading
+                            uninitialized parts of the dataset.
+
+        :keyword track_times:   Enable dataset creation timestamps (**T**/F).
+
+
+    .. method:: require_dataset(name, shape=None, dtype=None, exact=None, **kwds)
+
+        Open a dataset, creating it if it doesn't exist.
+
+        If keyword "exact" is False (default), an existing dataset must have
+        the same shape and a conversion-compatible dtype to be returned.  If
+        True, the shape and dtype must match exactly.
+
+        Other dataset keywords (see create_dataset) may be provided, but are
+        only used if a new dataset is to be created.
+
+        Raises TypeError if an incompatible object already exists, or if the
+        shape or dtype don't match according to the above rules.
+
+        :keyword exact:     Require shape and type to match exactly (T/**F**)
+
+    .. attribute:: attrs
+
+        :ref:`attributes` for this group.
+
+    .. attribute:: id
+
+        The group's low-level identifier; an instance of
+        :class:`GroupID <low:h5py.h5g.GroupID>`.
+
+    .. attribute:: ref
+
+        An HDF5 object reference pointing to this group.  See
+        :ref:`refs_object`.
+
+    .. attribute:: regionref
+
+        A proxy object allowing you to interrogate region references.
+        See :ref:`refs_region`.
+
+    .. attribute:: name
+
+        String giving the full path to this group.
+
+    .. attribute:: file
+
+        :class:`File` instance in which this group resides.
+
+    .. attribute:: parent
+
+        :class:`Group` instance containing this group.
+
+
+Link classes
+------------
+
+.. class:: HardLink()
+
+    Exists only to support :meth:`Group.get`.  Has no state and provides no
+    properties or methods.
+
+.. class:: SoftLink(path)
+
+    Exists to allow creation of soft links in the file.
+    See :ref:`group_softlinks`.  These only serve as containers for a path;
+    they are not related in any way to a particular file.
+
+    :param path:    Value of the soft link.
+    :type path:     String
+
+    .. attribute:: path
+
+        Value of the soft link
+
+.. class:: ExternalLink(filename, path)
+
+    Like :class:`SoftLink`, except that they specify a filename in addition
+    to a path.  See :ref:`group_extlinks`.
+
+    :param filename:    Name of the file to which the link points
+    :type filename:     String
+    
+    :param path:        Path to the object in the external file.
+    :type path:         String
+
+    .. attribute:: filename
+
+        Name of the external file
+
+    .. attribute::  path
+
+        Path to the object in the external file
\ No newline at end of file
diff --git a/docs/high/index.rst b/docs/high/index.rst
new file mode 100644
index 0000000..f209dcc
--- /dev/null
+++ b/docs/high/index.rst
@@ -0,0 +1,16 @@
+
+High-Level Reference
+====================
+
+The "high-level interface" (as distinct from the large, C-like API that talks
+directly to HDF5) is how most users will interact with h5py.  It consists of
+a small number of classes which represent the main HDF5 abstractions like
+file, groups, and datasets.
+
+.. toctree::
+
+    file
+    group
+    dataset
+    attr
+    dims
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 0000000..a7ae27e
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,93 @@
+HDF5 for Python
+===============
+
+The h5py package is a Pythonic interface to the HDF5 binary data format.
+
+`HDF5 <http://hdfgroup.org>`_ is an open-source library and file format for 
+storing large amounts of numerical data, originally developed at NCSA.  It is 
+widely used in the scientific community for everything from NASA's Earth
+Observing System to the storage of data from laboratory experiments and 
+simulations.  Over the past few years, HDF5 has rapidly emerged as the de-facto
+standard  technology in Python to store large numerical datasets.
+
+This is the reference documentation for the h5py package.  Check out
+the :ref:`quick` if you're new to h5py and HDF5.
+
+The lead author of h5py, Andrew Collette, also wrote
+`an O'Reilly book <http://shop.oreilly.com/product/0636920030249.do>`_
+which provides a comprehensive, example-based introduction to using Python
+and HDF5 together.
+
+Getting h5py
+------------
+
+Downloads are at http://www.h5py.org.  It can be tricky to install all the
+C library dependencies for h5py, so check out the :ref:`install guide <install>`
+first.
+
+
+Getting help
+-------------
+
+Tutorial and reference documentation is available here at http://docs.h5py.org.
+We also have a mailing list `at Google Groups <http://groups.google.com/d/forum/h5py>`_.
+Anyone is welcome to post; the list is read by both users and the core developers
+of h5py.
+
+
+Introductory info
+-----------------
+
+.. toctree::
+    :maxdepth: 2
+
+    quick
+    build
+
+
+High-level API reference
+------------------------
+
+.. toctree::
+    :maxdepth: 2
+
+    high/file
+    high/group
+    high/dataset
+    high/attr
+    high/dims
+
+
+Advanced topics
+---------------
+
+.. toctree::
+    :maxdepth: 2
+    
+    config
+    special
+    strings
+    refs
+    mpi
+    swmr
+
+
+Low-level API reference
+-----------------------
+
+.. toctree::
+    :maxdepth: 2
+
+    low
+
+
+Meta-info about the h5py project
+--------------------------------
+
+.. toctree::
+    :maxdepth: 2
+
+    whatsnew/index
+    contributing
+    faq
+    licenses
diff --git a/docs/licenses.rst b/docs/licenses.rst
new file mode 100644
index 0000000..5976076
--- /dev/null
+++ b/docs/licenses.rst
@@ -0,0 +1,232 @@
+Licenses and legal info
+=======================
+
+Copyright Notice and Statement for the h5py Project
+---------------------------------------------------
+
+::
+
+    Copyright (c) 2008 Andrew Collette and contributors
+    http://h5py.alfven.org
+    All rights reserved.
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+
+    a. Redistributions of source code must retain the above copyright
+       notice, this list of conditions and the following disclaimer.
+
+    b. Redistributions in binary form must reproduce the above copyright
+       notice, this list of conditions and the following disclaimer in the
+       documentation and/or other materials provided with the
+       distribution.
+
+    c. Neither the name of the author nor the names of contributors may 
+       be used to endorse or promote products derived from this software 
+       without specific prior written permission.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+HDF5 Copyright Statement
+------------------------
+
+::
+
+    HDF5 (Hierarchical Data Format 5) Software Library and Utilities
+    Copyright 2006-2007 by The HDF Group (THG).
+
+    NCSA HDF5 (Hierarchical Data Format 5) Software Library and Utilities
+    Copyright 1998-2006 by the Board of Trustees of the University of Illinois.
+
+    All rights reserved.
+
+    Contributors: National Center for Supercomputing Applications (NCSA)
+    at the University of Illinois, Fortner Software, Unidata Program
+    Center (netCDF), The Independent JPEG Group (JPEG), Jean-loup Gailly
+    and Mark Adler (gzip), and Digital Equipment Corporation (DEC).
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted for any purpose (including commercial
+    purposes) provided that the following conditions are met:
+
+       1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions, and the following disclaimer.
+       2. Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions, and the following
+    disclaimer in the documentation and/or materials provided with the
+    distribution.
+       3. In addition, redistributions of modified forms of the source or
+    binary code must carry prominent notices stating that the original
+    code was changed and the date of the change.
+       4. All publications or advertising materials mentioning features or
+    use of this software are asked, but not required, to acknowledge that
+    it was developed by The HDF Group and by the National Center for
+    Supercomputing Applications at the University of Illinois at
+    Urbana-Champaign and credit the contributors.
+       5. Neither the name of The HDF Group, the name of the University,
+    nor the name of any Contributor may be used to endorse or promote
+    products derived from this software without specific prior written
+    permission from THG, the University, or the Contributor, respectively.
+
+    DISCLAIMER: THIS SOFTWARE IS PROVIDED BY THE HDF GROUP (THG) AND THE
+    CONTRIBUTORS "AS IS" WITH NO WARRANTY OF ANY KIND, EITHER EXPRESSED OR
+    IMPLIED. In no event shall THG or the Contributors be liable for any
+    damages suffered by the users arising out of the use of this software,
+    even if advised of the possibility of such damage.
+
+    Portions of HDF5 were developed with support from the University of
+    California, Lawrence Livermore National Laboratory (UC LLNL). The
+    following statement applies to those portions of the product and must
+    be retained in any redistribution of source code, binaries,
+    documentation, and/or accompanying materials:
+
+    This work was partially produced at the University of California,
+    Lawrence Livermore National Laboratory (UC LLNL) under contract
+    no. W-7405-ENG-48 (Contract 48) between the U.S. Department of Energy
+    (DOE) and The Regents of the University of California (University) for
+    the operation of UC LLNL.
+
+    DISCLAIMER: This work was prepared as an account of work sponsored by
+    an agency of the United States Government. Neither the United States
+    Government nor the University of California nor any of their
+    employees, makes any warranty, express or implied, or assumes any
+    liability or responsibility for the accuracy, completeness, or
+    usefulness of any information, apparatus, product, or process
+    disclosed, or represents that its use would not infringe privately-
+    owned rights. Reference herein to any specific commercial products,
+    process, or service by trade name, trademark, manufacturer, or
+    otherwise, does not necessarily constitute or imply its endorsement,
+    recommendation, or favoring by the United States Government or the
+    University of California. The views and opinions of authors expressed
+    herein do not necessarily state or reflect those of the United States
+    Government or the University of California, and shall not be used for
+    advertising or product endorsement purposes.
+
+PyTables Copyright Statement
+----------------------------
+
+::
+
+    Copyright Notice and Statement for PyTables Software Library and Utilities:
+
+    Copyright (c) 2002, 2003, 2004  Francesc Altet
+    Copyright (c) 2005, 2006, 2007  Carabos Coop. V.
+    All rights reserved.
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+
+    a. Redistributions of source code must retain the above copyright
+       notice, this list of conditions and the following disclaimer.
+
+    b. Redistributions in binary form must reproduce the above copyright
+       notice, this list of conditions and the following disclaimer in the
+       documentation and/or other materials provided with the
+       distribution.
+
+    c. Neither the name of the Carabos Coop. V. nor the names of its
+       contributors may be used to endorse or promote products derived
+       from this software without specific prior written permission.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+stdint.h (Windows version) License
+----------------------------------
+
+::
+
+    Copyright (c) 2006-2008 Alexander Chemeris
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+      1. Redistributions of source code must retain the above copyright notice,
+         this list of conditions and the following disclaimer.
+
+      2. Redistributions in binary form must reproduce the above copyright
+         notice, this list of conditions and the following disclaimer in the
+         documentation and/or other materials provided with the distribution.
+
+      3. The name of the author may be used to endorse or promote products
+         derived from this software without specific prior written permission.
+
+    THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+    WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+    MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+    EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+    PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+    OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 
+    WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+    OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+    ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Python license
+--------------
+
+#. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and
+   the Individual or Organization ("Licensee") accessing and otherwise using
+   Python 2.7.5 software in source or binary form and its associated documentation.
+
+#. Subject to the terms and conditions of this License Agreement, PSF hereby
+   grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+   analyze, test, perform and/or display publicly, prepare derivative works,
+   distribute, and otherwise use Python 2.7.5 alone or in any derivative
+   version, provided, however, that PSF's License Agreement and PSF's notice of
+   copyright, i.e., "Copyright 2001-2013 Python Software Foundation; All Rights
+   Reserved" are retained in Python Python 2.7.5 alone or in any derivative version
+   prepared by Licensee.
+
+#. In the event Licensee prepares a derivative work that is based on or
+   incorporates Python 2.7.5 or any part thereof, and wants to make the
+   derivative work available to others as provided herein, then Licensee hereby
+   agrees to include in any such work a brief summary of the changes made to
+   Python 2.7.5.
+
+#. PSF is making Python 2.7.5 available to Licensee on an "AS IS" basis.
+   PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED.  BY WAY OF
+   EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR
+   WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE
+   USE OF PYTHON 2.7.5 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
+
+#. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 2.7.5
+   FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
+   MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 2.7.5, OR ANY DERIVATIVE
+   THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+#. This License Agreement will automatically terminate upon a material breach of
+   its terms and conditions.
+
+#. Nothing in this License Agreement shall be deemed to create any relationship
+   of agency, partnership, or joint venture between PSF and Licensee.  This License
+   Agreement does not grant permission to use PSF trademarks or trade name in a
+   trademark sense to endorse or promote products or services of Licensee, or any
+   third party.
+
+#. By copying, installing or otherwise using Python 2.7.5, Licensee agrees
+   to be bound by the terms and conditions of this License Agreement.
+
diff --git a/docs/low.rst b/docs/low.rst
new file mode 100644
index 0000000..5ab6926
--- /dev/null
+++ b/docs/low.rst
@@ -0,0 +1,4 @@
+Low-Level Interface
+===================
+
+Now located at http://api.h5py.org.
\ No newline at end of file
diff --git a/docs/mpi.rst b/docs/mpi.rst
new file mode 100644
index 0000000..f90c3de
--- /dev/null
+++ b/docs/mpi.rst
@@ -0,0 +1,149 @@
+.. _parallel:
+
+Parallel HDF5
+=============
+
+Starting with version 2.2.0, h5py includes support for Parallel HDF5.  This
+is the "native" way to use HDF5 in a parallel computing environment.
+
+
+How does Parallel HDF5 work?
+----------------------------
+
+Parallel HDF5 is a configuration of the HDF5 library which lets you share
+open files across multiple parallel processes.  It uses the MPI (Message
+Passing Interface) standard for interprocess communication.  Consequently,
+when using Parallel HDF5 from Python, your application will also have to use
+the MPI library.
+
+This is accomplished through the `mpi4py <http://mpi4py.scipy.org/>`_ Python package, which provides
+excellent, complete Python bindings for MPI.  Here's an example
+"Hello World" using ``mpi4py``::
+
+    >>> from mpi4py import MPI
+    >>> print "Hello World (from process %d)" % MPI.COMM_WORLD.rank
+
+To run an MPI-based parallel program, use the ``mpiexec`` program to launch
+several parallel instances of Python::
+
+    $ mpiexec -n 4 python demo.py
+    Hello World (from process 1)
+    Hello World (from process 2)
+    Hello World (from process 3)
+    Hello World (from process 0)
+
+The ``mpi4py`` package includes all kinds of mechanisms to share data between
+processes, synchronize, etc.  It's a different flavor of parallelism than,
+say, threads or ``multiprocessing``, but easy to get used to.
+
+Check out the `mpi4py web site <http://mpi4py.scipy.org/>`_ for more information
+and a great tutorial.
+
+
+Building against Parallel HDF5
+------------------------------
+
+HDF5 must be built with at least the following options::
+
+    $ ./configure --enable-parallel --enable-shared
+
+Note that ``--enable-shared`` is required.
+
+Often, a "parallel" version of HDF5 will be available through your package
+manager.  You can check which build options were used with the program
+``h5cc``::
+
+    $ h5cc -showconfig
+
+Once you've got a Parallel-enabled build of HDF5, h5py has to be compiled in
+"MPI mode".  This is simple; set your default compiler to the ``mpicc`` wrapper
+and build h5py with the ``--mpi`` option::
+
+    $ export CC=mpicc
+    $ python setup.py build --mpi [--hdf5=/path/to/parallel/hdf5]
+
+
+Using Parallel HDF5 from h5py
+-----------------------------
+
+The parallel features of HDF5 are mostly transparent.  To open a file shared
+across multiple processes, use the ``mpio`` file driver.  Here's an example
+program which opens a file, creates a single dataset and fills it with the
+process ID::
+
+    from mpi4py import MPI
+    import h5py
+
+    rank = MPI.COMM_WORLD.rank  # The process ID (integer 0-3 for 4-process run)
+
+    f = h5py.File('parallel_test.hdf5', 'w', driver='mpio', comm=MPI.COMM_WORLD)
+
+    dset = f.create_dataset('test', (4,), dtype='i')
+    dset[rank] = rank
+
+    f.close()
+
+Run the program::
+
+    $ mpiexec -n 4 python demo2.py
+
+Looking at the file with ``h5dump``::
+
+    $ h5dump parallel_test.hdf5
+    HDF5 "parallel_test.hdf5" {
+    GROUP "/" {
+       DATASET "test" {
+          DATATYPE  H5T_STD_I32LE
+          DATASPACE  SIMPLE { ( 4 ) / ( 4 ) }
+          DATA {
+          (0): 0, 1, 2, 3
+          }
+       }
+    }
+    }
+
+Collective versus independent operations
+----------------------------------------
+
+MPI-based programs work by launching many instances of the Python interpreter,
+each of which runs your script.  There are certain requirements imposed on
+what each process can do.  Some operations in HDF5, for example anything
+which modifies the file metadata, must be performed by all processes.  Other
+operations, such as writing data to a dataset, can be performed by only some
+processes.
+
+These two classes are called *collective* and *independent* operations.  Anything
+which modifies the *structure* or metadata of a file must be done collectively.
+For example, when creating a group, each process must participate::
+
+    >>> grp = f.create_group('x')  # right
+
+    >>> if rank == 1:
+    ...     grp = f.create_group('x')   # wrong; all processes must do this
+
+On the other hand, writing data to a dataset can be done independently::
+
+    >>> if rank > 2:
+    ...     dset[rank] = 42   # this is fine
+
+
+MPI atomic mode
+---------------
+
+HDF5 versions 1.8.9+ support the MPI "atomic" file access mode, which trades
+speed for more stringent consistency requirements.  Once you've opened a
+file with the ``mpio`` driver, you can place it in atomic mode using the
+settable ``atomic`` property::
+
+    >>> f = h5py.File('parallel_test.hdf5', 'w', driver='mpio', comm=MPI.COMM_WORLD)
+    >>> f.atomic = True
+
+
+More information
+----------------
+
+Parallel HDF5 is a new feature in h5py.  If you have any questions, feel free to
+ask on the mailing list (h5py at Google Groups).  We welcome bug reports,
+enhancements and general inquiries.
+
diff --git a/docs/quick.rst b/docs/quick.rst
new file mode 100644
index 0000000..f70a682
--- /dev/null
+++ b/docs/quick.rst
@@ -0,0 +1,146 @@
+.. _quick:
+
+Quick Start Guide
+=================
+
+If you're having trouble installing h5py, refer to :ref:`install`.
+
+Core concepts
+-------------
+
+An HDF5 file is a container for two kinds of objects: `datasets`, which are
+array-like collections of data, and `groups`, which are folder-like containers
+that hold datasets and other groups. The most fundamental thing to remember
+when using h5py is:
+
+    **Groups work like dictionaries, and datasets work like NumPy arrays**
+
+The very first thing you'll need to do is create a new file::
+
+    >>> import h5py
+    >>> import numpy as np
+    >>>
+    >>> f = h5py.File("mytestfile.hdf5", "w")
+
+The :ref:`File object <file>` is your starting point.  It has a couple of
+methods which look interesting.  One of them is ``create_dataset``::
+
+    >>> dset = f.create_dataset("mydataset", (100,), dtype='i')
+
+The object we created isn't an array, but :ref:`an HDF5 dataset <dataset>`.
+Like NumPy arrays, datasets have both a shape and a data type:
+
+    >>> dset.shape
+    (100,)
+    >>> dset.dtype
+    dtype('int32')
+
+They also support array-style slicing.  This is how you read data from and
+write data to a dataset in the file:
+
+    >>> dset[...] = np.arange(100)
+    >>> dset[0]
+    0
+    >>> dset[10]
+    10
+    >>> dset[0:100:10]
+    array([ 0, 10, 20, 30, 40, 50, 60, 70, 80, 90])
+
+For more, see :ref:`file` and :ref:`dataset`.
+
+
+Groups and hierarchical organization
+------------------------------------
+
+"HDF" stands for "Hierarchical Data Format".  Every object in an HDF5 file
+has a name, and they're arranged in a POSIX-style hierarchy with 
+``/``-separators::
+
+    >>> dset.name
+    u'/mydataset'
+
+The "folders" in this system are called :ref:`groups <group>`.  The ``File`` object we
+created is itself a group, in this case the `root group`, named ``/``:
+
+    >>> f.name
+    u'/'
+
+Creating a subgroup is accomplished via the aptly-named ``create_group``::
+
+    >>> grp = f.create_group("subgroup")
+
+All ``Group`` objects also have the ``create_*`` methods like File::
+
+    >>> dset2 = grp.create_dataset("another_dataset", (50,), dtype='f')
+    >>> dset2.name
+    u'/subgroup/another_dataset'
+
+By the way, you don't have to create all the intermediate groups manually.
+Specifying a full path works just fine::
+
+    >>> dset3 = f.create_dataset('subgroup2/dataset_three', (10,), dtype='i')
+    >>> dset3.name
+    u'/subgroup2/dataset_three'
+
+Groups support most of the Python dictionary-style interface.  
+You retrieve objects in the file using the item-retrieval syntax::
+
+    >>> dataset_three = f['subgroup2/dataset_three']
+
+Iterating over a group provides the names of its members::
+
+    >>> for name in f:
+    ...     print name
+    mydataset
+    subgroup
+    subgroup2
+
+Containership testing also uses names:
+
+    >>> "mydataset" in f
+    True
+    >>> "somethingelse" in f
+    False
+
+You can even use full path names:
+
+    >>> "subgroup/another_dataset" in f
+    True
+
+There are also the familiar ``keys()``, ``values()``, ``items()`` and
+``iter()`` methods, as well as ``get()``.
+
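+For instance, a quick sketch (output indicative, using the file ``f`` built
+above)::
+
+    >>> list(f.keys())
+    [u'mydataset', u'subgroup', u'subgroup2']
+    >>> f.get('nonexistent') is None
+    True
+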
+Since iterating over a group only yields its directly-attached members,
+iterating over an entire file is accomplished with the ``Group`` methods
+``visit()`` and ``visititems()``, which take a callable::
+
+    >>> def printname(name):
+    ...     print name
+    >>> f.visit(printname)
+    mydataset
+    subgroup
+    subgroup/another_dataset
+    subgroup2
+    subgroup2/dataset_three
+
+For more, see :ref:`group`.
+
+Attributes
+----------
+
+One of the best features of HDF5 is that you can store metadata right next
+to the data it describes.  All groups and datasets support attached named
+bits of data called `attributes`.
+
+Attributes are accessed through the ``attrs`` proxy object, which again
+implements the dictionary interface::
+
+    >>> dset.attrs['temperature'] = 99.5
+    >>> dset.attrs['temperature']
+    99.5
+    >>> 'temperature' in dset.attrs
+    True
+
+For more, see :ref:`attributes`.
+
+
diff --git a/docs/refs.rst b/docs/refs.rst
new file mode 100644
index 0000000..ca8ff95
--- /dev/null
+++ b/docs/refs.rst
@@ -0,0 +1,129 @@
+.. _refs:
+
+Object and Region References
+============================
+
+In addition to soft and external links, HDF5 supplies one more mechanism to
+refer to objects and data in a file.  HDF5 *references* are low-level pointers
+to other objects.  The great advantage of references is that they can be
+stored and retrieved as data; you can create an attribute or an entire dataset
+of reference type.
+
+References come in two flavors, object references and region references.
+As the name suggests, object references point to a particular object in a file,
+either a dataset, group or named datatype.  Region references always point to
+a dataset, and additionally contain information about a certain selection
+(*dataset region*) on that dataset.  For example, if you have a dataset
+representing an image, you could specify a region of interest, and store it
+as an attribute on the dataset.
+
+
+.. _refs_object:
+
+Using object references
+-----------------------
+
+It's trivial to create a new object reference; every high-level object
+in h5py has a read-only property "ref", which when accessed returns a new
+object reference:
+
+    >>> myfile = h5py.File('myfile.hdf5')
+    >>> mygroup = myfile['/some/group']
+    >>> ref = mygroup.ref
+    >>> print ref
+    <HDF5 object reference>
+
+"Dereferencing" these objects is straightforward; use the same syntax as when
+opening any other object:
+
+    >>> mygroup2 = myfile[ref]
+    >>> print mygroup2
+    <HDF5 group "/some/group" (0 members)>
+
+.. _refs_region:
+
+Using region references
+-----------------------
+
+Region references always contain a selection.  You create them using the 
+dataset property "regionref" and standard NumPy slicing syntax:
+
+    >>> myds = myfile.create_dataset('dset', (200,200))
+    >>> regref = myds.regionref[0:10, 0:5]
+    >>> print regref
+    <HDF5 region reference>
+
+The reference itself can now be used in place of slicing arguments to the
+dataset:
+
+    >>> subset = myds[regref]
+
+There is one complication; since HDF5 region references don't express shapes
+the same way as NumPy does, the data returned will be "flattened" into a
+1-D array:
+
+    >>> subset.shape
+    (50,)
+
+This is similar to the behavior of NumPy's fancy indexing, which returns
+a 1D array for selections which don't conform to a regular grid.
+
+In addition to storing a selection, region references inherit from object
+references, and can be used anywhere an object reference is accepted.  In this
+case the object they point to is the dataset used to create them.
+
+Storing references in a dataset
+-------------------------------
+
+HDF5 treats object and region references as data.  Consequently, there is a
+special HDF5 type to represent them.  However, NumPy has no equivalent type.
+Rather than implement a special "reference type" for NumPy, references are
+handled at the Python layer as plain, ordinary Python objects.  To NumPy they
+are represented with the "object" dtype (kind 'O').  A small amount of
+metadata attached to the dtype tells h5py to interpret the data as containing
+reference objects.
+
+H5py contains a convenience function to create these "hinted dtypes" for you:
+
+    >>> ref_dtype = h5py.special_dtype(ref=h5py.Reference)
+    >>> type(ref_dtype)
+    <type 'numpy.dtype'>
+    >>> ref_dtype.kind
+    'O'
+
+The types accepted by this "ref=" keyword argument are h5py.Reference (for
+object references) and h5py.RegionReference (for region references).
+
+To create an array of references, use this dtype as you normally would:
+
+    >>> ref_dataset = myfile.create_dataset("MyRefs", (100,), dtype=ref_dtype)
+
+You can read from and write to the array as normal:
+
+    >>> ref_dataset[0] = myfile.ref
+    >>> print ref_dataset[0]
+    <HDF5 object reference>
+
+Storing references in an attribute
+----------------------------------
+
+Simply assign the reference to a name; h5py will figure it out and store it
+with the correct type:
+
+    >>> myref = myfile.ref
+    >>> myfile.attrs["Root group reference"] = myref
+
+Null references
+---------------
+
+When you create a dataset of reference type, the uninitialized elements are
+"null" references.  H5py uses the truth value of a reference object to
+indicate whether or not it is null:
+
+    >>> print bool(myfile.ref)
+    True
+    >>> nullref = ref_dataset[50]
+    >>> print bool(nullref)
+    False
+
+
diff --git a/docs/special.rst b/docs/special.rst
new file mode 100644
index 0000000..2f79de2
--- /dev/null
+++ b/docs/special.rst
@@ -0,0 +1,128 @@
+Special types
+=============
+
+HDF5 supports a few types which have no direct NumPy equivalent.  Among the
+most useful and widely used are *variable-length* (VL) types, and enumerated
+types.  As of version 2.3, h5py fully supports HDF5 enums and VL types.
+
+How special types are represented
+---------------------------------
+
+Since there is no direct NumPy dtype for variable-length strings, enums or
+references, h5py extends the dtype system slightly to let HDF5 know how to
+store these types.  Each type is represented by a native NumPy dtype, with a
+small amount of metadata attached.  NumPy routines ignore the metadata, but
+h5py can use it to determine how to store the data.
+
+There are two functions for creating these "hinted" dtypes:
+
+.. function:: special_dtype(**kwds)
+
+    Create a NumPy dtype object containing type hints.  Only one keyword
+    may be specified.
+
+    :param vlen: Base type for HDF5 variable-length datatype.
+
+    :param enum: 2-tuple ``(basetype, values_dict)``.  ``basetype`` must be
+                 an integer dtype; ``values_dict`` is a dictionary mapping
+                 string names to integer values.
+
+    :param ref:  Provide class ``h5py.Reference`` or ``h5py.RegionReference``
+                 to create a type representing object or region references
+                 respectively.
+
+.. function:: check_dtype(**kwds)
+
+    Determine if the given dtype object is a special type.  Example::
+
+        >>> out = h5py.check_dtype(vlen=mydtype)
+        >>> if out is not None:
+        ...     print "Vlen of type %s" % out
+        Vlen of type <type 'str'>
+
+    :param vlen:    Check for an HDF5 variable-length type; returns base class
+    :param enum:    Check for an enumerated type; returns 2-tuple ``(basetype, values_dict)``.
+    :param ref:     Check for an HDF5 object or region reference; returns
+                    either ``h5py.Reference`` or ``h5py.RegionReference``.
+
+
+Variable-length strings
+-----------------------
+
+In HDF5, data in VL format is stored as arbitrary-length vectors of a base
+type.  In particular, strings are stored C-style in null-terminated buffers.
+NumPy has no native mechanism to support this.  Unfortunately, this is the
+de facto standard for representing strings in the HDF5 C API, and in many
+HDF5 applications.
+
+Thankfully, NumPy has a generic pointer type in the form of the "object" ("O")
+dtype.  In h5py, variable-length strings are mapped to object arrays.  A
+small amount of metadata attached to an "O" dtype tells h5py that its contents
+should be converted to VL strings when stored in the file.
+
+Existing VL strings can be read and written with no additional effort;
+Python strings and fixed-length NumPy strings can be auto-converted to VL
+data and stored.
+
+Here's an example showing how to create a VL array of strings::
+
+    >>> f = h5py.File('foo.hdf5')
+    >>> dt = h5py.special_dtype(vlen=str)
+    >>> ds = f.create_dataset('VLDS', (100,100), dtype=dt)
+    >>> ds.dtype.kind
+    'O'
+    >>> h5py.check_dtype(vlen=ds.dtype)
+    <type 'str'>
+
+
+.. _vlen:
+
+Arbitrary vlen data
+-------------------
+
+Starting with h5py 2.3, variable-length types are not restricted to strings.
+For example, you can create a "ragged" array of integers::
+
+    >>> dt = h5py.special_dtype(vlen=np.dtype('int32'))
+    >>> dset = f.create_dataset('vlen_int', (100,), dtype=dt)
+    >>> dset[0] = [1,2,3]
+    >>> dset[1] = [1,2,3,4,5]
+
+Single elements are read as NumPy arrays::
+
+    >>> dset[0]
+    array([1, 2, 3], dtype=int32)
+
+Multidimensional selections produce an object array whose members are integer
+arrays::
+
+    >>> dset[0:2]
+    array([array([1, 2, 3], dtype=int32), array([1, 2, 3, 4, 5], dtype=int32)], dtype=object)
+    
+
+Enumerated types
+----------------
+
+HDF5 has the concept of an *enumerated type*, which is an integer datatype
+with a restriction to certain named values.  Since NumPy has no such datatype,
+HDF5 ENUM types are read and written as integers.
+
+Here's an example of creating an enumerated type::
+
+    >>> dt = h5py.special_dtype(enum=('i', {"RED": 0, "GREEN": 1, "BLUE": 42}))
+    >>> h5py.check_dtype(enum=dt)
+    {'BLUE': 42, 'GREEN': 1, 'RED': 0}
+    >>> f = h5py.File('foo.hdf5','w')
+    >>> ds = f.create_dataset("EnumDS", (100,100), dtype=dt)
+    >>> ds.dtype.kind
+    'i'
+    >>> ds[0,:] = 42
+    >>> ds[0,0]
+    42
+    >>> ds[1,0]
+    0
+
+Object and region references
+----------------------------
+
+References have their :ref:`own section <refs>`.
diff --git a/docs/strings.rst b/docs/strings.rst
new file mode 100644
index 0000000..9ed03bc
--- /dev/null
+++ b/docs/strings.rst
@@ -0,0 +1,162 @@
+.. _strings:
+
+Strings in HDF5
+===============
+
+The Most Important Thing
+------------------------
+
+If you remember nothing else, remember this:
+
+    **All strings in HDF5 hold encoded text.**
+
+You *can't* store arbitrary binary data in HDF5 strings.  Not only will this
+break, it will break in odd, hard-to-discover ways that will leave
+you confused and cursing.
+
+
+.. _str_binary:
+
+How to store raw binary data
+----------------------------
+
+If you have a non-text blob in a Python byte string (as opposed to ASCII or
+UTF-8 encoded text, which is fine), you should wrap it in a ``void`` type for
+storage. This will map to the HDF5 OPAQUE datatype, and will prevent your
+blob from getting mangled by the string machinery.
+
+Here's an example of how to store binary data in an attribute, and then
+recover it::
+
+    >>> binary_blob = b"Hello\x00Hello\x00"
+    >>> dset.attrs["attribute_name"] = np.void(binary_blob)
+    >>> out = dset.attrs["attribute_name"]
+    >>> binary_blob = out.tostring()
+
+
+How to store text strings
+-------------------------
+
+At the high-level interface, h5py exposes three kinds of strings.  Each maps
+to a specific type within Python (but see :ref:`str_py3` below):
+
+* Fixed-length ASCII (NumPy ``S`` type)
+* Variable-length ASCII (Python 2 ``str``, Python 3 ``bytes``)
+* Variable-length UTF-8 (Python 2 ``unicode``, Python 3 ``str``)
+
+
+Compatibility
+^^^^^^^^^^^^^
+
+If you want to write maximally-compatible files and don't want to read the
+whole chapter:
+
+* Use ``numpy.string_`` for scalar attributes
+* Use the NumPy ``S`` dtype for datasets and array attributes
+
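+For instance, a minimal sketch of both (assuming an open file ``f`` and a
+dataset ``dset``)::
+
+    >>> dset.attrs["title"] = numpy.string_("My dataset")       # scalar attribute
+    >>> names = f.create_dataset("names", (10,), dtype="S10")   # fixed-width dataset
+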
+
+Fixed-length ASCII
+^^^^^^^^^^^^^^^^^^
+
+These are created when you use ``numpy.string_``:
+
+    >>> dset.attrs["name"] = numpy.string_("Hello")
+
+or the ``S`` dtype::
+
+    >>> dset = f.create_dataset("string_ds", (100,), dtype="S10")
+
+In the file, these map to fixed-width ASCII strings.  One byte per character
+is used.  The representation is "null-padded", which is the internal
+representation used by NumPy (and the only one which round-trips through HDF5).
+
+Technically, these strings are supposed to store `only` ASCII-encoded text,
+although in practice anything you can store in NumPy will round-trip.  But
+for compatibility with other programs using HDF5 (IDL, MATLAB, etc.), you
+should use ASCII only.
+
+.. note::
+    
+    This is the most-compatible way to store a string.  Everything else
+    can read it.
+
+Variable-length ASCII
+^^^^^^^^^^^^^^^^^^^^^
+
+These are created when you assign a byte string to an attribute::
+
+    >>> dset.attrs["attr"] = b"Hello"
+
+or when you create a dataset with an explicit "bytes" vlen type::
+
+    >>> dt = h5py.special_dtype(vlen=bytes)
+    >>> dset = f.create_dataset("name", (100,), dtype=dt)
+
+Note that they're `not` fully identical to Python byte strings.  You can
+only store ASCII-encoded text, without NULL bytes::
+
+    >>> dset.attrs["name"] = b"Hello\x00there"
+    ValueError: VLEN strings do not support embedded NULLs
+
+In the file, these are created as variable-length strings with character set
+H5T_CSET_ASCII.
+
+
+Variable-length UTF-8
+^^^^^^^^^^^^^^^^^^^^^
+
+These are created when you assign a ``unicode`` string to an attribute::
+
+    >>> dset.attrs["name"] = u"Hello"
+
+or if you create a dataset with an explicit ``unicode`` vlen type:
+
+    >>> dt = h5py.special_dtype(vlen=unicode)
+    >>> dset = f.create_dataset("name", (100,), dtype=dt)
+
+They can store any character a Python unicode string can store, with the
+exception of NULLs.  In the file these are created as variable-length strings
+with character set H5T_CSET_UTF8.
+
+
+Exceptions for Python 3
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Most strings in the HDF5 world are stored in ASCII, which means they map to
+byte strings.  But in Python 3, there's a strict separation between `data` and
+`text`, which intentionally makes it painful to handle encoded strings
+directly.
+
+So, when reading or writing scalar string attributes, on Python 3 they will 
+`always` be returned as type ``str``, regardless of the underlying storage
+mechanism.  The regular rules for writing apply; to get a fixed-width ASCII
+string, use ``numpy.string_``, and to get a variable-length ASCII string, use
+``bytes``.
+ 
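+A minimal sketch of the Python 3 behaviour (assuming a dataset ``dset``)::
+
+    >>> dset.attrs["name"] = b"Hello"   # stored as variable-length ASCII
+    >>> dset.attrs["name"]              # read back as text on Python 3
+    'Hello'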
+
+What about NumPy's ``U`` type?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+NumPy also has a Unicode type, a UTF-32 fixed-width format (4-byte characters).
+HDF5 has no support for wide characters.  Rather than trying to hack around
+this and "pretend" to support it, h5py will raise an error when attempting
+to create datasets or attributes of this type.
+
+
+Object names
+------------
+
+Unicode strings are used exclusively for object names in the file::
+
+    >>> f.name
+    u'/'
+
+You can supply either byte or unicode strings (on both Python 2 and Python 3)
+when creating or retrieving objects. If a byte string is supplied, 
+it will be used as-is; Unicode strings will be encoded down to UTF-8.
+
+In the file, h5py uses the most-compatible representation: H5T_CSET_ASCII for
+names in the ASCII range, and H5T_CSET_UTF8 otherwise:
+
+    >>> grp = f.create_group(b"name")
+    >>> grp2 = f.create_group(u"name2")
diff --git a/docs/swmr.rst b/docs/swmr.rst
new file mode 100644
index 0000000..0e5f1d0
--- /dev/null
+++ b/docs/swmr.rst
@@ -0,0 +1,168 @@
+.. _swmr:
+
+Single Writer Multiple Reader (SWMR)
+====================================
+
+Starting with version 2.5.0, h5py includes support for the HDF5 SWMR features.
+
+The SWMR feature is not available in the current release (1.8 series) of the
+HDF5 library. It is planned for production release in version 1.10. Until
+then, it is available in experimental prototype form in development snapshot
+version 1.9.178 on the
+`HDF Group ftp server <ftp://ftp.hdfgroup.uiuc.edu/pub/outgoing/SWMR/>`_ or the
+`HDF Group svn repository <http://svn.hdfgroup.uiuc.edu/hdf5/branches/revise_chunks>`_.
+
+.. Warning:: The SWMR feature is currently in prototype form and available for 
+             experimenting and testing. Please do not consider this a production
+             quality feature until the HDF5 library is released as 1.10.
+
+.. Warning:: FILES PRODUCED BY THE HDF5 1.9.X DEVELOPMENT SNAPSHOTS MAY NOT BE
+             READABLE BY OTHER VERSIONS OF HDF5, INCLUDING THE EXISTING 1.8
+             SERIES AND ALSO 1.10 WHEN IT IS RELEASED.
+
+What is SWMR?
+-------------
+
+The SWMR feature allows simple concurrent reading of an HDF5 file while it is
+being written from another process. Previously this was not possible, as the
+file data and metadata would not be synchronised, and attempts to read a file
+which was open for writing would fail or return garbage data.
+
+A file which is being written to in SWMR mode is guaranteed to always be in a
+valid (non-corrupt) state for reading. This has the added benefit of leaving a 
+file in a valid state even if the writing application crashes before closing 
+the file properly.
+
+This feature has been implemented to work with independent writer and reader
+processes. No synchronisation is required between processes and it is up to the
+user to implement either a file polling mechanism, inotify or any other IPC 
+mechanism to notify when data has been written.
+
+The SWMR functionality requires use of the latest HDF5 file format: v110. In
+practice this implies setting the ``libver`` bound to "latest" when opening or
+creating the file.
+
+
+.. Warning:: New v110 format files are *not* compatible with v18 format. So
+             files written in SWMR mode with libver='latest' cannot be opened
+             with older versions of the HDF5 library (basically any version
+             older than the SWMR feature).
+
+
+The HDF Group has documented the SWMR features in detail on their website:
+`Single-Writer/Multiple-Reader (SWMR) Documentation <http://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesSwmrDocs.html>`_.
+This is highly recommended reading for anyone intending to use the SWMR feature,
+even through h5py. For production systems in particular, pay attention to the
+file system requirements regarding POSIX I/O semantics.
+
+
+
+Using the SWMR feature from h5py
+--------------------------------
+
+The following basic steps are typically required by writer and reader processes:
+
+- The writer process creates the target file and all groups, datasets and attributes.
+- The writer process switches the file into SWMR mode.
+- Reader processes can open the file with swmr=True.
+- The writer writes and/or appends data to existing datasets (new groups and datasets *cannot* be created when in SWMR mode).
+- The writer regularly flushes the target dataset to make it visible to reader processes.
+- Readers refresh the target dataset before reading new meta-data and/or main data.
+- The writer eventually completes and closes the file as normal.
+- Readers can finish and close the file as normal whenever convenient.
+
+The following snippet demonstrates a SWMR writer appending to a single dataset::
+
+    f = h5py.File("swmr.h5", 'w', libver='latest')
+    arr = np.array([1,2,3,4])
+    dset = f.create_dataset("data", chunks=(2,), maxshape=(None,), data=arr)
+    f.swmr_mode = True
+    # Now it is safe for the reader to open the swmr.h5 file
+    for i in range(5):
+        new_shape = ((i+1) * len(arr), )
+        dset.resize( new_shape )
+        dset[i*len(arr):] = arr
+        dset.flush()
+        # Notify the reader process that new data has been written
+
+
+The following snippet demonstrates how to monitor a dataset as a SWMR reader::
+
+    f = h5py.File("swmr.h5", 'r', libver='latest', swmr=True)
+    dset = f["data"]
+    while True:
+        dset.id.refresh()
+        shape = dset.shape
+        print( shape )
+
+
+Examples
+--------
+
+In addition to the above example snippets, a few more complete examples can be
+found in the examples folder.  These examples are described in the following
+sections.
+
+Dataset monitor with inotify
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The inotify example demonstrates how to use SWMR in a reading application which
+monitors live progress as a dataset is being written by another process. This
+example uses the Linux inotify interface
+(via the `pyinotify <https://pypi.python.org/pypi/pyinotify>`_ Python bindings) to
+receive a signal each time the target file has been updated.
+
+.. literalinclude:: ../examples/swmr_inotify_example.py
+
+Multiprocess concurrent write and read
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The SWMR multiprocess example starts two concurrent child processes:
+a writer and a reader.
+The writer process first creates the target file and dataset. Then it switches
+the file into SWMR mode and the reader process is notified (with a
+multiprocessing.Event) that it is safe to open the file for reading.
+
+The writer process then continues to append chunks to the dataset. After each
+write it notifies the reader that new data has been written. Whether the new
+data is visible in the file at this point is subject to OS and file system
+latencies.
+
+The reader first waits for the initial "SWMR mode" notification from the
+writer, upon which it goes into a loop where it waits for further notifications
+from the writer. The reader may drop some notifications, but for each one
+received it will refresh the dataset and read the dimensions. After a time-out
+it will drop out of the loop and exit.
+
+.. literalinclude:: ../examples/swmr_multiprocess.py
+
+The example output below (from a virtual Ubuntu machine) illustrates some
+latency between the writer and reader::
+
+    python examples/swmr_multiprocess.py 
+      INFO  2015-02-26 18:05:03,195        root  Starting reader
+      INFO  2015-02-26 18:05:03,196        root  Starting reader
+      INFO  2015-02-26 18:05:03,197      reader  Waiting for initial event
+      INFO  2015-02-26 18:05:03,197        root  Waiting for writer to finish
+      INFO  2015-02-26 18:05:03,198      writer  Creating file swmrmp.h5
+      INFO  2015-02-26 18:05:03,203      writer  SWMR mode
+      INFO  2015-02-26 18:05:03,205      reader  Opening file swmrmp.h5
+      INFO  2015-02-26 18:05:03,210      writer  Resizing dset shape: (4,)
+      INFO  2015-02-26 18:05:03,212      writer  Sending event
+      INFO  2015-02-26 18:05:03,213      reader  Read dset shape: (4,)
+      INFO  2015-02-26 18:05:03,214      writer  Resizing dset shape: (8,)
+      INFO  2015-02-26 18:05:03,214      writer  Sending event
+      INFO  2015-02-26 18:05:03,215      writer  Resizing dset shape: (12,)
+      INFO  2015-02-26 18:05:03,215      writer  Sending event
+      INFO  2015-02-26 18:05:03,215      writer  Resizing dset shape: (16,)
+      INFO  2015-02-26 18:05:03,215      reader  Read dset shape: (12,)
+      INFO  2015-02-26 18:05:03,216      writer  Sending event
+      INFO  2015-02-26 18:05:03,216      writer  Resizing dset shape: (20,)
+      INFO  2015-02-26 18:05:03,216      reader  Read dset shape: (16,)
+      INFO  2015-02-26 18:05:03,217      writer  Sending event
+      INFO  2015-02-26 18:05:03,217      reader  Read dset shape: (20,)
+      INFO  2015-02-26 18:05:03,218      reader  Read dset shape: (20,)
+      INFO  2015-02-26 18:05:03,219        root  Waiting for reader to finish
+
+
diff --git a/docs/whatsnew/2.0.rst b/docs/whatsnew/2.0.rst
new file mode 100644
index 0000000..75879f7
--- /dev/null
+++ b/docs/whatsnew/2.0.rst
@@ -0,0 +1,177 @@
+What's new in h5py 2.0
+======================
+
+HDF5 for Python (h5py) 2.0 represents the first major refactoring of the h5py
+codebase since the project's launch in 2008.  Many of the most important
+changes are behind the scenes, and include changes to the way h5py interacts
+with the HDF5 library and Python.  These changes have substantially
+improved h5py's stability, and make it possible to use more modern versions
+of HDF5 without compatibility concerns.  It is now also possible to use
+h5py with Python 3.
+
+Enhancements unlikely to affect compatibility
+---------------------------------------------
+
+* HDF5 1.8.3 through 1.8.7 now work correctly and are officially supported.
+
+* Python 3.2 is officially supported by h5py!  Thanks especially to
+  Darren Dale for getting this working.
+
+* Fill values can now be specified when creating a dataset.  The fill time is
+  H5D_FILL_TIME_IFSET for contiguous datasets, and H5D_FILL_TIME_ALLOC for
+  chunked datasets.
+
+* On Python 3, dictionary-style methods like Group.keys() and Group.values()
+  return view-like objects instead of lists.
+
+* Object and region references now work correctly in compound types.
+
+* Zero-length dimensions for extendible axes are now allowed.
+
+* H5py no longer attempts to auto-import ipython on startup.
+
+* File format bounds can now be given when opening a high-level File object
+  (keyword "libver").
+
+
+Changes which may break existing code
+-------------------------------------
+
+Supported HDF5/Python versions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* HDF5 1.6.X is no longer supported on any platform; following the release of
+  1.6.10 some time ago, this branch is no longer maintained by The HDF Group.
+
+* Python 2.6 or later is now required to run h5py.  This is a consequence of
+  the numerous changes made to h5py for Python 3 compatibility.
+
+* On Python 2.6, unittest2 is now required to run the test suite.
+
+Group, Dataset and Datatype constructors have changed
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In h5py 2.0, it is no longer possible to create new groups, datasets or
+named datatypes by passing names and settings to the constructors directly.
+Instead, you should use the standard Group methods create_group and
+create_dataset.
+
+The File constructor remains unchanged and is still the correct mechanism for
+opening and creating files.
+
+Code which manually creates Group, Dataset or Datatype objects will have to
+be modified to use create_group or create_dataset.  File-resident datatypes
+can be created by assigning a NumPy dtype to a name
+(e.g. mygroup["name"] = numpy.dtype('S10')).
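+
+For example, a minimal sketch of the 2.0 idiom (names illustrative)::
+
+    grp = f.create_group("mygroup")
+    dset = grp.create_dataset("data", (100,), dtype='f')
+    grp["mytype"] = numpy.dtype('S10')   # file-resident named datatype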
+
+Unicode is now used for object names
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Older versions of h5py used byte strings to represent names in the file.
+Starting with version 2.0, you may use either byte or unicode strings to create
+objects, but object names (obj.name, etc) will generally be returned as Unicode.
+
+Code which may be affected:
+
+* Anything which uses "isinstance" or explicit type checks on names, expecting
+  "str" objects.  Such checks should be removed, or changed to compare to
+  "basestring" instead.
+
+* In Python 2.X, other parts of your application may complain if they are
+  handed Unicode data which can't be encoded down to ascii.  This is a
+  general problem in Python 2.
+
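+A quick illustration of the new behaviour (output indicative)::
+
+    >>> f['data'] = numpy.arange(10)
+    >>> f['data'].name
+    u'/data'
+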
+File objects must be manually closed
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+With h5py 1.3, when File objects (or low-level FileID) objects went out of
+scope, the corresponding HDF5 file was closed.  This led to surprising
+behavior, especially when files were opened with the H5F_CLOSE_STRONG flag;
+"losing" the original File object meant that all open groups and datasets
+suddenly became invalid.
+
+Beginning with h5py 2.0, files must be manually closed, by calling the "close"
+method or by using the file object as a context manager.  If you forget to
+close a file, the HDF5 library will try to close it for you when the
+application exits.
+
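+For example, the context-manager form closes the file reliably (names
+illustrative)::
+
+    with h5py.File('data.hdf5', 'r') as f:
+        arr = f['mydataset'][...]
+    # the file is closed here, even if an exception was raised
+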
+Please note that opening the same file multiple times (i.e. without closing
+it first) continues to result in undefined behavior.
+
+Changes to scalar slicing code
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When a scalar dataset was accessed with the syntax ``dataset[()]``, h5py
+incorrectly returned an ndarray.  H5py now correctly returns an array
+scalar.  Using ``dataset[...]`` on a scalar dataset still returns an ndarray.
+
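+For instance, a quick sketch (output indicative, assuming an open file ``f``)::
+
+    >>> sds = f.create_dataset('scalar', data=42.0)
+    >>> sds[()]      # array scalar
+    42.0
+    >>> sds[...]     # 0-dimensional ndarray
+    array(42.0)
+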
+Array scalars now always returned when indexing a dataset
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When using datasets of compound type, retrieving a single element incorrectly
+returned a tuple of values, rather than an instance of ``numpy.void_`` with the
+proper fields populated.  Among other things, this meant you couldn't do
+things like ``dataset[index][field]``.  H5py now always returns an array scalar,
+except in the case of object dtypes (references, vlen strings).
+
+Reading object-like data strips special type information
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In the past, reading multiple data points from dataset with vlen or reference
+type returned a Numpy array with a "special dtype" (such as those created
+by ``h5py.special_dtype()``).  In h5py 2.0, all such arrays now have a generic
+Numpy object dtype (``numpy.dtype('O')``).  To get a copy of the dataset's
+dtype, always use the dataset's dtype property directly (``mydataset.dtype``).
+
+The selections module has been removed
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Only numpy-style slicing arguments remain supported in the high level interface.
+Existing code which uses the selections module should be refactored to use
+numpy slicing (and ``numpy.s_`` as appropriate), or the standard C-style HDF5
+dataspace machinery.
+
+The H5Error exception class has been removed (along with h5py.h5e)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+All h5py exceptions are now native Python exceptions, no longer inheriting
+from H5Error.  RuntimeError is raised if h5py can't figure out what exception
+is appropriate... every instance of this behavior is considered a bug.  If you
+see h5py raising RuntimeError please report it so we can add the correct
+mapping!
+
+The old errors module (h5py.h5e) has also been removed.  There is no public
+error-management API.
+
+File .mode property is now either 'r' or 'r+'
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Files can be opened using the same mode arguments as before, but now the
+property File.mode will always return 'r' (read-only) or 'r+' (read-write).
+
+Long-deprecated dict methods have been removed
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Certain ancient aliases for Group/AttributeManager methods (e.g. ``listnames``)
+have been removed.
+Please use the standard Python dict interface (Python 2 or Python 3 as
+appropriate) to interact with these objects.
+
+Known issues
+------------
+
+* Thread support has been improved in h5py 2.0. However, we still recommend
+  that for your own sanity you use locking to serialize access to files.
+
+* There are reports of crashes related to storing object and region references.
+  If this happens to you, please post on the mailing list or contact the h5py
+  author directly.
+
+
+
+
+
+
+
+
+
diff --git a/docs/whatsnew/2.1.rst b/docs/whatsnew/2.1.rst
new file mode 100644
index 0000000..270396d
--- /dev/null
+++ b/docs/whatsnew/2.1.rst
@@ -0,0 +1,61 @@
+What's new in h5py 2.1
+======================
+
+Dimension scales
+----------------
+
+H5py now supports the Dimension Scales feature of HDF5!  Thanks to Darren
+Dale for implementing this.  You can find more information on using scales
+in the :ref:`dimensionscales` section of the docs.
+
+Unicode strings allowed in attributes
+-------------------------------------
+
+Group, dataset and attribute names in h5py 2.X can all be given as unicode.
+Now, you can also store (scalar) unicode data in attribute values as well::
+
+    >>> myfile.attrs['x'] = u"I'm a Unicode string!"
+
+Storing Unicode strings in datasets or as members of compound types is not
+yet implemented.
+
+Dataset size property
+---------------------
+
+Dataset objects now expose a ``.size`` property which provides the total
+number of elements in the dataspace.
+
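+For example (assuming an open file ``f``)::
+
+    >>> dset = f.create_dataset('x', (4, 5))
+    >>> dset.size
+    20
+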
+``Dataset.value`` property is now deprecated.
+---------------------------------------------
+
+The property ``Dataset.value``, which dates back to h5py 1.0, is deprecated
+and will be removed in a later release.  This property dumps the entire
+dataset into a NumPy array.  Code using ``.value`` should be updated to use
+NumPy indexing, using ``mydataset[...]`` or ``mydataset[()]`` as appropriate.
+
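+For example::
+
+    >>> arr = mydataset.value      # deprecated
+    >>> arr = mydataset[...]       # preferred replacement
+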
+Bug fixes
+---------
+
+    * Object and region references were sometimes incorrectly wrapped
+      in a ``numpy.object_`` instance (issue 202)
+    * H5py now ignores old versions of Cython (<0.13) when building
+      (issue 221)
+    * Link access property lists weren't being properly tracked in the high
+      level interface (issue 212)
+    * Race condition fixed in identifier tracking which led to Python crashes
+      (issue 151)
+    * Highlevel objects will now complain if you try to bind them to the wrong
+      HDF5 object types (issue 191)
+    * Unit tests can now be run after installation (issue 201)
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/whatsnew/2.2.rst b/docs/whatsnew/2.2.rst
new file mode 100644
index 0000000..69835e6
--- /dev/null
+++ b/docs/whatsnew/2.2.rst
@@ -0,0 +1,101 @@
+What's new in h5py 2.2
+======================
+
+
+Support for Parallel HDF5
+-------------------------
+
+On UNIX platforms, you can now take advantage of MPI and Parallel HDF5.
+Cython, ``mpi4py`` and an MPI-enabled build of HDF5 are required.
+See :ref:`parallel` in the documentation for details.
+
+
+Support for Python 3.3
+----------------------
+
+Python 3.3 is now officially supported.
+
+
+Mini float support (issue #141)
+-------------------------------
+
+Two-byte floats (NumPy ``float16``) are supported.
+
+HDF5 scale/offset filter
+------------------------
+
+The Scale/Offset filter added in HDF5 1.8 is now available.
+
+
+Field indexing is now allowed when writing to a dataset (issue #42)
+-------------------------------------------------------------------
+
+H5py has long supported reading only certain fields from a dataset::
+
+    >>> dset = f.create_dataset('x', (100,), dtype=np.dtype([('a', 'f'), ('b', 'i')]))
+    >>> out = dset['a', 0:100:10]
+    >>> out.dtype
+    dtype('float32')
+
+Now, field names are also allowed when writing to a dataset:
+
+    >>> dset['a', 20:50] = 1.0
+
+
+Region references preserve shape (issue #295)
+---------------------------------------------
+
+Previously, region references always resulted in a 1D selection, even when
+2D slicing was used::
+
+    >>> dset = f.create_dataset('x', (10, 10))
+    >>> ref = dset.regionref[0:5,0:5]
+    >>> out = dset[ref]
+    >>> out.shape
+    (25,)
+
+Shape is now preserved::
+
+    >>> out = dset[ref]
+    >>> out.shape
+    (5, 5)
+
+Additionally, the shape of both the target dataspace and the selection shape
+can be determined via new methods on the ``regionref`` proxy (now available
+on both datasets and groups)::
+
+    >>> f.regionref.shape(ref)
+    (10, 10)
+    >>> f.regionref.selection(ref)
+    (5, 5)
+
+
+Committed types can be linked to datasets and attributes
+--------------------------------------------------------
+
+HDF5 supports "shared" named types stored in the file::
+
+    >>> f['name'] = np.dtype("int64")
+
+You can now use these types when creating a new dataset or attribute, and
+HDF5 will "link" the dataset type to the named type::
+
+    >>> dset = f.create_dataset('int dataset', (10,), dtype=f['name'])
+    >>> f.attrs.create('int scalar attribute', shape=(), dtype=f['name'])
+
+
+``move`` method on Group objects
+--------------------------------
+
+It's no longer necessary to move objects in a file by manually re-linking them::
+
+    >>> f.create_group('a')
+    >>> f['b'] = f['a']
+    >>> del f['a']
+
+The method ``Group.move`` allows this to be performed in one step::
+
+    >>> f.move('a', 'b')
+
+Both the source and destination must be in the same file.
+
diff --git a/docs/whatsnew/2.3.rst b/docs/whatsnew/2.3.rst
new file mode 100644
index 0000000..dc9a2fb
--- /dev/null
+++ b/docs/whatsnew/2.3.rst
@@ -0,0 +1,85 @@
+What's new in h5py 2.3
+======================
+
+
+Support for arbitrary vlen data
+-------------------------------
+
+Variable-length data is :ref:`no longer restricted to strings <vlen>`.  You
+can use this feature to produce "ragged" arrays, whose members are 1D
+arrays of variable length.
+
+The implementation of special types was changed to use the NumPy dtype
+"metadata" field. This change should be transparent, as access to special types
+is handled through ``h5py.special_dtype`` and ``h5py.check_dtype``.
+
+
+Improved exception messages
+---------------------------
+
+H5py has historically suffered from low-detail exception messages generated
+automatically by HDF5.  While the exception types in 2.3 remain identical to
+those in 2.2, the messages have been substantially improved to provide more
+information as to the source of the error.
+
+Examples::
+
+    ValueError: Unable to set extend dataset (Dimension cannot exceed the existing maximal size (new: 100 max: 1))
+
+    IOError: Unable to open file (Unable to open file: name = 'x3', errno = 2, error message = 'no such file or directory', flags = 0, o_flags = 0)
+
+    KeyError: "Unable to open object (Object 'foo' doesn't exist)"
+
+
+Improved setuptools support
+---------------------------
+
+``setup.py`` now uses ``setup_requires`` to make installation via pip friendlier.
+
+
+Multiple low-level additions
+----------------------------
+
+Improved support for opening datasets via the low-level interface, by
+adding ``H5Dopen2`` and many new property-list functions.
+
+
+Improved support for MPI features
+---------------------------------
+
+Added support for retrieving the MPI communicator and info objects from an
+open file.  Added boilerplate code to allow compiling cleanly against newer
+versions of mpi4py.
+
+
+Readonly files can now be opened in default mode
+------------------------------------------------
+
+When a read-only file is opened with no mode flags, h5py now defaults to
+opening it in read-only mode rather than raising an exception.
+
+
+Single-step build for HDF5 on Windows
+-------------------------------------
+
+Building h5py on Windows has typically been hamstrung by the need to build
+a compatible version of HDF5 first.  A new Paver-based system located in
+the "windows" distribution directory allows single-step compilation of HDF5
+with settings that are known to work with h5py.
+
+For more, see:
+
+https://github.com/h5py/h5py/tree/master/windows
+
+Thanks to
+---------
+
+* Martin Teichmann
+* Florian Rathgerber
+* Pierre de Buyl
+* Thomas Caswell
+* Andy Salnikov
+* Darren Dale
+* Robert David Grant
+* Toon Verstraelen
+* Many others who contributed bug reports
diff --git a/docs/whatsnew/2.4.rst b/docs/whatsnew/2.4.rst
new file mode 100644
index 0000000..e631699
--- /dev/null
+++ b/docs/whatsnew/2.4.rst
@@ -0,0 +1,47 @@
+What's new in h5py 2.4
+======================
+
+Build system changes
+--------------------
+
+The setup.py-based build system has been reworked to be more maintainable, and
+to fix certain long-standing bugs.  As a consequence, the options to setup.py
+have changed; a new top-level "configure" command handles options like 
+``--hdf5=/path/to/hdf5`` and ``--mpi``.  Setup.py now works correctly under
+Python 3 when these options are used.
+
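+For example (paths illustrative)::
+
+    $ python setup.py configure --hdf5=/path/to/hdf5 --mpi
+    $ python setup.py build
+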
+Cython (0.17+) is now required when building from source on all platforms;
+the .c files are no longer shipped in the UNIX release.  The minimum NumPy 
+version is now 1.6.1.
+
+Files will now auto-close
+-------------------------
+  
+Files are now automatically closed when all objects within them
+are unreachable. Previously, if ``File.close()`` was not explicitly called,
+files would remain open and "leaks" were possible if the ``File`` object
+was lost.
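+
+A small illustration (the exact moment of closing depends on when the
+objects become unreachable to the garbage collector)::
+
+    dset = h5py.File('foo.h5', 'r')['data']   # File object not kept around
+    data = dset[...]                          # file is still held open
+    del dset                                  # file now closes automatically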
+
+Thread safety improvements
+--------------------------
+
+Access to all APIs, high- and low-level, is now protected by a global lock.
+The entire API is now believed to be thread-safe.  Feedback and real-world
+testing are welcome.
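+
+A toy illustration; h5py serializes the underlying HDF5 calls itself,
+though application-level invariants still need their own synchronization::
+
+    import threading
+    import h5py
+
+    f = h5py.File('threads.h5', 'w')
+    dset = f.create_dataset('x', (100,), dtype='i4')
+
+    def worker(start):
+        dset[start:start + 10] = 42   # guarded by h5py's global lock
+
+    threads = [threading.Thread(target=worker, args=(i * 10,))
+               for i in range(10)]
+    for t in threads:
+        t.start()
+    for t in threads:
+        t.join()
+    f.close()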
+
+External link improvements
+--------------------------
+
+External links now work if the target file is already open.  Previously
+this was not possible because of a mismatch in the file close strengths.
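+
+For example, resolving a link while the target file is held open in the
+same process::
+
+    tgt = h5py.File('target.h5', 'w')
+    tgt.create_group('grp')                # target stays open
+
+    src = h5py.File('source.h5', 'w')
+    src['link'] = h5py.ExternalLink('target.h5', '/grp')
+    grp = src['link']                      # now resolves correctly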
+
+Thanks to
+---------
+
+Many people, but especially:
+
+* Matthieu Brucher
+* Laurence Hole
+* John Tyree
+* Pierre de Buyl
+* Matthew Brett
diff --git a/docs/whatsnew/index.rst b/docs/whatsnew/index.rst
new file mode 100644
index 0000000..ec48769
--- /dev/null
+++ b/docs/whatsnew/index.rst
@@ -0,0 +1,15 @@
+.. _whatsnew:
+
+**********************
+"What's new" documents
+**********************
+
+These document the changes between minor (or major) versions of h5py.
+
+.. toctree::
+
+    2.4
+    2.3
+    2.2
+    2.1
+    2.0
diff --git a/docs_api/Makefile b/docs_api/Makefile
new file mode 100644
index 0000000..33379fd
--- /dev/null
+++ b/docs_api/Makefile
@@ -0,0 +1,177 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  xml        to make Docutils-native XML files"
+	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	rm -rf $(BUILDDIR)/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Low-levelAPIforh5py.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Low-levelAPIforh5py.qhc"
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/Low-levelAPIforh5py"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Low-levelAPIforh5py"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through platex and dvipdfmx..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo
+	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+	@echo "Run \`make' in that directory to run these through makeinfo" \
+	      "(use \`make info' here to do that automatically)."
+
+info:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo "Running Texinfo files through makeinfo..."
+	make -C $(BUILDDIR)/texinfo info
+	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+	@echo
+	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
+
+xml:
+	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+	@echo
+	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+	@echo
+	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/docs_api/automod.py b/docs_api/automod.py
new file mode 100644
index 0000000..0fb4839
--- /dev/null
+++ b/docs_api/automod.py
@@ -0,0 +1,258 @@
+
+"""
+    Requires patched version of autodoc.py
+    http://bugs.python.org/issue3422
+"""
+import re
+from functools import partial
+
+# === Regexp replacement machinery ============================================
+
+role_expr = re.compile(r"(:.+:(?:`.+`)?)")
+
+def safe_replace(istr, expr, rpl):
+    """ Perform a role-safe replacement of all occurances of "expr", using
+        the callable "rpl".
+    """
+    outparts = []
+    for part in role_expr.split(istr):
+        if not role_expr.search(part):
+            part = expr.sub(rpl, part)
+        outparts.append(part)
+    return "".join(outparts)
+
+
+# === Replace literal class names =============================================
+
+class_base = r"""
+(?P<pre>
+  \W+
+)
+(?P<name>%s)
+(?P<post>
+  \W+
+)
+"""
+
+class_exprs = { "ObjectID": "h5py.h5.ObjectID",
+                "GroupID": "h5py.h5g.GroupID",
+                "FileID": "h5py.h5f.FileID",
+                "DatasetID": "h5py.h5d.DatasetID",
+                "TypeID": "h5py.h5t.TypeID",
+                "[Dd]ataset creation property list": "h5py.h5p.PropDCID",
+                "[Dd]ataset transfer property list": "h5py.h5p.PropDXID",
+                "[Ff]ile creation property list": "h5py.h5p.PropFCID",
+                "[Ff]ile access property list": "h5py.h5p.PropFAID",
+                "[Ll]ink access property list": "h5py.h5p.PropLAID",
+                "[Ll]ink creation property list": "h5py.h5p.PropLCID",
+                "[Gg]roup creation property list": "h5py.h5p.PropGCID"}
+
+
+class_exprs = dict( 
+    (re.compile(class_base % x.replace(" ",r"\s"), re.VERBOSE), y) \
+    for x, y in class_exprs.iteritems() )
+
+def replace_class(istr):
+
+    def rpl(target, match):
+        pre, name, post = match.group('pre', 'name', 'post')
+        return '%s:class:`%s <%s>`%s' % (pre, name, target, post)
+
+    for expr, target in class_exprs.iteritems():
+        rpl2 = partial(rpl, target)
+        istr = safe_replace(istr, expr, rpl2)
+
+    return istr
+
+# === Replace constant and category expressions ===============================
+
+# e.g. h5f.OBJ_ALL -> :data:`h5f.OBJ_ALL <h5py.h5f.OBJ_ALL>`
+# and  h5f.OBJ*    -> :ref:`h5f.OBJ* <ref.h5f.OBJ>`
+
+const_exclude = ['HDF5', 'API', 'H5', 'H5A', 'H5D', 'H5F', 'H5P', 'H5Z', 'INT',
+                 'UINT', 'STRING', 'LONG', 'PHIL', 'GIL', 'TUPLE', 'LIST',
+                 'FORTRAN', 'BOOL', 'NULL', 'NOT', 'SZIP']
+const_exclude = ["%s(?:\W|$)" % x for x in const_exclude]
+const_exclude = "|".join(const_exclude)
+
+const_expr = re.compile(r"""
+(?P<pre>
+  (?:^|\s+)                   # Must be preceded by whitespace or string start
+  \W?                         # May have punctuation ( (CONST) or "CONST" )
+  (?!%s)                      # Exclude known list of non-constant objects
+)
+(?P<module>h5[a-z]{0,2}\.)?   # Optional h5xx. prefix
+(?P<name>[A-Z_][A-Z0-9_]+)    # The constant name itself
+(?P<wild>\*)?                 # Wildcard indicates this is a category
+(?P<post>
+  \W?                         # May have trailing punctuation
+  (?:$|\s+)                   # Must be followed by whitespace or end of string
+)                      
+""" % const_exclude, re.VERBOSE)
+
+def replace_constant(istr, current_module):
+
+    def rpl(match):
+        mod, name, wild = match.group('module', 'name', 'wild')
+        pre, post = match.group('pre', 'post')
+
+        if mod is None:
+            mod = current_module+'.'
+            displayname = name
+        else:
+            displayname = mod+name
+
+        if wild:
+            target = 'ref.'+mod+name
+            role = ':ref:'
+            displayname += '*'
+        else:
+            target = 'h5py.'+mod+name
+            role = ':data:'
+
+        return '%s%s`%s <%s>`%s' % (pre, role, displayname, target, post)
+
+    return safe_replace(istr, const_expr, rpl)
+
+
+# === Replace literal references to modules ===================================
+
+mod_expr = re.compile(r"""
+(?P<pre>
+  (?:^|\s+)                 # Must be preceded by whitespace
+  \W?                       # Optional opening paren/quote/whatever
+)
+(?!h5py)                    # Don't match the package name
+(?P<name>h5[a-z]{0,2})      # Names of the form h5, h5a, h5fd
+(?P<post>
+  \W?                       # Optional closing paren/quote/whatever
+  (?:$|\s+)                 # Must be followed by whitespace
+)
+""", re.VERBOSE)
+
+def replace_module(istr):
+
+    def rpl(match):
+        pre, name, post = match.group('pre', 'name', 'post')
+        return '%s:mod:`%s <h5py.%s>`%s' % (pre, name, name, post)
+
+    return safe_replace(istr, mod_expr, rpl)
+
+
+# === Replace parameter lists =================================================
+
+# e.g. "    + STRING path ('/default')" -> ":param STRING path: ('/default')"
+
+param_expr = re.compile(r"""
+^
+\s*
+\+
+\s+
+(?P<desc>
+  [^\s\(]
+  .*
+  [^\s\)]
+)
+(?:
+  \s+
+  \(
+  (?P<default>
+    [^\s\(]
+    .*
+    [^\s\)]
+  )
+  \)
+)?
+$
+""", re.VERBOSE)
+
+def replace_param(istr):
+    """ Replace parameter lists.  Not role-safe. """
+
+    def rpl(match):
+        desc, default = match.group('desc', 'default')
+        default = ' (%s) ' % default if default is not None else ''
+        return ':param %s:%s' % (desc, default)
+
+    return param_expr.sub(rpl, istr)
+
+
+
+# === Begin Sphinx extension code =============================================
+
+def is_callable(docstring):
+    return str(docstring).strip().startswith('(')
+
+def setup(spx):
+
+    def proc_doc(app, what, name, obj, options, lines):
+        """ Process docstrings for modules and routines """
+
+        final_lines = lines[:]
+
+        # Remove the signature lines from the docstring
+        if is_callable(obj.__doc__):
+            doclines = []
+            arglines = []
+            final_lines = arglines
+            for line in lines:
+                if len(line.strip()) == 0:
+                    final_lines = doclines
+                final_lines.append(line)
+
+        # Resolve class names, constants and modules
+        if hasattr(obj, 'im_class'):
+            mod = obj.im_class.__module__
+        elif hasattr(obj, '__module__'):
+            mod = obj.__module__
+        else:
+            mod = ".".join(name.split('.')[0:2])  # i.e. "h5py.h5z"
+        mod = mod.split('.')[1]  # i.e. 'h5z'
+
+        del lines[:]
+        for line in final_lines:
+            #line = replace_param(line)
+            line = replace_constant(line, mod)
+            line = replace_module(line)
+            line = replace_class(line)
+            line = line.replace('**kwds', '\*\*kwds').replace('*args','\*args')
+            lines.append(line)
+
+
+
+
+    def proc_sig(app, what, name, obj, options, signature, return_annotation):
+        """ Auto-generate function signatures from docstrings """
+
+        def getsig(docstring):
+            """ Get (sig, return) from a docstring, or None. """
+            if not is_callable(docstring):
+                return None
+
+            lines = []
+            for line in docstring.split("\n"):
+                if len(line.strip()) == 0:
+                    break
+                lines.append(line)
+            rawsig = " ".join(x.strip() for x in lines)
+
+            if '=>' in rawsig:
+                sig, ret = tuple(x.strip() for x in rawsig.split('=>'))
+            elif '->' in rawsig:
+                sig, ret = tuple(x.strip() for x in rawsig.split('->'))
+            else:
+                sig = rawsig
+                ret = None
+
+            if sig == "()":
+                sig = "( )" # Why? Ask autodoc.
+
+            return (sig, ret)
+
+        sigtuple = getsig(obj.__doc__)
+
+        return sigtuple
+
+    spx.connect('autodoc-process-signature', proc_sig)
+    spx.connect('autodoc-process-docstring', proc_doc)
+
diff --git a/docs_api/conf.py b/docs_api/conf.py
new file mode 100644
index 0000000..6153c6d
--- /dev/null
+++ b/docs_api/conf.py
@@ -0,0 +1,260 @@
+# -*- coding: utf-8 -*-
+#
+# Low-level API for h5py documentation build configuration file, created by
+# sphinx-quickstart on Fri Jan 31 22:42:08 2014.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import h5py
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = ['sphinx.ext.autodoc', 'automod']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Low-level API for h5py'
+copyright = u'2014, Andrew Collette and contributors'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = "%d.%d" % h5py.version.version_tuple[0:2]
+
+# The full version, including alpha/beta/rc tags.
+release = h5py.version.version
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'nature'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+html_title = "Low-level API for h5py"
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'Low-levelAPIforh5pydoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+  ('index', 'Low-levelAPIforh5py.tex', u'Low-level API for h5py Documentation',
+   u'Andrew Collette and contributors', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    ('index', 'low-levelapiforh5py', u'Low-level API for h5py Documentation',
+     [u'Andrew Collette and contributors'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+  ('index', 'Low-levelAPIforh5py', u'Low-level API for h5py Documentation',
+   u'Andrew Collette and contributors', 'Low-levelAPIforh5py', 'One line description of project.',
+   'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
diff --git a/docs_api/h5.rst b/docs_api/h5.rst
new file mode 100644
index 0000000..793a9cf
--- /dev/null
+++ b/docs_api/h5.rst
@@ -0,0 +1,44 @@
+Module H5
+=========
+
+.. automodule:: h5py.h5
+
+Library API
+-----------
+
+.. autofunction:: get_config
+.. autofunction:: get_libversion
+
+
+Configuration class
+-------------------
+
+.. autoclass:: H5PYConfig
+
+
+Module constants
+----------------
+
+.. data:: INDEX_NAME
+
+    Resolve indices in alphanumeric order
+
+.. data:: INDEX_CRT_ORDER
+
+    Resolve indices in order of object creation.  Not always available.
+
+.. data:: ITER_NATIVE
+
+    Traverse index in the fastest possible order.  No particular pattern is
+    guaranteed.
+
+.. data:: ITER_INC
+
+    Traverse index in increasing order
+
+.. data:: ITER_DEC
+
+    Traverse index in decreasing order
+
+
+
diff --git a/docs_api/h5a.rst b/docs_api/h5a.rst
new file mode 100644
index 0000000..9c6ac4a
--- /dev/null
+++ b/docs_api/h5a.rst
@@ -0,0 +1,30 @@
+Module H5A
+==========
+
+.. automodule:: h5py.h5a
+
+Functional API
+--------------
+
+.. autofunction:: create
+.. autofunction:: open
+.. autofunction:: exists
+.. autofunction:: rename
+.. autofunction:: delete
+.. autofunction:: get_num_attrs
+.. autofunction:: get_info
+.. autofunction:: iterate
+
+Info objects
+------------
+
+.. autoclass:: AttrInfo
+    :members:
+
+Attribute objects
+-----------------
+
+.. autoclass:: AttrID
+    :members:
+
+
diff --git a/docs_api/h5d.rst b/docs_api/h5d.rst
new file mode 100644
index 0000000..7b59be3
--- /dev/null
+++ b/docs_api/h5d.rst
@@ -0,0 +1,47 @@
+Module H5D
+==========
+
+.. automodule:: h5py.h5d
+    :members:
+
+Module constants
+----------------
+
+Storage strategies
+~~~~~~~~~~~~~~~~~~
+
+.. data:: COMPACT
+.. data:: CONTIGUOUS
+.. data:: CHUNKED
+
+.. _ref.h5d.ALLOC_TIME:
+
+Allocation times
+~~~~~~~~~~~~~~~~
+
+.. data:: ALLOC_TIME_DEFAULT
+.. data:: ALLOC_TIME_LATE 
+.. data:: ALLOC_TIME_EARLY
+.. data:: ALLOC_TIME_INCR  
+
+Allocation status
+~~~~~~~~~~~~~~~~~
+
+.. data:: SPACE_STATUS_NOT_ALLOCATED
+.. data:: SPACE_STATUS_PART_ALLOCATED
+.. data:: SPACE_STATUS_ALLOCATED
+
+Fill time
+~~~~~~~~~
+
+.. data:: FILL_TIME_ALLOC
+.. data:: FILL_TIME_NEVER
+.. data:: FILL_TIME_IFSET
+
+Fill values
+~~~~~~~~~~~
+
+.. data:: FILL_VALUE_UNDEFINED
+.. data:: FILL_VALUE_DEFAULT
+.. data:: FILL_VALUE_USER_DEFINED
+
diff --git a/docs_api/h5f.rst b/docs_api/h5f.rst
new file mode 100644
index 0000000..a8c3436
--- /dev/null
+++ b/docs_api/h5f.rst
@@ -0,0 +1,87 @@
+Module H5F
+==========
+
+.. automodule:: h5py.h5f
+
+Functional API
+--------------
+
+.. autofunction:: open
+.. autofunction:: create
+.. autofunction:: flush
+.. autofunction:: is_hdf5
+.. autofunction:: mount
+.. autofunction:: unmount
+.. autofunction:: get_name
+.. autofunction:: get_obj_count
+.. autofunction:: get_obj_ids
+
+File objects
+------------
+
+.. autoclass:: FileID
+    :members:
+
+Module constants
+----------------
+
+.. _ref.h5f.ACC:
+
+File access flags
+~~~~~~~~~~~~~~~~~
+
+.. data:: ACC_TRUNC
+
+    Create/truncate file
+
+.. data:: ACC_EXCL
+
+    Create file if it doesn't exist; fail otherwise
+
+.. data:: ACC_RDWR
+
+    Open in read/write mode
+
+.. data:: ACC_RDONLY
+
+    Open in read-only mode
+
+
+.. _ref.h5f.CLOSE:
+
+File close strength
+~~~~~~~~~~~~~~~~~~~
+
+.. data:: CLOSE_WEAK
+.. data:: CLOSE_SEMI
+.. data:: CLOSE_STRONG
+.. data:: CLOSE_DEFAULT
+
+.. _ref.h5f.SCOPE:
+
+File scope
+~~~~~~~~~~
+
+.. data:: SCOPE_LOCAL
+.. data:: SCOPE_GLOBAL
+
+.. _ref.h5f.OBJ:
+
+Object types
+~~~~~~~~~~~~
+
+.. data:: OBJ_FILE
+.. data:: OBJ_DATASET
+.. data:: OBJ_GROUP
+.. data:: OBJ_DATATYPE
+.. data:: OBJ_ATTR
+.. data:: OBJ_ALL
+.. data:: OBJ_LOCAL
+
+Library version bounding
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. data:: LIBVER_EARLIEST
+.. data:: LIBVER_LATEST
+
+
diff --git a/docs_api/h5fd.rst b/docs_api/h5fd.rst
new file mode 100644
index 0000000..a04254f
--- /dev/null
+++ b/docs_api/h5fd.rst
@@ -0,0 +1,61 @@
+Module H5FD
+===========
+
+.. automodule:: h5py.h5fd
+
+Module constants
+----------------
+
+.. data:: MEM_DEFAULT
+.. data:: MEM_SUPER
+.. data:: MEM_BTREE
+.. data:: MEM_DRAW
+.. data:: MEM_GHEAP
+.. data:: MEM_LHEAP
+.. data:: MEM_OHDR
+.. data:: MEM_NTYPES
+
+File drivers
+~~~~~~~~~~~~
+
+.. data:: CORE
+.. data:: FAMILY
+.. data:: LOG
+.. data:: MPIO
+.. data:: MULTI
+.. data:: SEC2
+.. data:: STDIO
+.. data:: WINDOWS
+
+Logging driver settings
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. note:: Not all logging flags are currently implemented by HDF5.
+
+.. data:: LOG_LOC_READ
+.. data:: LOG_LOC_WRITE
+.. data:: LOG_LOC_SEEK
+.. data:: LOG_LOC_IO
+
+.. data:: LOG_FILE_READ 
+.. data:: LOG_FILE_WRITE
+.. data:: LOG_FILE_IO
+
+.. data:: LOG_FLAVOR
+
+.. data:: LOG_NUM_READ
+.. data:: LOG_NUM_WRITE
+.. data:: LOG_NUM_SEEK
+.. data:: LOG_NUM_IO
+
+.. data:: LOG_TIME_OPEN
+.. data:: LOG_TIME_READ
+.. data:: LOG_TIME_WRITE
+.. data:: LOG_TIME_SEEK
+.. data:: LOG_TIME_CLOSE
+.. data:: LOG_TIME_IO
+
+.. data:: LOG_ALLOC
+.. data:: LOG_ALL
+
+
diff --git a/docs_api/h5g.rst b/docs_api/h5g.rst
new file mode 100644
index 0000000..f4419e9
--- /dev/null
+++ b/docs_api/h5g.rst
@@ -0,0 +1,63 @@
+Module H5G
+==========
+
+.. automodule:: h5py.h5g
+
+Functional API
+--------------
+
+.. autofunction:: open
+.. autofunction:: create
+.. autofunction:: iterate
+.. autofunction:: get_objinfo
+
+Info objects
+------------
+
+.. autoclass:: GroupStat
+    :members:
+
+Group objects
+-------------
+
+.. autoclass:: GroupID
+    :members:
+
+Module constants
+----------------
+
+Object type codes
+~~~~~~~~~~~~~~~~~
+
+.. data:: LINK
+
+    Symbolic link
+
+.. data:: GROUP
+
+    HDF5 group
+
+.. data:: DATASET
+
+    HDF5 dataset
+
+.. data:: TYPE
+
+    Named (file-resident) datatype
+
+Link type codes
+~~~~~~~~~~~~~~~
+
+.. data:: LINK_HARD
+.. data:: LINK_SOFT
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs_api/h5i.rst b/docs_api/h5i.rst
new file mode 100644
index 0000000..3b4de13
--- /dev/null
+++ b/docs_api/h5i.rst
@@ -0,0 +1,26 @@
+Module H5I
+==========
+
+Functional API
+--------------
+
+.. automodule:: h5py.h5i
+    :members:
+
+Module constants
+----------------
+
+Identifier classes
+~~~~~~~~~~~~~~~~~~
+
+.. data:: BADID
+.. data:: FILE
+.. data:: GROUP
+.. data:: DATASPACE
+.. data:: DATASET
+.. data:: ATTR
+.. data:: REFERENCE
+.. data:: GENPROP_CLS
+.. data:: GENPROP_LST
+.. data:: DATATYPE
+
diff --git a/docs_api/h5l.rst b/docs_api/h5l.rst
new file mode 100644
index 0000000..b1e4510
--- /dev/null
+++ b/docs_api/h5l.rst
@@ -0,0 +1,18 @@
+Module H5L
+==========
+
+Linkproxy objects
+-----------------
+
+.. automodule:: h5py.h5l
+    :members:
+
+Module constants
+----------------
+
+Link types
+~~~~~~~~~~
+
+.. data:: TYPE_HARD
+.. data:: TYPE_SOFT
+.. data:: TYPE_EXTERNAL
diff --git a/docs_api/h5o.rst b/docs_api/h5o.rst
new file mode 100644
index 0000000..0f0c41e
--- /dev/null
+++ b/docs_api/h5o.rst
@@ -0,0 +1,57 @@
+Module H5O
+==========
+
+.. automodule:: h5py.h5o
+
+Functional API
+--------------
+
+.. autofunction:: open
+.. autofunction:: link
+.. autofunction:: copy
+.. autofunction:: set_comment
+.. autofunction:: get_comment
+.. autofunction:: visit
+.. autofunction:: get_info
+
+Info classes
+------------
+
+.. autoclass:: ObjInfo
+    :members:
+
+Module constants
+----------------
+
+Object types
+~~~~~~~~~~~~
+
+.. data:: TYPE_GROUP
+.. data:: TYPE_DATASET
+.. data:: TYPE_NAMED_DATATYPE
+
+.. _ref.h5o.COPY:
+
+Copy flags
+~~~~~~~~~~
+
+.. data:: COPY_SHALLOW_HIERARCHY_FLAG
+
+    Copy only immediate members of a group.
+
+.. data:: COPY_EXPAND_SOFT_LINK_FLAG
+
+    Expand soft links into new objects.
+
+.. data:: COPY_EXPAND_EXT_LINK_FLAG
+
+    Expand external links into new objects.
+
+.. data:: COPY_EXPAND_REFERENCE_FLAG
+
+    Copy objects that are pointed to by references.
+
+.. data:: COPY_WITHOUT_ATTR_FLAG
+
+    Copy object without copying attributes.
+
diff --git a/docs_api/h5p.rst b/docs_api/h5p.rst
new file mode 100644
index 0000000..2a1d4eb
--- /dev/null
+++ b/docs_api/h5p.rst
@@ -0,0 +1,95 @@
+Module H5P
+==========
+
+.. automodule:: h5py.h5p
+
+Functional API
+--------------
+
+.. autofunction:: create
+
+Base classes
+------------
+
+.. autoclass:: PropID
+    :show-inheritance:
+    :members:
+
+.. autoclass:: PropClassID
+    :show-inheritance:
+    :members:
+
+.. autoclass:: PropInstanceID
+    :show-inheritance:
+    :members:
+
+.. autoclass:: PropCreateID
+    :show-inheritance:
+    :members:
+
+.. autoclass:: PropCopyID
+    :show-inheritance:
+    :members:
+
+File creation
+-------------
+
+.. autoclass:: PropFCID
+    :show-inheritance:
+    :members:
+
+File access
+-----------
+
+.. autoclass:: PropFAID
+    :show-inheritance:
+    :members:
+
+Dataset creation
+----------------
+
+.. autoclass:: PropDCID
+    :show-inheritance:
+    :members:
+
+
+Link creation
+-------------
+
+.. autoclass:: PropLCID
+    :show-inheritance:
+    :members:
+
+
+Link access
+-----------
+
+.. autoclass:: PropLAID
+    :show-inheritance:
+    :members:
+
+
+Group creation
+--------------
+
+.. autoclass:: PropGCID
+    :show-inheritance:
+    :members:
+
+
+Module constants
+----------------
+
+Predefined classes
+~~~~~~~~~~~~~~~~~~
+
+.. data:: DEFAULT
+.. data:: FILE_CREATE
+.. data:: FILE_ACCESS
+.. data:: DATASET_CREATE
+.. data:: DATASET_XFER
+.. data:: OBJECT_COPY
+.. data:: LINK_CREATE
+.. data:: LINK_ACCESS
+.. data:: GROUP_CREATE
+
diff --git a/docs_api/h5r.rst b/docs_api/h5r.rst
new file mode 100644
index 0000000..cc0290c
--- /dev/null
+++ b/docs_api/h5r.rst
@@ -0,0 +1,36 @@
+Module H5R
+==========
+
+.. automodule:: h5py.h5r
+
+Functional API
+--------------
+
+.. autofunction:: create
+.. autofunction:: dereference
+.. autofunction:: get_region
+.. autofunction:: get_obj_type
+.. autofunction:: get_name
+
+Reference classes
+-----------------
+
+.. autoclass:: Reference
+    :members:
+
+.. autoclass:: RegionReference
+    :show-inheritance:
+    :members:
+
+API constants
+-------------
+
+.. data:: OBJECT
+
+    Typecode for object references
+
+.. data:: DATASET_REGION
+
+    Typecode for dataset region references
+
+
diff --git a/docs_api/h5s.rst b/docs_api/h5s.rst
new file mode 100644
index 0000000..e72dd58
--- /dev/null
+++ b/docs_api/h5s.rst
@@ -0,0 +1,62 @@
+Module H5S
+==========
+
+.. automodule:: h5py.h5s
+
+Functional API
+--------------
+
+.. autofunction:: create
+.. autofunction:: create_simple
+.. autofunction:: decode
+
+Dataspace objects
+-----------------
+
+.. autoclass:: SpaceID
+    :show-inheritance:
+    :members:
+
+Module constants
+----------------
+
+.. data:: ALL
+
+    Accepted in place of an actual dataspace; means "every point"
+
+.. data:: UNLIMITED
+    
+    Indicates an unlimited maximum dimension
+
+Dataspace class codes
+~~~~~~~~~~~~~~~~~~~~~
+
+.. data:: NO_CLASS
+.. data:: SCALAR
+.. data:: SIMPLE
+
+Selection codes
+~~~~~~~~~~~~~~~
+
+.. data:: SELECT_NOOP
+.. data:: SELECT_SET
+.. data:: SELECT_OR
+.. data:: SELECT_AND
+.. data:: SELECT_XOR
+.. data:: SELECT_NOTB
+.. data:: SELECT_NOTA
+.. data:: SELECT_APPEND
+.. data:: SELECT_PREPEND
+.. data:: SELECT_INVALID
+
+Existing selection type
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. data:: SEL_NONE
+.. data:: SEL_POINTS
+.. data:: SEL_HYPERSLABS
+.. data:: SEL_ALL
+
+
+
+
diff --git a/docs_api/h5t.rst b/docs_api/h5t.rst
new file mode 100644
index 0000000..d04fb8b
--- /dev/null
+++ b/docs_api/h5t.rst
@@ -0,0 +1,234 @@
+Module H5T
+==========
+
+.. automodule:: h5py.h5t
+
+Functions specific to h5py
+--------------------------
+
+.. autofunction:: py_create
+.. autofunction:: special_dtype
+.. autofunction:: check_dtype
+
+Functional API
+--------------
+.. autofunction:: create
+.. autofunction:: open
+.. autofunction:: array_create
+.. autofunction:: enum_create
+.. autofunction:: vlen_create
+.. autofunction:: decode
+.. autofunction:: convert
+.. autofunction:: find
+
+Type classes
+------------
+
+.. autoclass:: TypeID
+    :members:
+
+Atomic classes
+~~~~~~~~~~~~~~
+
+Atomic types are integers and floats.  Much of the functionality for each is
+inherited from the base class :class:`TypeAtomicID`.
+
+.. autoclass:: TypeAtomicID
+    :show-inheritance:
+    :members:
+    
+.. autoclass:: TypeIntegerID
+    :show-inheritance:
+    :members:
+
+.. autoclass:: TypeFloatID
+    :show-inheritance:
+    :members:
+
+Strings
+~~~~~~~
+
+.. autoclass:: TypeStringID
+    :show-inheritance:
+    :members:
+
+Compound Types
+~~~~~~~~~~~~~~
+
+Traditional compound types (like NumPy record types) and enumerated types share
+a base class, :class:`TypeCompositeID`.
+
+.. autoclass:: TypeCompositeID
+    :show-inheritance:
+    :members:
+
+.. autoclass:: TypeCompoundID
+    :show-inheritance:
+    :members:
+
+.. autoclass:: TypeEnumID
+    :show-inheritance:
+    :members:
+
+Other types
+~~~~~~~~~~~
+
+.. autoclass:: TypeArrayID
+    :show-inheritance:
+    :members:
+
+.. autoclass:: TypeOpaqueID
+    :show-inheritance:
+    :members:
+
+.. autoclass:: TypeVlenID
+    :show-inheritance:
+    :members:
+
+.. autoclass:: TypeBitfieldID
+    :show-inheritance:
+    :members:
+
+.. autoclass:: TypeReferenceID
+    :show-inheritance:
+    :members:
+
+Predefined Datatypes
+--------------------
+
+These locked types are pre-allocated by the library.
+
+Floating-point
+~~~~~~~~~~~~~~
+
+.. data:: IEEE_F32LE
+.. data:: IEEE_F32BE
+.. data:: IEEE_F64LE
+.. data:: IEEE_F64BE
+
+Integer types
+~~~~~~~~~~~~~
+
+.. data:: STD_I8LE
+.. data:: STD_I16LE
+.. data:: STD_I32LE
+.. data:: STD_I64LE
+
+.. data:: STD_I8BE
+.. data:: STD_I16BE
+.. data:: STD_I32BE
+.. data:: STD_I64BE
+
+.. data:: STD_U8LE
+.. data:: STD_U16LE
+.. data:: STD_U32LE
+.. data:: STD_U64LE
+
+.. data:: STD_U8BE
+.. data:: STD_U16BE
+.. data:: STD_U32BE
+.. data:: STD_U64BE
+
+.. data:: NATIVE_INT8
+.. data:: NATIVE_UINT8
+.. data:: NATIVE_INT16
+.. data:: NATIVE_UINT16
+.. data:: NATIVE_INT32
+.. data:: NATIVE_UINT32
+.. data:: NATIVE_INT64
+.. data:: NATIVE_UINT64
+.. data:: NATIVE_FLOAT
+.. data:: NATIVE_DOUBLE 
+
+Reference types
+~~~~~~~~~~~~~~~
+
+.. data:: STD_REF_OBJ
+.. data:: STD_REF_DSETREG
+
+String types
+~~~~~~~~~~~~
+
+.. data:: C_S1
+
+    Null-terminated fixed-length string
+
+.. data:: FORTRAN_S1
+
+    Zero-padded fixed-length string
+    
+.. data:: VARIABLE
+
+    Variable-length string
+
+Python object type
+~~~~~~~~~~~~~~~~~~
+
+.. data:: PYTHON_OBJECT
+
+Module constants
+----------------
+
+Datatype class codes
+~~~~~~~~~~~~~~~~~~~~
+
+.. data:: NO_CLASS
+.. data:: INTEGER
+.. data:: FLOAT
+.. data:: TIME
+.. data:: STRING
+.. data:: BITFIELD
+.. data:: OPAQUE
+.. data:: COMPOUND
+.. data:: REFERENCE
+.. data:: ENUM
+.. data:: VLEN
+.. data:: ARRAY
+
+API Constants
+~~~~~~~~~~~~~
+
+.. data:: SGN_NONE
+.. data:: SGN_2
+
+.. data:: ORDER_LE
+.. data:: ORDER_BE
+.. data:: ORDER_VAX
+.. data:: ORDER_NONE
+.. data:: ORDER_NATIVE
+
+.. data:: DIR_DEFAULT
+.. data:: DIR_ASCEND
+.. data:: DIR_DESCEND
+
+.. data:: STR_NULLTERM
+.. data:: STR_NULLPAD
+.. data:: STR_SPACEPAD
+
+.. data:: NORM_IMPLIED
+.. data:: NORM_MSBSET
+.. data:: NORM_NONE
+
+.. data:: CSET_ASCII
+.. data:: CSET_UTF8
+
+.. data:: PAD_ZERO
+.. data:: PAD_ONE
+.. data:: PAD_BACKGROUND
+
+.. data:: BKG_NO
+.. data:: BKG_TEMP
+.. data:: BKG_YES
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs_api/h5z.rst b/docs_api/h5z.rst
new file mode 100644
index 0000000..22d85b6
--- /dev/null
+++ b/docs_api/h5z.rst
@@ -0,0 +1,65 @@
+Module H5Z
+==========
+
+.. automodule:: h5py.h5z
+    :members:
+
+Module constants
+----------------
+
+.. _ref.h5z.FILTER:
+
+Predefined filters
+~~~~~~~~~~~~~~~~~~
+
+.. data:: FILTER_NONE
+.. data:: FILTER_ALL
+.. data:: FILTER_DEFLATE
+.. data:: FILTER_SHUFFLE
+.. data:: FILTER_FLETCHER32
+.. data:: FILTER_SZIP
+.. data:: FILTER_SCALEOFFSET
+.. data:: FILTER_LZF
+
+.. _ref.h5z.FLAG:
+
+Filter flags
+~~~~~~~~~~~~
+
+.. data:: FLAG_DEFMASK
+.. data:: FLAG_MANDATORY
+.. data:: FLAG_OPTIONAL
+.. data:: FLAG_INVMASK
+.. data:: FLAG_REVERSE
+.. data:: FLAG_SKIP_EDC
+
+.. _ref.h5z.SZIP:
+
+SZIP-specific options
+~~~~~~~~~~~~~~~~~~~~~
+
+.. data:: SZIP_ALLOW_K13_OPTION_MASK
+.. data:: SZIP_CHIP_OPTION_MASK
+.. data:: SZIP_EC_OPTION_MASK
+.. data:: SZIP_NN_OPTION_MASK
+.. data:: SZIP_MAX_PIXELS_PER_BLOCK
+
+Scale/offset-specific options
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. data:: SO_FLOAT_DSCALE
+.. data:: SO_FLOAT_ESCALE
+.. data:: SO_INT
+.. data:: SO_INT_MINBITS_DEFAULT
+
+Other flags
+~~~~~~~~~~~
+
+.. data:: FILTER_CONFIG_ENCODE_ENABLED
+.. data:: FILTER_CONFIG_DECODE_ENABLED
+
+.. data:: DISABLE_EDC
+.. data:: ENABLE_EDC
+.. data:: NO_EDC
+
+
diff --git a/docs_api/index.rst b/docs_api/index.rst
new file mode 100644
index 0000000..d5b6356
--- /dev/null
+++ b/docs_api/index.rst
@@ -0,0 +1,41 @@
+
+Low-Level API Reference
+=======================
+
+
+This documentation contains the auto-generated API information for the
+HDF5 for Python "low-level" interface, a collection of Cython modules
+which form the interface to the HDF5 C library.  It's hosted separately from
+our main documentation as it requires autodoc.
+
+These docs are updated less frequently than the spiffy ReadTheDocs-hosted
+main documentation; this means roughly once per minor (X.Y) release.
+
+This may not be what you're looking for!
+----------------------------------------
+
+**The main docs for h5py are at** http://docs.h5py.org.  **These are
+the docs specifically for the h5py low-level interface.**
+
+Contents
+--------
+
+.. toctree::
+    :maxdepth: 2
+
+    objects
+    h5
+    h5a
+    h5ac
+    h5d
+    h5f
+    h5fd
+    h5g
+    h5i
+    h5l
+    h5o
+    h5p
+    h5r
+    h5s
+    h5t
+    h5z
\ No newline at end of file
diff --git a/docs_api/objects.rst b/docs_api/objects.rst
new file mode 100644
index 0000000..0094597
--- /dev/null
+++ b/docs_api/objects.rst
@@ -0,0 +1,6 @@
+Base object classes
+===================
+
+.. automodule:: h5py._objects
+
+.. autoclass:: ObjectID
diff --git a/examples/swmr_inotify_example.py b/examples/swmr_inotify_example.py
new file mode 100644
index 0000000..f328f7a
--- /dev/null
+++ b/examples/swmr_inotify_example.py
@@ -0,0 +1,85 @@
+
+"""
+    Demonstrate the use of h5py in SWMR mode to monitor the growth of a dataset
+    on notification of file modifications.
+    
+    This demo uses pyinotify as a wrapper of Linux inotify.
+    https://pypi.python.org/pypi/pyinotify
+    
+    Usage:
+            swmr_inotify_example.py [FILENAME [DATASETNAME]]
+            
+              FILENAME:    name of file to monitor. Default: swmr.h5
+              DATASETNAME: name of dataset to monitor in DATAFILE. Default: data
+            
+    This script will open the file in SWMR mode and monitor the shape of the
+    dataset on every write event (from inotify). If another application is 
+    concurrently writing data to the file, the writer must have switched
+    the file into SWMR mode before this script can open the file.
+"""
+import asyncore
+import pyinotify
+import sys
+import h5py
+import logging
+
+#assert h5py.version.hdf5_version_tuple >= (1,9,178), "SWMR requires HDF5 version >= 1.9.178"
+
+class EventHandler(pyinotify.ProcessEvent):
+
+    def monitor_dataset(self, filename, datasetname):
+        logging.info("Opening file %s", filename)
+        self.f = h5py.File(filename, 'r', libver='latest', swmr=True)
+        logging.debug("Looking up dataset %s"%datasetname)
+        self.dset = self.f[datasetname]
+
+        self.get_dset_shape()
+        
+    def get_dset_shape(self):
+        logging.debug("Refreshing dataset")
+        self.dset.refresh()
+        
+        logging.debug("Getting shape")
+        shape = self.dset.shape
+        logging.info("Read data shape: %s"%str(shape))
+        return shape        
+        
+    def read_dataset(self, latest):
+        logging.info("Reading out dataset [%d]"%latest)
+        self.dset[latest:]
+                
+    def process_IN_MODIFY(self, event):
+        logging.debug("File modified!")
+        shape = self.get_dset_shape()
+        self.read_dataset(shape[0])
+                    
+    def process_IN_CLOSE_WRITE(self, event):
+        logging.info("File writer closed file")
+        self.get_dset_shape()
+        logging.debug("Good bye!")
+        sys.exit(0)
+
+
+if __name__ == "__main__":
+    logging.basicConfig(format='%(asctime)s  %(levelname)s\t%(message)s',level=logging.INFO)
+    
+    file_name = "swmr.h5"
+    if len(sys.argv) > 1:
+        file_name = sys.argv[1]
+    dataset_name = "data"
+    if len(sys.argv) > 2:
+        dataset_name = sys.argv[2]
+
+
+    wm = pyinotify.WatchManager()  # Watch Manager
+    mask = pyinotify.IN_MODIFY | pyinotify.IN_CLOSE_WRITE 
+    evh = EventHandler()
+    evh.monitor_dataset( file_name, dataset_name )
+
+    notifier = pyinotify.AsyncNotifier(wm, evh)
+    wdd = wm.add_watch(file_name, mask, rec=False)
+
+    # Sit in this loop() until the file writer closes the file
+    # or the user hits ctrl-c
+    asyncore.loop()
+
diff --git a/examples/swmr_multiprocess.py b/examples/swmr_multiprocess.py
new file mode 100644
index 0000000..84eb1e8
--- /dev/null
+++ b/examples/swmr_multiprocess.py
@@ -0,0 +1,116 @@
+"""
+    Demonstrate the use of h5py in SWMR mode to write to a dataset (appending) 
+    from one process while monitoring the growing dataset from another process.
+    
+    Usage:
+            swmr_multiprocess.py [FILENAME [DATASETNAME]]
+            
+              FILENAME:    name of file to monitor. Default: swmrmp.h5
+              DATASETNAME: name of dataset to monitor in DATAFILE. Default: data
+            
+    This script will start up two processes: a writer and a reader. The writer
+    will open/create the file (FILENAME) in SWMR mode, create a dataset and start
+    appending data to it. After each append the dataset is flushed and an event
+    sent to the reader process. Meanwhile the reader process will wait for events 
+    from the writer and when triggered it will refresh the dataset and read the 
+    current shape of it.
+"""
+
+import sys, time
+import h5py
+import numpy as np
+import logging
+from multiprocessing import Process, Event
+
+class SwmrReader(Process):
+    def __init__(self, event, fname, dsetname, timeout = 2.0):
+        super(SwmrReader, self).__init__()
+        self._event = event
+        self._fname = fname
+        self._dsetname = dsetname
+        self._timeout = timeout
+        
+    def run(self):
+        self.log = logging.getLogger('reader')
+        self.log.info("Waiting for initial event")
+        assert self._event.wait( self._timeout )
+        self._event.clear()
+        
+        self.log.info("Opening file %s", self._fname)
+        f = h5py.File(self._fname, 'r', libver='latest', swmr=True)
+        assert f.swmr_mode
+        dset = f[self._dsetname]
+        try:
+            # monitor and read loop
+            while self._event.wait( self._timeout ):
+                self._event.clear()
+                self.log.debug("Refreshing dataset")
+                dset.refresh()
+
+                shape = dset.shape
+                self.log.info("Read dset shape: %s"%str(shape))
+        finally:
+            f.close()
+
+class SwmrWriter(Process):
+    def __init__(self, event, fname, dsetname):
+        super(SwmrWriter, self).__init__()
+        self._event = event
+        self._fname = fname
+        self._dsetname = dsetname
+        
+    def run(self):
+        self.log = logging.getLogger('writer')
+        self.log.info("Creating file %s", self._fname)
+        f = h5py.File(self._fname, 'w', libver='latest')
+        try:
+            arr = np.array([1,2,3,4])
+            dset = f.create_dataset(self._dsetname, chunks=(2,), maxshape=(None,), data=arr)
+            assert not f.swmr_mode
+
+            self.log.info("SWMR mode")
+            f.swmr_mode = True
+            assert f.swmr_mode
+            self.log.debug("Sending initial event")
+            self._event.set()        
+
+            # Write loop
+            for i in range(5):
+                new_shape = ((i+1) * len(arr), )
+                self.log.info("Resizing dset shape: %s"%str(new_shape))
+                dset.resize( new_shape )
+                self.log.debug("Writing data")
+                dset[i*len(arr):] = arr
+                #dset.write_direct( arr, np.s_[:], np.s_[i*len(arr):] )
+                self.log.debug("Flushing data")
+                dset.flush()
+                self.log.info("Sending event")
+                self._event.set()        
+        finally:
+            f.close()
+
+
+if __name__ == "__main__":
+    logging.basicConfig(format='%(levelname)10s  %(asctime)s  %(name)10s  %(message)s',level=logging.INFO)
+    fname = 'swmrmp.h5'
+    dsetname = 'data'
+    if len(sys.argv) > 1:
+        fname = sys.argv[1]
+    if len(sys.argv) > 2:
+        dsetname = sys.argv[2]
+        
+    event = Event()
+    reader = SwmrReader(event, fname, dsetname)
+    writer = SwmrWriter(event, fname, dsetname)
+    
+    logging.info("Starting reader")
+    reader.start()
+    logging.info("Starting reader")
+    writer.start()
+    
+    logging.info("Waiting for writer to finish")
+    writer.join()
+    logging.info("Waiting for reader to finish")
+    reader.join()
+
+
diff --git a/h5py.egg-info/PKG-INFO b/h5py.egg-info/PKG-INFO
deleted file mode 100644
index 0ed11d1..0000000
--- a/h5py.egg-info/PKG-INFO
+++ /dev/null
@@ -1,38 +0,0 @@
-Metadata-Version: 1.1
-Name: h5py
-Version: 2.4.0
-Summary: Read and write HDF5 files from Python
-Home-page: http://www.h5py.org
-Author: Andrew Collette
-Author-email: andrew dot collette at gmail dot com
-License: UNKNOWN
-Download-URL: https://pypi.python.org/pypi/h5py
-Description: 
-        The h5py package provides both a high- and low-level interface to the HDF5
-        library from Python. The low-level interface is intended to be a complete
-        wrapping of the HDF5 API, while the high-level component supports  access to
-        HDF5 files, datasets and groups using established Python and NumPy concepts.
-        
-        A strong emphasis on automatic conversion between Python (Numpy) datatypes and
-        data structures and their HDF5 equivalents vastly simplifies the process of
-        reading and writing data from Python.
-        
-        Supports HDF5 versions 1.8.4 and higher.  On Windows, HDF5 is included with
-        the installer.
-        
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: Intended Audience :: Information Technology
-Classifier: Intended Audience :: Science/Research
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Programming Language :: Python
-Classifier: Topic :: Scientific/Engineering
-Classifier: Topic :: Database
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Classifier: Operating System :: Unix
-Classifier: Operating System :: POSIX :: Linux
-Classifier: Operating System :: MacOS :: MacOS X
-Classifier: Operating System :: Microsoft :: Windows
-Requires: numpy (>=1.6.1)
-Requires: Cython (>=0.17)
diff --git a/h5py.egg-info/SOURCES.txt b/h5py.egg-info/SOURCES.txt
deleted file mode 100644
index 82c1015..0000000
--- a/h5py.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,121 +0,0 @@
-ANN.rst
-MANIFEST.in
-README.rst
-api_gen.py
-setup.py
-setup_build.py
-setup_configure.py
-examples/multiprocessing_example.py
-examples/threading_example.py
-h5py/__init__.py
-h5py/_conv.pxd
-h5py/_conv.pyx
-h5py/_errors.pxd
-h5py/_errors.pyx
-h5py/_hdf5.pxd
-h5py/_locks.pxi
-h5py/_objects.pxd
-h5py/_objects.pyx
-h5py/_proxy.pxd
-h5py/_proxy.pyx
-h5py/api_compat.h
-h5py/api_functions.txt
-h5py/api_types_ext.pxd
-h5py/api_types_hdf5.pxd
-h5py/h5.pxd
-h5py/h5.pyx
-h5py/h5a.pxd
-h5py/h5a.pyx
-h5py/h5ac.pxd
-h5py/h5ac.pyx
-h5py/h5d.pxd
-h5py/h5d.pyx
-h5py/h5ds.pxd
-h5py/h5ds.pyx
-h5py/h5f.pxd
-h5py/h5f.pyx
-h5py/h5fd.pxd
-h5py/h5fd.pyx
-h5py/h5g.pxd
-h5py/h5g.pyx
-h5py/h5i.pxd
-h5py/h5i.pyx
-h5py/h5l.pxd
-h5py/h5l.pyx
-h5py/h5o.pxd
-h5py/h5o.pyx
-h5py/h5p.pxd
-h5py/h5p.pyx
-h5py/h5r.pxd
-h5py/h5r.pyx
-h5py/h5s.pxd
-h5py/h5s.pyx
-h5py/h5t.pxd
-h5py/h5t.pyx
-h5py/h5z.pxd
-h5py/h5z.pyx
-h5py/highlevel.py
-h5py/ipy_completer.py
-h5py/numpy.pxd
-h5py/utils.pxd
-h5py/utils.pyx
-h5py/version.py
-h5py.egg-info/PKG-INFO
-h5py.egg-info/SOURCES.txt
-h5py.egg-info/dependency_links.txt
-h5py.egg-info/requires.txt
-h5py.egg-info/top_level.txt
-h5py/_hl/__init__.py
-h5py/_hl/attrs.py
-h5py/_hl/base.py
-h5py/_hl/dataset.py
-h5py/_hl/datatype.py
-h5py/_hl/dims.py
-h5py/_hl/files.py
-h5py/_hl/filters.py
-h5py/_hl/group.py
-h5py/_hl/selections.py
-h5py/_hl/selections2.py
-h5py/tests/__init__.py
-h5py/tests/common.py
-h5py/tests/hl/__init__.py
-h5py/tests/hl/test_dataset_getitem.py
-h5py/tests/hl/test_dims_dimensionproxy.py
-h5py/tests/hl/test_file.py
-h5py/tests/old/__init__.py
-h5py/tests/old/common.py
-h5py/tests/old/test_attrs.py
-h5py/tests/old/test_attrs_data.py
-h5py/tests/old/test_base.py
-h5py/tests/old/test_dataset.py
-h5py/tests/old/test_datatype.py
-h5py/tests/old/test_dimension_scales.py
-h5py/tests/old/test_file.py
-h5py/tests/old/test_group.py
-h5py/tests/old/test_h5.py
-h5py/tests/old/test_h5f.py
-h5py/tests/old/test_h5p.py
-h5py/tests/old/test_h5t.py
-h5py/tests/old/test_objects.py
-h5py/tests/old/test_selections.py
-h5py/tests/old/test_slicing.py
-licenses/hdf5.txt
-licenses/license.txt
-licenses/pytables.txt
-licenses/python.txt
-licenses/stdint.txt
-lzf/.DS_Store
-lzf/LICENSE.txt
-lzf/README.txt
-lzf/example.c
-lzf/lzf_filter.c
-lzf/lzf_filter.h
-lzf/lzf/lzf.h
-lzf/lzf/lzfP.h
-lzf/lzf/lzf_c.c
-lzf/lzf/lzf_d.c
-windows/README.txt
-windows/cacheinit.cmake
-windows/pavement.py
-windows/stdint.h
-windows/unistd.h
\ No newline at end of file
diff --git a/h5py.egg-info/dependency_links.txt b/h5py.egg-info/dependency_links.txt
deleted file mode 100644
index 8b13789..0000000
--- a/h5py.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/h5py.egg-info/requires.txt b/h5py.egg-info/requires.txt
deleted file mode 100644
index 95d0ae4..0000000
--- a/h5py.egg-info/requires.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-numpy>=1.6.1
-Cython>=0.17
diff --git a/h5py.egg-info/top_level.txt b/h5py.egg-info/top_level.txt
deleted file mode 100644
index c5a4eac..0000000
--- a/h5py.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-h5py
diff --git a/h5py/__init__.py b/h5py/__init__.py
index da1e8d0..d7c8250 100644
--- a/h5py/__init__.py
+++ b/h5py/__init__.py
@@ -7,30 +7,40 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
-from h5py import _errors
+from __future__ import absolute_import
+
+try:
+    from . import _errors
+except ImportError:
+    import os.path as _op
+    if _op.exists(_op.join(_op.dirname(__file__), '..', 'setup.py')):
+        raise ImportError("You cannot import h5py from inside the install directory.\nChange to another directory first.")
+    else:
+        raise
+    
 _errors.silence_errors()
 
-from h5py import _conv
+from . import _conv
 _conv.register_converters()
 
-from h5py import h5a, h5d, h5ds, h5f, h5fd, h5g, h5r, h5s, h5t, h5p, h5z
+from . import h5a, h5d, h5ds, h5f, h5fd, h5g, h5r, h5s, h5t, h5p, h5z
 
 h5s.NULL = h5s._NULL  # NULL is a reserved name at the Cython layer
 h5z._register_lzf()
 
-from h5py.highlevel import *
+from .highlevel import *
 
-from h5py.h5 import get_config
-from h5py.h5r import Reference, RegionReference
-from h5py.h5t import special_dtype, check_dtype
+from .h5 import get_config
+from .h5r import Reference, RegionReference
+from .h5t import special_dtype, check_dtype
 
 # Deprecated functions
-from h5py.h5t import py_new_vlen as new_vlen
-from h5py.h5t import py_get_vlen as get_vlen
-from h5py.h5t import py_new_enum as new_enum
-from h5py.h5t import py_get_enum as get_enum
+from .h5t import py_new_vlen as new_vlen
+from .h5t import py_get_vlen as get_vlen
+from .h5t import py_new_enum as new_enum
+from .h5t import py_get_enum as get_enum
 
-from h5py import version
+from . import version
 
 from .tests import run_tests
 
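A note on the import changes above: the `__future__` statement switches Python 2 to absolute-import semantics, which is why every bare `from h5py import ...` is rewritten as an explicit relative import. A minimal sketch of the distinction (module names are illustrative, not h5py's exact layout):

    # Without absolute_import, Python 2 resolves "import version" inside a
    # package to the sibling module h5py/version.py (implicit relative
    # import).  With it enabled, only these spellings reach the sibling:
    from __future__ import absolute_import

    from . import version        # explicit relative import
    from h5py import version     # absolute import; needs h5py on sys.path
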
diff --git a/h5py/_conv.pyx b/h5py/_conv.pyx
index 264bf73..b60bcc7 100644
--- a/h5py/_conv.pyx
+++ b/h5py/_conv.pyx
@@ -466,9 +466,6 @@ cdef herr_t pyref2regref(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata,
 cdef struct conv_enum_t:
     size_t src_size
     size_t dst_size
-    hid_t supertype
-    int identical
-
 
 cdef int enum_int_converter_init(hid_t src, hid_t dst,
                                  H5T_cdata_t *cdata, int forward) except -1 with gil:
@@ -495,52 +492,59 @@ cdef int enum_int_converter_conv(hid_t src, hid_t dst, H5T_cdata_t *cdata,
     cdef int i
     cdef char* cbuf = NULL
     cdef char* buf = <char*>buf_i
-
+    cdef int identical
+    cdef hid_t supertype = -1
+    
     info = <conv_enum_t*>cdata[0].priv
     
-    if forward:
-        info[0].supertype = H5Tget_super(src)
-        info[0].identical = H5Tequal(info[0].supertype, dst)
-    else:
-        info[0].supertype = H5Tget_super(dst)
-        info[0].identical = H5Tequal(info[0].supertype, src)
-   
-    # Short-circuit success
-    if info[0].identical:
-        return 0
-
-    if buf_stride == 0:
-        # Contiguous case: call H5Tconvert directly
+    try:
         if forward:
-            H5Tconvert(info[0].supertype, dst, nl, buf, NULL, dxpl)
+            supertype = H5Tget_super(src)
+            identical = H5Tequal(supertype, dst)
         else:
-            H5Tconvert(src, info[0].supertype, nl, buf, NULL, dxpl)
-    else:
-        # Non-contiguous: gather, convert and then scatter
-        if info[0].src_size > info[0].dst_size:
-            nalloc = info[0].src_size*nl
+            supertype = H5Tget_super(dst)
+            identical = H5Tequal(supertype, src)
+   
+        # Short-circuit success
+        if identical:
+            return 0
+
+        if buf_stride == 0:
+            # Contiguous case: call H5Tconvert directly
+            if forward:
+                H5Tconvert(supertype, dst, nl, buf, NULL, dxpl)
+            else:
+                H5Tconvert(src, supertype, nl, buf, NULL, dxpl)
         else:
-            nalloc = info[0].dst_size*nl
+            # Non-contiguous: gather, convert and then scatter
+            if info[0].src_size > info[0].dst_size:
+                nalloc = info[0].src_size*nl
+            else:
+                nalloc = info[0].dst_size*nl
+
+            cbuf = <char*>malloc(nalloc)
+            if cbuf == NULL:
+                raise MemoryError()
 
-        cbuf = <char*>malloc(nalloc)
-        if cbuf == NULL:
-            raise MemoryError()
-        try:
             for i from 0<=i<nl:
                 memcpy(cbuf + (i*info[0].src_size), buf + (i*buf_stride),
                         info[0].src_size)
 
             if forward:
-                H5Tconvert(info[0].supertype, dst, nl, cbuf, NULL, dxpl)
+                H5Tconvert(supertype, dst, nl, cbuf, NULL, dxpl)
             else:
-                H5Tconvert(src, info[0].supertype, nl, cbuf, NULL, dxpl)
+                H5Tconvert(src, supertype, nl, cbuf, NULL, dxpl)
 
             for i from 0<=i<nl:
                 memcpy(buf + (i*buf_stride), cbuf + (i*info[0].dst_size),
                         info[0].dst_size)
-        finally:
-            free(cbuf)
-            cbuf = NULL
+
+    finally:
+        free(cbuf)
+        cbuf = NULL
+        if supertype > 0:
+            H5Tclose(supertype)
+
     return 0
 
 
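The rewrite above drops the cached supertype/identical fields from conv_enum_t and instead opens the supertype per call, closing it in a single finally block so the HDF5 type identifier cannot leak on any error path. For orientation, a rough Python analogue of the non-contiguous gather/convert/scatter path (the "convert" callback stands in for H5Tconvert; all names are illustrative):

    def convert_strided(buf, nl, stride, src_size, dst_size, convert):
        # Scratch buffer sized for the larger of the two element sizes.
        scratch = bytearray(max(src_size, dst_size) * nl)
        for i in range(nl):                  # gather into a packed buffer
            scratch[i*src_size:(i+1)*src_size] = buf[i*stride:i*stride + src_size]
        convert(scratch, nl)                 # in-place element conversion
        for i in range(nl):                  # scatter back at the stride
            buf[i*stride:i*stride + dst_size] = scratch[i*dst_size:(i+1)*dst_size]
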
diff --git a/h5py/_errors.pyx b/h5py/_errors.pyx
index d6ca8c0..b5c6aea 100644
--- a/h5py/_errors.pyx
+++ b/h5py/_errors.pyx
@@ -88,8 +88,8 @@ cdef herr_t walk_cb(int n, H5E_error_t *desc, void *e):
 cdef int set_exception() except -1:
 
     cdef err_data_t err
-    cdef char *desc = NULL          # Note: HDF5 forbids freeing these
-    cdef char *desc_bottom = NULL
+    cdef const char *desc = NULL          # Note: HDF5 forbids freeing these
+    cdef const char *desc_bottom = NULL
 
     # First, extract the major & minor error codes from the top of the
     # stack, along with the top-level error description
@@ -139,7 +139,7 @@ def silence_errors():
 
 def unsilence_errors():
     """ Re-enable HDF5's automatic error printing in this thread """
-    if H5Eset_auto(H5Eprint, stderr) < 0:
+    if H5Eset_auto(<H5E_auto_t> H5Eprint, stderr) < 0:
         raise RuntimeError("Failed to enable automatic error printing")
 
 cdef err_cookie set_error_handler(err_cookie handler):
diff --git a/h5py/_hdf5.pxd b/h5py/_hdf5.pxd
deleted file mode 100644
index deeb5ba..0000000
--- a/h5py/_hdf5.pxd
+++ /dev/null
@@ -1,346 +0,0 @@
-include "config.pxi"
-from api_types_hdf5 cimport *
-from api_types_ext cimport *
-
-cdef extern from "hdf5.h":
-  herr_t H5open() except *
-  herr_t H5close() except *
-  herr_t H5get_libversion(unsigned *majnum, unsigned *minnum, unsigned *relnum) except *
-  hid_t H5Acreate(hid_t loc_id, char *name, hid_t type_id, hid_t space_id, hid_t create_plist) except *
-  hid_t H5Aopen_idx(hid_t loc_id, unsigned int idx) except *
-  hid_t H5Aopen_name(hid_t loc_id, char *name) except *
-  herr_t H5Aclose(hid_t attr_id) except *
-  herr_t H5Adelete(hid_t loc_id, char *name) except *
-  herr_t H5Aread(hid_t attr_id, hid_t mem_type_id, void *buf) except *
-  herr_t H5Awrite(hid_t attr_id, hid_t mem_type_id, void *buf ) except *
-  int H5Aget_num_attrs(hid_t loc_id) except *
-  ssize_t H5Aget_name(hid_t attr_id, size_t buf_size, char *buf) except *
-  hid_t H5Aget_space(hid_t attr_id) except *
-  hid_t H5Aget_type(hid_t attr_id) except *
-  herr_t H5Aiterate(hid_t loc_id, unsigned * idx, H5A_operator_t op, void* op_data) except *
-  herr_t H5Adelete_by_name(hid_t loc_id, char *obj_name, char *attr_name, hid_t lapl_id) except *
-  herr_t H5Adelete_by_idx(hid_t loc_id, char *obj_name, H5_index_t idx_type, H5_iter_order_t order, hsize_t n, hid_t lapl_id) except *
-  hid_t H5Acreate_by_name(hid_t loc_id, char *obj_name, char *attr_name, hid_t type_id, hid_t space_id, hid_t acpl_id, hid_t aapl_id, hid_t lapl_id) except *
-  herr_t H5Aopen(hid_t obj_id, char *attr_name, hid_t aapl_id) except *
-  herr_t H5Aopen_by_name( hid_t loc_id, char *obj_name, char *attr_name, hid_t aapl_id, hid_t lapl_id) except *
-  herr_t H5Aopen_by_idx(hid_t loc_id, char *obj_name, H5_index_t idx_type, H5_iter_order_t order, hsize_t n, hid_t aapl_id, hid_t lapl_id) except *
-  htri_t H5Aexists_by_name( hid_t loc_id, char *obj_name, char *attr_name, hid_t lapl_id) except *
-  htri_t H5Aexists(hid_t obj_id, char *attr_name) except *
-  herr_t H5Arename(hid_t loc_id, char *old_attr_name, char *new_attr_name) except *
-  herr_t H5Arename_by_name(hid_t loc_id, char *obj_name, char *old_attr_name, char *new_attr_name, hid_t lapl_id) except *
-  herr_t H5Aget_info( hid_t attr_id, H5A_info_t *ainfo) except *
-  herr_t H5Aget_info_by_name(hid_t loc_id, char *obj_name, char *attr_name, H5A_info_t *ainfo, hid_t lapl_id) except *
-  herr_t H5Aget_info_by_idx(hid_t loc_id, char *obj_name, H5_index_t idx_type, H5_iter_order_t order, hsize_t n, H5A_info_t *ainfo, hid_t lapl_id) except *
-  herr_t H5Aiterate2(hid_t obj_id, H5_index_t idx_type, H5_iter_order_t order, hsize_t *n, H5A_operator2_t op, void *op_data) except *
-  hsize_t H5Aget_storage_size(hid_t attr_id) except *
-  hid_t H5Dcreate2(hid_t loc_id, char *name, hid_t type_id, hid_t space_id, hid_t lcpl_id, hid_t dcpl_id, hid_t dapl_id) except *
-  hid_t H5Dcreate_anon(hid_t file_id, hid_t type_id, hid_t space_id, hid_t plist_id, hid_t dapl_id) except *
-  hid_t H5Dopen(hid_t file_id, char *name) except *
-  hid_t H5Dopen2(hid_t loc_id, char *name, hid_t dapl_id ) except *
-  herr_t H5Dclose(hid_t dset_id) except *
-  hid_t H5Dget_space(hid_t dset_id) except *
-  herr_t H5Dget_space_status(hid_t dset_id, H5D_space_status_t *status) except *
-  hid_t H5Dget_type(hid_t dset_id) except *
-  hid_t H5Dget_create_plist(hid_t dataset_id) except *
-  hid_t H5Dget_access_plist(hid_t dataset_id) except *
-  haddr_t H5Dget_offset(hid_t dset_id) except *
-  hsize_t H5Dget_storage_size(hid_t dset_id) except *
-  herr_t H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t plist_id, void *buf) except *
-  herr_t H5Dwrite(hid_t dset_id, hid_t mem_type, hid_t mem_space, hid_t file_space, hid_t xfer_plist, void* buf) except *
-  herr_t H5Dextend(hid_t dataset_id, hsize_t *size) except *
-  herr_t H5Dfill(void *fill, hid_t fill_type_id, void *buf,  hid_t buf_type_id, hid_t space_id ) except *
-  herr_t H5Dvlen_get_buf_size(hid_t dset_id, hid_t type_id, hid_t space_id, hsize_t *size) except *
-  herr_t H5Dvlen_reclaim(hid_t type_id, hid_t space_id,  hid_t plist, void *buf) except *
-  herr_t H5Diterate(void *buf, hid_t type_id, hid_t space_id,  H5D_operator_t op, void* operator_data) except *
-  herr_t H5Dset_extent(hid_t dset_id, hsize_t* size) except *
-  hid_t H5Fcreate(char *filename, unsigned int flags, hid_t create_plist, hid_t access_plist) except *
-  hid_t H5Fopen(char *name, unsigned flags, hid_t access_id) except *
-  herr_t H5Fclose(hid_t file_id) except *
-  htri_t H5Fis_hdf5(char *name) except *
-  herr_t H5Fflush(hid_t object_id, H5F_scope_t scope) except *
-  hid_t H5Freopen(hid_t file_id) except *
-  herr_t H5Fmount(hid_t loc_id, char *name, hid_t child_id, hid_t plist_id) except *
-  herr_t H5Funmount(hid_t loc_id, char *name) except *
-  herr_t H5Fget_filesize(hid_t file_id, hsize_t *size) except *
-  hid_t H5Fget_create_plist(hid_t file_id ) except *
-  hid_t H5Fget_access_plist(hid_t file_id) except *
-  hssize_t H5Fget_freespace(hid_t file_id) except *
-  ssize_t H5Fget_name(hid_t obj_id, char *name, size_t size) except *
-  int H5Fget_obj_count(hid_t file_id, unsigned int types) except *
-  int H5Fget_obj_ids(hid_t file_id, unsigned int types, int max_objs, hid_t *obj_id_list) except *
-  herr_t H5Fget_vfd_handle(hid_t file_id, hid_t fapl_id, void **file_handle) except *
-  herr_t H5Fget_intent(hid_t file_id, unsigned int *intent) except *
-  herr_t H5Fget_mdc_config(hid_t file_id, H5AC_cache_config_t *config_ptr) except *
-  herr_t H5Fget_mdc_hit_rate(hid_t file_id, double *hit_rate_ptr) except *
-  herr_t H5Fget_mdc_size(hid_t file_id, size_t *max_size_ptr, size_t *min_clean_size_ptr, size_t *cur_size_ptr, int *cur_num_entries_ptr) except *
-  herr_t H5Freset_mdc_hit_rate_stats(hid_t file_id) except *
-  herr_t H5Fset_mdc_config(hid_t file_id, H5AC_cache_config_t *config_ptr) except *
-  IF HDF5_VERSION >= (1, 8, 9):
-      IF MPI:
-          herr_t H5Fset_mpi_atomicity(hid_t file_id, hbool_t flag) except *
-  IF HDF5_VERSION >= (1, 8, 9):
-      IF MPI:
-          herr_t H5Fget_mpi_atomicity(hid_t file_id, hbool_t *flag) except *
-  hid_t H5Gcreate(hid_t loc_id, char *name, size_t size_hint) except *
-  hid_t H5Gopen(hid_t loc_id, char *name) except *
-  herr_t H5Gclose(hid_t group_id) except *
-  herr_t H5Glink2( hid_t curr_loc_id, char *current_name, H5G_link_t link_type, hid_t new_loc_id, char *new_name) except *
-  herr_t H5Gunlink(hid_t file_id, char *name) except *
-  herr_t H5Gmove2(hid_t src_loc_id, char *src_name, hid_t dst_loc_id, char *dst_name) except *
-  herr_t H5Gget_num_objs(hid_t loc_id, hsize_t*  num_obj) except *
-  int H5Gget_objname_by_idx(hid_t loc_id, hsize_t idx, char *name, size_t size) except *
-  int H5Gget_objtype_by_idx(hid_t loc_id, hsize_t idx) except *
-  herr_t H5Giterate(hid_t loc_id, char *name, int *idx, H5G_iterate_t op, void* data) except *
-  herr_t H5Gget_objinfo(hid_t loc_id, char* name, int follow_link, H5G_stat_t *statbuf) except *
-  herr_t H5Gget_linkval(hid_t loc_id, char *name, size_t size, char *value) except *
-  herr_t H5Gset_comment(hid_t loc_id, char *name, char *comment) except *
-  int H5Gget_comment(hid_t loc_id, char *name, size_t bufsize, char *comment) except *
-  hid_t H5Gcreate_anon( hid_t loc_id, hid_t gcpl_id, hid_t gapl_id) except *
-  hid_t H5Gcreate2(hid_t loc_id, char *name, hid_t lcpl_id, hid_t gcpl_id, hid_t gapl_id) except *
-  hid_t H5Gopen2( hid_t loc_id, char * name, hid_t gapl_id) except *
-  herr_t H5Gget_info( hid_t group_id, H5G_info_t *group_info) except *
-  herr_t H5Gget_info_by_name( hid_t loc_id, char *group_name, H5G_info_t *group_info, hid_t lapl_id) except *
-  hid_t H5Gget_create_plist(hid_t group_id) except *
-  H5I_type_t H5Iget_type(hid_t obj_id) except *
-  ssize_t H5Iget_name( hid_t obj_id, char *name, size_t size) except *
-  hid_t H5Iget_file_id(hid_t obj_id) except *
-  int H5Idec_ref(hid_t obj_id) except *
-  int H5Iget_ref(hid_t obj_id) except *
-  int H5Iinc_ref(hid_t obj_id) except *
-  htri_t H5Iis_valid( hid_t obj_id ) except *
-  herr_t H5Lmove(hid_t src_loc, char *src_name, hid_t dst_loc, char *dst_name, hid_t lcpl_id, hid_t lapl_id) except *
-  herr_t H5Lcopy(hid_t src_loc, char *src_name, hid_t dst_loc, char *dst_name, hid_t lcpl_id, hid_t lapl_id) except *
-  herr_t H5Lcreate_hard(hid_t cur_loc, char *cur_name, hid_t dst_loc, char *dst_name, hid_t lcpl_id, hid_t lapl_id) except *
-  herr_t H5Lcreate_soft(char *link_target, hid_t link_loc_id, char *link_name, hid_t lcpl_id, hid_t lapl_id) except *
-  herr_t H5Ldelete(hid_t loc_id, char *name, hid_t lapl_id) except *
-  herr_t H5Ldelete_by_idx(hid_t loc_id, char *group_name, H5_index_t idx_type, H5_iter_order_t order, hsize_t n, hid_t lapl_id) except *
-  herr_t H5Lget_val(hid_t loc_id, char *name, void *bufout, size_t size, hid_t lapl_id) except *
-  herr_t H5Lget_val_by_idx(hid_t loc_id, char *group_name,  H5_index_t idx_type, H5_iter_order_t order, hsize_t n, void *bufout, size_t size, hid_t lapl_id) except *
-  htri_t H5Lexists(hid_t loc_id, char *name, hid_t lapl_id) except *
-  herr_t H5Lget_info(hid_t loc_id, char *name, H5L_info_t *linfo, hid_t lapl_id) except *
-  herr_t H5Lget_info_by_idx(hid_t loc_id, char *group_name, H5_index_t idx_type, H5_iter_order_t order, hsize_t n, H5L_info_t *linfo, hid_t lapl_id) except *
-  ssize_t H5Lget_name_by_idx(hid_t loc_id, char *group_name, H5_index_t idx_type, H5_iter_order_t order, hsize_t n, char *name, size_t size, hid_t lapl_id) except *
-  herr_t H5Literate(hid_t grp_id, H5_index_t idx_type, H5_iter_order_t order, hsize_t *idx, H5L_iterate_t op, void *op_data) except *
-  herr_t H5Literate_by_name(hid_t loc_id, char *group_name, H5_index_t idx_type, H5_iter_order_t order, hsize_t *idx, H5L_iterate_t op, void *op_data, hid_t lapl_id) except *
-  herr_t H5Lvisit(hid_t grp_id, H5_index_t idx_type, H5_iter_order_t order, H5L_iterate_t op, void *op_data) except *
-  herr_t H5Lvisit_by_name(hid_t loc_id, char *group_name, H5_index_t idx_type, H5_iter_order_t order, H5L_iterate_t op, void *op_data, hid_t lapl_id) except *
-  herr_t H5Lunpack_elink_val(void *ext_linkval, size_t link_size, unsigned *flags, char **filename, char **obj_path) except *
-  herr_t H5Lcreate_external(char *file_name, char *obj_name, hid_t link_loc_id, char *link_name, hid_t lcpl_id, hid_t lapl_id) except *
-  hid_t H5Oopen(hid_t loc_id, char *name, hid_t lapl_id) except *
-  hid_t H5Oopen_by_addr(hid_t loc_id, haddr_t addr) except *
-  hid_t H5Oopen_by_idx(hid_t loc_id, char *group_name, H5_index_t idx_type, H5_iter_order_t order, hsize_t n, hid_t lapl_id) except *
-  herr_t H5Oget_info(hid_t loc_id, H5O_info_t *oinfo) except *
-  herr_t H5Oget_info_by_name(hid_t loc_id, char *name, H5O_info_t *oinfo, hid_t lapl_id) except *
-  herr_t H5Oget_info_by_idx(hid_t loc_id, char *group_name,  H5_index_t idx_type, H5_iter_order_t order, hsize_t n, H5O_info_t *oinfo, hid_t lapl_id) except *
-  herr_t H5Olink(hid_t obj_id, hid_t new_loc_id, char *new_name, hid_t lcpl_id, hid_t lapl_id) except *
-  herr_t H5Ocopy(hid_t src_loc_id, char *src_name, hid_t dst_loc_id,  char *dst_name, hid_t ocpypl_id, hid_t lcpl_id) except *
-  herr_t H5Oincr_refcount(hid_t object_id) except *
-  herr_t H5Odecr_refcount(hid_t object_id) except *
-  herr_t H5Oset_comment(hid_t obj_id, char *comment) except *
-  herr_t H5Oset_comment_by_name(hid_t loc_id, char *name,  char *comment, hid_t lapl_id) except *
-  ssize_t H5Oget_comment(hid_t obj_id, char *comment, size_t bufsize) except *
-  ssize_t H5Oget_comment_by_name(hid_t loc_id, char *name, char *comment, size_t bufsize, hid_t lapl_id) except *
-  herr_t H5Ovisit(hid_t obj_id, H5_index_t idx_type, H5_iter_order_t order,  H5O_iterate_t op, void *op_data) except *
-  herr_t H5Ovisit_by_name(hid_t loc_id, char *obj_name, H5_index_t idx_type, H5_iter_order_t order, H5O_iterate_t op, void *op_data, hid_t lapl_id) except *
-  herr_t H5Oclose(hid_t object_id) except *
-  IF HDF5_VERSION >= (1, 8, 5):
-      htri_t H5Oexists_by_name(hid_t loc_id, char * name, hid_t lapl_id ) except *
-  hid_t H5Pcreate(hid_t plist_id) except *
-  hid_t H5Pcopy(hid_t plist_id) except *
-  int H5Pget_class(hid_t plist_id) except *
-  herr_t H5Pclose(hid_t plist_id) except *
-  htri_t H5Pequal( hid_t id1, hid_t id2 ) except *
-  herr_t H5Pclose_class(hid_t id) except *
-  herr_t H5Pget_version(hid_t plist, unsigned int *super_, unsigned int* freelist,  unsigned int *stab, unsigned int *shhdr) except *
-  herr_t H5Pset_userblock(hid_t plist, hsize_t size) except *
-  herr_t H5Pget_userblock(hid_t plist, hsize_t * size) except *
-  herr_t H5Pset_sizes(hid_t plist, size_t sizeof_addr, size_t sizeof_size) except *
-  herr_t H5Pget_sizes(hid_t plist, size_t *sizeof_addr, size_t *sizeof_size) except *
-  herr_t H5Pset_sym_k(hid_t plist, unsigned int ik, unsigned int lk) except *
-  herr_t H5Pget_sym_k(hid_t plist, unsigned int *ik, unsigned int *lk) except *
-  herr_t H5Pset_istore_k(hid_t plist, unsigned int ik) except *
-  herr_t H5Pget_istore_k(hid_t plist, unsigned int *ik) except *
-  herr_t H5Pset_fclose_degree(hid_t fapl_id, H5F_close_degree_t fc_degree) except *
-  herr_t H5Pget_fclose_degree(hid_t fapl_id, H5F_close_degree_t *fc_degree) except *
-  herr_t H5Pset_fapl_core( hid_t fapl_id, size_t increment, hbool_t backing_store) except *
-  herr_t H5Pget_fapl_core( hid_t fapl_id, size_t *increment, hbool_t *backing_store) except *
-  herr_t H5Pset_fapl_family( hid_t fapl_id,  hsize_t memb_size, hid_t memb_fapl_id ) except *
-  herr_t H5Pget_fapl_family( hid_t fapl_id, hsize_t *memb_size, hid_t *memb_fapl_id ) except *
-  herr_t H5Pset_family_offset( hid_t fapl_id, hsize_t offset) except *
-  herr_t H5Pget_family_offset( hid_t fapl_id, hsize_t *offset) except *
-  herr_t H5Pset_fapl_log(hid_t fapl_id, char *logfile, unsigned int flags, size_t buf_size) except *
-  herr_t H5Pset_fapl_multi(hid_t fapl_id, H5FD_mem_t *memb_map, hid_t *memb_fapl, char **memb_name, haddr_t *memb_addr, hbool_t relax) except *
-  herr_t H5Pset_cache(hid_t plist_id, int mdc_nelmts, int rdcc_nelmts,  size_t rdcc_nbytes, double rdcc_w0) except *
-  herr_t H5Pget_cache(hid_t plist_id, int *mdc_nelmts, int *rdcc_nelmts, size_t *rdcc_nbytes, double *rdcc_w0) except *
-  herr_t H5Pset_fapl_sec2(hid_t fapl_id) except *
-  herr_t H5Pset_fapl_stdio(hid_t fapl_id) except *
-  hid_t H5Pget_driver(hid_t fapl_id) except *
-  herr_t H5Pget_mdc_config(hid_t plist_id, H5AC_cache_config_t *config_ptr) except *
-  herr_t H5Pset_mdc_config(hid_t plist_id, H5AC_cache_config_t *config_ptr) except *
-  herr_t H5Pset_layout(hid_t plist, int layout) except *
-  H5D_layout_t H5Pget_layout(hid_t plist) except *
-  herr_t H5Pset_chunk(hid_t plist, int ndims, hsize_t * dim) except *
-  int H5Pget_chunk(hid_t plist, int max_ndims, hsize_t * dims ) except *
-  herr_t H5Pset_deflate( hid_t plist, int level) except *
-  herr_t H5Pset_fill_value(hid_t plist_id, hid_t type_id, void *value ) except *
-  herr_t H5Pget_fill_value(hid_t plist_id, hid_t type_id, void *value ) except *
-  herr_t H5Pfill_value_defined(hid_t plist_id, H5D_fill_value_t *status ) except *
-  herr_t H5Pset_fill_time(hid_t plist_id, H5D_fill_time_t fill_time ) except *
-  herr_t H5Pget_fill_time(hid_t plist_id, H5D_fill_time_t *fill_time ) except *
-  herr_t H5Pset_alloc_time(hid_t plist_id, H5D_alloc_time_t alloc_time ) except *
-  herr_t H5Pget_alloc_time(hid_t plist_id, H5D_alloc_time_t *alloc_time ) except *
-  herr_t H5Pset_filter(hid_t plist, H5Z_filter_t filter, unsigned int flags, size_t cd_nelmts, unsigned int* cd_values ) except *
-  htri_t H5Pall_filters_avail(hid_t dcpl_id) except *
-  int H5Pget_nfilters(hid_t plist) except *
-  H5Z_filter_t H5Pget_filter(hid_t plist, unsigned int filter_number,   unsigned int *flags, size_t *cd_nelmts,  unsigned int* cd_values, size_t namelen, char* name ) except *
-  herr_t H5Pget_filter_by_id( hid_t plist_id, H5Z_filter_t filter,  unsigned int *flags, size_t *cd_nelmts,  unsigned int* cd_values, size_t namelen, char* name) except *
-  herr_t H5Pmodify_filter(hid_t plist, H5Z_filter_t filter, unsigned int flags, size_t cd_nelmts, unsigned int *cd_values) except *
-  herr_t H5Premove_filter(hid_t plist, H5Z_filter_t filter ) except *
-  herr_t H5Pset_fletcher32(hid_t plist) except *
-  herr_t H5Pset_shuffle(hid_t plist_id) except *
-  herr_t H5Pset_szip(hid_t plist, unsigned int options_mask, unsigned int pixels_per_block) except *
-  herr_t H5Pset_scaleoffset(hid_t plist, H5Z_SO_scale_type_t scale_type, int scale_factor) except *
-  herr_t H5Pset_edc_check(hid_t plist, H5Z_EDC_t check) except *
-  H5Z_EDC_t H5Pget_edc_check(hid_t plist) except *
-  herr_t H5Pset_chunk_cache( hid_t dapl_id, size_t rdcc_nslots, size_t rdcc_nbytes, double rdcc_w0 ) except *
-  herr_t H5Pget_chunk_cache( hid_t dapl_id, size_t *rdcc_nslots, size_t *rdcc_nbytes, double *rdcc_w0 ) except *
-  herr_t H5Pset_sieve_buf_size(hid_t fapl_id, size_t size) except *
-  herr_t H5Pget_sieve_buf_size(hid_t fapl_id, size_t *size) except *
-  herr_t H5Pset_nlinks(hid_t plist_id, size_t nlinks) except *
-  herr_t H5Pget_nlinks(hid_t plist_id, size_t *nlinks) except *
-  herr_t H5Pset_elink_prefix(hid_t plist_id, char *prefix) except *
-  ssize_t H5Pget_elink_prefix(hid_t plist_id, char *prefix, size_t size) except *
-  hid_t H5Pget_elink_fapl(hid_t lapl_id) except *
-  herr_t H5Pset_elink_fapl(hid_t lapl_id, hid_t fapl_id) except *
-  herr_t H5Pset_create_intermediate_group(hid_t plist_id, unsigned crt_intmd) except *
-  herr_t H5Pget_create_intermediate_group(hid_t plist_id, unsigned *crt_intmd) except *
-  herr_t H5Pset_copy_object(hid_t plist_id, unsigned crt_intmd) except *
-  herr_t H5Pget_copy_object(hid_t plist_id, unsigned *crt_intmd) except *
-  herr_t H5Pset_char_encoding(hid_t plist_id, H5T_cset_t encoding) except *
-  herr_t H5Pget_char_encoding(hid_t plist_id, H5T_cset_t *encoding) except *
-  herr_t H5Pset_obj_track_times( hid_t ocpl_id, hbool_t track_times ) except *
-  herr_t H5Pget_obj_track_times( hid_t ocpl_id, hbool_t *track_times ) except *
-  herr_t H5Pset_local_heap_size_hint(hid_t plist_id, size_t size_hint) except *
-  herr_t H5Pget_local_heap_size_hint(hid_t plist_id, size_t *size_hint) except *
-  herr_t H5Pset_link_phase_change(hid_t plist_id, unsigned max_compact, unsigned min_dense) except *
-  herr_t H5Pget_link_phase_change(hid_t plist_id, unsigned *max_compact , unsigned *min_dense) except *
-  herr_t H5Pset_est_link_info(hid_t plist_id, unsigned est_num_entries, unsigned est_name_len) except *
-  herr_t H5Pget_est_link_info(hid_t plist_id, unsigned *est_num_entries , unsigned *est_name_len) except *
-  herr_t H5Pset_link_creation_order(hid_t plist_id, unsigned crt_order_flags) except *
-  herr_t H5Pget_link_creation_order(hid_t plist_id, unsigned *crt_order_flags) except *
-  herr_t H5Pset_libver_bounds(hid_t fapl_id, H5F_libver_t libver_low, H5F_libver_t libver_high) except *
-  herr_t H5Pget_libver_bounds(hid_t fapl_id, H5F_libver_t *libver_low, H5F_libver_t *libver_high) except *
-  herr_t H5Pset_alignment(hid_t plist_id, hsize_t threshold, hsize_t alignment) except *
-  herr_t H5Pget_alignment(hid_t plist_id, hsize_t *threshold, hsize_t *alignment) except *
-  IF MPI:
-      herr_t H5Pset_fapl_mpio(hid_t fapl_id, MPI_Comm comm, MPI_Info info) except *
-  IF MPI:
-      herr_t H5Pget_fapl_mpio(hid_t fapl_id, MPI_Comm *comm, MPI_Info *info) except *
-  herr_t H5Rcreate(void *ref, hid_t loc_id, char *name, H5R_type_t ref_type,  hid_t space_id) except *
-  hid_t H5Rdereference(hid_t obj_id, H5R_type_t ref_type, void *ref) except *
-  hid_t H5Rget_region(hid_t dataset, H5R_type_t ref_type, void *ref) except *
-  H5G_obj_t H5Rget_obj_type(hid_t id, H5R_type_t ref_type, void *ref) except *
-  ssize_t H5Rget_name(hid_t loc_id, H5R_type_t ref_type, void *ref, char *name, size_t size) except *
-  hid_t H5Screate(H5S_class_t type) except *
-  hid_t H5Scopy(hid_t space_id ) except *
-  herr_t H5Sclose(hid_t space_id) except *
-  hid_t H5Screate_simple(int rank, hsize_t *dims, hsize_t *maxdims) except *
-  htri_t H5Sis_simple(hid_t space_id) except *
-  herr_t H5Soffset_simple(hid_t space_id, hssize_t *offset ) except *
-  int H5Sget_simple_extent_ndims(hid_t space_id) except *
-  int H5Sget_simple_extent_dims(hid_t space_id, hsize_t *dims, hsize_t *maxdims) except *
-  hssize_t H5Sget_simple_extent_npoints(hid_t space_id) except *
-  H5S_class_t H5Sget_simple_extent_type(hid_t space_id) except *
-  herr_t H5Sextent_copy(hid_t dest_space_id, hid_t source_space_id ) except *
-  herr_t H5Sset_extent_simple(hid_t space_id, int rank, hsize_t *current_size, hsize_t *maximum_size ) except *
-  herr_t H5Sset_extent_none(hid_t space_id) except *
-  H5S_sel_type H5Sget_select_type(hid_t space_id) except *
-  hssize_t H5Sget_select_npoints(hid_t space_id) except *
-  herr_t H5Sget_select_bounds(hid_t space_id, hsize_t *start, hsize_t *end) except *
-  herr_t H5Sselect_all(hid_t space_id) except *
-  herr_t H5Sselect_none(hid_t space_id) except *
-  htri_t H5Sselect_valid(hid_t space_id) except *
-  hssize_t H5Sget_select_elem_npoints(hid_t space_id) except *
-  herr_t H5Sget_select_elem_pointlist(hid_t space_id, hsize_t startpoint,  hsize_t numpoints, hsize_t *buf) except *
-  herr_t H5Sselect_elements(hid_t space_id, H5S_seloper_t op,  size_t num_elements, hsize_t **coord) except *
-  hssize_t H5Sget_select_hyper_nblocks(hid_t space_id ) except *
-  herr_t H5Sget_select_hyper_blocklist(hid_t space_id,  hsize_t startblock, hsize_t numblocks, hsize_t *buf ) except *
-  herr_t H5Sselect_hyperslab(hid_t space_id, H5S_seloper_t op,  hsize_t *start, hsize_t *_stride, hsize_t *count, hsize_t *_block) except *
-  herr_t H5Sencode(hid_t obj_id, void *buf, size_t *nalloc) except *
-  hid_t H5Sdecode(void *buf) except *
-  hid_t H5Tcreate(H5T_class_t type, size_t size) except *
-  hid_t H5Topen(hid_t loc, char* name) except *
-  herr_t H5Tcommit(hid_t loc_id, char* name, hid_t type) except *
-  htri_t H5Tcommitted(hid_t type) except *
-  hid_t H5Tcopy(hid_t type_id) except *
-  htri_t H5Tequal(hid_t type_id1, hid_t type_id2 ) except *
-  herr_t H5Tlock(hid_t type_id) except *
-  H5T_class_t H5Tget_class(hid_t type_id) except *
-  size_t H5Tget_size(hid_t type_id) except *
-  hid_t H5Tget_super(hid_t type) except *
-  htri_t H5Tdetect_class(hid_t type_id, H5T_class_t dtype_class) except *
-  herr_t H5Tclose(hid_t type_id) except *
-  hid_t H5Tget_native_type(hid_t type_id, H5T_direction_t direction) except *
-  herr_t H5Tcommit2(hid_t loc_id, char *name, hid_t dtype_id, hid_t lcpl_id, hid_t tcpl_id, hid_t tapl_id) except *
-  hid_t H5Tdecode(unsigned char *buf) except *
-  herr_t H5Tencode(hid_t obj_id, unsigned char *buf, size_t *nalloc) except *
-  H5T_conv_t H5Tfind(hid_t src_id, hid_t dst_id, H5T_cdata_t **pcdata) except *
-  herr_t H5Tconvert(hid_t src_id, hid_t dst_id, size_t nelmts, void *buf, void *background, hid_t plist_id) except *
-  herr_t H5Tregister(H5T_pers_t pers, char *name, hid_t src_id, hid_t dst_id, H5T_conv_t func) except *
-  herr_t H5Tunregister(H5T_pers_t pers, char *name, hid_t src_id, hid_t dst_id, H5T_conv_t func) except *
-  herr_t H5Tset_size(hid_t type_id, size_t size) except *
-  H5T_order_t H5Tget_order(hid_t type_id) except *
-  herr_t H5Tset_order(hid_t type_id, H5T_order_t order) except *
-  hsize_t H5Tget_precision(hid_t type_id) except *
-  herr_t H5Tset_precision(hid_t type_id, size_t prec) except *
-  int H5Tget_offset(hid_t type_id) except *
-  herr_t H5Tset_offset(hid_t type_id, size_t offset) except *
-  herr_t H5Tget_pad(hid_t type_id, H5T_pad_t * lsb, H5T_pad_t * msb ) except *
-  herr_t H5Tset_pad(hid_t type_id, H5T_pad_t lsb, H5T_pad_t msb ) except *
-  H5T_sign_t H5Tget_sign(hid_t type_id) except *
-  herr_t H5Tset_sign(hid_t type_id, H5T_sign_t sign) except *
-  herr_t H5Tget_fields(hid_t type_id, size_t *spos, size_t *epos, size_t *esize, size_t *mpos, size_t *msize ) except *
-  herr_t H5Tset_fields(hid_t type_id, size_t spos, size_t epos, size_t esize, size_t mpos, size_t msize ) except *
-  size_t H5Tget_ebias(hid_t type_id) except *
-  herr_t H5Tset_ebias(hid_t type_id, size_t ebias) except *
-  H5T_norm_t H5Tget_norm(hid_t type_id) except *
-  herr_t H5Tset_norm(hid_t type_id, H5T_norm_t norm) except *
-  H5T_pad_t H5Tget_inpad(hid_t type_id) except *
-  herr_t H5Tset_inpad(hid_t type_id, H5T_pad_t inpad) except *
-  H5T_cset_t H5Tget_cset(hid_t type_id) except *
-  herr_t H5Tset_cset(hid_t type_id, H5T_cset_t cset) except *
-  H5T_str_t H5Tget_strpad(hid_t type_id) except *
-  herr_t H5Tset_strpad(hid_t type_id, H5T_str_t strpad) except *
-  hid_t H5Tvlen_create(hid_t base_type_id) except *
-  htri_t H5Tis_variable_str(hid_t dtype_id) except *
-  int H5Tget_nmembers(hid_t type_id) except *
-  H5T_class_t H5Tget_member_class(hid_t type_id, int member_no) except *
-  char* H5Tget_member_name(hid_t type_id, unsigned membno) except *
-  hid_t H5Tget_member_type(hid_t type_id, unsigned membno) except *
-  int H5Tget_member_offset(hid_t type_id, int membno) except *
-  int H5Tget_member_index(hid_t type_id, char* name) except *
-  herr_t H5Tinsert(hid_t parent_id, char *name, size_t offset, hid_t member_id) except *
-  herr_t H5Tpack(hid_t type_id) except *
-  hid_t H5Tenum_create(hid_t base_id) except *
-  herr_t H5Tenum_insert(hid_t type, char *name, void *value) except *
-  herr_t H5Tenum_nameof( hid_t type, void *value, char *name, size_t size ) except *
-  herr_t H5Tenum_valueof( hid_t type, char *name, void *value ) except *
-  herr_t H5Tget_member_value(hid_t type,  unsigned int memb_no, void *value ) except *
-  hid_t H5Tarray_create(hid_t base_id, int ndims, hsize_t *dims, int *perm) except *
-  int H5Tget_array_ndims(hid_t type_id) except *
-  int H5Tget_array_dims(hid_t type_id, hsize_t *dims, int *perm) except *
-  herr_t H5Tset_tag(hid_t type_id, char* tag) except *
-  char* H5Tget_tag(hid_t type_id) except *
-  htri_t H5Zfilter_avail(H5Z_filter_t id_) except *
-  herr_t H5Zget_filter_info(H5Z_filter_t filter_, unsigned int *filter_config_flags) except *
-cdef extern from "hdf5_hl.h":
-  herr_t H5DSattach_scale(hid_t did, hid_t dsid, unsigned int idx) except *
-  herr_t H5DSdetach_scale(hid_t did, hid_t dsid, unsigned int idx) except *
-  htri_t H5DSis_attached(hid_t did, hid_t dsid, unsigned int idx) except *
-  herr_t H5DSset_scale(hid_t dsid, char *dimname) except *
-  int H5DSget_num_scales(hid_t did, unsigned int dim) except *
-  herr_t H5DSset_label(hid_t did, unsigned int idx, char *label) except *
-  ssize_t H5DSget_label(hid_t did, unsigned int idx, char *label, size_t size) except *
-  ssize_t H5DSget_scale_name(hid_t did, char *name, size_t size) except *
-  htri_t H5DSis_scale(hid_t did) except *
-  herr_t H5DSiterate_scales(hid_t did, unsigned int dim, int *idx, H5DS_iterate_t visitor, void *visitor_data) except *
diff --git a/h5py/_hl/__init__.py b/h5py/_hl/__init__.py
index a23327b..2a8ae91 100644
--- a/h5py/_hl/__init__.py
+++ b/h5py/_hl/__init__.py
@@ -6,3 +6,6 @@
 #
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
+
+from __future__ import absolute_import
+
diff --git a/h5py/_hl/attrs.py b/h5py/_hl/attrs.py
index 37685d8..a1c0e41 100644
--- a/h5py/_hl/attrs.py
+++ b/h5py/_hl/attrs.py
@@ -7,17 +7,19 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+from __future__ import absolute_import
+
 import numpy
 import collections
 
-import h5py
-from h5py import h5s, h5t, h5a
+from .. import h5s, h5t, h5a
 from . import base
 from .base import phil, with_phil
 from .dataset import readtime_dtype
+from .datatype import Datatype
 
 
-class AttributeManager(base.DictCompat, base.CommonStateObject):
+class AttributeManager(base.MutableMappingWithLock, base.CommonStateObject):
 
     """
         Allows dictionary-style access to an HDF5 object's attributes.
@@ -52,12 +54,23 @@ class AttributeManager(base.DictCompat, base.CommonStateObject):
         if attr.get_space().get_simple_extent_type() == h5s.NULL:
             raise IOError("Empty attributes cannot be read")
 
-        tid = attr.get_type()
-
-        rtdt = readtime_dtype(attr.dtype, [])
-
-        arr = numpy.ndarray(attr.shape, dtype=rtdt, order='C')
-        attr.read(arr)
+        dtype = readtime_dtype(attr.dtype, [])
+        shape = attr.shape
+        
+        # Do this first, as we'll be fiddling with the dtype for top-level
+        # array types
+        htype = h5t.py_create(dtype)
+
+        # NumPy doesn't support top-level array types, so we have to "fake"
+        # the correct type and shape for the array.  For example, consider
+        # attr.shape == (5,) and attr.dtype == '(3,)f'. Then:
+        if dtype.subdtype is not None:
+            subdtype, subshape = dtype.subdtype
+            shape = attr.shape + subshape   # (5, 3)
+            dtype = subdtype                # 'f'
+            
+        arr = numpy.ndarray(shape, dtype=dtype, order='C')
+        attr.read(arr, mtype=htype)
 
         if len(arr.shape) == 0:
             return arr[()]
@@ -93,45 +106,95 @@ class AttributeManager(base.DictCompat, base.CommonStateObject):
             are given.
         """
 
+        import uuid
+        
         with phil:
-            if data is not None:
-                data = numpy.asarray(data, order='C', dtype=dtype)
-                if shape is None:
-                    shape = data.shape
-                elif numpy.product(shape) != numpy.product(data.shape):
+                
+            # First, make sure we have a NumPy array.  We leave the data
+            # type conversion for HDF5 to perform.
+            data = numpy.asarray(data, order='C')
+    
+            if shape is None:
+                shape = data.shape
+                
+            use_htype = None    # If a committed type is given, we must use it
+                                # in the call to h5a.create.
+                                            
+            if isinstance(dtype, Datatype):
+                use_htype = dtype.id
+                dtype = dtype.dtype
+            elif dtype is None:
+                dtype = data.dtype
+            else:
+                dtype = numpy.dtype(dtype) # In case a string, e.g. 'i8' is passed
+ 
+            original_dtype = dtype  # We'll need this for top-level array types
+
+            # Where a top-level array type is requested, we have to do some
+            # fiddling around to present the data as a smaller array of
+            # subarrays. 
+            if dtype.subdtype is not None:
+            
+                subdtype, subshape = dtype.subdtype
+                
+                # Make sure the subshape matches the last N axes' sizes.
+                if shape[-len(subshape):] != subshape:
+                    raise ValueError("Array dtype shape %s is incompatible with data shape %s" % (subshape, shape))
+
+                # New "advertised" shape and dtype
+                shape = shape[0:len(shape)-len(subshape)]
+                dtype = subdtype
+                
+            # Not an array type; make sure to check the number of elements
+            # is compatible, and reshape if needed.
+            else:
+               
+                if numpy.product(shape) != numpy.product(data.shape):
                     raise ValueError("Shape of new attribute conflicts with shape of data")
 
-                if dtype is None:
-                    dtype = data.dtype
+                if shape != data.shape:
+                    data = data.reshape(shape)
 
-            if isinstance(dtype, h5py.Datatype):
-                htype = dtype.id
-                dtype = htype.dtype
+            # We need this to handle special string types.
+            data = numpy.asarray(data, dtype=dtype)
+    
+            # Make HDF5 datatype and dataspace for the H5A calls
+            if use_htype is None:
+                htype = h5t.py_create(original_dtype, logical=True)
+                htype2 = h5t.py_create(original_dtype)  # Must be bit-for-bit representation rather than logical
             else:
-                if dtype is None:
-                    dtype = numpy.dtype('f')
-                htype = h5t.py_create(dtype, logical=True)
-
-            if shape is None:
-                raise ValueError('At least one of "shape" or "data" must be given')
-
-            data = data.reshape(shape)
-
+                htype = use_htype
+                htype2 = None
+                
             space = h5s.create_simple(shape)
 
-            if name in self:
-                h5a.delete(self._id, self._e(name))
-
-            attr = h5a.create(self._id, self._e(name), htype, space)
+            # This mess exists because you can't overwrite attributes in HDF5.
+            # So we write to a temporary attribute first, and then rename.
+            
+            tempname = uuid.uuid4().hex
 
-            if data is not None:
+            try:
+                attr = h5a.create(self._id, self._e(tempname), htype, space)
+            except:
+                raise
+            else:
                 try:
-                    attr.write(data)
+                    attr.write(data, mtype=htype2)
                 except:
                     attr._close()
-                    h5a.delete(self._id, self._e(name))
+                    h5a.delete(self._id, self._e(tempname))
                     raise
-
+                else:
+                    try:
+                        # No atomic rename in HDF5 :(
+                        if h5a.exists(self._id, self._e(name)):
+                            h5a.delete(self._id, self._e(name))
+                        h5a.rename(self._id, self._e(tempname), self._e(name))
+                    except:
+                        attr._close()
+                        h5a.delete(self._id, self._e(tempname))
+                        raise
+                        
     def modify(self, name, value):
         """ Change the value of an attribute while preserving its type.
 
@@ -186,5 +249,3 @@ class AttributeManager(base.DictCompat, base.CommonStateObject):
         if not self._id:
             return "<Attributes of closed HDF5 object>"
         return "<Attributes of HDF5 object at %s>" % id(self._id)
-
-collections.MutableMapping.register(AttributeManager)
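The net effect of the create() rewrite is that attribute assignment now emulates an atomic overwrite (write under a uuid-named temporary attribute, then rename over the target) and accepts top-level array dtypes. A hedged usage sketch; the file, dataset, and attribute names are chosen for illustration:

    import numpy as np
    import h5py

    with h5py.File('demo.h5', 'w') as f:
        dset = f.create_dataset('x', (10,), dtype='f4')
        # '(3,)f4' is a top-level array dtype: data is presented as (5, 3)
        dset.attrs.create('cal', data=np.ones((5, 3)), dtype='(3,)f4')
        # Overwriting goes through the temporary-attribute + rename dance
        dset.attrs.create('cal', data=np.zeros((5, 3)), dtype='(3,)f4')
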
diff --git a/h5py/_hl/base.py b/h5py/_hl/base.py
index 710934a..71e4e56 100644
--- a/h5py/_hl/base.py
+++ b/h5py/_hl/base.py
@@ -7,20 +7,25 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+from __future__ import absolute_import
+
 import posixpath
 import warnings
 import os
 import sys
+from collections import (
+    Mapping, MutableMapping, MappingView, KeysView, ValuesView, ItemsView
+)
+
+import six
 
-from h5py import h5d, h5i, h5r, h5p, h5f, h5t
+from .. import h5d, h5i, h5r, h5p, h5f, h5t
 
 # The high-level interface is serialized; every public API function & method
 # is wrapped in a lock.  We re-use the low-level lock because (1) it's fast, 
 # and (2) it eliminates the possibility of deadlocks due to out-of-order
 # lock acquisition.
-from h5py._objects import phil, with_phil
-
-py3 = sys.version_info[0] == 3
+from .._objects import phil, with_phil
 
 
 def is_hdf5(fname):
@@ -49,8 +54,8 @@ def guess_dtype(data):
             return h5t.special_dtype(ref=h5r.Reference)
         if type(data) == bytes:
             return h5t.special_dtype(vlen=bytes)
-        if type(data) == unicode:
-            return h5t.special_dtype(vlen=unicode)
+        if type(data) == six.text_type:
+            return h5t.special_dtype(vlen=six.text_type)
 
         return None
 
@@ -201,7 +206,7 @@ class HLObject(CommonStateObject):
     @with_phil
     def file(self):
         """ Return a File instance associated with this object """
-        import files
+        from . import files
         return files.File(self.id)
 
     @property
@@ -252,7 +257,7 @@ class HLObject(CommonStateObject):
     @with_phil
     def attrs(self):
         """ Attributes attached to this object """
-        import attrs
+        from . import attrs
         return attrs.AttributeManager(self)
 
     @with_phil
@@ -274,65 +279,60 @@ class HLObject(CommonStateObject):
     def __ne__(self, other):
         return not self.__eq__(other)
 
-    def __nonzero__(self):
+    def __bool__(self):
         with phil:
             return bool(self.id)
+    __nonzero__ = __bool__
 
+class MappingViewWithLock(MappingView):
 
-class View(object):
-
-    def __init__(self, obj):
-        self._obj = obj
-
-    @with_phil
     def __len__(self):
-        return len(self._obj)
-
+        with phil:
+            return super(MappingViewWithLock, self).__len__()
 
-class KeyView(View):
 
-    @with_phil
-    def __contains__(self, what):
-        return what in self._obj
+class KeysViewWithLock(MappingViewWithLock, KeysView):
+    def __contains__(self, item):
+        with phil:
+            return super(KeysViewWithLock, self).__contains__(item)
 
-    @with_phil
     def __iter__(self):
-        for x in self._obj:
-            yield x
-
+        with phil:
+            return super(KeysViewWithLock, self).__iter__()
 
-class ValueView(View):
 
-    def __contains__(self, what):
-        raise TypeError("Containership testing doesn't work for values. :(")
+class ValuesViewWithLock(MappingViewWithLock, ValuesView):
+    def __contains__(self, value):
+        with phil:
+            for key in self._mapping:
+                if value == self._mapping.get(key):
+                    return True
+            return False
 
-    @with_phil
     def __iter__(self):
-        for x in self._obj:
-            yield self._obj.get(x)
-
+        with phil:
+            for key in self._mapping:
+                yield self._mapping.get(key)
 
-class ItemView(View):
 
-    @with_phil
-    def __contains__(self, what):
-        if what[0] in self._obj:
-            return what[1] == self._obj.get(what[0])
-        return False
+class ItemsViewWithLock(MappingViewWithLock, ItemsView):
+    def __contains__(self, item):
+        with phil:
+            key, val = item
+            if key in self._mapping:
+                return val == self._mapping.get(key)
+            return False
 
-    @with_phil
     def __iter__(self):
-        for x in self._obj:
-            yield (x, self._obj.get(x))
-
+        with phil:
+            for key in self._mapping:
+                yield (key, self._mapping.get(key))
 
-class DictCompat(object):
 
+class MappingWithLock(Mapping):
     """
-        Contains dictionary-style compatibility methods for groups and
-        attributes.
+    Subclass of collections.Mapping with locks.
     """
-
     def get(self, name, default=None):
         """ Retrieve the member, or return default if it doesn't exist """
         with phil:
@@ -341,18 +341,18 @@ class DictCompat(object):
             except KeyError:
                 return default
 
-    if py3:
+    if six.PY3:
         def keys(self):
             """ Get a view object on member names """
-            return KeyView(self)
+            return KeysViewWithLock(self)
 
         def values(self):
             """ Get a view object on member objects """
-            return ValueView(self)
+            return ValuesViewWithLock(self)
 
         def items(self):
             """ Get a view object on member items """
-            return ItemView(self)
+            return ItemsViewWithLock(self)
 
     else:
         def keys(self):
@@ -360,10 +360,6 @@ class DictCompat(object):
             with phil:
                 return list(self)
 
-        def iterkeys(self):
-            """ Get an iterator over member names """
-            return iter(self)
-
         def values(self):
             """ Get a list containing member objects """
             with phil:
@@ -383,3 +379,6 @@ class DictCompat(object):
             """ Get an iterator over (name, object) pairs """
             for x in self:
                 yield (x, self.get(x))
+
+class MutableMappingWithLock(MappingWithLock,MutableMapping):
+    pass
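For reference, the *WithLock view classes above follow the standard pattern of deriving from the collections ABC views and reacquiring a re-entrant lock around each operation. A minimal standalone sketch using threading.RLock in place of h5py's phil lock (all names illustrative):

    import threading
    from collections import KeysView

    _lock = threading.RLock()

    class LockedKeysView(KeysView):
        def __contains__(self, item):
            with _lock:
                return super(LockedKeysView, self).__contains__(item)
        def __iter__(self):
            with _lock:
                return super(LockedKeysView, self).__iter__()
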
diff --git a/h5py/_hl/dataset.py b/h5py/_hl/dataset.py
index 3f71da4..89b02f0 100644
--- a/h5py/_hl/dataset.py
+++ b/h5py/_hl/dataset.py
@@ -7,16 +7,22 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+from __future__ import absolute_import
+
 import posixpath as pp
 import sys
+
+import six
+from six.moves import xrange
+
 import numpy
 
-import h5py
-from h5py import h5s, h5t, h5r, h5d
-from .base import HLObject, py3, phil, with_phil
+from .. import h5s, h5t, h5r, h5d
+from .base import HLObject, phil, with_phil
 from . import filters
 from . import selections as sel
 from . import selections2 as sel2
+from .datatype import Datatype
 
 _LEGACY_GZIP_COMPRESSION_VALS = frozenset(range(10))
 
@@ -46,7 +52,7 @@ def make_new_dset(parent, shape=None, dtype=None, data=None,
 
     # Convert data to a C-contiguous ndarray
     if data is not None:
-        import base
+        from . import base
         data = numpy.asarray(data, order="C", dtype=base.guess_dtype(data))
 
     # Validate shape
@@ -66,7 +72,7 @@ def make_new_dset(parent, shape=None, dtype=None, data=None,
                  "{} is not compatible with {}".format(chunks, shape)
         raise ValueError(errmsg)
 
-    if isinstance(dtype, h5py.Datatype):
+    if isinstance(dtype, Datatype):
         # Named types are used as-is
         tid = dtype.id
         dtype = tid.dtype  # Following code needs this
@@ -349,36 +355,14 @@ class Dataset(HLObject):
         args = args if isinstance(args, tuple) else (args,)
 
         # Sort field indices from the rest of the args.
-        names = tuple(x for x in args if isinstance(x, basestring))
-        args = tuple(x for x in args if not isinstance(x, basestring))
-        if not py3:
-            names = tuple(x.encode('utf-8') if isinstance(x, unicode) else x for x in names)
-
-        def strip_fields(basetype):
-            """ Strip extra dtype information from special types """
-            if basetype.kind == 'O':
-                return numpy.dtype('O')
-            if basetype.fields is not None:
-                if basetype.kind in ('i','u'):
-                    return basetype.fields['enum'][0]
-                fields = []
-                for name in basetype.names:
-                    fff = basetype.fields[name]
-                    if len(fff) == 3:
-                        (subtype, offset, meta) = fff
-                    else:
-                        subtype, meta = fff
-                        offset = 0
-                    subtype = strip_fields(subtype)
-                    fields.append((name, subtype))
-                return numpy.dtype(fields)
-            return basetype
+        names = tuple(x for x in args if isinstance(x, six.string_types))
+        args = tuple(x for x in args if not isinstance(x, six.string_types))
+        if not six.PY3:
+            names = tuple(x.encode('utf-8') if isinstance(x, six.text_type) else x for x in names)
 
         def readtime_dtype(basetype, names):
             """ Make a NumPy dtype appropriate for reading """
 
-            basetype = strip_fields(basetype)
-
             if len(names) == 0:  # Not compound, or we want all fields
                 return basetype
 
@@ -487,15 +471,15 @@ class Dataset(HLObject):
         args = args if isinstance(args, tuple) else (args,)
 
         # Sort field indices from the slicing
-        names = tuple(x for x in args if isinstance(x, basestring))
-        args = tuple(x for x in args if not isinstance(x, basestring))
-        if not py3:
-            names = tuple(x.encode('utf-8') if isinstance(x, unicode) else x for x in names)
+        names = tuple(x for x in args if isinstance(x, six.string_types))
+        args = tuple(x for x in args if not isinstance(x, six.string_types))
+        if not six.PY3:
+            names = tuple(x.encode('utf-8') if isinstance(x, six.text_type) else x for x in names)
 
         # Generally we try to avoid converting the arrays on the Python
         # side.  However, for compound literals this is unavoidable.
         vlen = h5t.check_dtype(vlen=self.dtype)
-        if vlen not in (bytes, unicode, None):
+        if vlen is not None and vlen not in (bytes, six.text_type):
             try:
                 val = numpy.asarray(val, dtype=vlen)
             except ValueError:
@@ -665,15 +649,39 @@ class Dataset(HLObject):
     @with_phil
     def __repr__(self):
         if not self:
-            r = u'<Closed HDF5 dataset>'
+            r = six.u('<Closed HDF5 dataset>')
         else:
             if self.name is None:
-                namestr = u'("anonymous")'
+                namestr = six.u('("anonymous")')
             else:
                 name = pp.basename(pp.normpath(self.name))
-                namestr = u'"%s"' % (name if name != u'' else u'/')
-            r = u'<HDF5 dataset %s: shape %s, type "%s">' % \
+                namestr = six.u('"%s"') % (
+                    name if name != six.u('') else six.u('/'))
+            r = six.u('<HDF5 dataset %s: shape %s, type "%s">') % \
                 (namestr, self.shape, self.dtype.str)
-        if py3:
+        if six.PY3:
             return r
         return r.encode('utf8')
+        
+    if hasattr(h5d.DatasetID, "refresh"):
+        @with_phil
+        def refresh(self):
+            """ Refresh the dataset metadata by reloading from the file.
+            
+            This is part of the SWMR features and only exists when the
+            HDF5 library version is >= 1.9.178.
+            """
+            self._id.refresh()
+                
+    if hasattr(h5d.DatasetID, "flush"):
+        @with_phil
+        def flush(self):
+            """ Flush the dataset data and metadata to the file.
+            If the dataset is chunked, raw data chunks are written to the file.
+            
+            This is part of the SWMR features and only exists when the
+            HDF5 library version is >= 1.9.178.
+            """
+            self._id.flush()
+            
+
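These two conditionally-defined methods are the dataset-level half of the SWMR workflow: the writer flushes after each append, and the reader refreshes to observe the new extent. A hedged sketch of the writer/reader split they enable (requires an HDF5 build with SWMR; file and dataset names are illustrative):

    import h5py

    # Writer side: create the structure, then switch to SWMR.
    f = h5py.File('counts.h5', 'w', libver='latest')
    dset = f.create_dataset('data', (0,), maxshape=(None,), dtype='i8')
    f.swmr_mode = True
    dset.resize((1,))
    dset[0] = 42
    dset.flush()          # make the new data visible to readers

    # Reader side (a separate process), polling for growth:
    r = h5py.File('counts.h5', 'r', libver='latest', swmr=True)
    rdset = r['data']
    rdset.refresh()       # re-read the dataset's metadata (shape, etc.)
    print(rdset.shape)
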
diff --git a/h5py/_hl/datatype.py b/h5py/_hl/datatype.py
index 3e1dd01..aab79ae 100644
--- a/h5py/_hl/datatype.py
+++ b/h5py/_hl/datatype.py
@@ -7,6 +7,8 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+from __future__ import absolute_import
+
 import posixpath as pp
 
 from ..h5t import TypeID
diff --git a/h5py/_hl/dims.py b/h5py/_hl/dims.py
index af7db6e..d11abc1 100644
--- a/h5py/_hl/dims.py
+++ b/h5py/_hl/dims.py
@@ -7,9 +7,11 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+from __future__ import absolute_import
+
 import numpy
 
-from h5py import h5ds
+from .. import h5ds
 from . import base
 from .base import phil, with_phil
 from .dataset import Dataset, readtime_dtype
@@ -112,7 +114,7 @@ class DimensionProxy(base.CommonStateObject):
                % (self.label, self._dimension, id(self._id)))
 
 
-class DimensionManager(base.DictCompat, base.CommonStateObject):
+class DimensionManager(base.MappingWithLock, base.CommonStateObject):
 
     """
     """
diff --git a/h5py/_hl/files.py b/h5py/_hl/files.py
index 7985194..515d0f8 100644
--- a/h5py/_hl/files.py
+++ b/h5py/_hl/files.py
@@ -7,23 +7,31 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+from __future__ import absolute_import
+
 import weakref
 import sys
 import os
 
-from .base import HLObject, py3, phil, with_phil
+import six
+
+from .base import HLObject, phil, with_phil
 from .group import Group
-from h5py import h5, h5f, h5p, h5i, h5fd, h5t, _objects
-from h5py import version
+from .. import h5, h5f, h5p, h5i, h5fd, h5t, _objects
+from .. import version
 
 mpi = h5.get_config().mpi
 hdf5_version = version.hdf5_version_tuple[0:3]
 
+swmr_support = False
+if hdf5_version >= h5.get_config().swmr_min_hdf5_version:
+    swmr_support = True
+
 if mpi:
     import mpi4py
 
 libver_dict = {'earliest': h5f.LIBVER_EARLIEST, 'latest': h5f.LIBVER_LATEST}
-libver_dict_r = dict((y, x) for x, y in libver_dict.iteritems())
+libver_dict_r = dict((y, x) for x, y in six.iteritems(libver_dict))
 
 
 def make_fapl(driver, libver, **kwds):
@@ -58,7 +66,7 @@ def make_fapl(driver, libver, **kwds):
     return plist
 
 
-def make_fid(name, mode, userblock_size, fapl, fcpl=None):
+def make_fid(name, mode, userblock_size, fapl, fcpl=None, swmr=False):
     """ Get a new FileID by opening or creating a file.
     Also validates mode argument."""
 
@@ -75,10 +83,13 @@ def make_fid(name, mode, userblock_size, fapl, fcpl=None):
         fcpl.set_userblock(userblock_size)
 
     if mode == 'r':
-        fid = h5f.open(name, h5f.ACC_RDONLY, fapl=fapl)
+        flags = h5f.ACC_RDONLY
+        if swmr and swmr_support:
+            flags |= h5f.ACC_SWMR_READ
+        fid = h5f.open(name, flags, fapl=fapl)
     elif mode == 'r+':
         fid = h5f.open(name, h5f.ACC_RDWR, fapl=fapl)
-    elif mode == 'w-':
+    elif mode in ['w-', 'x']:
         fid = h5f.create(name, h5f.ACC_EXCL, fapl=fapl, fcpl=fcpl)
     elif mode == 'w':
         fid = h5f.create(name, h5f.ACC_TRUNC, fapl=fapl, fcpl=fcpl)
@@ -102,7 +113,7 @@ def make_fid(name, mode, userblock_size, fapl, fcpl=None):
             except IOError:
                 fid = h5f.create(name, h5f.ACC_EXCL, fapl=fapl, fcpl=fcpl)
     else:
-        raise ValueError("Invalid mode; must be one of r, r+, w, w-, a")
+        raise ValueError("Invalid mode; must be one of r, r+, w, w-, x, a")
 
     try:
         if userblock_size is not None:
@@ -128,7 +139,7 @@ class File(Group):
         """ Attributes attached to this object """
         # hdf5 complains that a file identifier is an invalid location for an
         # attribute. Instead of self, pass the root group to AttributeManager:
-        import attrs
+        from . import attrs
         return attrs.AttributeManager(self['/'])
 
     @property
@@ -192,10 +203,23 @@ class File(Group):
         @with_phil
         def atomic(self, value):
             self.id.set_mpi_atomicity(value)
-
+            
+    if swmr_support:
+        @property
+        def swmr_mode(self):
+            return self._swmr_mode
+            
+        @swmr_mode.setter
+        @with_phil
+        def swmr_mode(self, value):
+            if value:
+                self.id.start_swmr_write()
+                self._swmr_mode = True
+            else:
+                raise ValueError("It is not possible to forcibly switch SWMR mode off.")
 
     def __init__(self, name, mode=None, driver=None, 
-                 libver=None, userblock_size=None, **kwds):
+                 libver=None, userblock_size=None, swmr=False, **kwds):
         """Create a new file object.
 
         See the h5py user guide for a detailed explanation of the options.
@@ -211,10 +235,15 @@ class File(Group):
             and 'latest' are defined.
         userblock
             Desired size of user block.  Only allowed when creating a new
-            file (mode w or w-).
+            file (mode w, w- or x).
+        swmr
+            Open the file in SWMR read mode. Only used when mode = 'r'.
         Additional keywords
             Passed on to the selected file driver.
         """
+        if swmr and not swmr_support:
+            raise ValueError("The SWMR feature is not available in this version of the HDF5 library")
+        
         with phil:
             if isinstance(name, _objects.ObjectID):
                 fid = h5i.get_file_id(name)
@@ -228,8 +257,13 @@ class File(Group):
                     pass
 
                 fapl = make_fapl(driver, libver, **kwds)
-                fid = make_fid(name, mode, userblock_size, fapl)
-
+                fid = make_fid(name, mode, userblock_size, fapl, swmr=swmr)
+            
+                if swmr_support:
+                    self._swmr_mode = False
+                    if swmr and mode == 'r':
+                        self._swmr_mode = True                    
+                    
             Group.__init__(self, fid)
 
     def close(self):
@@ -274,16 +308,16 @@ class File(Group):
     @with_phil
     def __repr__(self):
         if not self.id:
-            r = u'<Closed HDF5 file>'
+            r = six.u('<Closed HDF5 file>')
         else:
             # Filename has to be forced to Unicode if it comes back bytes
             # Mode is always a "native" string
             filename = self.filename
             if isinstance(filename, bytes):  # Can't decode fname
                 filename = filename.decode('utf8', 'replace')
-            r = u'<HDF5 file "%s" (mode %s)>' % (os.path.basename(filename),
+            r = six.u('<HDF5 file "%s" (mode %s)>') % (os.path.basename(filename),
                                                  self.mode)
 
-        if py3:
+        if six.PY3:
             return r
         return r.encode('utf8')
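
Taken together, these files.py changes give the high-level File object its SWMR entry points. A usage sketch, assuming an HDF5 library at or above the 1.9.178 SWMR threshold:

    import h5py

    # writer: the latest file format is required before enabling SWMR writing
    w = h5py.File('swmr.h5', 'w', libver='latest')
    w.create_dataset('data', (0,), maxshape=(None,), dtype='f4', chunks=(1024,))
    w.swmr_mode = True                 # invokes H5Fstart_swmr_write

    # reader: open with swmr=True; swmr_mode then reports True
    r = h5py.File('swmr.h5', 'r', swmr=True)
    assert r.swmr_mode
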
diff --git a/h5py/_hl/filters.py b/h5py/_hl/filters.py
index 32e2719..1ef8370 100644
--- a/h5py/_hl/filters.py
+++ b/h5py/_hl/filters.py
@@ -38,10 +38,16 @@
         Tuple of available filter names for encoding 
 """
 
+from __future__ import absolute_import, division
+
+import six
+
 import numpy as np
 
-from h5py import h5s, h5z, h5p, h5d
+from .. import h5s, h5z, h5p, h5d
 
+if six.PY3:
+    long = int
 
 _COMP_FILTERS = {'gzip': h5z.FILTER_DEFLATE,
                 'szip': h5z.FILTER_SZIP,
@@ -56,7 +62,7 @@ DEFAULT_SZIP = ('nn', 8)
 def _gen_filter_tuples():
     decode = []
     encode = []
-    for name, code in _COMP_FILTERS.iteritems():
+    for name, code in six.iteritems(_COMP_FILTERS):
         if h5z.filter_avail(code):
             info = h5z.get_filter_info(code)
             if info & h5z.FILTER_CONFIG_ENCODE_ENABLED:
diff --git a/h5py/_hl/group.py b/h5py/_hl/group.py
index 6033c5f..4d469e2 100644
--- a/h5py/_hl/group.py
+++ b/h5py/_hl/group.py
@@ -7,19 +7,23 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+from __future__ import absolute_import
+
 import posixpath as pp
 
+import six
+
 import numpy
 import collections
 
-from h5py import h5g, h5i, h5o, h5r, h5t, h5l, h5p
+from .. import h5g, h5i, h5o, h5r, h5t, h5l, h5p
 from . import base
-from .base import HLObject, DictCompat, py3, phil, with_phil
+from .base import HLObject, MutableMappingWithLock, phil, with_phil
 from . import dataset
 from . import datatype
 
 
-class Group(HLObject, DictCompat):
+class Group(HLObject, MutableMappingWithLock):
 
     """ Represents an HDF5 group.
     """
@@ -451,17 +455,17 @@ class Group(HLObject, DictCompat):
     @with_phil
     def __repr__(self):
         if not self:
-            r = u"<Closed HDF5 group>"
+            r = six.u("<Closed HDF5 group>")
         else:
-            namestr = (u'"%s"' % self.name) if self.name is not None else u"(anonymous)"
-            r = u'<HDF5 group %s (%d members)>' % (namestr, len(self))
+            namestr = (
+                six.u('"%s"') % self.name
+            ) if self.name is not None else six.u("(anonymous)")
+            r = six.u('<HDF5 group %s (%d members)>') % (namestr, len(self))
 
-        if py3:
+        if six.PY3:
             return r
         return r.encode('utf8')
 
-collections.MutableMapping.register(Group)
-
 
 class HardLink(object):
 
diff --git a/h5py/_hl/selections.py b/h5py/_hl/selections.py
index af41907..8f93524 100644
--- a/h5py/_hl/selections.py
+++ b/h5py/_hl/selections.py
@@ -11,17 +11,25 @@
     High-level access to HDF5 dataspace selections
 """
 
+from __future__ import absolute_import
+
+import six
+from six.moves import xrange
+
 import numpy as np
 
-from h5py import h5s, h5r
+from .. import h5s, h5r
 
 # Selection types for hyperslabs
-from h5py.h5s import SELECT_SET  as SET
-from h5py.h5s import SELECT_OR   as OR
-from h5py.h5s import SELECT_AND  as AND
-from h5py.h5s import SELECT_XOR  as XOR
-from h5py.h5s import SELECT_NOTB as NOTB
-from h5py.h5s import SELECT_NOTA as NOTA
+from ..h5s import SELECT_SET  as SET
+from ..h5s import SELECT_OR   as OR
+from ..h5s import SELECT_AND  as AND
+from ..h5s import SELECT_XOR  as XOR
+from ..h5s import SELECT_NOTB as NOTB
+from ..h5s import SELECT_NOTA as NOTA
+
+if six.PY3:
+    long = int
 
 def select(shape, args, dsid):
     """ High-level routine to generate a selection from arbitrary arguments
@@ -297,7 +305,7 @@ class SimpleSelection(Selection):
         tshape.reverse()
         tshape = tuple(tshape)
 
-        chunks = tuple(x/y for x, y in zip(count, tshape))
+        chunks = tuple(x//y for x, y in zip(count, tshape))
         nchunks = long(np.product(chunks))
 
         if nchunks == 1:
@@ -427,7 +435,7 @@ class FancySelection(Selection):
             # TODO: fallback to standard selection
             raise TypeError("Advanced selection inappropriate")
 
-        vectorlength = len(sequenceargs.values()[0])
+        vectorlength = len(list(sequenceargs.values())[0])
         if not all(len(x) == vectorlength for x in sequenceargs.values()):
             raise TypeError("All sequence arguments must have the same length %s" % sequenceargs)
 
@@ -437,7 +445,7 @@ class FancySelection(Selection):
         argvector = []
         for idx in xrange(vectorlength):
             entry = list(args)
-            for position, seq in sequenceargs.iteritems():
+            for position, seq in six.iteritems(sequenceargs):
                 entry[position] = seq[idx]
             argvector.append(entry)
 
diff --git a/h5py/_hl/selections2.py b/h5py/_hl/selections2.py
index c6ce8ac..d0e1b54 100644
--- a/h5py/_hl/selections2.py
+++ b/h5py/_hl/selections2.py
@@ -7,8 +7,10 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+from __future__ import absolute_import
+
 import numpy as np
-from h5py import h5s
+from .. import h5s
 
 def read_dtypes(dataset_dtype, names):
     """ Returns a 2-tuple containing:
diff --git a/h5py/_proxy.pyx b/h5py/_proxy.pyx
index 63a9670..21b2012 100644
--- a/h5py/_proxy.pyx
+++ b/h5py/_proxy.pyx
@@ -256,7 +256,7 @@ ctypedef struct h5py_scatter_t:
     void* buf
 
 cdef herr_t h5py_scatter_cb(void* elem, hid_t type_id, unsigned ndim,
-                hsize_t *point, void *operator_data) except -1:
+                const hsize_t *point, void *operator_data) except -1:
 
     cdef h5py_scatter_t* info = <h5py_scatter_t*>operator_data
    
@@ -268,7 +268,7 @@ cdef herr_t h5py_scatter_cb(void* elem, hid_t type_id, unsigned ndim,
     return 0
 
 cdef herr_t h5py_gather_cb(void* elem, hid_t type_id, unsigned ndim,
-                hsize_t *point, void *operator_data) except -1:
+                const hsize_t *point, void *operator_data) except -1:
 
     cdef h5py_scatter_t* info = <h5py_scatter_t*>operator_data
    
diff --git a/h5py/api_functions.txt b/h5py/api_functions.txt
index ea5e707..96579e4 100644
--- a/h5py/api_functions.txt
+++ b/h5py/api_functions.txt
@@ -106,8 +106,12 @@ hdf5:
 
   herr_t    H5Diterate(void *buf, hid_t type_id, hid_t space_id,  H5D_operator_t op, void* operator_data)
   herr_t    H5Dset_extent(hid_t dset_id, hsize_t* size)
-
-
+  
+  # SWMR functions
+  1.9.178   herr_t H5Dflush(hid_t dataset_id)
+  1.9.178   herr_t H5Drefresh(hid_t dataset_id)
+  
+  
   # === H5F - File API ========================================================
 
   hid_t     H5Fcreate(char *filename, unsigned int flags, hid_t create_plist, hid_t access_plist)
@@ -138,6 +142,9 @@ hdf5:
   # MPI functions
   MPI 1.8.9 herr_t H5Fset_mpi_atomicity(hid_t file_id, hbool_t flag)
   MPI 1.8.9 herr_t H5Fget_mpi_atomicity(hid_t file_id, hbool_t *flag)
+  
+  # SWMR functions
+  1.9.178   herr_t H5Fstart_swmr_write(hid_t file_id)
 
 
   # === H5G - Groups API ======================================================
@@ -197,7 +204,7 @@ hdf5:
   herr_t    H5Literate_by_name(hid_t loc_id, char *group_name, H5_index_t idx_type, H5_iter_order_t order, hsize_t *idx, H5L_iterate_t op, void *op_data, hid_t lapl_id)
   herr_t    H5Lvisit(hid_t grp_id, H5_index_t idx_type, H5_iter_order_t order, H5L_iterate_t op, void *op_data)
   herr_t    H5Lvisit_by_name(hid_t loc_id, char *group_name, H5_index_t idx_type, H5_iter_order_t order, H5L_iterate_t op, void *op_data, hid_t lapl_id)
-  herr_t    H5Lunpack_elink_val(void *ext_linkval, size_t link_size, unsigned *flags, char **filename, char **obj_path)
+  herr_t    H5Lunpack_elink_val(void *ext_linkval, size_t link_size, unsigned *flags, const char **filename, const char **obj_path)
   herr_t    H5Lcreate_external(char *file_name, char *obj_name, hid_t link_loc_id, char *link_name, hid_t lcpl_id, hid_t lapl_id)
 
 
@@ -262,9 +269,9 @@ hdf5:
   herr_t    H5Pset_family_offset ( hid_t fapl_id, hsize_t offset)
   herr_t    H5Pget_family_offset ( hid_t fapl_id, hsize_t *offset)
   herr_t    H5Pset_fapl_log(hid_t fapl_id, char *logfile, unsigned int flags, size_t buf_size)
-  herr_t    H5Pset_fapl_multi(hid_t fapl_id, H5FD_mem_t *memb_map, hid_t *memb_fapl, char **memb_name, haddr_t *memb_addr, hbool_t relax)
+  herr_t    H5Pset_fapl_multi(hid_t fapl_id, H5FD_mem_t *memb_map, hid_t *memb_fapl, const char * const *memb_name, haddr_t *memb_addr, hbool_t relax)
   herr_t    H5Pset_cache(hid_t plist_id, int mdc_nelmts, int rdcc_nelmts,  size_t rdcc_nbytes, double rdcc_w0)
-  herr_t    H5Pget_cache(hid_t plist_id, int *mdc_nelmts, int *rdcc_nelmts, size_t *rdcc_nbytes, double *rdcc_w0)
+  herr_t    H5Pget_cache(hid_t plist_id, int *mdc_nelmts, size_t *rdcc_nelmts, size_t *rdcc_nbytes, double *rdcc_w0)
   herr_t    H5Pset_fapl_sec2(hid_t fapl_id)
   herr_t    H5Pset_fapl_stdio(hid_t fapl_id)
   hid_t     H5Pget_driver(hid_t fapl_id)
@@ -386,7 +393,7 @@ hdf5:
 
   hssize_t  H5Sget_select_elem_npoints(hid_t space_id)
   herr_t    H5Sget_select_elem_pointlist(hid_t space_id, hsize_t startpoint,  hsize_t numpoints, hsize_t *buf)
-  herr_t    H5Sselect_elements(hid_t space_id, H5S_seloper_t op,  size_t num_elements, hsize_t **coord)
+  herr_t    H5Sselect_elements(hid_t space_id, H5S_seloper_t op,  size_t num_elements, const hsize_t *coord)
 
   hssize_t  H5Sget_select_hyper_nblocks(hid_t space_id )
   herr_t    H5Sget_select_hyper_blocklist(hid_t space_id,  hsize_t startblock, hsize_t numblocks, hsize_t *buf )
diff --git a/h5py/api_types_hdf5.pxd b/h5py/api_types_hdf5.pxd
index 7a0b13a..250d91e 100644
--- a/h5py/api_types_hdf5.pxd
+++ b/h5py/api_types_hdf5.pxd
@@ -86,6 +86,8 @@ cdef extern from "hdf5.h":
     H5F_ACC_EXCL
     H5F_ACC_DEBUG
     H5F_ACC_CREAT
+    H5F_ACC_SWMR_WRITE
+    H5F_ACC_SWMR_READ
 
   # The difference between a single file and a set of mounted files
   cdef enum H5F_scope_t:
diff --git a/h5py/h5.pyx b/h5py/h5.pyx
index 295bdf4..48131a7 100644
--- a/h5py/h5.pyx
+++ b/h5py/h5.pyx
@@ -126,6 +126,11 @@ cdef class H5PYConfig:
                 return True
             ELSE:
                 return False
+                
+    property swmr_min_hdf5_version:
+        """ Tuple indicating the minimum HDF5 version required for SWMR features"""
+        def __get__(self):
+            return SWMR_MIN_HDF5_VERSION
 
 cdef H5PYConfig cfg = H5PYConfig()
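
The new property lets callers test for SWMR support at runtime rather than hard-coding a version. A sketch:

    import h5py

    cfg = h5py.get_config()
    have_swmr = h5py.version.hdf5_version_tuple >= cfg.swmr_min_hdf5_version
    print("SWMR available:", have_swmr)
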
 
diff --git a/h5py/h5a.pyx b/h5py/h5a.pyx
index b891a4c..94bc6ea 100644
--- a/h5py/h5a.pyx
+++ b/h5py/h5a.pyx
@@ -227,7 +227,7 @@ cdef class _AttrVisitor:
         self.func = func
         self.retval = None
 
-cdef herr_t cb_attr_iter(hid_t loc_id, char* attr_name, H5A_info_t *ainfo, void* vis_in) except 2:
+cdef herr_t cb_attr_iter(hid_t loc_id, const char* attr_name, const H5A_info_t *ainfo, void* vis_in) except 2:
     cdef _AttrVisitor vis = <_AttrVisitor>vis_in
     cdef AttrInfo info = AttrInfo()
     info.info = ainfo[0]
@@ -236,7 +236,7 @@ cdef herr_t cb_attr_iter(hid_t loc_id, char* attr_name, H5A_info_t *ainfo, void*
         return 1
     return 0
 
-cdef herr_t cb_attr_simple(hid_t loc_id, char* attr_name, H5A_info_t *ainfo, void* vis_in) except 2:
+cdef herr_t cb_attr_simple(hid_t loc_id, const char* attr_name, const H5A_info_t *ainfo, void* vis_in) except 2:
     cdef _AttrVisitor vis = <_AttrVisitor>vis_in
     vis.retval = vis.func(attr_name)
     if vis.retval is not None:
@@ -330,8 +330,8 @@ cdef class AttrID(ObjectID):
 
     
     @with_phil
-    def read(self, ndarray arr not None):
-        """(NDARRAY arr)
+    def read(self, ndarray arr not None, TypeID mtype=None):
+        """(NDARRAY arr, TypeID mtype=None)
 
         Read the attribute data into the given Numpy array.  Note that the
         Numpy array must have the same shape as the HDF5 attribute, and a
@@ -339,8 +339,9 @@ cdef class AttrID(ObjectID):
 
         The Numpy array must be writable and C-contiguous.  If this is not
         the case, the read will fail with an exception.
+        
+        If provided, the HDF5 TypeID mtype will override the array's dtype.
         """
-        cdef TypeID mtype
         cdef hid_t space_id
         space_id = 0
 
@@ -348,7 +349,8 @@ cdef class AttrID(ObjectID):
             space_id = H5Aget_space(self.id)
             check_numpy_write(arr, space_id)
 
-            mtype = py_create(arr.dtype)
+            if mtype is None:
+                mtype = py_create(arr.dtype)
 
             attr_rw(self.id, mtype.id, PyArray_DATA(arr), 1)
 
@@ -358,7 +360,7 @@ cdef class AttrID(ObjectID):
 
 
     @with_phil
-    def write(self, ndarray arr not None):
+    def write(self, ndarray arr not None, TypeID mtype=None):
         """(NDARRAY arr)
 
         Write the contents of a Numpy array too the attribute.  Note that
@@ -368,15 +370,16 @@ cdef class AttrID(ObjectID):
         The Numpy array must be C-contiguous.  If this is not the case,
         the write will fail with an exception.
         """
-        cdef TypeID mtype
         cdef hid_t space_id
         space_id = 0
 
         try:
             space_id = H5Aget_space(self.id)
             check_numpy_read(arr, space_id)
-            mtype = py_create(arr.dtype)
-
+            
+            if mtype is None:
+                mtype = py_create(arr.dtype)
+                
             attr_rw(self.id, mtype.id, PyArray_DATA(arr), 0)
 
         finally:
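
With the optional mtype argument, callers can force a specific HDF5 memory type instead of the one derived from the array's dtype. A minimal sketch against the low-level attribute API (file and attribute names are hypothetical):

    import numpy as np
    import h5py
    from h5py import h5a, h5t

    with h5py.File('attrs.h5', 'w') as f:
        f.attrs['x'] = np.arange(3, dtype='i8')
        aid = h5a.open(f.id, b'x')
        out = np.empty((3,), dtype='i8')
        mtype = h5t.py_create(out.dtype)   # explicit memory type
        aid.read(out, mtype=mtype)         # overrides the automatic py_create(arr.dtype)
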
diff --git a/h5py/h5d.pyx b/h5py/h5d.pyx
index b15013b..032fb0f 100644
--- a/h5py/h5d.pyx
+++ b/h5py/h5d.pyx
@@ -10,6 +10,8 @@
     Provides access to the low-level HDF5 "H5D" dataset interface.
 """
 
+include "config.pxi"
+
 # Compile-time imports
 from _objects cimport pdefault
 from numpy cimport ndarray, import_array, PyArray_DATA, NPY_WRITEABLE
@@ -351,3 +353,41 @@ cdef class DatasetID(ObjectID):
             may even be zero.
         """
         return H5Dget_storage_size(self.id)
+        
+    IF HDF5_VERSION >= SWMR_MIN_HDF5_VERSION:
+
+        @with_phil
+        def flush(self):
+            """ no return
+            
+            Flushes all buffers associated with a dataset to disk.
+            
+            This function causes all buffers associated with a dataset to be 
+            immediately flushed to disk without removing the data from the cache.
+            
+            Use this in SWMR write mode to allow readers to be updated with the
+            dataset changes.
+            
+            Feature requires: 1.9.178 HDF5
+            """ 
+            H5Dflush(self.id)
+
+        @with_phil
+        def refresh(self):
+            """ no return
+            
+            Refreshes all buffers associated with a dataset. 
+            
+            This function causes all buffers associated with a dataset to be
+            cleared and immediately re-loaded with updated contents from disk.
+            
+            This function essentially closes the dataset, evicts all metadata
+            associated with it from the cache, and then re-opens the dataset.
+            The reopened dataset is automatically re-registered with the same ID. 
+            
+            Use this in SWMR read mode to poll for dataset changes.
+            
+            Feature requires: 1.9.178 HDF5
+            """ 
+            H5Drefresh(self.id)
+
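
These low-level calls surface in the high-level API as Dataset.flush() and Dataset.refresh(). A writer-side sketch, assuming an SWMR-capable HDF5 build:

    import numpy as np
    import h5py

    f = h5py.File('swmr.h5', 'w', libver='latest')
    dset = f.create_dataset('data', (0,), maxshape=(None,),
                            dtype='f4', chunks=(1024,))
    f.swmr_mode = True

    dset.resize((1024,))
    dset[:] = np.random.random(1024)
    dset.flush()      # H5Dflush: push buffers to disk so readers can see the data
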
diff --git a/h5py/h5f.pyx b/h5py/h5f.pyx
index 0e3dad6..f01d64f 100644
--- a/h5py/h5f.pyx
+++ b/h5py/h5f.pyx
@@ -33,6 +33,10 @@ ACC_TRUNC   = H5F_ACC_TRUNC
 ACC_EXCL    = H5F_ACC_EXCL
 ACC_RDWR    = H5F_ACC_RDWR
 ACC_RDONLY  = H5F_ACC_RDONLY
+IF HDF5_VERSION >= SWMR_MIN_HDF5_VERSION:
+    ACC_SWMR_WRITE = H5F_ACC_SWMR_WRITE
+    ACC_SWMR_READ  = H5F_ACC_SWMR_READ
+
 
 SCOPE_LOCAL     = H5F_SCOPE_LOCAL
 SCOPE_GLOBAL    = H5F_SCOPE_GLOBAL
@@ -451,3 +455,36 @@ cdef class FileID(GroupID):
         # I feel this should have some sanity checking to make sure that
         H5Fset_mdc_config(self.id, &config.cache_config)
 
+    IF HDF5_VERSION >= SWMR_MIN_HDF5_VERSION:
+
+        @with_phil
+        def start_swmr_write(self):
+            """ no return
+
+            Enables SWMR writing mode for a file.
+            
+            This function will activate SWMR writing mode for a file associated 
+            with file_id. This routine will prepare and ensure the file is safe
+            for SWMR writing as follows:
+            
+                * Check that the file is opened with write access (H5F_ACC_RDWR).
+                * Check that the file is opened with the latest library format
+                  to ensure data structures with check-summed metadata are used.
+                * Check that the file is not already marked in SWMR writing mode.
+                * Enable reading retries for check-summed metadata to remedy
+                  possible checksum failures from reading inconsistent metadata 
+                  on a system that is not atomic.
+                * Turn off usage of the library’s accumulator to avoid possible 
+                  ordering problems on a system that is not atomic.
+                * Perform a flush of the file’s data buffers and metadata to set
+                  a consistent state for starting SWMR write operations.
+
+            Library objects are groups, datasets, and committed datatypes. For 
+            the current implementation, groups and datasets can remain open when
+            activating SWMR writing mode, but not committed datatypes. Attributes
+            attached to objects cannot remain open. 
+
+            Feature requires: 1.9.178 HDF5
+            """
+            H5Fstart_swmr_write(self.id)
+            
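
The preconditions listed in the docstring matter in practice: the file must be writable, created with libver='latest', and any open attributes must be closed first. A low-level sketch:

    import h5py

    f = h5py.File('swmr.h5', 'w', libver='latest')   # latest format is required
    f.create_dataset('data', (10,), dtype='f4')      # datasets may stay open
    f.id.start_swmr_write()                          # low-level form of f.swmr_mode = True
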
diff --git a/h5py/h5l.pyx b/h5py/h5l.pyx
index d8ab7a8..8af2e5a 100644
--- a/h5py/h5l.pyx
+++ b/h5py/h5l.pyx
@@ -67,7 +67,7 @@ cdef class _LinkVisitor:
         self.retval = None
         self.info = LinkInfo()
 
-cdef herr_t cb_link_iterate(hid_t grp, char* name, H5L_info_t *istruct, void* data) except 2:
+cdef herr_t cb_link_iterate(hid_t grp, const char* name, const H5L_info_t *istruct, void* data) except 2:
     # Standard iteration callback for iterate/visit routines
 
     cdef _LinkVisitor it = <_LinkVisitor?>data
@@ -77,7 +77,7 @@ cdef herr_t cb_link_iterate(hid_t grp, char* name, H5L_info_t *istruct, void* da
         return 0
     return 1
 
-cdef herr_t cb_link_simple(hid_t grp, char* name, H5L_info_t *istruct, void* data) except 2:
+cdef herr_t cb_link_simple(hid_t grp, const char* name, const H5L_info_t *istruct, void* data) except 2:
     # Simplified iteration callback which only provides the name
 
     cdef _LinkVisitor it = <_LinkVisitor?>data
diff --git a/h5py/h5o.pyx b/h5py/h5o.pyx
index f0e5289..1078ae7 100644
--- a/h5py/h5o.pyx
+++ b/h5py/h5o.pyx
@@ -274,7 +274,7 @@ cdef class _ObjectVisitor:
         self.retval = None
         self.objinfo = ObjInfo()
 
-cdef herr_t cb_obj_iterate(hid_t obj, char* name, H5O_info_t *info, void* data) except 2:
+cdef herr_t cb_obj_iterate(hid_t obj, const char* name, const H5O_info_t *info, void* data) except 2:
 
     cdef _ObjectVisitor visit
 
@@ -290,7 +290,7 @@ cdef herr_t cb_obj_iterate(hid_t obj, char* name, H5O_info_t *info, void* data)
         return 1
     return 0
 
-cdef herr_t cb_obj_simple(hid_t obj, char* name, H5O_info_t *info, void* data) except 2:
+cdef herr_t cb_obj_simple(hid_t obj, const char* name, const H5O_info_t *info, void* data) except 2:
 
     cdef _ObjectVisitor visit
 
diff --git a/h5py/h5p.pyx b/h5py/h5p.pyx
index f8edc6d..da175dd 100644
--- a/h5py/h5p.pyx
+++ b/h5py/h5p.pyx
@@ -934,8 +934,8 @@ cdef class PropFAID(PropInstanceID):
         3. UINT rdcc_nbytes:     Size of raw data cache
         4. DOUBLE rdcc_w0:       Preemption policy for data cache.
         """
-        cdef int mdc, rdcc
-        cdef size_t rdcc_nbytes
+        cdef int mdc
+        cdef size_t rdcc, rdcc_nbytes
         cdef double w0
 
         H5Pget_cache(self.id, &mdc, &rdcc, &rdcc_nbytes, &w0)
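
The corrected signature (rdcc_nelmts is a size_t, not an int, in HDF5 1.8+) does not change the Python-facing call. A sketch of reading the cache parameters from a file-access property list:

    from h5py import h5p

    fapl = h5p.create(h5p.FILE_ACCESS)
    mdc, rdcc_nelmts, rdcc_nbytes, w0 = fapl.get_cache()
    print(rdcc_nelmts, rdcc_nbytes)   # chunk-cache slot count and size in bytes
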
diff --git a/h5py/h5s.pyx b/h5py/h5s.pyx
index b8b44d9..fbf13cb 100644
--- a/h5py/h5s.pyx
+++ b/h5py/h5s.pyx
@@ -485,7 +485,7 @@ cdef class SpaceID(ObjectID):
 
         nelements = hcoords.dimensions[0]
 
-        H5Sselect_elements(self.id, <H5S_seloper_t>op, nelements, <hsize_t**>hcoords.data)
+        H5Sselect_elements(self.id, <H5S_seloper_t>op, nelements, <hsize_t*>hcoords.data)
 
 
     # === Hyperslab selection functions =======================================
diff --git a/h5py/highlevel.py b/h5py/highlevel.py
index 88e5470..2901c54 100644
--- a/h5py/highlevel.py
+++ b/h5py/highlevel.py
@@ -7,12 +7,14 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
-from _hl import filters
-from _hl.base import is_hdf5, HLObject
-from _hl.files import File
-from _hl.group import Group, SoftLink, ExternalLink, HardLink
-from _hl.dataset import Dataset
-from _hl.datatype import Datatype
-from _hl.attrs import AttributeManager
+from __future__ import absolute_import
+
+from ._hl import filters
+from ._hl.base import is_hdf5, HLObject
+from ._hl.files import File
+from ._hl.group import Group, SoftLink, ExternalLink, HardLink
+from ._hl.dataset import Dataset
+from ._hl.datatype import Datatype
+from ._hl.attrs import AttributeManager
 
 
diff --git a/h5py/ipy_completer.py b/h5py/ipy_completer.py
index 4266562..c9f34f3 100644
--- a/h5py/ipy_completer.py
+++ b/h5py/ipy_completer.py
@@ -35,6 +35,8 @@ as should::
 
 """
 
+from __future__ import absolute_import
+
 import posixpath
 import re
 
diff --git a/h5py/tests/__init__.py b/h5py/tests/__init__.py
index 06127ef..24ebc99 100644
--- a/h5py/tests/__init__.py
+++ b/h5py/tests/__init__.py
@@ -38,4 +38,4 @@ def run_tests(verbose=False):
         for (case, reason) in result.expectedFailures:
             print("X  %s" % mname(case), file=sys.stderr)
         
-    return result
\ No newline at end of file
+    return result
diff --git a/h5py/tests/common.py b/h5py/tests/common.py
index 7c211a8..583fd0c 100644
--- a/h5py/tests/common.py
+++ b/h5py/tests/common.py
@@ -7,10 +7,15 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+from __future__ import absolute_import
+
 import sys
 import os
 import shutil
 import tempfile
+
+from six import unichr
+
 import numpy as np
 import h5py
 
@@ -26,18 +31,12 @@ else:
             )
 
 
-if sys.version_info[0] == 3:
-    PY3 = True
-else:
-    PY3 = False
-
-
 # Check if non-ascii filenames are supported
 # Evidently this is the most reliable way to check
 # See also h5py issue #263 and ipython #466
 # To test for this, run the testsuite with LC_ALL=C
 try:
-    testfile, fname = tempfile.mkstemp(u'\u03b7')
+    testfile, fname = tempfile.mkstemp(unichr(0x03b7))
 except UnicodeError:
     UNICODE_FILENAMES = False
 else:
@@ -162,4 +161,4 @@ class TestCase(ut.TestCase):
             self.assertArrayEqual(dset[s], arr_result)
         else:
             with self.assertRaises(exc):
-                dset[s]
\ No newline at end of file
+                dset[s]
diff --git a/h5py/tests/hl/__init__.py b/h5py/tests/hl/__init__.py
index 958b083..ac4c463 100644
--- a/h5py/tests/hl/__init__.py
+++ b/h5py/tests/hl/__init__.py
@@ -1,7 +1,14 @@
+
+from __future__ import absolute_import
+
 from . import  (test_dataset_getitem, 
+                test_dataset_swmr,
                 test_dims_dimensionproxy,
-                test_file, )
+                test_file, 
+                test_attribute_create, )
                 
 MODULES = ( test_dataset_getitem, 
+            test_dataset_swmr, 
             test_dims_dimensionproxy,
-            test_file, )
\ No newline at end of file
+            test_file,
+            test_attribute_create, )
diff --git a/h5py/tests/hl/test_attribute_create.py b/h5py/tests/hl/test_attribute_create.py
new file mode 100644
index 0000000..e518841
--- /dev/null
+++ b/h5py/tests/hl/test_attribute_create.py
@@ -0,0 +1,47 @@
+# This file is part of h5py, a Python interface to the HDF5 library.
+#
+# http://www.h5py.org
+#
+# Copyright 2008-2013 Andrew Collette and contributors
+#
+# License:  Standard 3-clause BSD; see "license.txt" for full license terms
+#           and contributor agreement.
+
+"""
+    Tests the h5py.AttributeManager.create() method.
+"""
+
+from __future__ import absolute_import
+
+import numpy as np
+import h5py
+
+from ..common import ut, TestCase
+
+class TestArray(TestCase):
+
+    """
+        Check that top-level array types can be created and read.
+    """
+    
+    def test_int(self):
+        # See issue 498
+        
+        dt = np.dtype('(3,)i')
+        data = np.arange(3, dtype='i')
+        
+        self.f.attrs.create('x', data=data, dtype=dt)
+        
+        aid = h5py.h5a.open(self.f.id, b'x')
+        
+        htype = aid.get_type()
+        self.assertEqual(htype.get_class(), h5py.h5t.ARRAY)
+        
+        out = self.f.attrs['x']
+        
+        self.assertArrayEqual(out, data)
+        
+    def test_string_dtype(self):
+        # See issue 498 discussion: the dtype may also be given as a string
+        
+        self.f.attrs.create('x', data=42, dtype='i8')
diff --git a/h5py/tests/hl/test_dataset_getitem.py b/h5py/tests/hl/test_dataset_getitem.py
index edcc9a5..a07f4c4 100644
--- a/h5py/tests/hl/test_dataset_getitem.py
+++ b/h5py/tests/hl/test_dataset_getitem.py
@@ -40,6 +40,8 @@
         Field names
 """
 
+from __future__ import absolute_import
+
 import numpy as np
 import h5py
 
@@ -402,4 +404,3 @@ class Test2DZeroFloat(TestCase):
         self.assertNumpyBehavior(self.dset, self.data, np.s_[:,[0,1,2]])
 
         
-        
\ No newline at end of file
diff --git a/h5py/tests/hl/test_dataset_swmr.py b/h5py/tests/hl/test_dataset_swmr.py
new file mode 100644
index 0000000..6ef843d
--- /dev/null
+++ b/h5py/tests/hl/test_dataset_swmr.py
@@ -0,0 +1,159 @@
+from __future__ import absolute_import
+
+import numpy as np
+import h5py
+
+from ..common import ut, TestCase
+
+
+ at ut.skipUnless(h5py.version.hdf5_version_tuple < (1, 9, 178), 'SWMR is available. Skipping backwards compatible tests')
+class TestSwmrNotAvailable(TestCase):
+    """ Test backwards compatibility behaviour when using SWMR functions with 
+    an older version of HDF5 which does not have this feature available.
+    Skip this test if SWMR features *are* available in the HDF5 library.
+    """
+    
+    def setUp(self):
+        TestCase.setUp(self)
+        self.data = np.arange(13).astype('f')
+        self.dset = self.f.create_dataset('data', chunks=(13,), maxshape=(None,), data=self.data)
+
+    def test_open_swmr_raises(self):
+        fname = self.f.filename
+        self.f.close()
+ 
+        with self.assertRaises(ValueError):
+            self.f = h5py.File(fname, 'r', swmr=True)
+        
+    def test_refresh_raises(self):
+        """ If the SWMR feature is not available then Dataset.refresh() should throw an AttributeError
+        """
+        with self.assertRaises(AttributeError):
+            self.dset.refresh()
+
+    def test_flush_raises(self):
+        """ If the SWMR feature is not available the Dataset.flush() should 
+        throw an AttributeError
+        """
+        with self.assertRaises(AttributeError):
+            self.dset.flush()
+            
+    def test_swmr_mode_raises(self):
+        with self.assertRaises(AttributeError):
+            self.f.swmr_mode
+            
+ at ut.skipUnless(h5py.version.hdf5_version_tuple >= (1, 9, 178), 'SWMR requires HDF5 >= 1.9.178')
+class TestDatasetSwmrRead(TestCase):
+    """ Testing SWMR functions when reading a dataset.
+    Skip this test if the HDF5 library does not have the SWMR features.
+    """
+    
+    def setUp(self):
+        TestCase.setUp(self)
+        self.data = np.arange(13).astype('f')
+        self.dset = self.f.create_dataset('data', chunks=(13,), maxshape=(None,), data=self.data)
+        fname = self.f.filename
+        self.f.close()
+       
+        self.f = h5py.File(fname, 'r', swmr=True)
+        self.dset = self.f['data']
+        
+    def test_initial_swmr_mode_on(self):
+        """ Verify that the file is initially in SWMR mode"""
+        self.assertTrue(self.f.swmr_mode)
+        
+    def test_read_data(self):
+        self.assertArrayEqual(self.dset, self.data)
+        
+    def test_refresh(self):
+        self.dset.refresh()
+        
+    def test_force_swmr_mode_on_raises(self):
+        """ Verify when reading a file cannot be forcibly switched to swmr mode.
+        When reading with SWMR the file must be opened with swmr=True."""
+        with self.assertRaises(ValueError):
+            self.f.swmr_mode = True
+        self.assertTrue(self.f.swmr_mode)
+        
+    def test_force_swmr_mode_off_raises(self):
+        """ Switching SWMR write mode off is only possible by closing the file.
+        Attempts to forcibly switch off the SWMR mode should raise a ValueError.
+        """
+        with self.assertRaises(ValueError):
+            self.f.swmr_mode = False
+        self.assertTrue(self.f.swmr_mode)
+
+ at ut.skipUnless(h5py.version.hdf5_version_tuple >= (1, 9, 178), 'SWMR requires HDF5 >= 1.9.178')
+class TestDatasetSwmrWrite(TestCase):
+    """ Testing SWMR functions when reading a dataset.
+    Skip this test if the HDF5 library does not have the SWMR features.
+    """
+    
+    def setUp(self):
+        """ First setup a file with a small chunked and empty dataset. 
+        No data written yet.
+        """
+        
+        # Note that swmr=True is not required when creating the file for
+        # writing, but libver='latest' is required.
+        self.f = h5py.File(self.mktemp(), 'w', libver='latest')
+        
+        self.data = np.arange(4).astype('f')
+        self.dset = self.f.create_dataset('data', shape=(0,), dtype=self.data.dtype, chunks=(2,), maxshape=(None,))
+        
+
+    def test_initial_swmr_mode_off(self):
+        """ Verify that the file is not initially in SWMR mode"""
+        self.assertFalse(self.f.swmr_mode)
+        
+    def test_switch_swmr_mode_on(self):
+        """ Switch to SWMR mode and verify """
+        self.f.swmr_mode = True
+        self.assertTrue(self.f.swmr_mode)
+        
+    def test_switch_swmr_mode_off_raises(self):
+        """ Switching SWMR write mode off is only possible by closing the file.
+        Attempts to forcibly switch off the SWMR mode should raise a ValueError.
+        """
+        self.f.swmr_mode = True
+        self.assertTrue(self.f.swmr_mode)
+        with self.assertRaises(ValueError):
+            self.f.swmr_mode = False
+        self.assertTrue(self.f.swmr_mode)
+        
+    def test_extend_dset(self):
+        """ Extend and flush a SWMR dataset
+        """
+        self.f.swmr_mode = True
+        self.assertTrue(self.f.swmr_mode)
+
+        self.dset.resize( self.data.shape )
+        self.dset[:] = self.data
+        self.dset.flush()
+        
+        # Refresh and read back data for assertion
+        self.dset.refresh()
+        self.assertArrayEqual(self.dset, self.data)
+        
+    def test_extend_dset_multiple(self):
+    
+        self.f.swmr_mode = True
+        self.assertTrue(self.f.swmr_mode)
+
+        self.dset.resize( (4,) )
+        self.dset[0:] = self.data
+        self.dset.flush()
+        
+        # Refresh and read back 1st data block for assertion
+        self.dset.refresh()
+        self.assertArrayEqual(self.dset, self.data)
+
+        self.dset.resize( (8,) )
+        self.dset[4:] = self.data
+        self.dset.flush()
+
+        # Refresh and read back both data blocks for assertion
+        self.dset.refresh()        
+        self.assertArrayEqual(self.dset[0:4], self.data)
+        self.assertArrayEqual(self.dset[4:8], self.data)
+        
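
The reader side these tests exercise would poll with refresh(); a sketch of a separate reader process (the stop condition and sleep interval are hypothetical):

    import time
    import h5py

    f = h5py.File('swmr.h5', 'r', swmr=True)
    dset = f['data']
    nread = 0
    while nread < 4096:        # hypothetical stop condition
        dset.refresh()         # pick up the writer's latest flush
        if dset.shape[0] > nread:
            print(dset[nread:])
            nread = dset.shape[0]
        time.sleep(0.1)
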
diff --git a/h5py/tests/hl/test_dims_dimensionproxy.py b/h5py/tests/hl/test_dims_dimensionproxy.py
index ed3d283..a707f52 100644
--- a/h5py/tests/hl/test_dims_dimensionproxy.py
+++ b/h5py/tests/hl/test_dims_dimensionproxy.py
@@ -11,6 +11,8 @@
     Tests the h5py.Dataset.dims.DimensionProxy class.
 """
 
+from __future__ import absolute_import
+
 import numpy as np
 import h5py
 
@@ -21,4 +23,4 @@ class TestItems(TestCase):
     def test_empty(self):
         """ no dimension scales -> empty list """
         dset = self.f.create_dataset('x', (10,))
-        self.assertEqual(dset.dims[0].items(), [])
\ No newline at end of file
+        self.assertEqual(dset.dims[0].items(), [])
diff --git a/h5py/tests/hl/test_file.py b/h5py/tests/hl/test_file.py
index 01610a4..fefeb23 100644
--- a/h5py/tests/hl/test_file.py
+++ b/h5py/tests/hl/test_file.py
@@ -11,6 +11,8 @@
     Tests the h5py.File object.
 """
 
+from __future__ import absolute_import
+
 import h5py
 
 from ..common import ut, TestCase
@@ -64,4 +66,4 @@ class TestDealloc(TestCase):
         del f
         
         self.assertEqual(nfiles(), start_nfiles)
-        self.assertEqual(ngroups(), start_ngroups)
\ No newline at end of file
+        self.assertEqual(ngroups(), start_ngroups)
diff --git a/h5py/tests/old/__init__.py b/h5py/tests/old/__init__.py
index 5062aea..379d316 100644
--- a/h5py/tests/old/__init__.py
+++ b/h5py/tests/old/__init__.py
@@ -1,3 +1,6 @@
+
+from __future__ import absolute_import
+
 from . import ( test_attrs,
                 test_attrs_data,
                 test_base,
@@ -28,4 +31,4 @@ MODULES = ( test_attrs,
                 test_h5t,
                 test_objects,
                 test_selections,
-                test_slicing )
\ No newline at end of file
+                test_slicing )
diff --git a/h5py/tests/old/common.py b/h5py/tests/old/common.py
index d41e4e6..04e2d37 100644
--- a/h5py/tests/old/common.py
+++ b/h5py/tests/old/common.py
@@ -7,13 +7,11 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
-import sys
+from __future__ import absolute_import
 
-if sys.version_info[0] == 3:
-    py3 = True
-else:
-    py3 = False
+import sys
 
+from six import unichr, PY3
 
 if sys.version_info >= (2, 7) or sys.version_info >= (3, 2):
     import unittest as ut
@@ -108,7 +106,7 @@ class TestCase(ut.TestCase):
 # See also h5py issue #263 and ipython #466
 # To test for this, run the testsuite with LC_ALL=C
 try:
-    testfile, fname = tempfile.mkstemp(u'\u03b7')
+    testfile, fname = tempfile.mkstemp(unichr(0x03b7))
 except UnicodeError:
     unicode_filenames = False
 else:
diff --git a/h5py/tests/old/test_attrs.py b/h5py/tests/old/test_attrs.py
index dae9b66..d67df4b 100644
--- a/h5py/tests/old/test_attrs.py
+++ b/h5py/tests/old/test_attrs.py
@@ -15,10 +15,14 @@
     are tested by module test_attrs_data.
 """
 
+from __future__ import absolute_import
+
+import six
+
 import numpy as np
 import collections
 
-from common import TestCase, ut
+from .common import TestCase, ut
 
 from h5py.highlevel import File
 from h5py import h5a,  h5t
@@ -44,7 +48,7 @@ class TestAccess(BaseAttrs):
     def test_create(self):
         """ Attribute creation by direct assignment """
         self.f.attrs['a'] = 4.0
-        self.assertEqual(self.f.attrs.keys(), ['a'])
+        self.assertEqual(list(self.f.attrs.keys()), ['a'])
         self.assertEqual(self.f.attrs['a'], 4.0)
 
     def test_overwrite(self):
@@ -112,7 +116,7 @@ class TestUnicode(BaseAttrs):
 
     def test_unicode(self):
         """ Access via Unicode string with non-ascii characters """
-        name = u"Omega \u03A9"
+        name = six.u("Omega") + six.unichr(0x03A9)
         self.f.attrs[name] = 42
         out = self.f.attrs[name]
         self.assertEqual(out, 42)
diff --git a/h5py/tests/old/test_attrs_data.py b/h5py/tests/old/test_attrs_data.py
index 8c87cdf..1c92d77 100644
--- a/h5py/tests/old/test_attrs_data.py
+++ b/h5py/tests/old/test_attrs_data.py
@@ -13,9 +13,13 @@
     Covers all data read/write and type-conversion operations for attributes.
 """
 
+from __future__ import absolute_import
+
+import six
+
 import numpy as np
 
-from .common import TestCase, ut, py3
+from .common import TestCase, ut
 
 import h5py
 from h5py import h5a, h5s, h5t
@@ -173,10 +177,10 @@ class TestTypes(BaseAttrs):
     def test_unicode_scalar(self):
         """ Storage of variable-length unicode strings (auto-creation) """
 
-        self.f.attrs['x'] = u"Hello\u2340!!"
+        self.f.attrs['x'] = six.u("Hello") + six.unichr(0x2340) + six.u("!!")
         out = self.f.attrs['x']
-        self.assertEqual(out, u"Hello\u2340!!")
-        self.assertEqual(type(out), unicode)
+        self.assertEqual(out, six.u("Hello") + six.unichr(0x2340) + six.u("!!"))
+        self.assertEqual(type(out), six.text_type)
 
         aid = h5py.h5a.open(self.f.id, b"x")
         tid = aid.get_type()
@@ -213,11 +217,11 @@ class TestEmpty(BaseAttrs):
 
     def test_itervalues(self):
         with self.assertRaises(IOError):
-            list(self.f.attrs.itervalues())
+            list(six.itervalues(self.f.attrs))
 
     def test_iteritems(self):
         with self.assertRaises(IOError):
-            list(self.f.attrs.iteritems())
+            list(six.iteritems(self.f.attrs))
 
 
 class TestWriteException(BaseAttrs):
diff --git a/h5py/tests/old/test_base.py b/h5py/tests/old/test_base.py
index b5f8f3e..d9192b0 100644
--- a/h5py/tests/old/test_base.py
+++ b/h5py/tests/old/test_base.py
@@ -13,8 +13,12 @@
     Tests features common to all high-level objects, like the .name property.
 """
 
+from __future__ import absolute_import
+
+import six
+
 from h5py import File
-from .common import ut, TestCase, py3, unicode_filenames
+from .common import ut, TestCase, unicode_filenames
 
 import numpy as np
 import os
@@ -46,11 +50,11 @@ class TestRepr(BaseTest):
         repr() works correctly with Unicode names
     """
 
-    USTRING = u"\xfc\xdf"
+    USTRING = six.unichr(0xfc) + six.unichr(0xdf)
 
     def _check_type(self, obj):
-        if py3:
-            self.assertIsInstance(repr(obj), unicode)
+        if six.PY3:
+            self.assertIsInstance(repr(obj), six.text_type)
         else:
             self.assertIsInstance(repr(obj), bytes)
 
@@ -73,7 +77,7 @@ class TestRepr(BaseTest):
     @ut.skipIf(not unicode_filenames, "Filesystem unicode support required")
     def test_file(self):
         """ File object repr() with unicode """
-        fname = tempfile.mktemp(self.USTRING+u'.hdf5')
+        fname = tempfile.mktemp(self.USTRING+six.u('.hdf5'))
         try:
             with File(fname,'w') as f:
                 self._check_type(f)
diff --git a/h5py/tests/old/test_dataset.py b/h5py/tests/old/test_dataset.py
index 24feec0..24e7182 100644
--- a/h5py/tests/old/test_dataset.py
+++ b/h5py/tests/old/test_dataset.py
@@ -16,8 +16,12 @@
     2. Type conversion for read and write (currently untested)
 """
 
+from __future__ import absolute_import
+
 import sys
 
+import six
+
 import numpy as np
 
 from .common import ut, TestCase
@@ -43,9 +47,9 @@ class TestRepr(BaseDataset):
     def test_repr_open(self):
         """ repr() works on live and dead datasets """
         ds = self.f.create_dataset('foo', (4,))
-        self.assertIsInstance(repr(ds), basestring)
+        self.assertIsInstance(repr(ds), six.string_types)
         self.f.close()
-        self.assertIsInstance(repr(ds), basestring)
+        self.assertIsInstance(repr(ds), six.string_types)
 
 
 class TestCreateShape(BaseDataset):
@@ -521,7 +525,7 @@ class TestAutoCreate(BaseDataset):
 
     def test_vlen_unicode(self):
         """ Assignment of a unicode string produces a vlen unicode dataset """
-        self.f['x'] = u"Hello there\u2034"
+        self.f['x'] = six.u("Hello there") + six.unichr(0x2034)
         ds = self.f['x']
         tid = ds.id.get_type()
         self.assertEqual(type(tid), h5py.h5t.TypeStringID)
@@ -664,7 +668,7 @@ class TestStrings(BaseDataset):
 
     def test_vlen_unicode(self):
         """ Vlen unicode dataset maps to vlen utf-8 in the file """
-        dt = h5py.special_dtype(vlen=unicode)
+        dt = h5py.special_dtype(vlen=six.text_type)
         ds = self.f.create_dataset('x', (100,), dtype=dt)
         tid = ds.id.get_type()
         self.assertEqual(type(tid), h5py.h5t.TypeStringID)
@@ -701,12 +705,12 @@ class TestStrings(BaseDataset):
     def test_roundtrip_vlen_unicode(self):
         """ Writing and reading to unicode dataset preserves type and content
         """
-        dt = h5py.special_dtype(vlen=unicode)
+        dt = h5py.special_dtype(vlen=six.text_type)
         ds = self.f.create_dataset('x', (100,), dtype=dt)
-        data = u"Hello\u2034"
+        data = six.u("Hello") + six.unichr(0x2034)
         ds[0] = data
         out = ds[0]
-        self.assertEqual(type(out), unicode)
+        self.assertEqual(type(out), six.text_type)
         self.assertEqual(out, data)
 
     def test_roundtrip_fixed_bytes(self):
@@ -724,7 +728,7 @@ class TestStrings(BaseDataset):
     def test_unicode_write_error(self):
         """ Writing a non-utf8 byte string to a unicode vlen dataset raises
         ValueError """
-        dt = h5py.special_dtype(vlen=unicode)
+        dt = h5py.special_dtype(vlen=six.text_type)
         ds = self.f.create_dataset('x', (100,), dtype=dt)
         data = "Hello\xef"
         with self.assertRaises(ValueError):
@@ -733,12 +737,12 @@ class TestStrings(BaseDataset):
     def test_unicode_write_bytes(self):
         """ Writing valid utf-8 byte strings to a unicode vlen dataset is OK
         """
-        dt = h5py.special_dtype(vlen=unicode)
+        dt = h5py.special_dtype(vlen=six.text_type)
         ds = self.f.create_dataset('x', (100,), dtype=dt)
-        data = u"Hello there\u2034"
+        data = six.u("Hello there") + six.unichr(0x2034)
         ds[0] = data.encode('utf8')
         out = ds[0]
-        self.assertEqual(type(out), unicode)
+        self.assertEqual(type(out), six.text_type)
         self.assertEqual(out, data)
 
 
@@ -935,6 +939,19 @@ class TestVlen(BaseDataset):
         self.assertArrayEqual(ds[0], np.arange(3))
         self.assertArrayEqual(ds[1], np.arange(3))
 
+    def test_reuse_from_other(self):
+        dt = h5py.special_dtype(vlen=int)
+        ds = self.f.create_dataset('vlen', (1,), dtype=dt)
+        self.f.create_dataset('vlen2', (1,), ds[()].dtype)
+
+    def test_reuse_struct_from_other(self):
+        dt = [('a', int), ('b', h5py.special_dtype(vlen=int))]
+        ds = self.f.create_dataset('vlen', (1,), dtype=dt)
+        fname = self.f.filename
+        self.f.close()
+        self.f = h5py.File(fname)
+        self.f.create_dataset('vlen2', (1,), self.f['vlen']['b'][()].dtype)
+
     def test_convert(self):
         dt = h5py.special_dtype(vlen=int)
         ds = self.f.create_dataset('vlen', (3,), dtype=dt)
@@ -961,6 +978,70 @@ class TestVlen(BaseDataset):
         ds[:, :] = np.array([[np.arange(2), np.arange(2)],
                              [np.arange(2), np.arange(2)]])
 
+    def _help_float_testing(self, np_dt, dataset_name='vlen'):
+        """
+        Helper for testing various vlen numpy data types.
+        :param np_dt: Numpy datatype to test
+        :param dataset_name: String name of the dataset to create for testing.
+        """
+        dt = h5py.special_dtype(vlen=np_dt)
+        ds = self.f.create_dataset(dataset_name, (5,), dtype=dt)
+
+        # Create some arrays, and assign them to the dataset
+        array_0 = np.array([1., 2., 30.], dtype=np_dt)
+        array_1 = np.array([100.3, 200.4, 98.1, -10.5, -300.0], dtype=np_dt)
+
+        # Test that a numpy array of different type gets cast correctly
+        array_2 = np.array([1, 2, 8], dtype=np.dtype('int32'))
+        casted_array_2 = array_2.astype(np_dt)
+
+        # Test that we can set a list of floats.
+        list_3 = [1., 2., 900., 0., -0.5]
+        list_array_3 = np.array(list_3, dtype=np_dt)
+
+        # Test that a list of integers is cast correctly
+        list_4 = [-1, -100, 0, 1, 9999, 70]
+        list_array_4 = np.array(list_4, dtype=np_dt)
+
+        ds[0] = array_0
+        ds[1] = array_1
+        ds[2] = array_2
+        ds[3] = list_3
+        ds[4] = list_4
+
+        self.assertArrayEqual(array_0, ds[0])
+        self.assertArrayEqual(array_1, ds[1])
+        self.assertArrayEqual(casted_array_2, ds[2])
+        self.assertArrayEqual(list_array_3, ds[3])
+        self.assertArrayEqual(list_array_4, ds[4])
+
+        # Test that we can reassign arrays in the dataset
+        list_array_3 = np.array([0.3, 2.2], dtype=np_dt)
+
+        ds[0] = list_array_3[:]
+
+        self.assertArrayEqual(list_array_3, ds[0])
+
+        # Make sure we can close the file.
+        self.f.flush()
+        self.f.close()
+
+    def test_numpy_float16(self):
+        np_dt = np.dtype('float16')
+        self._help_float_testing(np_dt)
+
+    def test_numpy_float32(self):
+        np_dt = np.dtype('float32')
+        self._help_float_testing(np_dt)
+
+    def test_numpy_float64_from_dtype(self):
+        np_dt = np.dtype('float64')
+        self._help_float_testing(np_dt)
+
+    def test_numpy_float64_2(self):
+        np_dt = np.float64
+        self._help_float_testing(np_dt)
+
 
 class TestLowOpen(BaseDataset):
 
diff --git a/h5py/tests/old/test_datatype.py b/h5py/tests/old/test_datatype.py
index 0d2c470..84e015c 100644
--- a/h5py/tests/old/test_datatype.py
+++ b/h5py/tests/old/test_datatype.py
@@ -13,6 +13,10 @@
     Tests "committed" file-resident datatype objects.
 """
 
+from __future__ import absolute_import
+
+import six
+
 import numpy as np
 
 from .common import ut, TestCase
@@ -39,9 +43,9 @@ class TestCreation(BaseType):
         """ repr() on datatype objects """
         self.f['foo'] = np.dtype('S10')
         dt = self.f['foo']
-        self.assertIsInstance(repr(dt), basestring)
+        self.assertIsInstance(repr(dt), six.string_types)
         self.f.close()
-        self.assertIsInstance(repr(dt), basestring)
+        self.assertIsInstance(repr(dt), six.string_types)
 
 
     def test_appropriate_low_level_id(self):
diff --git a/h5py/tests/old/test_dimension_scales.py b/h5py/tests/old/test_dimension_scales.py
index 7eef162..309a9d6 100644
--- a/h5py/tests/old/test_dimension_scales.py
+++ b/h5py/tests/old/test_dimension_scales.py
@@ -7,6 +7,8 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+from __future__ import absolute_import
+
 import sys
 
 import numpy as np
diff --git a/h5py/tests/old/test_file.py b/h5py/tests/old/test_file.py
index 473bf46..ff9d19c 100644
--- a/h5py/tests/old/test_file.py
+++ b/h5py/tests/old/test_file.py
@@ -13,10 +13,12 @@
     Tests all aspects of File objects, including their creation.
 """
 
-from __future__ import with_statement
+from __future__ import absolute_import, with_statement
 
 import os, stat
 
+import six
+
 from .common import ut, TestCase, unicode_filenames
 from h5py.highlevel import File
 import h5py
@@ -287,13 +289,19 @@ class TestUserblock(TestCase):
     """
 
     def test_create_blocksize(self):
-        """ User blocks created with w, w- and properties work correctly """
+        """ User blocks created with w, w-, x and properties work correctly """
         f = File(self.mktemp(),'w-', userblock_size=512)
         try:
             self.assertEqual(f.userblock_size, 512)
         finally:
             f.close()
 
+        f = File(self.mktemp(),'x', userblock_size=512)
+        try:
+            self.assertEqual(f.userblock_size, 512)
+        finally:
+            f.close()
+
         f = File(self.mktemp(),'w', userblock_size=512)
         try:
             self.assertEqual(f.userblock_size, 512)
@@ -388,11 +396,11 @@ class TestUnicode(TestCase):
     def test_unicode(self):
         """ Unicode filenames can be used, and retrieved properly via .filename
         """
-        fname = self.mktemp(prefix = u'\u201a')
+        fname = self.mktemp(prefix = six.unichr(0x201a))
         fid = File(fname, 'w')
         try:
             self.assertEqual(fid.filename, fname)
-            self.assertIsInstance(fid.filename, unicode)
+            self.assertIsInstance(fid.filename, six.text_type)
         finally:
             fid.close()
 
@@ -476,9 +484,9 @@ class TestRepr(TestCase):
     def test_repr(self):
         """ __repr__ behaves itself when files are open and closed """
         fid = File(self.mktemp())
-        self.assertIsInstance(repr(fid), basestring)
+        self.assertIsInstance(repr(fid), six.string_types)
         fid.close()
-        self.assertIsInstance(repr(fid), basestring)
+        self.assertIsInstance(repr(fid), six.string_types)
 
 class TestFilename(TestCase):
 
@@ -492,7 +500,7 @@ class TestFilename(TestCase):
         fid = File(fname, 'w')
         try:
             self.assertEqual(fid.filename, fname)
-            self.assertIsInstance(fid.filename, unicode)
+            self.assertIsInstance(fid.filename, six.text_type)
         finally:
             fid.close()
 
diff --git a/h5py/tests/old/test_group.py b/h5py/tests/old/test_group.py
index 25329a1..7dedc4b 100644
--- a/h5py/tests/old/test_group.py
+++ b/h5py/tests/old/test_group.py
@@ -15,11 +15,16 @@
 
     1. Method create_dataset is tested in module test_dataset
 """
+
+from __future__ import absolute_import
+
 import collections
 import numpy as np
 import os
 import sys
 
+import six
+
 from .common import ut, TestCase
 import h5py
 from h5py.highlevel import File, Group, SoftLink, HardLink, ExternalLink
@@ -44,9 +49,9 @@ class TestRepr(BaseGroup):
     def test_repr(self):
         """ repr() works on Group objects """
         g = self.f.create_group('foo')
-        self.assertIsInstance(g, basestring)
+        self.assertIsInstance(repr(g), six.string_types)
         self.f.close()
-        self.assertIsInstance(g, basestring)
+        self.assertIsInstance(repr(g), six.string_types)
 
 class TestCreate(BaseGroup):
 
@@ -72,7 +77,7 @@ class TestCreate(BaseGroup):
 
     def test_unicode(self):
         """ Unicode names are correctly stored """
-        name = u"/Name\u4500"
+        name = six.u("/Name") + six.unichr(0x4500)
         group = self.f.create_group(name)
         self.assertEqual(group.name, name)
         self.assertEqual(group.id.links.get_info(name.encode('utf8')).cset, h5t.CSET_UTF8)
@@ -80,7 +85,7 @@ class TestCreate(BaseGroup):
     def test_unicode_default(self):
         """ Unicode names convertible to ASCII are stored as ASCII (issue 239)
         """
-        name = u"/Hello, this is a name"
+        name = six.u("/Hello, this is a name")
         group = self.f.create_group(name)
         self.assertEqual(group.name, name)
         self.assertEqual(group.id.links.get_info(name.encode('utf8')).cset, h5t.CSET_ASCII)
@@ -243,9 +248,9 @@ class TestRepr(BaseGroup):
     def test_repr(self):
         """ Opened and closed groups provide a useful __repr__ string """
         g = self.f.create_group('foo')
-        self.assertIsInstance(repr(g), basestring)
+        self.assertIsInstance(repr(g), six.string_types)
         g.id._close()
-        self.assertIsInstance(repr(g), basestring)
+        self.assertIsInstance(repr(g), six.string_types)
 
 class BaseMapping(BaseGroup):
 
@@ -292,33 +297,33 @@ class TestContains(BaseGroup):
         """ "in" builtin works for containership (byte and Unicode) """
         self.f.create_group('a')
         self.assertIn(b'a', self.f)
-        self.assertIn(u'a', self.f)
+        self.assertIn(six.u('a'), self.f)
         self.assertIn(b'/a', self.f)
-        self.assertIn(u'/a', self.f)
+        self.assertIn(six.u('/a'), self.f)
         self.assertNotIn(b'mongoose', self.f)
-        self.assertNotIn(u'mongoose', self.f)
+        self.assertNotIn(six.u('mongoose'), self.f)
 
     def test_exc(self):
         """ "in" on closed group returns False (see also issue 174) """
         self.f.create_group('a')
         self.f.close()
         self.assertFalse(b'a' in self.f)
-        self.assertFalse(u'a' in self.f)
+        self.assertFalse(six.u('a') in self.f)
 
     def test_empty(self):
         """ Empty strings work properly and aren't contained """
-        self.assertNotIn(u'', self.f)
+        self.assertNotIn(six.u(''), self.f)
         self.assertNotIn(b'', self.f)
 
     def test_dot(self):
         """ Current group "." is always contained """
         self.assertIn(b'.', self.f)
-        self.assertIn(u'.', self.f)
+        self.assertIn(six.u('.'), self.f)
 
     def test_root(self):
         """ Root group (by itself) is contained """
         self.assertIn(b'/', self.f)
-        self.assertIn(u'/', self.f)
+        self.assertIn(six.u('/'), self.f)
 
     def test_trailing_slash(self):
         """ Trailing slashes are unconditionally ignored """
@@ -413,7 +418,7 @@ class TestPy2Dict(BaseMapping):
         self.assertSameElements([x for x in self.f.iteritems()],
             [(x, self.f.get(x)) for x in self.groups])
 
-@ut.skipIf(sys.version_info[0] != 3, "Py3")
+@ut.skipIf(not six.PY3, "Py3")
 class TestPy3Dict(BaseMapping):
 
     def test_keys(self):
@@ -429,8 +434,8 @@ class TestPy3Dict(BaseMapping):
         vv = getattr(self.f, 'values')()
         self.assertSameElements(list(vv), [self.f.get(x) for x in self.groups])
         self.assertEqual(len(vv), len(self.groups))
-        with self.assertRaises(TypeError):
-            b'x' in vv
+        for x in self.groups:
+            self.assertIn(self.f.get(x), vv)
 
     def test_items(self):
         """ .items provides an item view """
@@ -440,6 +445,91 @@ class TestPy3Dict(BaseMapping):
         for x in self.groups:
             self.assertIn((x, self.f.get(x)), iv)
 
+class TestAdditionalMappingFuncs(BaseMapping):
+    """
+    Feature: Other dict methods (pop, popitem, clear, update, setdefault) are
+    available.
+    """
+    def setUp(self):
+        self.f = File(self.mktemp(), 'w')
+        for x in ('/test/a','/test/b','/test/c','/test/d'):
+            self.f.create_group(x)
+        self.group = self.f['test']
+
+    def tearDown(self):
+        if self.f:
+            self.f.close()
+
+    def test_pop_item(self):
+        """.pop_item exists and removes item"""
+        key, val = self.group.popitem()
+        self.assertNotIn(key, self.group)
+
+    def test_pop(self):
+        """.pop exists and removes specified item"""
+        self.group.pop('a')
+        self.assertNotIn('a', self.group)
+
+    def test_pop_default(self):
+        """.pop falls back to default"""
+        # e shouldn't exist as a group
+        value = self.group.pop('e', None)
+        self.assertEqual(value, None)
+
+    def test_pop_raises(self):
+        """.pop raises KeyError for non-existence"""
+        # e shouldn't exist as a group
+        with self.assertRaises(KeyError):
+            key = self.group.pop('e')
+
+    def test_clear(self):
+        """.clear removes groups"""
+        self.group.clear()
+        self.assertEqual(len(self.group), 0)
+
+    def test_update_dict(self):
+        """.update works with dict"""
+        new_items = {'e': np.array([42])}
+        self.group.update(new_items)
+        self.assertIn('e', self.group)
+
+    def test_update_iter(self):
+        """.update works with list"""
+        new_items = [
+            ('e', np.array([42])),
+            ('f', np.array([42]))
+        ]
+        self.group.update(new_items)
+        self.assertIn('e', self.group)
+
+    def test_update_kwargs(self):
+        """.update works with kwargs"""
+        new_items = {'e': np.array([42])}
+        self.group.update(**new_items)
+        self.assertIn('e', self.group)
+
+    def test_setdefault(self):
+        """.setdefault gets group if it exists"""
+        value = self.group.setdefault('a')
+        self.assertEqual(value, self.group.get('a'))
+
+    def test_setdefault_with_default(self):
+        """.setdefault gets default if group doesn't exist"""
+        # e shouldn't exist as a group
+        # np.array([42]) is used as the default; storing it creates dataset 'e'
+        value = self.group.setdefault('e', np.array([42]))
+        self.assertEqual(value, 42)
+
+    def test_setdefault_no_default(self):
+        """
+        .setdefault gets None if group doesn't exist, but as None isn't defined
+        as data for a dataset, this should raise a TypeError.
+        """
+        # e shouldn't exist as a group
+        with self.assertRaises(TypeError):
+            self.group.setdefault('e')
+
+
 class TestGet(BaseGroup):
 
     """
@@ -562,7 +652,7 @@ class TestSoftLinks(BaseGroup):
     def test_srepr(self):
         """ SoftLink path repr """
         sl = SoftLink('/foo')
-        self.assertIsInstance(repr(sl), basestring)
+        self.assertIsInstance(repr(sl), six.string_types)
 
     def test_create(self):
         """ Create new soft link by assignment """
@@ -606,7 +696,7 @@ class TestExternalLinks(TestCase):
     def test_erepr(self):
         """ External link repr """
         el = ExternalLink('foo.hdf5','/foo')
-        self.assertIsInstance(repr(el), basestring)
+        self.assertIsInstance(repr(el), six.string_types)
 
     def test_create(self):
         """ Creating external links """
diff --git a/h5py/tests/old/test_h5.py b/h5py/tests/old/test_h5.py
index d759653..bbbb88d 100644
--- a/h5py/tests/old/test_h5.py
+++ b/h5py/tests/old/test_h5.py
@@ -7,6 +7,8 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+from __future__ import absolute_import
+
 try:
     import unittest2 as ut
 except ImportError:
diff --git a/h5py/tests/old/test_h5f.py b/h5py/tests/old/test_h5f.py
index 76ef362..5dbbe02 100644
--- a/h5py/tests/old/test_h5f.py
+++ b/h5py/tests/old/test_h5f.py
@@ -7,6 +7,8 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+from __future__ import absolute_import
+
 try:
     import unittest2 as ut
 except ImportError:
diff --git a/h5py/tests/old/test_h5p.py b/h5py/tests/old/test_h5p.py
index 8ca69a6..468ca54 100644
--- a/h5py/tests/old/test_h5p.py
+++ b/h5py/tests/old/test_h5p.py
@@ -7,6 +7,8 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+from __future__ import absolute_import
+
 try:
     import unittest2 as ut
 except ImportError:
diff --git a/h5py/tests/old/test_h5t.py b/h5py/tests/old/test_h5t.py
index 3d81122..2b2a83d 100644
--- a/h5py/tests/old/test_h5t.py
+++ b/h5py/tests/old/test_h5t.py
@@ -7,6 +7,8 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+from __future__ import absolute_import
+
 try:
     import unittest2 as ut
 except ImportError:
diff --git a/h5py/tests/old/test_objects.py b/h5py/tests/old/test_objects.py
index b568884..bd80517 100644
--- a/h5py/tests/old/test_objects.py
+++ b/h5py/tests/old/test_objects.py
@@ -7,6 +7,8 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+from __future__ import absolute_import
+
 try:
     import unittest2 as ut
 except ImportError:
diff --git a/h5py/tests/old/test_selections.py b/h5py/tests/old/test_selections.py
index 76f9699..4a9dc21 100644
--- a/h5py/tests/old/test_selections.py
+++ b/h5py/tests/old/test_selections.py
@@ -11,11 +11,13 @@
     Tests for the (internal) selections module
 """
 
+from __future__ import absolute_import
+
 import numpy as np
 import h5py
 import h5py._hl.selections2 as sel
 
-from common import TestCase, ut
+from .common import TestCase, ut
 
 class TestTypeGeneration(TestCase):
 
diff --git a/h5py/tests/old/test_slicing.py b/h5py/tests/old/test_slicing.py
index 6ff3c8c..fba6cec 100644
--- a/h5py/tests/old/test_slicing.py
+++ b/h5py/tests/old/test_slicing.py
@@ -16,9 +16,13 @@
     specific fields of a compound type.
 """
 
+from __future__ import absolute_import
+
+import six
+
 import numpy as np
 
-from .common import ut, TestCase, py3
+from .common import ut, TestCase
 
 import h5py
 from h5py import h5s, h5t, h5d
@@ -298,18 +302,18 @@ class TestFieldNames(BaseSlicing):
 
     def test_read(self):
         """ Test read with field selections (bytes and unicode) """
-        if not py3:
+        if not six.PY3:
             # Byte strings are only allowed for field names on Py2
             self.assertArrayEqual(self.dset[b'a'], self.data['a'])
-        self.assertArrayEqual(self.dset[u'a'], self.data['a'])
+        self.assertArrayEqual(self.dset[six.u('a')], self.data['a'])
 
     def test_unicode_names(self):
         """ Unicode field names for for read and write """
-        self.assertArrayEqual(self.dset[u'a'], self.data['a'])
-        self.dset[u'a'] = 42
+        self.assertArrayEqual(self.dset[six.u('a')], self.data['a'])
+        self.dset[six.u('a')] = 42
         data = self.data.copy()
         data['a'] = 42
-        self.assertArrayEqual(self.dset[u'a'], data['a'])
+        self.assertArrayEqual(self.dset[six.u('a')], data['a'])
 
     def test_write(self):
         """ Test write with field selections """
diff --git a/h5py/utils.pyx b/h5py/utils.pyx
index 6c539b1..a640579 100644
--- a/h5py/utils.pyx
+++ b/h5py/utils.pyx
@@ -60,7 +60,6 @@ cdef int check_numpy(ndarray arr, hid_t space_id, int write):
     cdef int required_flags
     cdef hsize_t arr_rank
     cdef hsize_t space_rank
-    cdef hsize_t *space_dims = NULL
     cdef int i
 
     if arr is None:
@@ -78,34 +77,6 @@ cdef int check_numpy(ndarray arr, hid_t space_id, int write):
             PyErr_SetString(TypeError, "Array must be C-contiguous")
             return -1
 
-    # Validate dataspace compatibility, if it's provided
-
-    if space_id > 0:
-
-        arr_rank = arr.nd
-        space_rank = H5Sget_simple_extent_ndims(space_id)
-
-        if arr_rank != space_rank:
-            err_msg = "Numpy array rank %d must match dataspace rank %d." % (arr_rank, space_rank)
-            PyErr_SetString(TypeError, err_msg)
-            return -1
-
-        space_dims = <hsize_t*>malloc(sizeof(hsize_t)*space_rank)
-        try:
-            space_rank = H5Sget_simple_extent_dims(space_id, space_dims, NULL)
-
-            for i from 0 < i < space_rank:
-
-                if write:
-                    if PyArray_DIM(arr,i) < space_dims[i]:
-                        PyErr_SetString(TypeError, "Array dimensions are too small for the dataspace.")
-                        return -1
-                else:
-                    if PyArray_DIM(arr,i) > space_dims[i]:
-                        PyErr_SetString(TypeError, "Array dimensions are too large for the dataspace.")
-                        return -1
-        finally:
-            free(space_dims)
     return 1
 
 cpdef int check_numpy_write(ndarray arr, hid_t space_id=-1) except -1:
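
For reference, the dataspace-compatibility check removed above amounts to
roughly the following Python-level logic (a sketch only; the original code
worked on HDF5 C handles, and its loop skipped the first dimension):

    def check_shape(arr_shape, space_shape, write):
        # Rank must match exactly
        if len(arr_shape) != len(space_shape):
            raise TypeError("Numpy array rank %d must match dataspace rank %d."
                            % (len(arr_shape), len(space_shape)))
        # The first dimension was never compared in the removed loop
        for a, s in zip(arr_shape[1:], space_shape[1:]):
            if write and a < s:
                raise TypeError("Array dimensions are too small for the dataspace.")
            if not write and a > s:
                raise TypeError("Array dimensions are too large for the dataspace.")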
diff --git a/h5py/version.py b/h5py/version.py
index ea8e89f..4d65d38 100644
--- a/h5py/version.py
+++ b/h5py/version.py
@@ -7,12 +7,14 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+from __future__ import absolute_import
+
 from . import h5 as _h5
 from distutils.version import StrictVersion as _sv
 import sys
 import numpy
 
-version = "2.4.0"
+version = "2.5.0"
 
 _exp = _sv(version)
 
diff --git a/lzf/README.txt b/lzf/README.txt
index c6ad62c..c5a1c3b 100644
--- a/lzf/README.txt
+++ b/lzf/README.txt
@@ -15,8 +15,14 @@ is released under the BSD license (see LICENSE.txt for details).
 Using the filter from HDF5
 --------------------------
 
-There is exactly one new public function declared in lzf_filter.h, with
-the following signature:
+With HDF5 version 1.8.11 or later, the filter can be loaded dynamically by the
+HDF5 library.  Compile the filter as a plugin, as described below, and place it
+in the default plugin path /usr/local/hdf5/lib/plugin/.  The plugin path can be
+overridden with the environment variable HDF5_PLUGIN_PATH.
+
+With older HDF5 versions, or when statically linking the filter to your program,
+the filter must be registered manually. There is exactly one new public function
+declared in lzf_filter.h, with the following signature:
 
     int register_lzf(void)
 
@@ -38,17 +44,23 @@ version of the LZF compression library.  Since the filter is stateless, it's
 recommended to statically link the entire thing into your program; for
 example:
 
-    $ gcc -O2 -lhdf5 lzf/*.c lzf_filter.c myprog.c -o myprog
+    $ gcc -O2 lzf/*.c lzf_filter.c myprog.c -lhdf5 -o myprog
 
 It can also be built as a shared library, although you will have to install
 the resulting library somewhere the runtime linker can find it:
 
-    $ gcc -O2 -lhdf5 -fPIC -shared lzf/*.c lzf_filter.c -o liblzf_filter.so
+    $ gcc -O2 -fPIC -shared lzf/*.c lzf_filter.c -lhdf5 -o liblzf_filter.so
 
 A similar procedure should be used for building C++ code.  As in these
 examples, using option -O1 or higher is strongly recommended for increased
 performance.
 
+With HDF5 version 1.8.11 or later, the filter can be dynamically loaded as a
+plugin.  The filter is built as a shared library that is *not* linked against
+the HDF5 library:
+
+    $ gcc -O2 -fPIC -shared lzf/*.c lzf_filter.c -o liblzf_filter.so
+
 
 Contact
 -------
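
A quick way to produce a test file for the plugin is to write LZF-compressed
data with h5py itself, which ships the filter (a sketch; any HDF5 application
with the plugin on its HDF5_PLUGIN_PATH should then be able to read it):

    import numpy as np
    import h5py

    with h5py.File('lzf_test.hdf5', 'w') as f:
        # no plugin needed on the write side; h5py registers LZF itself
        f.create_dataset('data', data=np.arange(100), compression='lzf')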
diff --git a/lzf/lzf_filter.c b/lzf/lzf_filter.c
index c6dd4b0..b27aa7c 100644
--- a/lzf/lzf_filter.c
+++ b/lzf/lzf_filter.c
@@ -63,32 +63,44 @@ size_t lzf_filter(unsigned flags, size_t cd_nelmts,
 
 herr_t lzf_set_local(hid_t dcpl, hid_t type, hid_t space);
 
+#if H5PY_H5Z_NEWCLS
+static const H5Z_class_t filter_class = {
+    H5Z_CLASS_T_VERS,
+    (H5Z_filter_t)(H5PY_FILTER_LZF),
+    1, 1,
+    "lzf",
+    NULL,
+    (H5Z_set_local_func_t)(lzf_set_local),
+    (H5Z_func_t)(lzf_filter)
+};
+#else
+static const H5Z_class_t filter_class = {
+    (H5Z_filter_t)(H5PY_FILTER_LZF),
+    "lzf",
+    NULL,
+    (H5Z_set_local_func_t)(lzf_set_local),
+    (H5Z_func_t)(lzf_filter)
+};
+#endif
+
+/* Support dynamic loading of the LZF filter plugin */
+#if defined(H5_VERSION_GE)
+#if H5_VERSION_GE(1, 8, 11)
+
+#include "H5PLextern.h"
+
+H5PL_type_t H5PLget_plugin_type(void){ return H5PL_TYPE_FILTER; }
+
+const void *H5PLget_plugin_info(void){ return &filter_class; }
+
+#endif
+#endif
 
 /* Try to register the filter, passing on the HDF5 return value */
 int register_lzf(void){
 
     int retval;
 
-#if H5PY_H5Z_NEWCLS
-    H5Z_class_t filter_class = {
-        H5Z_CLASS_T_VERS,
-        (H5Z_filter_t)(H5PY_FILTER_LZF),
-        1, 1,
-        "lzf",
-        NULL,
-        (H5Z_set_local_func_t)(lzf_set_local),
-        (H5Z_func_t)(lzf_filter)
-    };
-#else
-    H5Z_class_t filter_class = {
-        (H5Z_filter_t)(H5PY_FILTER_LZF),
-        "lzf",
-        NULL,
-        (H5Z_set_local_func_t)(lzf_set_local),
-        (H5Z_func_t)(lzf_filter)
-    };
-#endif
-
     retval = H5Zregister(&filter_class);
     if(retval<0){
         PUSH_ERR("register_lzf", H5E_CANTREGISTER, "Can't register LZF filter");
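
Whether the filter is registered can be checked from Python through the
low-level API (a sketch; 32000 is assumed here to be the value of
H5PY_FILTER_LZF from lzf_filter.h):

    import h5py

    H5PY_FILTER_LZF = 32000  # assumed filter ID, see lzf_filter.h
    print(h5py.h5z.filter_avail(H5PY_FILTER_LZF))  # True once registered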
diff --git a/other/garbage.py b/other/garbage.py
new file mode 100644
index 0000000..a607da6
--- /dev/null
+++ b/other/garbage.py
@@ -0,0 +1,29 @@
+# This file is part of h5py, a Python interface to the HDF5 library.
+#
+# http://www.h5py.org
+#
+# Copyright 2008-2013 Andrew Collette and contributors
+#
+# License:  Standard 3-clause BSD; see "license.txt" for full license terms
+#           and contributor agreement.
+
+"""
+    Demonstrates garbage messages printed to stderr by containment ("in")
+    testing when performed in new threads.
+"""
+
+from threading import Thread
+
+import h5py
+
+def demonstrate():
+    with h5py.File('foo', 'w', driver='core') as f:
+        print('x' in f)
+
+if __name__ == '__main__':
+    print("Main thread")
+    demonstrate()
+    thread = Thread(target=demonstrate)
+    print("New thread")
+    thread.start()
+    thread.join()
diff --git a/other/iterate_deadlock.py b/other/iterate_deadlock.py
new file mode 100644
index 0000000..04437cb
--- /dev/null
+++ b/other/iterate_deadlock.py
@@ -0,0 +1,36 @@
+# This file is part of h5py, a Python interface to the HDF5 library.
+#
+# http://www.h5py.org
+#
+# Copyright 2008-2013 Andrew Collette and contributors
+#
+# License:  Standard 3-clause BSD; see "license.txt" for full license terms
+#           and contributor agreement.
+
+"""
+    Demonstrates a deadlock related to attribute iteration.
+"""
+
+from threading import Thread
+import sys
+
+import h5py
+
+FNAME = "deadlock.hdf5"
+
+def make_file():
+    with h5py.File(FNAME,'w') as f:
+        for idx in range(1000):
+            f.attrs['%d'%idx] = 1
+
+def list_attributes():
+    with h5py.File(FNAME, 'r') as f:
+        names = list(f.attrs)
+
+if __name__ == '__main__':
+
+    make_file()
+    thread = Thread(target=list_attributes)
+    thread.start()
+    list_attributes()
+    thread.join()
diff --git a/other/vlen_leak.py b/other/vlen_leak.py
new file mode 100644
index 0000000..583b668
--- /dev/null
+++ b/other/vlen_leak.py
@@ -0,0 +1,90 @@
+# This file is part of h5py, a Python interface to the HDF5 library.
+#
+# http://www.h5py.org
+#
+# Copyright 2008-2013 Andrew Collette and contributors
+#
+# License:  Standard 3-clause BSD; see "license.txt" for full license terms
+#           and contributor agreement.
+
+"""
+    Demonstrates a memory leak involving variable-length strings.
+"""
+
+import sys
+import resource
+import numpy as np
+
+import h5py
+
+FNAME = 'test.hdf5'
+
+if 'linux' in sys.platform:
+    MAXRSS_BYTES = 1024.  # ru_maxrss is reported in KiB on Linux
+else:
+    MAXRSS_BYTES = 1.     # reported in bytes elsewhere (e.g. OS X)
+
+if sys.version_info[0] == 3:
+    xrange = range
+    unicode = str
+
+memory = 0
+def print_memory():
+    global memory
+
+    rubytes = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss*MAXRSS_BYTES
+    print("%.2f MB (%.2f since last call)" % (rubytes/(1024.**2), (rubytes-memory)/(1024.**2)))
+    memory = rubytes
+
+
+def make_data(kind):
+    global data
+    global dt
+
+    if kind is bytes:
+        s = b"xx"
+    else:
+        s = b"xx".decode('utf8')
+
+    dt = h5py.special_dtype(vlen=kind)
+    data = np.array([s*100 for idx in xrange(1000)])
+
+
+def ds_leak():
+    print("Testing vlens for dataset r/w")
+    print("-----------------------------")
+    with h5py.File(FNAME,'w') as f:
+        ds = f.create_dataset('dset', (1000,), dtype=dt)
+        for idx in xrange(500):
+            if idx%100 == 0:
+                print_memory()
+            ds[...] = data
+            ds[...]
+
+
+def attr_leak():
+    print("Testing vlens for attribute r/w")
+    print("-------------------------------")
+    with h5py.File(FNAME,'w') as f:
+        for idx in xrange(500):
+            if idx%100 == 0:
+                print_memory()
+            f.attrs.create('foo', dtype=dt, data=data)
+            f.attrs['foo']
+
+
+if __name__ == '__main__':
+    print("h5py ", h5py.version.version)
+    print("HDF5 ", h5py.version.hdf5_version)
+    print("Bytes test")
+    print("==========")
+    make_data(bytes)
+    attr_leak()
+    ds_leak()
+    print("Unicode test")
+    print("============")
+    make_data(unicode)
+    attr_leak()
+    ds_leak()
+
diff --git a/pavement.py b/pavement.py
new file mode 100644
index 0000000..ee98b15
--- /dev/null
+++ b/pavement.py
@@ -0,0 +1,35 @@
+from paver.easy import *
+import os
+
+DLLS = ['h5py_hdf5.dll', 'h5py_hdf5_hl.dll', 'szip.dll', 'zlib.dll']
+
+@task
+def release_unix():
+    sh('python setup.py clean')
+    sh('python setup.py configure --reset --hdf5-version=1.8.4')
+    sh('python setup.py build -f')
+    sh('python setup.py test')
+    sh('python setup.py sdist')
+    print("Unix release done.  Distribution tar file is in dist/")
+
+@task
+def release_windows():
+    for pyver in (26, 27, 32, 33, 34):
+        exe = r'C:\Python%d\Python.exe' % pyver
+        hdf5 = r'c:\hdf5\Python%d' % pyver
+        sh('%s setup.py clean' % exe)
+        sh('%s setup.py configure --reset --hdf5-version=1.8.13 --hdf5=%s' % (exe, hdf5))
+        for dll in DLLS:
+            sh('copy c:\\hdf5\\Python%d\\bin\\%s h5py /Y' % (pyver, dll))
+        sh('%s setup.py build -f' % exe)
+        sh('%s setup.py test' % exe)
+        sh('%s setup.py bdist_wininst' % exe)
+    print("Windows exe release done.  Distribution files are in dist/")
+    for dll in DLLS:
+        os.unlink('h5py\\%s' % dll)
+    
+@task
+@consume_args
+def git_summary(options):
+    sh('git log --no-merges --pretty=oneline --abbrev-commit %s..HEAD' % options.args[0])
+    sh('git shortlog -s -n %s..HEAD' % options.args[0])
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 861a9f5..0000000
--- a/setup.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-[egg_info]
-tag_build = 
-tag_date = 0
-tag_svn_revision = 0
-
diff --git a/setup.py b/setup.py
index be3f8e0..1108fa8 100755
--- a/setup.py
+++ b/setup.py
@@ -19,17 +19,10 @@ import sys
 import os
 import os.path as op
 
-if sys.version_info[0] >= 3:
-    import lib2to3.refactor
-    from distutils.command.build_py \
-         import build_py_2to3 as build_py
-else:
-    from distutils.command.build_py import build_py
-    
 import setup_build, setup_configure
 
 
-VERSION = '2.4.0'
+VERSION = '2.5.0'
 
 
 # --- Custom Distutils commands -----------------------------------------------
@@ -80,8 +73,7 @@ class test(Command):
             sys.path = oldpath
         
         
-CMDCLASS = {'build_py': build_py,
-            'build_ext': setup_build.h5py_build_ext,
+CMDCLASS = {'build_ext': setup_build.h5py_build_ext,
             'configure': setup_configure.configure,
             'test': test, }
 
@@ -143,6 +135,7 @@ setup(
   package_data = package_data,
   ext_modules = [Extension('h5py.x',['x.c'])],  # To trick build into running build_ext
   requires = ['numpy (>=1.6.1)', 'Cython (>=0.17)'],
-  install_requires = ['numpy>=1.6.1', 'Cython>=0.17'],
+  install_requires = ['numpy>=1.6.1', 'Cython>=0.17', 'six'],
+  setup_requires = ['pkgconfig', 'six'],
   cmdclass = CMDCLASS,
 )
diff --git a/setup_build.py b/setup_build.py
index a65e767..bc3a8b0 100644
--- a/setup_build.py
+++ b/setup_build.py
@@ -13,6 +13,7 @@ from distutils.command.build_ext import build_ext
 import sys
 import os
 import os.path as op
+import subprocess
 from functools import reduce
 import api_gen
 
@@ -69,8 +70,19 @@ class h5py_build_ext(build_ext):
         enter the build process.
         """
         import numpy
+        import pkgconfig
 
         settings = COMPILER_SETTINGS.copy()
+
+        try:
+            if pkgconfig.exists('hdf5'):
+                pkgcfg = pkgconfig.parse("hdf5")
+                settings['include_dirs'].extend(pkgcfg['include_dirs'])
+                settings['library_dirs'].extend(pkgcfg['library_dirs'])
+                settings['define_macros'].extend(pkgcfg['define_macros'])
+        except EnvironmentError:
+            pass
+
         settings['include_dirs'] += [numpy.get_include()]
         if config.mpi:
             import mpi4py
@@ -93,6 +105,35 @@ class h5py_build_ext(build_ext):
         return [make_extension(m) for m in MODULES]
         
         
+    @staticmethod
+    def run_system_cython(pyx_files):
+        try:
+            retcode = subprocess.call(['cython', '--fast-fail', '--verbose'] + pyx_files)
+            if not retcode == 0:
+                raise Exception('ERROR: Cython failed')
+        except OSError as e:
+            print("ERROR: cython exec failed. Is cython not in the path? ", str(e))
+            raise
+        except Exception as e:
+            print("ERROR: cython exec failed", str(e))
+            raise
+
+    def check_rerun_cythonize(self):
+        """ Check whether the cythonize() call produced the expected .c files.
+        If the expected .c files are not found then cython from the system path will
+        be executed in order to produce the missing files. """
+
+        missing_c_src_files = []
+        for c_src_file in [ext.sources[0] for ext in self.extensions]:
+            if not op.isfile(c_src_file):
+                missing_c_src_files.append(c_src_file)
+        if missing_c_src_files:
+            print("WARNING: cythonize() failed to create all .c files (setuptools too old?)")
+            pyx_files = [os.path.splitext(fname)[0] + ".pyx" for fname in missing_c_src_files]
+            print("         Executing system cython on pyx files: ", str(pyx_files))
+            self.run_system_cython(pyx_files)
+
+
     def run(self):
         """ Distutils calls this method to run the command """
         
@@ -119,6 +160,7 @@ class h5py_build_ext(build_ext):
 
 DEF MPI = %(mpi)s
 DEF HDF5_VERSION = %(version)s
+DEF SWMR_MIN_HDF5_VERSION = (1,9,178)
 """
                 s %= {'mpi': bool(config.mpi),
                       'version': tuple(int(x) for x in config.hdf5_version.split('.'))}
@@ -126,8 +168,10 @@ DEF HDF5_VERSION = %(version)s
                 f.write(s)
         
         # Run Cython
+        print("Executing cythonize()")
         self.extensions = cythonize(self._make_extensions(config),
                             force=config.rebuild_required or self.force)
+        self.check_rerun_cythonize()
         
         # Perform the build
         build_ext.run(self)
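
The pkgconfig module used above wraps the pkg-config tool; the calls relied
on here behave roughly as follows (a sketch, assuming the pkgconfig Python
package and an installed hdf5.pc file):

    import pkgconfig

    if pkgconfig.exists('hdf5'):       # does pkg-config know about hdf5?
        cfg = pkgconfig.parse('hdf5')  # mapping with 'include_dirs',
                                       # 'library_dirs', 'define_macros', ...
        print(cfg['include_dirs'], cfg['library_dirs'])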
diff --git a/setup_configure.py b/setup_configure.py
index fb69413..fc7fb08 100644
--- a/setup_configure.py
+++ b/setup_configure.py
@@ -193,6 +193,8 @@ def autodetect_version(hdf5_dir=None):
     import re
     import ctypes
     from ctypes import byref
+
+    import pkgconfig
     
     if sys.platform.startswith('darwin'):
         regexp = re.compile(r'^libhdf5.dylib')
@@ -200,6 +202,11 @@ def autodetect_version(hdf5_dir=None):
         regexp = re.compile(r'^libhdf5.so')
         
     libdirs = ['/usr/local/lib', '/opt/local/lib']
+    try:
+        if pkgconfig.exists("hdf5"):
+            libdirs.append(pkgconfig.parse("hdf5")['library_dirs'])
+    except EnvironmentError:
+        pass
     if hdf5_dir is not None:
         libdirs.insert(0, op.join(hdf5_dir, 'lib'))
 
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..9af6228
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,12 @@
+[tox]
+envlist = py26,py27,py32,py33,py34
+[testenv]
+deps =
+    numpy>=1.0.1
+    Cython>=0.13
+commands =
+    python setup.py test
+[testenv:py26]
+deps =
+    unittest2
+    {[testenv]deps}
