[libvigraimpex] 28/30: Imported Upstream version 1.10.0+git20160211.167be93

Daniel Stender danstender-guest at moszumanska.debian.org
Sun Feb 14 00:55:33 UTC 2016


This is an automated email from the git hooks/post-receive script.

danstender-guest pushed a commit to branch master
in repository libvigraimpex.

commit a79bc3f83167fe8d9468f355b2699f6ecbe78e73
Author: Daniel Stender <debian at danielstender.com>
Date:   Sun Feb 14 01:08:35 2016 +0100

    Imported Upstream version 1.10.0+git20160211.167be93
---
 CMakeLists.txt                                     |  86 +-
 README.md                                          |   3 +-
 appveyor.yml                                       |  22 +
 config/FindHDF5.cmake                              |  91 ++-
 config/FindVIGRANUMPY_DEPENDENCIES.cmake           | 174 ++--
 config/VigraDetectThreading.cmake                  |  22 +-
 config/VigraSetDefaults.cmake                      |  34 +-
 config/checkHDF5version.c                          |   8 +-
 config/vigra-config.in                             |  14 +-
 docsrc/makeFunctionIndex.py                        |   1 +
 docsrc/tutorial.dxx                                |  10 +-
 include/vigra/affine_registration_fft.hxx          |   6 +-
 include/vigra/config.hxx                           |  28 +-
 include/vigra/config_version.hxx                   |   4 +-
 include/vigra/hdf5impex.hxx                        | 881 +++++++++++----------
 include/vigra/multi_blocking.hxx                   |   2 +-
 include/vigra/multi_convolution.hxx                |  16 +-
 include/vigra/multi_histogram.hxx                  |   1 +
 include/vigra/numpy_array.hxx                      |  39 +-
 include/vigra/numpy_array_converters.hxx           | 663 +++++++++++++---
 include/vigra/numpy_array_taggedshape.hxx          | 289 +++----
 include/vigra/python_utility.hxx                   | 230 +++---
 include/vigra/threading.hxx                        | 102 ++-
 include/vigra/threadpool.hxx                       | 104 ++-
 src/impex/CMakeLists.txt                           |  10 +-
 src/impex/viff.cxx                                 |  34 +-
 test/blockwisealgorithms/CMakeLists.txt            |  20 +-
 test/checkUnitTests.py                             |   6 +-
 test/correlation/CMakeLists.txt                    |   4 +-
 test/multiarray/test_chunked.cxx                   | 391 ++++-----
 test/registration/CMakeLists.txt                   |   2 +-
 test/threadpool/CMakeLists.txt                     |   8 +-
 test/threadpool/test.cxx                           |  19 +-
 vigranumpy/docsrc/CMakeLists.txt                   |  18 +-
 vigranumpy/docsrc/conf.py.cmake2.in                |   6 +-
 vigranumpy/docsrc/conf.py.in                       |   5 +-
 vigranumpy/docsrc/index.rst                        | 257 +++---
 vigranumpy/examples/VigraGraphs.ipynb              | 619 ++++++++-------
 vigranumpy/examples/blocking.py                    |  18 +-
 vigranumpy/examples/gaussian_rank.py               |  17 -
 .../examples/graph_agglomerative_clustering.py     |   1 +
 vigranumpy/examples/graph_watersheds.py            |   4 +-
 vigranumpy/examples/grid_graph_shortestpath.py     |  18 +-
 vigranumpy/examples/merge_graph.py                 |  15 +-
 vigranumpy/examples/non_local_mean_2d_color.py     |  56 +-
 vigranumpy/examples/rag_features.py                |  19 -
 vigranumpy/examples/shock_filter.py                |  10 +-
 vigranumpy/lib/CMakeLists.txt                      |  17 +-
 vigranumpy/lib/__init__.py                         | 255 +++---
 vigranumpy/lib/arraytypes.py                       |  37 +-
 vigranumpy/lib/axistags.py                         |  75 +-
 vigranumpy/lib/pyqt/imagewindow.py                 |  26 +-
 vigranumpy/lib/pyqt/overlays.py                    |   3 +-
 vigranumpy/lib/pyqt/viewer2svg.py                  |   4 +-
 vigranumpy/lib/tagged_array.py                     | 101 +--
 vigranumpy/lib/ufunc.py                            | 223 +++---
 vigranumpy/src/CMakeLists.txt                      |   3 +
 vigranumpy/src/core/CMakeLists.txt                 |  56 +-
 vigranumpy/src/core/axistags.cxx                   |  27 +-
 vigranumpy/src/core/colors.cxx                     | 117 +--
 vigranumpy/src/core/converters.cxx                 |   5 +-
 vigranumpy/src/core/convolution.cxx                | 287 +++----
 vigranumpy/src/core/edgedetection.cxx              | 128 +--
 vigranumpy/src/core/geometry.cxx                   |   6 +-
 vigranumpy/src/core/impex.cxx                      |  64 +-
 vigranumpy/src/core/morphology.cxx                 | 726 +++++++----------
 vigranumpy/src/core/multi_array_chunked.cxx        | 184 ++++-
 vigranumpy/src/core/pythonaccumulator.hxx          |  13 +-
 vigranumpy/src/core/segmentation.cxx               | 375 +++------
 vigranumpy/src/core/tensors.cxx                    | 831 +++++++++++--------
 vigranumpy/src/core/vigranumpycore.cxx             |  19 +-
 vigranumpy/src/fourier/CMakeLists.txt              |   9 +-
 vigranumpy/test/CMakeLists.txt                     |   1 +
 vigranumpy/test/test1.py                           |  10 +-
 vigranumpy/test/test2.py                           |   7 +-
 vigranumpy/test/test3.py                           |   9 +-
 vigranumpy/test/test4.py                           |  12 +-
 vigranumpy/test/test_arraytypes.py                 |  53 +-
 vigranumpy/test/test_color.py                      |   7 +-
 vigranumpy/test/test_impex.py                      |   9 +-
 vigranumpy/test/test_multidef.py                   | 582 ++++++++++++++
 vigranumpy/test/test_rf.py                         |   6 +-
 vigranumpy/test/test_segmentation.py               |   7 +-
 83 files changed, 5117 insertions(+), 3564 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2a7dc9f..f1c6ad7 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -25,7 +25,6 @@ SET(CMAKE_MODULE_PATH  ${CMAKE_MODULE_PATH}  ${vigra_SOURCE_DIR}/config)
 
 include(VigraSetDefaults)
 include(VigraCMakeUtils)
-
 INCLUDE_DIRECTORIES(${vigra_SOURCE_DIR}/include)
 
 IF(VIGRA_STATIC_LIB)
@@ -111,7 +110,23 @@ IF(WITH_HDF5)
     VIGRA_FIND_PACKAGE(HDF5)
 ENDIF()
 
+IF(WITH_LEMON)
+    VIGRA_FIND_PACKAGE(LEMON)
+ENDIF()
+
+SET(DOXYGEN_SKIP_DOT TRUE)
+FIND_PACKAGE(Doxygen)
+
+FIND_PACKAGE(PythonInterp ${PYTHON_VERSION})
+
+##################################################
+#
+#     configure boost dependency
+#
+##################################################
+
 set(WITH_BOOST "OFF")
+set(BOOST_MINIMUM_VERSION "1.40.0")
 set(WITH_BOOST_COMPONENTS "")
 
 IF(WITH_BOOST_GRAPH)
@@ -120,46 +135,28 @@ ENDIF()
 
 IF(WITH_VIGRANUMPY)
     set(WITH_BOOST "ON")
-    set(WITH_BOOST_COMPONENTS ${WITH_BOOST_COMPONENTS} python)
+    # Do not add "python" to WITH_BOOST_COMPONENTS. It is more
+    # reliable to use 'FIND_LIBRARY' in FindVIGRANUMPY_DEPENDENCIES.
 ENDIF()
 
 IF(WITH_BOOST_THREAD)
     set(WITH_BOOST "ON")
     set(WITH_BOOST_COMPONENTS ${WITH_BOOST_COMPONENTS} thread system date_time chrono)
-    ADD_DEFINITIONS(-DUSE_BOOST_THREAD)
+    set(BOOST_MINIMUM_VERSION "1.55.0")
 ENDIF()
 
 IF(WITH_BOOST)
-    VIGRA_FIND_PACKAGE( Boost 1.40.0 COMPONENTS ${WITH_BOOST_COMPONENTS})
-
-    if(Boost_FOUND)
-        INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIR})
-        if(WITH_BOOST_COMPONENTS)
-            # configure boost's autolink magic to use the right library name
-            # (default on Windows is a mangled name like 'boost_python-vc110-mt-1_51.lib')
-            if(("${Boost_PYTHON_LIBRARY}" MATCHES "boost_python\\.lib") OR
-               ("${Boost_SYSTEM_LIBRARY}" MATCHES "boost_system\\.lib"))
-                ADD_DEFINITIONS(-DBOOST_AUTO_LINK_NOMANGLE)
-            endif()
-        endif()
-    endif()
+    VIGRA_FIND_PACKAGE(Boost ${BOOST_MINIMUM_VERSION} COMPONENTS ${WITH_BOOST_COMPONENTS})
+    # FIXME: the following may be unstable between cmake versions
+    SET(Boost_LIB_SUFFIX "${_boost_COMPILER}${_boost_MULTITHREADED}-${Boost_LIB_VERSION}")
 ENDIF()
 
-IF(WITH_LEMON)
-    VIGRA_FIND_PACKAGE(LEMON)
-ENDIF()
-
-SET(DOXYGEN_SKIP_DOT TRUE)
-FIND_PACKAGE(Doxygen)
-FIND_PACKAGE(PythonInterp 2)
-
-IF(WITH_VIGRANUMPY)
-    FIND_PACKAGE( VIGRANUMPY_DEPENDENCIES )
-ENDIF()
-
-IF(WITH_VALGRIND)
-    FIND_PROGRAM(VALGRIND_EXECUTABLE valgrind)
-ENDIF()
+if(WITH_BOOST AND Boost_FOUND)
+    INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIR})
+    IF(WITH_BOOST_THREAD)
+        ADD_DEFINITIONS(-DUSE_BOOST_THREAD)
+    ENDIF()
+endif()
 
 # Must be included AFTER boost is found.
 include(VigraDetectThreading)
@@ -169,6 +166,33 @@ include(VigraConfigureThreading)
 include(VigraDetectCppVersion)
 VIGRA_DETECT_CPP_VERSION()
 
+IF(WITH_VIGRANUMPY)
+    FIND_PACKAGE(VIGRANUMPY_DEPENDENCIES)
+ENDIF()
+
+if(WITH_BOOST AND Boost_FOUND)
+    # configure boost's autolink magic to use the right library name
+    # (default on Windows is a mangled name like 'boost_system-vc110-mt-1_51.lib')
+    if((Boost_SYSTEM_LIBRARY MATCHES "boost_system\\.lib") OR
+       (Boost_PYTHON_LIBRARY MATCHES "boost_python\\.lib") OR
+       (Boost_PYTHON_LIBRARY MATCHES "boost_python${PYTHON_VERSION_MAJOR}\\.lib"))
+        set(Boost_MANGLED_NAMES 0)
+        ADD_DEFINITIONS(-DBOOST_AUTO_LINK_NOMANGLE)
+    else()
+        set(Boost_MANGLED_NAMES 1)
+    endif()
+    if((Boost_SYSTEM_LIBRARY AND
+        NOT Boost_SYSTEM_LIBRARY MATCHES "libboost_system") OR
+       (Boost_PYTHON_LIBRARY AND
+        NOT Boost_PYTHON_LIBRARY MATCHES "libboost_python"))
+        ADD_DEFINITIONS(-DBOOST_ALL_DYN_LINK)
+    endif()
+endif()
+
+IF(WITH_VALGRIND)
+    FIND_PROGRAM(VALGRIND_EXECUTABLE valgrind)
+ENDIF()
+
 ########################################
 #  finalize compiler flags
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}"
diff --git a/README.md b/README.md
index 961d151..d0a844d 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,8 @@
 VIGRA Computer Vision Library
 =============================
 
-[![Build Status](https://travis-ci.org/ukoethe/vigra.png?branch=master)](https://travis-ci.org/ukoethe/vigra)
+[![Travis CI Status](https://travis-ci.org/ukoethe/vigra.svg?branch=master)](https://travis-ci.org/ukoethe/vigra)
+[![AppVeyor Status](https://ci.appveyor.com/api/projects/status/github/ukoethe/vigra?branch=master&svg=true)](https://ci.appveyor.com/project/ukoethe/vigra/branch/master)
 
                 Copyright 1998-2013 by Ullrich Koethe
 
diff --git a/appveyor.yml b/appveyor.yml
new file mode 100644
index 0000000..7cf206d
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,22 @@
+platform:
+    - x64
+
+environment:
+    matrix:
+        - PY_VERSION: "2.7"
+          VC_VERSION: "11"
+        - PY_VERSION: "3.5"
+          VC_VERSION: "14"
+
+build_script:
+    - if "%PY_VERSION%" == "2.7" set PATH=C:\\Miniconda-x64\\Scripts;%PATH%
+    - if "%PY_VERSION%" == "3.5" set PATH=C:\\Miniconda3-x64\\Scripts;%PATH%
+    - if "%PY_VERSION%" == "2.7" conda create -q --yes -n python -c ukoethe --override-channels visual-studio=%VC_VERSION%.0 python=%PY_VERSION% jpeg libpng libtiff hdf5 fftw boost numpy nose
+    - if "%PY_VERSION%" == "3.5" conda create -q --yes -n python python=%PY_VERSION% jpeg libpng libtiff hdf5 numpy nose
+    - activate python
+    - if "%PY_VERSION%" == "3.5" conda install -q --yes -c ukoethe --override-channels visual-studio=%VC_VERSION%.0 fftw boost
+    - cd %APPVEYOR_BUILD_FOLDER%
+    - mkdir build
+    - cd build
+    - cmake .. -G "Visual Studio %VC_VERSION% Win64" -DTEST_VIGRANUMPY=1 -DDEPENDENCY_SEARCH_PREFIX="%CONDA_ACTIVE_ENV%/Library"
+    - cmake --build . --target check --config Release
diff --git a/config/FindHDF5.cmake b/config/FindHDF5.cmake
index 3244212..a842c7c 100644
--- a/config/FindHDF5.cmake
+++ b/config/FindHDF5.cmake
@@ -1,48 +1,56 @@
 # - Find HDF5, a library for reading and writing self describing array data.
 #
-FIND_PATH(HDF5_INCLUDE_DIR hdf5.h)
+
+FIND_PATH(HDF5_INCLUDE_DIR hdf5.h PATH_SUFFIXES hdf5/serial)
 
 if(HDF5_INCLUDE_DIR)
     SET(HDF5_TRY_COMPILE_INCLUDE_DIR "-DINCLUDE_DIRECTORIES:STRING=${HDF5_INCLUDE_DIR}")
 
-    FIND_LIBRARY(HDF5_CORE_LIBRARY NAMES hdf5dll hdf5  )
-    FIND_LIBRARY(HDF5_HL_LIBRARY NAMES hdf5_hldll hdf5_hl  )
-
-    # FIXME: as of version 1.8.9 and 1.8.10-patch1 (but NOT 1.8.10), these flags are
-    #        already set correctly => remove or set conditionally according to version
-    IF(WIN32 AND HDF5_CORE_LIBRARY MATCHES "dll.lib$")
-        SET(HDF5_CFLAGS "-D_HDF5USEDLL_")
-        SET(HDF5_CPPFLAGS "-D_HDF5USEDLL_ -DHDF5CPP_USEDLL")
-    ELSE()
-        SET(HDF5_CFLAGS)
-        SET(HDF5_CPPFLAGS)
-    ENDIF()
-
-    SET(HDF5_VERSION_MAJOR 1)
-    SET(HDF5_VERSION_MINOR 8)
-
     set(HDF5_SUFFICIENT_VERSION FALSE)
-    TRY_COMPILE(HDF5_SUFFICIENT_VERSION 
+    TRY_COMPILE(HDF5_SUFFICIENT_VERSION
                 ${CMAKE_BINARY_DIR} ${PROJECT_SOURCE_DIR}/config/checkHDF5version.c
-                COMPILE_DEFINITIONS "-DMIN_MAJOR=${HDF5_VERSION_MAJOR} -DMIN_MINOR=${HDF5_VERSION_MINOR}"
-                CMAKE_FLAGS "${HDF5_TRY_COMPILE_INCLUDE_DIR}") 
-            
-    if(HDF5_SUFFICIENT_VERSION)
-        MESSAGE(STATUS 
-               "Checking HDF5 version (at least ${HDF5_VERSION_MAJOR}.${HDF5_VERSION_MINOR}): ok")
+                CMAKE_FLAGS "${HDF5_TRY_COMPILE_INCLUDE_DIR}")
+
+    if(NOT HDF5_SUFFICIENT_VERSION)
+        MESSAGE(STATUS "   HDF5: unable to compile a simple test program.\n      (include path: '${HDF5_INCLUDE_DIR}')" )
     else()
-        MESSAGE( STATUS "HDF5: need at least version ${HDF5_VERSION_MAJOR}.${HDF5_VERSION_MINOR}" )
+        SET(HDF5_VERSION_MAJOR 1)
+        SET(HDF5_VERSION_MINOR 8)
+        set(HDF5_SUFFICIENT_VERSION FALSE)
+        TRY_COMPILE(HDF5_SUFFICIENT_VERSION
+                    ${CMAKE_BINARY_DIR} ${PROJECT_SOURCE_DIR}/config/checkHDF5version.c
+                    COMPILE_DEFINITIONS "-DCHECK_VERSION=1 -DMIN_MAJOR=${HDF5_VERSION_MAJOR} -DMIN_MINOR=${HDF5_VERSION_MINOR}"
+                    CMAKE_FLAGS "${HDF5_TRY_COMPILE_INCLUDE_DIR}")
+
+        if(NOT HDF5_SUFFICIENT_VERSION)
+            MESSAGE(STATUS "   HDF5: need at least version ${HDF5_VERSION_MAJOR}.${HDF5_VERSION_MINOR}" )
+        else()
+            MESSAGE(STATUS
+                   "   Checking HDF5 version (at least ${HDF5_VERSION_MAJOR}.${HDF5_VERSION_MINOR}): ok")
+        endif()
     endif()
-    
+
+    # Only configure HDF5 if a suitable version of the library was found
     if(HDF5_SUFFICIENT_VERSION)
-        # Only test for HDF5 features if a suitable version of the library was 
-        # found previously.
+
+        FIND_LIBRARY(HDF5_CORE_LIBRARY NAMES hdf5dll hdf5 PATH_SUFFIXES hdf5/serial )
+        FIND_LIBRARY(HDF5_HL_LIBRARY NAMES hdf5_hldll hdf5_hl PATH_SUFFIXES hdf5/serial )
+
+        # FIXME: as of version 1.8.9 and 1.8.10-patch1 (but NOT 1.8.10), these flags are
+        #        already set correctly => remove or set conditionally according to version
+        IF(WIN32 AND HDF5_CORE_LIBRARY MATCHES "dll.lib$")
+            SET(HDF5_CFLAGS "-D_HDF5USEDLL_")
+            SET(HDF5_CPPFLAGS "-D_HDF5USEDLL_ -DHDF5CPP_USEDLL")
+        ELSE()
+            SET(HDF5_CFLAGS)
+            SET(HDF5_CPPFLAGS)
+        ENDIF()
 
         set(HDF5_USES_ZLIB FALSE)
         TRY_COMPILE(HDF5_USES_ZLIB
                    ${CMAKE_BINARY_DIR} ${PROJECT_SOURCE_DIR}/config/checkHDF5usesCompression.c
                    COMPILE_DEFINITIONS "-DH5_SOMETHING=H5_HAVE_FILTER_DEFLATE"
-                   CMAKE_FLAGS "${HDF5_TRY_COMPILE_INCLUDE_DIR}") 
+                   CMAKE_FLAGS "${HDF5_TRY_COMPILE_INCLUDE_DIR}")
 
         if(HDF5_USES_ZLIB)
             FIND_LIBRARY(HDF5_Z_LIBRARY NAMES zlib1 zlib z )
@@ -53,11 +61,11 @@ if(HDF5_INCLUDE_DIR)
         endif()
 
         set(HDF5_USES_SZLIB FALSE)
-        TRY_COMPILE(HDF5_USES_SZLIB 
+        TRY_COMPILE(HDF5_USES_SZLIB
                     ${CMAKE_BINARY_DIR} ${PROJECT_SOURCE_DIR}/config/checkHDF5usesCompression.c
                     COMPILE_DEFINITIONS "-DH5_SOMETHING=H5_HAVE_FILTER_SZIP"
-                    CMAKE_FLAGS "${HDF5_TRY_COMPILE_INCLUDE_DIR}") 
-                
+                    CMAKE_FLAGS "${HDF5_TRY_COMPILE_INCLUDE_DIR}")
+
         if(HDF5_USES_SZLIB)
             FIND_LIBRARY(HDF5_SZ_LIBRARY NAMES szlibdll sz szip)
             set(HDF5_SZLIB_OK ${HDF5_SZ_LIBRARY})
@@ -65,17 +73,24 @@ if(HDF5_INCLUDE_DIR)
             set(HDF5_SZLIB_OK TRUE)
             set(HDF5_SZ_LIBRARY "")
         endif()
-    
     endif()
 endif()
 
-# handle the QUIETLY and REQUIRED arguments and set HDF5_FOUND to TRUE if 
+# handle the QUIETLY and REQUIRED arguments and set HDF5_FOUND to TRUE if
 # all listed variables are TRUE
 INCLUDE(FindPackageHandleStandardArgs)
 
-FIND_PACKAGE_HANDLE_STANDARD_ARGS(HDF5 DEFAULT_MSG HDF5_CORE_LIBRARY 
-        HDF5_HL_LIBRARY HDF5_ZLIB_OK HDF5_SZLIB_OK HDF5_INCLUDE_DIR HDF5_SUFFICIENT_VERSION)
-        
+if(NOT HDF5_INCLUDE_DIR)
+    FIND_PACKAGE_HANDLE_STANDARD_ARGS(HDF5 DEFAULT_MSG HDF5_INCLUDE_DIR)
+elseif(NOT HDF5_SUFFICIENT_VERSION)
+    # undo unsuccessful configuration
+    set(HDF5_INCLUDE_DIR "")
+    FIND_PACKAGE_HANDLE_STANDARD_ARGS(HDF5 DEFAULT_MSG HDF5_SUFFICIENT_VERSION)
+else()
+    FIND_PACKAGE_HANDLE_STANDARD_ARGS(HDF5 DEFAULT_MSG HDF5_CORE_LIBRARY
+        HDF5_HL_LIBRARY HDF5_ZLIB_OK HDF5_SZLIB_OK HDF5_INCLUDE_DIR)
+endif()
+
 IF(HDF5_FOUND)
     SET(HDF5_LIBRARIES ${HDF5_CORE_LIBRARY} ${HDF5_HL_LIBRARY} ${HDF5_Z_LIBRARY} ${HDF5_SZ_LIBRARY})
 ELSE()
@@ -84,4 +99,4 @@ ELSE()
     SET(HDF5_Z_LIBRARY    HDF5_Z_LIBRARY-NOTFOUND)
     SET(HDF5_SZ_LIBRARY   HDF5_SZ_LIBRARY-NOTFOUND)
 ENDIF(HDF5_FOUND)
-    
+
diff --git a/config/FindVIGRANUMPY_DEPENDENCIES.cmake b/config/FindVIGRANUMPY_DEPENDENCIES.cmake
index cd03011..cfaafd0 100644
--- a/config/FindVIGRANUMPY_DEPENDENCIES.cmake
+++ b/config/FindVIGRANUMPY_DEPENDENCIES.cmake
@@ -2,65 +2,133 @@
 #
 MESSAGE(STATUS "Checking VIGRANUMPY_DEPENDENCIES")
 
-FIND_PACKAGE(PythonInterp 2)
-
-IF(PYTHONINTERP_FOUND)
-    # check that Python version 2.x is used
-    execute_process(COMMAND ${PYTHON_EXECUTABLE} -c
-                         "import sys; print(sys.version[0])"
-                          OUTPUT_VARIABLE PYTHON_MAJOR_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
-    IF(${PYTHON_MAJOR_VERSION} EQUAL 2)
-        SET(PYTHONINTERP_V2_FOUND 1)
-    ELSE()
-        MESSAGE(STATUS "vigranumpy currently requires Python 2.x.")
-        MESSAGE(STATUS "Make sure that Python 2 is in your PATH or use 'cmake_gui' to set the PYTHON_EXECUTABLE variable manually.")
-        SET(PYTHONINTERP_V2_FOUND 0)
-    ENDIF()
-ELSE()
-    SET(PYTHONINTERP_V2_FOUND 0)
+IF(NOT PYTHONINTERP_FOUND)
+    FIND_PACKAGE(PythonInterp ${PYTHON_VERSION})
 ENDIF()
 
-IF(PYTHONINTERP_V2_FOUND)
+IF(PYTHONINTERP_FOUND)
 
-#    this command cannot be used because its results are often inconsistent
-#    with the Python interpreter found previously (e.g. libraries or includes
-#    from incompatible installations)
-#    FIND_PACKAGE(PythonLibs)
+    # Note:
+    #  'FIND_PACKAGE(PythonLibs)' is unreliable because results are often inconsistent
+    #  with the Python interpreter found previously (e.g. libraries or includes
+    #  from incompatible installations). Thus, we ask Python itself for the information.
+    #
 
-    # find Python library
+    ######################################################################
+    #
+    #      find Python prefix
+    #
+    ######################################################################
     execute_process(COMMAND ${PYTHON_EXECUTABLE} -c
-                     "import sys; print sys.exec_prefix"
+                     "import sys; print(sys.exec_prefix)"
                       OUTPUT_VARIABLE PYTHON_PREFIX OUTPUT_STRIP_TRAILING_WHITESPACE)
+    execute_process(COMMAND ${PYTHON_EXECUTABLE} -c
+                     "import sys; print(sys.version.split()[0])"
+                      OUTPUT_VARIABLE PYTHON_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
+    execute_process(COMMAND ${PYTHON_EXECUTABLE} -c
+                     "import sys; print(sys.version_info[0])"
+                      OUTPUT_VARIABLE PYTHON_VERSION_MAJOR OUTPUT_STRIP_TRAILING_WHITESPACE)
+    execute_process(COMMAND ${PYTHON_EXECUTABLE} -c
+                     "import sys; print(sys.version_info[1])"
+                      OUTPUT_VARIABLE PYTHON_VERSION_MINOR OUTPUT_STRIP_TRAILING_WHITESPACE)
 
-    IF(APPLE AND ${PYTHON_PREFIX} MATCHES ".*framework.*")
-        SET(PYTHON_LIBRARIES "${PYTHON_PREFIX}/Python"
-            CACHE FILEPATH "Python libraries"
-            FORCE)
-    ELSE()
-        execute_process(COMMAND ${PYTHON_EXECUTABLE} -c
-                         "import sys; skip = 2 if sys.platform.startswith('win') else 1; print 'python' + sys.version[0:3:skip]"
-                          OUTPUT_VARIABLE PYTHON_LIBRARY_NAME OUTPUT_STRIP_TRAILING_WHITESPACE)
-        FIND_LIBRARY(PYTHON_LIBRARIES ${PYTHON_LIBRARY_NAME} HINTS "${PYTHON_PREFIX}" 
-                     PATH_SUFFIXES lib lib64 libs DOC "Python libraries")
-    ENDIF()
+    MESSAGE(STATUS "Using Python ${PYTHON_VERSION} at ${PYTHON_EXECUTABLE}")
 
-    # find Python includes
+    ######################################################################
+    #
+    #      find Python includes
+    #
+    ######################################################################
     execute_process(COMMAND ${PYTHON_EXECUTABLE} -c
-                    "from distutils.sysconfig import *; print get_python_inc()"
+                    "from distutils.sysconfig import *; print(get_python_inc())"
                     OUTPUT_VARIABLE PYTHON_INCLUDE OUTPUT_STRIP_TRAILING_WHITESPACE)
     SET(PYTHON_INCLUDE_PATH ${PYTHON_INCLUDE}
         CACHE PATH "Path to Python include files"
         FORCE)
 
-    IF(PYTHON_LIBRARIES AND PYTHON_INCLUDE_PATH)
-        MESSAGE(STATUS "Found Python libraries: ${PYTHON_LIBRARIES}")
+    IF(PYTHON_INCLUDE_PATH)
         MESSAGE(STATUS "Found Python includes:  ${PYTHON_INCLUDE_PATH}")
-        SET(PYTHONLIBS_FOUND TRUE)
     ELSE()
-        MESSAGE(STATUS "Could NOT find Python libraries and/or includes")
+        MESSAGE(STATUS "Could NOT find Python includes")
+    ENDIF()
+
+    ######################################################################
+    #
+    #      find Python library
+    #
+    ######################################################################
+    IF(APPLE AND ${PYTHON_PREFIX} MATCHES ".*framework.*")
+        SET(PYTHON_LIBRARIES "${PYTHON_PREFIX}/Python"
+            CACHE FILEPATH "Python libraries"
+            FORCE)
+    ELSE()
+        IF(WIN32)
+            set(PYTHON_LIBRARY_NAME python${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR})
+        ELSE()
+            execute_process(COMMAND ${PYTHON_EXECUTABLE} -c
+                             "from distutils.sysconfig import *; print(get_config_var('LDLIBRARY'))"
+                              OUTPUT_VARIABLE PYTHON_LIBRARY_NAME OUTPUT_STRIP_TRAILING_WHITESPACE)
+            execute_process(COMMAND ${PYTHON_EXECUTABLE} -c
+                             "from distutils.sysconfig import *; print(get_config_var('LIBDIR'))"
+        			           OUTPUT_VARIABLE PYTHON_LIBRARY_PREFIX OUTPUT_STRIP_TRAILING_WHITESPACE)
+        ENDIF()
+        FIND_LIBRARY(PYTHON_LIBRARIES ${PYTHON_LIBRARY_NAME} HINTS "${PYTHON_LIBRARY_PREFIX}" "${PYTHON_PREFIX}"
+                     PATH_SUFFIXES lib lib64 libs DOC "Python libraries")
+        unset(PYTHON_LIBRARY_PREFIX)
+    ENDIF()
+
+    IF(PYTHON_LIBRARIES)
+        MESSAGE(STATUS "Found Python library: ${PYTHON_LIBRARIES}")
+    ELSE()
+        MESSAGE(STATUS "Could NOT find Python library")
+    ENDIF()
+
+    ######################################################################
+    #
+    #      find boost::python library
+    #
+    ######################################################################
+    # 'FIND_PACKAGE(Boost COMPONENTS python)' is unreliable because it often selects
+    # boost_python for the wrong Python version
+    IF(Boost_FOUND)
+        IF(Boost_USE_MULTITHREADED)
+            # define names for thread-safe library variants
+            SET(BOOST_PYTHON_NAMES
+                    boost_python-py${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR}-mt
+                    boost_python-${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}-mt
+                    boost_python${PYTHON_VERSION_MAJOR}-mt
+                    boost_python-mt)
+        ENDIF()
+        IF(Boost_LIB_SUFFIX)
+            SET(BOOST_PYTHON_NAMES ${BOOST_PYTHON_NAMES}
+                # Windows with mangled library names
+                boost_python${PYTHON_VERSION_MAJOR}${Boost_LIB_SUFFIX}
+                boost_python${Boost_LIB_SUFFIX})
+        ENDIF()
+
+        # define names for boost_python library variants
+        # (may or may not be thread-safe)
+        SET(BOOST_PYTHON_NAMES ${BOOST_PYTHON_NAMES}
+                # Linux with multiple Python versions
+                boost_python-py${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR}
+                # Gentoo
+                boost_python-${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}
+                # Mac with Python 3
+                boost_python${PYTHON_VERSION_MAJOR}
+                # default
+                boost_python)
+
+        FIND_LIBRARY(Boost_PYTHON_LIBRARY
+                     NAMES ${BOOST_PYTHON_NAMES}
+                     HINTS "${Boost_LIBRARY_DIR}"
+                     DOC "boost_python libraries")
     ENDIF()
 
-    VIGRA_FIND_PACKAGE( Boost 1.40.0 COMPONENTS python )
+    if(Boost_PYTHON_LIBRARY)
+        MESSAGE(STATUS "Found boost_python library: ${Boost_PYTHON_LIBRARY}")
+    else()
+        MESSAGE(STATUS "Could NOT find boost_python library")
+    endif()
 
     ######################################################################
     #
@@ -70,7 +138,7 @@ IF(PYTHONINTERP_V2_FOUND)
     ######################################################################
     IF(NOT DEFINED VIGRANUMPY_INSTALL_DIR OR VIGRANUMPY_INSTALL_DIR MATCHES "^$")
         execute_process(COMMAND ${PYTHON_EXECUTABLE} -c
-                         "from distutils.sysconfig import *; print get_python_lib(1)"
+                         "from distutils.sysconfig import *; print(get_python_lib(1))"
                           OUTPUT_VARIABLE PYTHON_SITE_PACKAGES OUTPUT_STRIP_TRAILING_WHITESPACE)
         FILE(TO_CMAKE_PATH ${PYTHON_SITE_PACKAGES} VIGRANUMPY_INSTALL_DIR)
     ENDIF()
@@ -143,7 +211,7 @@ IF(PYTHONINTERP_V2_FOUND)
     #
     ######################################################################
     execute_process(COMMAND ${PYTHON_EXECUTABLE} -c
-                     "import sys; p = sys.platform; print 'windows' if p.startswith('win') else p"
+                     "import sys; p = sys.platform; print('windows') if p.startswith('win') else p"
                       OUTPUT_VARIABLE PYTHON_PLATFORM OUTPUT_STRIP_TRAILING_WHITESPACE)
 
     ######################################################################
@@ -153,8 +221,8 @@ IF(PYTHONINTERP_V2_FOUND)
     ######################################################################
     INCLUDE(FindPackageHandleStandardArgs)
     FIND_PACKAGE_HANDLE_STANDARD_ARGS(VIGRANUMPY_DEPENDENCIES DEFAULT_MSG
-                         PYTHONINTERP_V2_FOUND PYTHONLIBS_FOUND
-                         Boost_PYTHON_FOUND PYTHON_NUMPY_INCLUDE_DIR VIGRANUMPY_INSTALL_DIR)
+                         PYTHONINTERP_FOUND PYTHON_INCLUDE_PATH PYTHON_LIBRARIES
+                         Boost_PYTHON_LIBRARY PYTHON_NUMPY_INCLUDE_DIR VIGRANUMPY_INSTALL_DIR)
 
     IF(NOT VIGRANUMPY_INCLUDE_DIRS OR VIGRANUMPY_INCLUDE_DIRS MATCHES "-NOTFOUND")
         #note that the numpy include dir is set _before_ the python include dir, such that
@@ -165,10 +233,14 @@ IF(PYTHONINTERP_V2_FOUND)
     SET(VIGRANUMPY_INCLUDE_DIRS ${VIGRANUMPY_INCLUDE_DIRS}
         CACHE PATH "include directories needed by VIGRA Python bindings"
         FORCE)
-    IF(NOT VIGRANUMPY_LIBRARIES OR VIGRANUMPY_LIBRARIES MATCHES "-NOTFOUND")
-        SET(VIGRANUMPY_LIBRARIES ${PYTHON_LIBRARIES} ${Boost_PYTHON_LIBRARY})
-    ENDIF()
-    SET(VIGRANUMPY_LIBRARIES ${VIGRANUMPY_LIBRARIES}
-        CACHE FILEPATH "libraries needed by VIGRA Python bindings"
-        FORCE)
+    SET(VIGRANUMPY_LIBRARIES ${PYTHON_LIBRARIES} ${Boost_PYTHON_LIBRARY})
+
+    if(TEST_VIGRANUMPY AND NOT VIGRANUMPY_DEPENDENCIES_FOUND)
+        MESSAGE(FATAL_ERROR "  vigranumpy dependencies NOT found while TEST_VIGRANUMPY=1")
+    endif()
+    if(TEST_VIGRANUMPY AND PYTHON_NOSETESTS_NOT_FOUND)
+        MESSAGE(FATAL_ERROR "  nosetests NOT found while TEST_VIGRANUMPY=1")
+    endif()
+ELSE()
+    MESSAGE(STATUS "Python not found. Make sure that Python is in your PATH or use 'cmake-gui' to set the PYTHON_EXECUTABLE variable manually.")
 ENDIF()
diff --git a/config/VigraDetectThreading.cmake b/config/VigraDetectThreading.cmake
index 87a7ff3..d1e3b50 100644
--- a/config/VigraDetectThreading.cmake
+++ b/config/VigraDetectThreading.cmake
@@ -23,16 +23,22 @@ else()
     set(ORIG_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
 
     if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.7.0")
-        SET(CXX_THREADING_FLAGS "-pthread -std=c++0x")
-    elseif(CMAKE_COMPILER_IS_GNUCXX)
-        SET(CXX_THREADING_FLAGS "-pthread -std=c++11")
+        SET(CXX_THREADING_FLAG  "-pthread")
+        SET(CXX_VERSION_FLAG    "-std=c++0x")
+    elseif(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANGXX)
+        SET(CXX_THREADING_FLAG "-pthread")
+        SET(CXX_VERSION_FLAG    "-std=c++11")
     elseif(NOT MSVC)
-        SET(CXX_THREADING_FLAGS "-std=c++11")
+        SET(CXX_THREADING_FLAG "")
+        SET(CXX_VERSION_FLAG "-std=c++11")
     endif()
 
     # add the threading flags if they are not already there
-    if(CXX_THREADING_FLAGS AND NOT ("${CMAKE_CXX_FLAGS}" MATCHES "pthread"))
-         SET(CMAKE_CXX_FLAGS "${CXX_THREADING_FLAGS} ${CMAKE_CXX_FLAGS}")
+    if(CXX_THREADING_FLAG AND NOT (CMAKE_CXX_FLAGS MATCHES "-pthread"))
+        SET(CMAKE_CXX_FLAGS "${CXX_THREADING_FLAG} ${CMAKE_CXX_FLAGS}")
+    endif()
+    if(CXX_VERSION_FLAG AND NOT (CMAKE_CXX_FLAGS MATCHES "-std=c\\+\\+"))
+        SET(CMAKE_CXX_FLAGS "${CXX_VERSION_FLAG} ${CMAKE_CXX_FLAGS}")
     endif()
     TRY_COMPILE(STD_THREADING_FOUND
         ${CMAKE_BINARY_DIR} ${PROJECT_SOURCE_DIR}/config/check_std_thread.cxx
@@ -40,8 +46,8 @@ else()
 
     if(STD_THREADING_FOUND)
         MESSAGE(STATUS "Checking for threading support:   std::thread")
-        if(CXX_THREADING_FLAGS)
-            MESSAGE(STATUS "    (added compiler flags: ${CXX_THREADING_FLAGS})")
+        if(CXX_THREADING_FLAG OR CXX_VERSION_FLAG)
+            MESSAGE(STATUS "    (added compiler flags: ${CXX_THREADING_FLAG} ${CXX_VERSION_FLAG})")
         endif()
         set_property(GLOBAL PROPERTY THREADING_IMPLEMENTATION "std")
     else()
diff --git a/config/VigraSetDefaults.cmake b/config/VigraSetDefaults.cmake
index 3457126..7b788f9 100644
--- a/config/VigraSetDefaults.cmake
+++ b/config/VigraSetDefaults.cmake
@@ -34,62 +34,68 @@ ENDIF()
 SET(WITH_HDF5 ${WITH_HDF5}
     CACHE BOOL "Build HDF5 import/export ?"
     FORCE)
-    
+
 OPTION(WITH_OPENEXR "Support for the OpenEXR graphics format" OFF)
 OPTION(WITH_LEMON "Support for the Lemon Graph library " OFF)
 OPTION(WITH_BOOST_GRAPH "Support for the BOOST Graph library " OFF)
 
 OPTION(WITH_BOOST_THREAD "Use boost::thread instead of std::thread" OFF)
 
-IF(NOT DEFINED WITH_VIGRANUMPY)
+OPTION(TEST_VIGRANUMPY "Consider lack of vigranumpy or failed vigranumpy test an error?" OFF)
+
+IF(TEST_VIGRANUMPY OR NOT DEFINED WITH_VIGRANUMPY)
     SET(WITH_VIGRANUMPY "ON")
 ENDIF()
 SET(WITH_VIGRANUMPY ${WITH_VIGRANUMPY}
     CACHE BOOL "Build VIGRA Python bindings ?"
     FORCE)
-    
+
 IF(NOT DEFINED WITH_VALGRIND)
     SET(WITH_VALGRIND "OFF")
 ENDIF()
 SET(WITH_VALGRIND ${WITH_VALGRIND}
     CACHE BOOL "Perform valgrind memory testing upon 'make ctest' ?"
     FORCE)
-    
+
 IF(NOT DEFINED LIBDIR_SUFFIX)
     SET(LIBDIR_SUFFIX "")
 ENDIF()
 SET(LIBDIR_SUFFIX ${LIBDIR_SUFFIX}
-    CACHE STRING "Define suffix of lib directory name (empty string or 32 or 64)." 
+    CACHE STRING "Define suffix of lib directory name (empty string or 32 or 64)."
     FORCE)
-    
+
 IF(NOT DEFINED DEPENDENCY_SEARCH_PREFIX)
     SET(DEPENDENCY_SEARCH_PREFIX "")
-ENDIF()    
+ENDIF()
 SET(DEPENDENCY_SEARCH_PREFIX ${DEPENDENCY_SEARCH_PREFIX}
     CACHE PATH "Additional search prefixes (used by Find... macros)."
     FORCE)
 
 IF(NOT DEFINED AUTOEXEC_TESTS)
     SET(AUTOEXEC_TESTS "ON")
-ENDIF()    
+ENDIF()
 SET(AUTOEXEC_TESTS ${AUTOEXEC_TESTS}
     CACHE BOOL "Automatically execute each test after compilation ?"
     FORCE)
 
 IF(NOT DEFINED AUTOBUILD_TESTS)
     SET(AUTOBUILD_TESTS "OFF")
-ENDIF()    
+ENDIF()
 SET(AUTOBUILD_TESTS ${AUTOBUILD_TESTS}
     CACHE BOOL "Compile tests as part of target 'all' (resp. 'ALL_BUILD') ?"
     FORCE)
 
 IF(NOT DEFINED VIGRA_STATIC_LIB)
     SET(VIGRA_STATIC_LIB "OFF")
-ENDIF()    
+ENDIF()
 SET(VIGRA_STATIC_LIB ${VIGRA_STATIC_LIB}
     CACHE BOOL "Whether to build vigra as a static library ?"
     FORCE)
 
+if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+    SET(CMAKE_COMPILER_IS_CLANGXX 1)
+endif()
+
 # This is only executed once on the first cmake run.
 IF(NOT VIGRA_DEFAULTS_INIT)
     SET(VIGRA_DEFAULTS_INIT TRUE CACHE INTERNAL "initial flags set")
@@ -99,16 +105,16 @@ IF(NOT VIGRA_DEFAULTS_INIT)
             CACHE STRING "Choose the type of build, options are None Release Debug RelWithDebInfo MinSizeRel."
             FORCE)
     ENDIF ()
-    
+
     IF(NOT DEFINED VALGRIND_SUPPRESSION_FILE)
         SET(VALGRIND_SUPPRESSION_FILE ""
             CACHE FILEPATH "File containing valgrind error suppression rules."
             FORCE)
     ENDIF()
-    
+
     # initial compiler flags can be set here, this is only
     # executed once in the first configure run.
-    IF(CMAKE_COMPILER_IS_GNUCXX)
+    IF(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANGXX)
         IF(NOT CMAKE_CXX_FLAGS)
             if(NOT MINGW AND NOT MACOSX)
                 SET(CMAKE_CXX_FLAGS "-W -Wall -Wextra -Wno-unused-parameter -Wno-sign-compare -Wno-unused-variable -Wno-type-limits")
@@ -119,7 +125,7 @@ IF(NOT VIGRA_DEFAULTS_INIT)
         IF(NOT CMAKE_C_FLAGS)
             SET(CMAKE_C_FLAGS "-W -Wall -Wextra -pedantic -std=c99 -Wno-sign-compare")
         ENDIF()
-    ENDIF(CMAKE_COMPILER_IS_GNUCXX)
+    ENDIF()
 
     SET(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS} CACHE STRING
         "Flags used by the compiler during all build types."
diff --git a/config/checkHDF5version.c b/config/checkHDF5version.c
index 7906980..a9a7b3b 100644
--- a/config/checkHDF5version.c
+++ b/config/checkHDF5version.c
@@ -1,8 +1,10 @@
 #include <hdf5.h>
 
-#if (H5_VERS_MAJOR < MIN_MAJOR) || \
-   ((H5_VERS_MAJOR == MIN_MAJOR) && (H5_VERS_MINOR < MIN_MINOR))
-#error "insufficient HDF5 version"
+#if defined(CHECK_VERSION)
+# if (H5_VERS_MAJOR < MIN_MAJOR) || \
+     ((H5_VERS_MAJOR == MIN_MAJOR) && (H5_VERS_MINOR < MIN_MINOR))
+#  error "insufficient HDF5 version"
+# endif
 #endif
 
 int main()
diff --git a/config/vigra-config.in b/config/vigra-config.in
index b403f0d..9ff75c2 100644
--- a/config/vigra-config.in
+++ b/config/vigra-config.in
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import division, print_function
+
 from optparse import OptionParser
 import sys
 
@@ -58,10 +60,10 @@ def filename2ldflags(libFilename):
     return result
 
 if op.version:
-    print "@vigra_version@"
+    print("@vigra_version@")
 
 if op.cppflags: # was: --cppflags|--cxxincludes|--cxxflags|--cincludes|--cflags
-    print '-I@CMAKE_INSTALL_PREFIX@/include'
+    print('-I@CMAKE_INSTALL_PREFIX@/include')
 
 if op.impex_lib: # was: --impex_lib|--impex-lib|--libs
     ldflags = []
@@ -79,16 +81,16 @@ if op.impex_lib: # was: --impex_lib|--impex-lib|--libs
             if fl not in ldflags:
                 ldflags.append(fl)
 
-    print " ".join(ldflags)
+    print(" ".join(ldflags))
 
 if op.fftw_lib:
     if not hasFFTW:
         sys.stderr.write("VIGRA was configured without FFTW switches, libpath unknown!\n")
         sys.exit(1)
-    print " ".join(filename2ldflags('@FFTW3_LIBRARY@'))
+    print(" ".join(filename2ldflags('@FFTW3_LIBRARY@')))
 
 if op.include_path: # was: --include_path|--include-path|--includepath
-    print '@CMAKE_INSTALL_PREFIX@/include'
+    print('@CMAKE_INSTALL_PREFIX@/include')
 
 if op.docdir:
-    print '@DOCDIR@'
+    print('@DOCDIR@')
diff --git a/docsrc/makeFunctionIndex.py b/docsrc/makeFunctionIndex.py
index 3b1e784..787c548 100644
--- a/docsrc/makeFunctionIndex.py
+++ b/docsrc/makeFunctionIndex.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import division, print_function
 
 import re
 import glob
diff --git a/docsrc/tutorial.dxx b/docsrc/tutorial.dxx
index fe43cf9..922df1e 100644
--- a/docsrc/tutorial.dxx
+++ b/docsrc/tutorial.dxx
@@ -126,7 +126,7 @@
     MultiArray<2, RGBValue<UInt8> > rgb_image(Shape2(256, 128));
     \endcode
 
-    vigra::RGBValue<ValueType> is a specialised 3-dimensional vector containing ValueType elements. Arbitrary short vectors can be stored in the <tt>TinyVector<ValueType, SIZE></tt> class, which is the base class of RGBValue. It's length must be specified at compile time in the template parameter <tt>SIZE</tt>. Vectors whose length is known at compile time are very useful for the compiler to produce highly optimized code. Therefore, <tt>Shape2</tt> and it's higher-dimensional coursins ar [...]
+    vigra::RGBValue<ValueType> is a specialised 3-dimensional vector containing ValueType elements. Arbitrary short vectors can be stored in the <tt>TinyVector<ValueType, SIZE></tt> class, which is the base class of RGBValue. It's length must be specified at compile time in the template parameter <tt>SIZE</tt>. Vectors whose length is known at compile time are very useful for the compiler to produce highly optimized code. Therefore, <tt>Shape2</tt> and it's higher-dimensional cousins are [...]
 
     Alternatively you can use a 3-dimensional array <tt>vigra::MultiArray<3, unsigned
     char></tt> to represent a color image. The third dimension has size 3 and contains the
@@ -951,22 +951,22 @@
     
     \code
     >>> a = vigra.ScalarImage((30, 20))
-    >>> print "%s \n %r" % (a.shape, a.axistags)
+    >>> print("%s \n %r" % (a.shape, a.axistags))
     (30L, 20L)
      x y
 
     >>> a = vigra.RGBImage((30, 20))
-    >>> print "%s \n %r" % (a.shape, a.axistags)
+    >>> print("%s \n %r" % (a.shape, a.axistags))
     (30L, 20L, 3L)
      x y c
 
     >>> a = vigra.ScalarVolume((30, 20, 10))
-    >>> print "%s \n %r" % (a.shape, a.axistags)
+    >>> print("%s \n %r" % (a.shape, a.axistags))
     (30L, 20L, 10L)
      x y z
 
     >>> a = vigra.RGBVolume((30, 20, 10))
-    >>> print "%s \n %r" % (a.shape, a.axistags)
+    >>> print("%s \n %r" % (a.shape, a.axistags))
     (30L, 20L, 10L, 3L)
      x y z c
     \endcode
diff --git a/include/vigra/affine_registration_fft.hxx b/include/vigra/affine_registration_fft.hxx
index 64f16d6..411e8bb 100644
--- a/include/vigra/affine_registration_fft.hxx
+++ b/include/vigra/affine_registration_fft.hxx
@@ -508,7 +508,6 @@ void estimateGlobalTranslation(SrcIterator    s_ul, SrcIterator  s_lr, SrcAccess
                                Diff2D border = Diff2D(0,0))
 {
     typename SrcIterator::difference_type s_shape = s_lr - s_ul;
-    typename DestIterator::difference_type d_shape = d_lr - d_ul;
 
     //determine matrix by using 5 quater-matches and a maximum likelihood decision:
     Diff2D q_shape = (s_shape - border - border)/2;
@@ -684,7 +683,6 @@ estimateGlobalRotationTranslation(SrcIterator s_ul, SrcIterator s_lr, SrcAccesso
                                   double & translation_correlation,
                                   Diff2D border = Diff2D(0,0))
 {
-    typename SrcIterator::difference_type s_shape = s_lr - s_ul;
     typename DestIterator::difference_type d_shape = d_lr - d_ul;
 
     //First step: Estimate rotation from img2 -> img1.
@@ -702,8 +700,8 @@ estimateGlobalRotationTranslation(SrcIterator s_ul, SrcIterator s_lr, SrcAccesso
     //Third step: find rotation between temp image (of step 2) and dest:
     Matrix<double> translation_matrix;
     estimateGlobalTranslation(srcImageRange(tmp),
-                                 srcIterRange(d_ul, d_lr, d_acc),
-                                 translation_matrix,
+                              srcIterRange(d_ul, d_lr, d_acc),
+                              translation_matrix,
                               translation_correlation,
                               border);
 
diff --git a/include/vigra/config.hxx b/include/vigra/config.hxx
index 4362f29..8854abd 100644
--- a/include/vigra/config.hxx
+++ b/include/vigra/config.hxx
@@ -106,21 +106,25 @@
     #if _MSC_VER < 1400
         #define VIGRA_NO_WORKING_STRINGSTREAM
     #endif
-    
+
     #if _MSC_VER < 1600
         #define VIGRA_NO_UNIQUE_PTR
     #endif
-    
+
+    #if _MSC_VER < 1800
+        #define VIGRA_NO_VARIADIC_TEMPLATES
+    #endif
+
     #define VIGRA_NEED_BIN_STREAMS
-    
+
     #define VIGRA_NO_THREADSAFE_STATIC_INIT  // at least up to _MSC_VER <= 1600, probably higher
-    
-    // usage: 
+
+    // usage:
     //   static int * p = VIGRA_SAFE_STATIC(p, new int(42));
     //
     #define VIGRA_SAFE_STATIC(p, v) \
     0; while(p == 0) ::vigra::detail::safeStaticInit(&p, v)
-    
+
     namespace vigra { namespace detail {
     template <class T>
     inline void safeStaticInit(T ** p, T * v)
@@ -129,7 +133,7 @@
             delete v;
     }
     }} // namespace vigra::detail
-    
+
     #ifndef VIGRA_ENABLE_ANNOYING_WARNINGS
         #pragma warning ( disable: 4244 4267) // implicit integer conversion warnings
     #endif
@@ -157,10 +161,10 @@
         #define VIGRA_NO_WORKING_STRINGSTREAM
     #endif
     #define HAS_HASH_CONTAINERS
-    
+
     // these warnings produce too many false positives to be useful
-    #pragma GCC diagnostic ignored "-Wshadow"  
-    
+    #pragma GCC diagnostic ignored "-Wshadow"
+
     #if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L
         #if defined(__APPLE__)
             #define VIGRA_NO_UNIQUE_PTR
@@ -304,8 +308,8 @@
 #  define VIGRA_SHARED_PTR  std::shared_ptr
 #endif
 
-#ifndef VIGRA_NO_THREADSAFE_STATIC_INIT    
-    // usage: 
+#ifndef VIGRA_NO_THREADSAFE_STATIC_INIT
+    // usage:
     //   static int * p = VIGRA_SAFE_STATIC(p, new int(42));
     //
     #define VIGRA_SAFE_STATIC(p, v) v
diff --git a/include/vigra/config_version.hxx b/include/vigra/config_version.hxx
index 59d13ab..181e23e 100644
--- a/include/vigra/config_version.hxx
+++ b/include/vigra/config_version.hxx
@@ -38,8 +38,8 @@
 #define VIGRA_CONFIG_VERSION_HXX
 
     #define VIGRA_VERSION_MAJOR 1
-    #define VIGRA_VERSION_MINOR 10
+    #define VIGRA_VERSION_MINOR 11
     #define VIGRA_VERSION_PATCH 0
-    #define VIGRA_VERSION "1.10.0"
+    #define VIGRA_VERSION "1.11.0"
 
 #endif /* VIGRA_CONFIG_VERSION_HXX */
diff --git a/include/vigra/hdf5impex.hxx b/include/vigra/hdf5impex.hxx
index 6a0e8d0..a0647ff 100644
--- a/include/vigra/hdf5impex.hxx
+++ b/include/vigra/hdf5impex.hxx
@@ -107,21 +107,21 @@ inline bool isHDF5(char const * filename)
 }
 
     /** \brief Temporarily disable HDF5's native error output.
-    
+
         This should be used when you want to call an HDF5 function
         that is known to fail (e.g. during testing), or when you want
         to use an alternative error reporting mechanism (e.g. exceptions).
-        
+
         <b>Usage:</b>
-        
+
         <b>\#include</b> \<vigra/hdf5impex.hxx\><br>
         Namespace: vigra
         \code
         {
             HDF5DisableErrorOutput hdf5DisableErrorOutput;
-        
+
             ... // call some HDF5 function
-            
+
         } // restore the original error reporting in the destructor of HDF5DisableErrorOutput
         \endcode
     */
@@ -129,11 +129,11 @@ class HDF5DisableErrorOutput
 {
     H5E_auto2_t old_func_;
     void *old_client_data_;
-    
+
     HDF5DisableErrorOutput(HDF5DisableErrorOutput const &);
     HDF5DisableErrorOutput & operator=(HDF5DisableErrorOutput const &);
 
-  public:    
+  public:
     HDF5DisableErrorOutput()
     : old_func_(0)
     , old_client_data_(0)
@@ -150,36 +150,36 @@ class HDF5DisableErrorOutput
 
     /** \brief Wrapper for unique hid_t objects.
 
-    This class offers the functionality of <tt>std::unique_ptr</tt> for HDF5 handles 
-    (type <tt>hid_t</tt>). Unfortunately, <tt>std::unique_ptr</tt> cannot be used directly 
+    This class offers the functionality of <tt>std::unique_ptr</tt> for HDF5 handles
+    (type <tt>hid_t</tt>). Unfortunately, <tt>std::unique_ptr</tt> cannot be used directly
     for this purpose because it only works with pointers, whereas <tt>hid_t</tt> is an integer type.
-    
+
     Newly created or opened HDF5 handles are stored as objects of type <tt>hid_t</tt>. When the handle
-    is no longer needed, the appropriate close function must be called. However, if a function is 
-    aborted by an exception, this is difficult to ensure. Class HDF5Handle is a smart pointer that 
-    solves this problem by calling the close function in the destructor (This is analogous to how 
-    <tt>std::unique_ptr</tt> calls 'delete' on the contained pointer). A pointer to the close function 
-    must be passed to the constructor, along with an error message that is raised when 
-    creation/opening fails. 
-    
-    When a <tt>HDF5Handle</tt> is created or assigned from another one, ownership passes on to the  
+    is no longer needed, the appropriate close function must be called. However, if a function is
+    aborted by an exception, this is difficult to ensure. Class HDF5Handle is a smart pointer that
+    solves this problem by calling the close function in the destructor (This is analogous to how
+    <tt>std::unique_ptr</tt> calls 'delete' on the contained pointer). A pointer to the close function
+    must be passed to the constructor, along with an error message that is raised when
+    creation/opening fails.
+
+    When a <tt>HDF5Handle</tt> is created or assigned from another one, ownership passes on to the
     left-hand-side handle object, and the right-hand-side objects is resest to a NULL handle.
-    
-    Since <tt>HDF5Handle</tt> objects are convertible to <tt>hid_t</tt>, they can be used in the code 
+
+    Since <tt>HDF5Handle</tt> objects are convertible to <tt>hid_t</tt>, they can be used in the code
     in place of the latter.
 
     <b>Usage:</b>
 
     \code
-    HDF5Handle file_id(H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT), 
-                       &H5Fclose, 
+    HDF5Handle file_id(H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT),
+                       &H5Fclose,
                        "Error message when H5Fopen() fails.");
-                       
+
     ... // use file_id in the same way as a plain hid_t object
-    
+
     // pass ownership to a new handle object
     HDF5Handle new_handle(file_id);
-    
+
     assert(file_id.get() == 0);
     \endcode
 
@@ -190,11 +190,11 @@ class HDF5Handle
 {
 public:
     typedef herr_t (*Destructor)(hid_t);
-    
+
 private:
     hid_t handle_;
     Destructor destructor_;
-    
+
 public:
 
         /** \brief Default constructor.
@@ -208,7 +208,7 @@ public:
         /** \brief Create a wrapper for a hid_t object.
 
         The hid_t object \a h is assumed to be the return value of an open or create function.
-        It will be closed with the given close function \a destructor as soon as this 
+        It will be closed with the given close function \a destructor as soon as this
         HDF5Handle is destructed, except when \a destructor is a NULL pointer (in which
         case nothing happens at destruction time). If \a h has a value that indicates
         failed opening or creation (by HDF5 convention, this means that \a h is negative),
@@ -217,10 +217,10 @@ public:
         <b>Usage:</b>
 
         \code
-        HDF5Handle file_id(H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT), 
-                           &H5Fclose, 
+        HDF5Handle file_id(H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT),
+                           &H5Fclose,
                            "Error message.");
-                           
+
         ... // use file_id in the same way
         \endcode
         */
@@ -233,7 +233,7 @@ public:
     }
 
         /** \brief Copy constructor.
-        
+
             Hands over ownership of the RHS handle (analogous to <tt>std::unique_pt</tt>).
         */
     HDF5Handle(HDF5Handle const & h)
@@ -242,9 +242,9 @@ public:
     {
         const_cast<HDF5Handle &>(h).handle_ = 0;
     }
-    
+
         /** \brief Assignment.
-            Calls close() for the LHS handle and hands over ownership of the 
+            Calls close() for the LHS handle and hands over ownership of the
             RHS handle (analogous to <tt>std::unique_pt</tt>).
         */
     HDF5Handle & operator=(HDF5Handle const & h)
@@ -266,7 +266,7 @@ public:
     {
         close();
     }
-    
+
         /** \brief Explicitly call the stored destructor (if one has been registered in the
              constructor) for the contained handle and set the wrapper to NULL. Returns
              a negative value when the destructor call for the handle fails, and
@@ -281,7 +281,7 @@ public:
         destructor_ = 0;
         return res;
     }
-    
+
         /** \brief Return the contained handle and set the wrapper to NULL
             without calling <tt>close()</tt>.
         */
@@ -292,9 +292,9 @@ public:
         destructor_ = 0;
         return res;
     }
-    
+
         /** \brief Reset the wrapper to a new handle.
-        
+
              Equivalent to <tt>handle = HDF5Handle(h, destructor, error_message)</tt>.
         */
     void reset(hid_t h, Destructor destructor, const char * error_message)
@@ -308,11 +308,11 @@ public:
             destructor_ = destructor;
         }
     }
-    
+
         /** \brief Swap the contents of two handle wrappers.
-        
+
             Also available as <tt>std::swap(handle1, handle2)</tt>.
-        */    
+        */
     void swap(HDF5Handle & h)
     {
         std::swap(handle_, h.handle_);
@@ -330,8 +330,8 @@ public:
 
         /** \brief Convert to a plain hid_t object.
 
-        This function ensures that hid_t objects can be transparently replaced with 
-        HDF5Handle objects in user code. Do not call a close function on the return 
+        This function ensures that hid_t objects can be transparently replaced with
+        HDF5Handle objects in user code. Do not call a close function on the return
         value - a crash will be likely otherwise.
         */
     operator hid_t() const
@@ -371,33 +371,33 @@ public:
 
     /** \brief Wrapper for shared hid_t objects.
 
-    This class offers the functionality of <tt>std::shared_ptr</tt> for HDF5 handles 
-    (type <tt>hid_t</tt>). Unfortunately, <tt>std::shared_ptr</tt> cannot be used directly 
+    This class offers the functionality of <tt>std::shared_ptr</tt> for HDF5 handles
+    (type <tt>hid_t</tt>). Unfortunately, <tt>std::shared_ptr</tt> cannot be used directly
     for this purpose because it only works with pointers, whereas <tt>hid_t</tt> is an integer type.
-    
+
     Newly created or opened HDF5 handles are stored as objects of type <tt>hid_t</tt>. When the handle
-    is no longer needed, the appropriate close function must be called. However, if a function is 
-    aborted by an exception, this is difficult to ensure. Class HDF5HandleShared is a smart pointer 
-    that solves this problem by calling the close function in the destructor of the handle's last 
+    is no longer needed, the appropriate close function must be called. However, if a function is
+    aborted by an exception, this is difficult to ensure. Class HDF5HandleShared is a smart pointer
+    that solves this problem by calling the close function in the destructor of the handle's last
     owner (This is analogous to how <tt>std::shared_ptr</tt> calls 'delete' on the contained
-    pointer). A pointer to the close function must be passed to the constructor, along with an error 
-    message that is raised when creation/opening fails. 
-    
+    pointer). A pointer to the close function must be passed to the constructor, along with an error
+    message that is raised when creation/opening fails.
+
     When a <tt>HDF5HandleShared</tt> is created or assigned from another one, ownership is shared
     between the two handles, and the value returned by <tt>use_count()</tt> increases by one.
-    
-    Since <tt>HDF5HandleShared</tt> objects are convertible to <tt>hid_t</tt>, they can be used in the code 
+
+    Since <tt>HDF5HandleShared</tt> objects are convertible to <tt>hid_t</tt>, they can be used in the code
     in place of the latter.
 
     <b>Usage:</b>
 
     \code
-    HDF5HandleShared file_id(H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT), 
-                             &H5Fclose, 
+    HDF5HandleShared file_id(H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT),
+                             &H5Fclose,
                              "Error message when H5Fopen() fails.");
-                       
+
     ... // use file_id in the same way as a plain hid_t object
-    
+
     // share ownership between same_id and file_id
     HDF5HandleShared same_id(file_id);
     assert(same_id.use_count() == 2);
@@ -411,12 +411,12 @@ class HDF5HandleShared
 {
 public:
     typedef herr_t (*Destructor)(hid_t);
-    
+
 private:
     hid_t handle_;
     Destructor destructor_;
     size_t * refcount_;
-    
+
 public:
 
         /** \brief Default constructor.
@@ -431,7 +431,7 @@ public:
         /** \brief Create a shared wrapper for a plain hid_t object.
 
         The hid_t object \a h is assumed to be the return value of an open or create function.
-        It will be closed with the given close function \a destructor as soon as this 
+        It will be closed with the given close function \a destructor as soon as this
         HDF5HandleShared is destructed, except when \a destructor is a NULL pointer (in which
         case nothing happens at destruction time). If \a h has a value that indicates
         failed opening or creation (by HDF5 convention, this means that \a h is negative),
@@ -440,10 +440,10 @@ public:
         <b>Usage:</b>
 
         \code
-        HDF5HandleShared file_id(H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT), 
-                                 &H5Fclose, 
+        HDF5HandleShared file_id(H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT),
+                                 &H5Fclose,
                                  "Error message.");
-                           
+
         ... // use file_id in the same way
         \endcode
         */
@@ -469,9 +469,9 @@ public:
         if(refcount_)
             ++(*refcount_);
     }
-    
+
         /** \brief Assignment.
-            Call close() for the present LHS handle and share ownership with the 
+            Call close() for the present LHS handle and share ownership with the
             RHS handle (analogous to <tt>std::shared_ptr</tt>).
         */
     HDF5HandleShared & operator=(HDF5HandleShared const & h)
@@ -494,10 +494,10 @@ public:
     {
         close();
     }
-    
-        /** \brief Close the handle if this is the unique (i.e. last) owner. 
-        
-             Decrements the reference counter and calls the destructor function of 
+
+        /** \brief Close the handle if this is the unique (i.e. last) owner.
+
+             Decrements the reference counter and calls the destructor function of
              the handle (if one has been registered in the constructor) when the counter
              reaches zero. Sets this wrapper to NULL in any case. Returns
              a negative value when the destructor call for the handle fails, and
@@ -509,7 +509,7 @@ public:
         if(refcount_)
         {
             --(*refcount_);
-            if(*refcount_ == 0) 
+            if(*refcount_ == 0)
             {
                 if(destructor_)
                     res = (*destructor_)(handle_);
@@ -521,9 +521,9 @@ public:
         refcount_ = 0;
         return res;
     }
-    
-        /** \brief Reset the handle to a new value. 
-        
+
+        /** \brief Reset the handle to a new value.
+
              Equivalent to <tt>handle = HDF5HandleShared(h, destructor, error_message)</tt>.
         */
     void reset(hid_t h, Destructor destructor, const char * error_message)
@@ -539,7 +539,7 @@ public:
                 refcount_ = new size_t(1);
         }
     }
-    
+
         /** \brief Get the number of owners of the contained handle.
         */
     size_t use_count() const
@@ -548,20 +548,20 @@ public:
                  ? *refcount_
                  : 0;
     }
-    
+
         /** \brief Check if this is the unique owner of the contained handle.
-        
+
             Equivalent to <tt>handle.use_count() == 1</tt>.
         */
     bool unique() const
     {
         return use_count() == 1;
     }
-    
+
         /** \brief Swap the contents of two handle wrappers.
-        
+
             Also available as <tt>std::swap(handle1, handle2)</tt>.
-        */    
+        */
     void swap(HDF5HandleShared & h)
     {
         std::swap(handle_, h.handle_);
@@ -580,8 +580,8 @@ public:
 
         /** \brief Convert to a plain hid_t object.
 
-        This function ensures that hid_t objects can be transparently replaced with 
-        HDF5HandleShared objects in user code. Do not call a close function on the return 
+        This function ensures that hid_t objects can be transparently replaced with
+        HDF5HandleShared objects in user code. Do not call a close function on the return
         value - a crash will be likely otherwise.
         */
     operator hid_t() const
@@ -652,7 +652,7 @@ namespace vigra {
 /** \brief Argument object for the function readHDF5().
 
 See \ref readHDF5() for a usage example. This object must be
-used to read an image or array from an HDF5 file 
+used to read an image or array from an HDF5 file
 and enquire about its properties.
 
 <b>\#include</b> \<vigra/hdf5impex.hxx\><br>
@@ -661,16 +661,16 @@ Namespace: vigra
 class HDF5ImportInfo
 {
   public:
-    enum PixelType { UINT8, UINT16, UINT32, UINT64, 
+    enum PixelType { UINT8, UINT16, UINT32, UINT64,
                      INT8, INT16, INT32, INT64,
                      FLOAT, DOUBLE };
 
         /** Construct HDF5ImportInfo object.
 
-            The dataset \a pathInFile in the HDF5 file \a filename is accessed to 
+            The dataset \a pathInFile in the HDF5 file \a filename is accessed to
             read its properties. \a pathInFile may contain '/'-separated group
             names, but must end with the name of the desired dataset:
-            
+
             \code
             HDF5ImportInfo info(filename, "/group1/group2/my_dataset");
             \endcode
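
             A hedged usage sketch (not from the upstream documentation): the file
             name, dataset path, and element type below are hypothetical, and
             shapeOfDimension()/readHDF5() are assumed as documented in this header.

             \code
             HDF5ImportInfo info("data.h5", "/group1/my_dataset");
             vigra_precondition(info.numDimensions() == 2,
                 "example expects a 2-dimensional dataset");

             MultiArray<2, float> array(Shape2(info.shapeOfDimension(0),
                                               info.shapeOfDimension(1)));
             readHDF5(info, array);   // axis order is reversed relative to the file
             \endcode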
@@ -700,10 +700,10 @@ class HDF5ImportInfo
     VIGRA_EXPORT MultiArrayIndex numDimensions() const;
 
         /** Get the shape of the dataset represented by this info object.
-            
-            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses 
+
+            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses
             Fortran-order, while HDF5 uses C-order. This function therefore reverses the axis
-            order relative to the file contents. That is, when the axes in the file are 
+            order relative to the file contents. That is, when the axes in the file are
             ordered as 'z', 'y', 'x', this function will return the shape in the order
             'x', 'y', 'z'.
          */
@@ -713,10 +713,10 @@ class HDF5ImportInfo
     }
 
         /** Get the shape (length) of the dataset along dimension \a dim.
-            
-            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses 
+
+            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses
             Fortran-order, while HDF5 uses C-order. This function therefore reverses the axis
-            order relative to the file contents. That is, when the axes in the file are 
+            order relative to the file contents. That is, when the axes in the file are
             ordered as 'z', 'y', 'x', this function will return the shape in the order
             'x', 'y', 'z'.
          */
@@ -742,7 +742,7 @@ class HDF5ImportInfo
 
         /** Query the pixel type of the dataset.
 
-            Same as getPixelType(), but the result is returned as a 
+            Same as getPixelType(), but the result is returned as an
             HDF5ImportInfo::PixelType enum. This is useful to implement
             a switch() on the pixel type.
 
@@ -777,7 +777,7 @@ struct HDF5TypeTraits
         throw std::runtime_error("getH5DataType(): invalid type");
         return 0;
     }
-    
+
     static int numberOfBands()
     {
         throw std::runtime_error("numberOfBands(): invalid type");
@@ -857,7 +857,7 @@ struct HDF5TypeTraits<char*>
         H5Tset_size(stringtype, H5T_VARIABLE);
         return stringtype;
     }
-    
+
     static int numberOfBands()
     {
         return 1;
@@ -873,7 +873,7 @@ struct HDF5TypeTraits<const char*>
         H5Tset_size(stringtype, H5T_VARIABLE);
         return stringtype;
     }
-    
+
     static int numberOfBands()
     {
         return 1;
@@ -931,10 +931,10 @@ string attributes can be attached to any dataset or group. Group- or dataset-han
 are encapsulated in the class and managed automatically. The internal file-system-like
 structure can be accessed by functions like "cd()" or "mkdir()".
 
-Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses 
+Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses
 Fortran-order, while HDF5 uses C-order. This means that a VIGRA MultiArray,
 whose indices represent the 'x'-, 'y'-, and 'z'-axis in that order, is reversed
-upon writing to an HDF5 file, i.e. in the file the axis order is 'z', 'y', 'x'. 
+upon writing to an HDF5 file, i.e. in the file the axis order is 'z', 'y', 'x'.
 Likewise, the order is reversed upon reading.
 
 <b>Example:</b>
@@ -960,11 +960,11 @@ class HDF5File
 
     // current group handle
     HDF5Handle cGroupHandle_;
-    
+
   private:
     // time tagging of datasets, turned off (= 0) by default.
     int track_time;
-    
+
     bool read_only_;
 
     // helper classes for ls() and listAttributes()
@@ -973,7 +973,7 @@ class HDF5File
         virtual void insert(const std::string &) = 0;
         virtual ~ls_closure() {}
     };
-    
+
     // datastructure to hold a std::vector<std::string>
     struct lsOpData : public ls_closure
     {
@@ -984,7 +984,7 @@ class HDF5File
             objects.push_back(x);
         }
     };
-    
+
     // datastructure to hold an associative container
     template<class Container>
     struct ls_container_data : public ls_closure
@@ -1007,7 +1007,7 @@ class HDF5File
             OpenMode::New creates a new file. If the file already exists, it is overwritten.
 
             OpenMode::ReadWrite opens a file for reading/writing. The file will be created if it doesn't exist.
-            
+
             OpenMode::ReadOnly opens a file for reading. The file as well as any dataset to be accessed must already exist.
         */
     enum OpenMode {
@@ -1017,7 +1017,7 @@ class HDF5File
         OpenReadOnly,     // Open file in read-only mode.
         ReadOnly = OpenReadOnly, // Alias for OpenReadOnly
         Replace,          // for ChunkedArrayHDF5: replace dataset if it exists, create otherwise
-        Default           // for ChunkedArrayHDF5: use New if file doesn't exist, 
+        Default           // for ChunkedArrayHDF5: use New if file doesn't exist,
                           //                           ReadOnly if file and dataset exist
                           //                           Open otherwise
     };
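
// A hedged usage sketch (not part of the upstream header): opening a file in two
// of the documented modes; the file name "modes.h5" is hypothetical.
#include <vigra/hdf5impex.hxx>

void open_mode_example()
{
    using namespace vigra;
    {
        HDF5File out("modes.h5", HDF5File::New);    // create the file, overwrite if it exists
        // ... write datasets here; the destructor closes the file
    }
    HDF5File in("modes.h5", HDF5File::ReadOnly);    // file must already exist, no writes allowed
}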
@@ -1041,10 +1041,10 @@ class HDF5File
 
         /** \brief Open or create an HDF5File object.
 
-        Creates or opens HDF5 file with given filename. 
+        Creates or opens HDF5 file with given filename.
         The current group is set to "/".
-        
-        Note that the HDF5File class is not copyable (the copy constructor is 
+
+        Note that the HDF5File class is not copyable (the copy constructor is
         private to enforce this).
         */
     HDF5File(std::string filePath, OpenMode mode, bool track_creation_times = false)
@@ -1056,13 +1056,13 @@ class HDF5File
         /** \brief Initialize an HDF5File object from an HDF5 file handle
 
         Initializes an HDF5File object corresponding to the HDF5 file
-        opened elsewhere. If \a fileHandle is constructed with a 
+        opened elsewhere. If \a fileHandle is constructed with a
         <tt>NULL</tt> destructor, ownership is not transferred
         to the new HDF5File object, and you must ensure that the file is
         not closed while the new HDF5File object is in use. Otherwise,
         ownership will be shared.
-        
-        The current group is set to the specified \a pathname. If 
+
+        The current group is set to the specified \a pathname. If
         \a read_only is 'true', you cannot create new datasets or
         overwrite data.
 
@@ -1075,9 +1075,9 @@ class HDF5File
                       bool read_only = false)
     : fileHandle_(fileHandle),
       read_only_(read_only)
-      
+
     {
-        // get group handle for given pathname        
+        // get group handle for given pathname
         // calling openCreateGroup_ without setting a valid cGroupHandle does
         // not work. Starting from root() is a safe bet.
         root();
@@ -1103,7 +1103,7 @@ class HDF5File
       track_time(other.track_time),
       read_only_(other.read_only_)
     {
-        cGroupHandle_ = HDF5Handle(openCreateGroup_(other.currentGroupName_()), &H5Gclose, 
+        cGroupHandle_ = HDF5Handle(openCreateGroup_(other.currentGroupName_()), &H5Gclose,
                                    "HDF5File(HDF5File const &): Failed to open group.");
     }
 
@@ -1117,7 +1117,7 @@ class HDF5File
         // the operating system, see
         // http://www.hdfgroup.org/HDF5/doc/RM/RM_H5F.html#File-Close .
     }
-    
+
         /** \brief Assign an HDF5File object.

             Calls close() on the present file; the new object will then refer to the same file and group as \a other.
@@ -1128,7 +1128,7 @@ class HDF5File
         {
             close();
             fileHandle_ = other.fileHandle_;
-            cGroupHandle_ = HDF5Handle(openCreateGroup_(other.currentGroupName_()), &H5Gclose, 
+            cGroupHandle_ = HDF5Handle(openCreateGroup_(other.currentGroupName_()), &H5Gclose,
                                        "HDF5File::operator=(): Failed to open group.");
             track_time = other.track_time;
             read_only_ = other.read_only_;
@@ -1140,29 +1140,29 @@ class HDF5File
     {
         return fileHandle_.use_count();
     }
-    
+
     bool isOpen() const
     {
         return fileHandle_ != 0;
     }
-    
+
     bool isReadOnly() const
     {
         return read_only_;
     }
-    
+
     void setReadOnly(bool stat=true)
     {
         read_only_ = stat;
     }
-  
+
         /** \brief Open or create the given file in the given mode and set the group to "/".
             If another file is currently open, it is first closed.
          */
     void open(std::string filePath, OpenMode mode)
     {
         close();
-        
+
         std::string errorMessage = "HDF5File.open(): Could not open or create file '" + filePath + "'.";
         fileHandle_ = HDF5HandleShared(createFile_(filePath, mode), &H5Fclose, errorMessage.c_str());
         cGroupHandle_ = HDF5Handle(openCreateGroup_("/"), &H5Gclose, "HDF5File.open(): Failed to open root group.");
@@ -1214,7 +1214,7 @@ class HDF5File
 
         return true;
     }
-    
+
         /** \brief Change the current group to its parent group.
             Returns true if successful, false otherwise. If unsuccessful,
             the group will not change.
@@ -1222,7 +1222,7 @@ class HDF5File
     inline bool cd_up(int levels)
     {
         std::string groupName = currentGroupName_();
-        
+
         for(int i = 0; i<levels; i++)
         {
             if(!cd_up())
@@ -1244,12 +1244,12 @@ class HDF5File
     {
         vigra_precondition(!isReadOnly(),
             "HDF5File::mkdir(): file is read-only.");
-        
+
         std::string message = "HDF5File::mkdir(): Could not create group '" + groupName + "'.\n";
 
         // make groupName clean
         groupName = get_absolute_path(groupName);
-        
+
         HDF5Handle(openCreateGroup_(groupName.c_str()),&H5Gclose,message.c_str());
     }
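
// A hedged sketch (not part of the upstream header) of group navigation with
// mkdir()/cd_mk()/cd_up()/root(); the file and group names are hypothetical.
#include <vigra/hdf5impex.hxx>

void group_navigation_example()
{
    using namespace vigra;
    HDF5File file("groups.h5", HDF5File::New);
    file.mkdir("/experiment/run1");   // create nested groups, parents are created as needed
    file.cd_mk("/experiment/run2");   // create the group and make it the current group
    file.cd_up();                     // current group is now "/experiment"
    file.root();                      // back to "/"
}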
 
@@ -1261,7 +1261,7 @@ class HDF5File
     {
         vigra_precondition(!isReadOnly(),
             "HDF5File::cd_mk(): file is read-only.");
-        
+
         std::string  message = "HDF5File::cd_mk(): Could not create group '" + groupName + "'.";
 
         // make groupName clean
@@ -1291,9 +1291,9 @@ class HDF5File
     }
 
         /** \brief List the contents of the current group into a container-like
-                   object via insert(). 
-                   
-            Only datasets and groups are inserted, other objects (e.g., datatypes) are ignored. 
+                   object via insert().
+
+            Only datasets and groups are inserted, other objects (e.g., datatypes) are ignored.
             Group names always have a trailing "/".
 
             The argument cont is presumably an associative container, however,
@@ -1354,15 +1354,15 @@ class HDF5File
     }
 
         /** \brief Get the shape of each dimension of a certain dataset.
-            
+
            Normally, this function is called after determining the dimension of the
             dataset using \ref getDatasetDimensions().
             If the first character is a "/", the path will be interpreted as absolute path,
             otherwise it will be interpreted as path relative to the current group.
-            
-            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses 
+
+            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses
             Fortran-order, while HDF5 uses C-order. This function therefore reverses the axis
-            order relative to the file contents. That is, when the axes in the file are 
+            order relative to the file contents. That is, when the axes in the file are
             ordered as 'z', 'y', 'x', this function will return the shape in the order
             'x', 'y', 'z'.
         */
@@ -1423,7 +1423,7 @@ class HDF5File
             else if(datasize == 8)
                 return "DOUBLE";
         }
-        else if(dataclass == H5T_INTEGER)   
+        else if(dataclass == H5T_INTEGER)
         {
             if(datasign == H5T_SGN_NONE)
             {
@@ -1450,7 +1450,7 @@ class HDF5File
         }
         return "UNKNOWN";
     }
-        
+
         /** \brief Obtain the HDF5 handle of a dataset.
         */
     HDF5Handle getDatasetHandle(std::string const & datasetName) const
@@ -1458,7 +1458,7 @@ class HDF5File
         std::string errorMessage = "HDF5File::getDatasetHandle(): Unable to open dataset '" + datasetName + "'.";
         return HDF5Handle(getDatasetHandle_(get_absolute_path(datasetName)), &H5Dclose, errorMessage.c_str());
     }
-        
+
         /** \brief Obtain a shared HDF5 handle of a dataset.
         */
     HDF5HandleShared getDatasetHandleShared(std::string const & datasetName) const
@@ -1469,7 +1469,7 @@ class HDF5File
 
         /** \brief Obtain the HDF5 handle of a group (create the group if it doesn't exist).
          */
-    HDF5Handle getGroupHandle(std::string group_name, 
+    HDF5Handle getGroupHandle(std::string group_name,
                               std::string function_name = "HDF5File::getGroupHandle()")
     {
         std::string errorMessage = function_name + ": Group '" + group_name + "' not found.";
@@ -1478,7 +1478,7 @@ class HDF5File
         group_name = get_absolute_path(group_name);
 
         // group must exist
-        vigra_precondition(group_name == "/" || H5Lexists(fileHandle_, group_name.c_str(), H5P_DEFAULT) != 0, 
+        vigra_precondition(group_name == "/" || H5Lexists(fileHandle_, group_name.c_str(), H5P_DEFAULT) != 0,
                            errorMessage.c_str());
 
         // open group and return group handle
@@ -1489,7 +1489,7 @@ class HDF5File
     void ls_H5Aiterate(std::string const & group_or_dataset, ls_closure & data) const
     {
         H5O_type_t h5_type = get_object_type_(group_or_dataset);
-        vigra_precondition(h5_type == H5O_TYPE_GROUP || h5_type == H5O_TYPE_DATASET, 
+        vigra_precondition(h5_type == H5O_TYPE_GROUP || h5_type == H5O_TYPE_DATASET,
             "HDF5File::listAttributes(): object \"" + group_or_dataset + "\" is neither a group nor a dataset.");
         // get object handle
         HDF5Handle object_handle(h5_type == H5O_TYPE_GROUP
@@ -1505,7 +1505,7 @@ class HDF5File
     }
 
         /** \brief List the attribute names of the given group or dataset.
-        
+
             If \a group_or_dataset is empty or <tt>"."</tt> (a dot), the command
             refers to the current group of this file object.
         */
@@ -1517,9 +1517,9 @@ class HDF5File
         return list;
     }
 
-        /** \brief Insert the attribute names of the given group or dataset into the given 
+        /** \brief Insert the attribute names of the given group or dataset into the given
                    \a container by calling <tt>container.insert(std::string)</tt>.
-        
+
             If \a group_or_dataset is empty or <tt>"."</tt> (a dot), the command
             refers to the current group of this file object.
         */
@@ -1545,8 +1545,8 @@ class HDF5File
           * In contrast to datasets, subarray access, chunks and compression are not available.
           */
     template<unsigned int N, class T, class Stride>
-    inline void writeAttribute(std::string object_name, 
-                               std::string attribute_name, 
+    inline void writeAttribute(std::string object_name,
+                               std::string attribute_name,
                                const MultiArrayView<N, T, Stride> & array)
     {
         // make object_name clean
@@ -1556,8 +1556,8 @@ class HDF5File
     }
 
     template<unsigned int N, class T, int SIZE, class Stride>
-    inline void writeAttribute(std::string datasetName, 
-                               std::string attributeName, 
+    inline void writeAttribute(std::string datasetName,
+                               std::string attributeName,
                                const MultiArrayView<N, TinyVector<T, SIZE>, Stride> & array)
     {
         // make datasetName clean
@@ -1567,8 +1567,8 @@ class HDF5File
     }
 
     template<unsigned int N, class T, class Stride>
-    inline void writeAttribute(std::string datasetName, 
-                               std::string attributeName, 
+    inline void writeAttribute(std::string datasetName,
+                               std::string attributeName,
                                const MultiArrayView<N, RGBValue<T>, Stride> & array)
     {
         // make datasetName clean
@@ -1580,37 +1580,37 @@ class HDF5File
         /** \brief Write a single value.
           Specialization of the write function for simple datatypes
          */
-    inline void writeAttribute(std::string object_name, std::string attribute_name, char data) 
+    inline void writeAttribute(std::string object_name, std::string attribute_name, char data)
         { writeAtomicAttribute(object_name,attribute_name,data); }
-    inline void writeAttribute(std::string datasetName, std::string attributeName, signed char data) 
+    inline void writeAttribute(std::string datasetName, std::string attributeName, signed char data)
         { writeAtomicAttribute(datasetName,attributeName,data); }
-    inline void writeAttribute(std::string datasetName, std::string attributeName, signed short data) 
+    inline void writeAttribute(std::string datasetName, std::string attributeName, signed short data)
         { writeAtomicAttribute(datasetName,attributeName,data); }
-    inline void writeAttribute(std::string datasetName, std::string attributeName, signed int data) 
+    inline void writeAttribute(std::string datasetName, std::string attributeName, signed int data)
         { writeAtomicAttribute(datasetName,attributeName,data); }
-    inline void writeAttribute(std::string datasetName, std::string attributeName, signed long data) 
+    inline void writeAttribute(std::string datasetName, std::string attributeName, signed long data)
         { writeAtomicAttribute(datasetName,attributeName,data); }
-    inline void writeAttribute(std::string datasetName, std::string attributeName, signed long long data) 
+    inline void writeAttribute(std::string datasetName, std::string attributeName, signed long long data)
         { writeAtomicAttribute(datasetName,attributeName,data); }
-    inline void writeAttribute(std::string datasetName, std::string attributeName, unsigned char data) 
+    inline void writeAttribute(std::string datasetName, std::string attributeName, unsigned char data)
         { writeAtomicAttribute(datasetName,attributeName,data); }
-    inline void writeAttribute(std::string datasetName, std::string attributeName, unsigned short data) 
+    inline void writeAttribute(std::string datasetName, std::string attributeName, unsigned short data)
         { writeAtomicAttribute(datasetName,attributeName,data); }
-    inline void writeAttribute(std::string datasetName, std::string attributeName, unsigned int data) 
+    inline void writeAttribute(std::string datasetName, std::string attributeName, unsigned int data)
         { writeAtomicAttribute(datasetName,attributeName,data); }
-    inline void writeAttribute(std::string datasetName, std::string attributeName, unsigned long data) 
+    inline void writeAttribute(std::string datasetName, std::string attributeName, unsigned long data)
         { writeAtomicAttribute(datasetName,attributeName,data); }
-    inline void writeAttribute(std::string datasetName, std::string attributeName, unsigned long long data) 
+    inline void writeAttribute(std::string datasetName, std::string attributeName, unsigned long long data)
         { writeAtomicAttribute(datasetName,attributeName,data); }
-    inline void writeAttribute(std::string datasetName, std::string attributeName, float data) 
+    inline void writeAttribute(std::string datasetName, std::string attributeName, float data)
         { writeAtomicAttribute(datasetName,attributeName,data); }
-    inline void writeAttribute(std::string datasetName, std::string attributeName, double data) 
+    inline void writeAttribute(std::string datasetName, std::string attributeName, double data)
         { writeAtomicAttribute(datasetName,attributeName,data); }
-    inline void writeAttribute(std::string datasetName, std::string attributeName, long double data) 
+    inline void writeAttribute(std::string datasetName, std::string attributeName, long double data)
         { writeAtomicAttribute(datasetName,attributeName,data); }
-    inline void writeAttribute(std::string datasetName, std::string attributeName, const char* data) 
+    inline void writeAttribute(std::string datasetName, std::string attributeName, const char* data)
         { writeAtomicAttribute(datasetName,attributeName,data); }
-    inline void writeAttribute(std::string datasetName, std::string attributeName, std::string const & data) 
+    inline void writeAttribute(std::string datasetName, std::string attributeName, std::string const & data)
         { writeAtomicAttribute(datasetName,attributeName,data.c_str()); }
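
// A hedged sketch (not part of the upstream header): attaching scalar and string
// attributes to a dataset and reading them back; file and dataset names are hypothetical.
#include <string>
#include <vigra/hdf5impex.hxx>
#include <vigra/multi_array.hxx>

void attribute_example()
{
    using namespace vigra;
    HDF5File file("attributes.h5", HDF5File::New);

    MultiArray<2, float> data(Shape2(10, 10), 0.0f);
    file.write("measurements", data);                   // attributes need an existing object

    file.writeAttribute("measurements", "scale", 0.25);                 // double attribute
    file.writeAttribute("measurements", "comment", "raw sensor data");  // string attribute

    double scale = 0.0;
    std::string comment;
    file.readAttribute("measurements", "scale", scale);
    file.readAttribute("measurements", "comment", comment);
}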
 
         /** \brief Test if attribute exists.
@@ -1632,8 +1632,8 @@ class HDF5File
           * In contrast to datasets, subarray access is not available.
           */
     template<unsigned int N, class T, class Stride>
-    inline void readAttribute(std::string object_name, 
-                              std::string attribute_name, 
+    inline void readAttribute(std::string object_name,
+                              std::string attribute_name,
                               MultiArrayView<N, T, Stride> array)
     {
         // make object_name clean
@@ -1643,8 +1643,8 @@ class HDF5File
     }
 
     template<unsigned int N, class T, int SIZE, class Stride>
-    inline void readAttribute(std::string datasetName, 
-                              std::string attributeName, 
+    inline void readAttribute(std::string datasetName,
+                              std::string attributeName,
                               MultiArrayView<N, TinyVector<T, SIZE>, Stride> array)
     {
         // make datasetName clean
@@ -1654,8 +1654,8 @@ class HDF5File
     }
 
     template<unsigned int N, class T, class Stride>
-    inline void readAttribute(std::string datasetName, 
-                              std::string attributeName, 
+    inline void readAttribute(std::string datasetName,
+                              std::string attributeName,
                               MultiArrayView<N, RGBValue<T>, Stride> array)
     {
         // make datasetName clean
@@ -1667,43 +1667,43 @@ class HDF5File
         /** \brief Read a single value.
           Specialization of the read function for simple datatypes
          */
-    inline void readAttribute(std::string object_name, std::string attribute_name, char &data)       
+    inline void readAttribute(std::string object_name, std::string attribute_name, char &data)
         { readAtomicAttribute(object_name,attribute_name,data); }
-    inline void readAttribute(std::string datasetName, std::string attributeName, signed char &data)        
+    inline void readAttribute(std::string datasetName, std::string attributeName, signed char &data)
         { readAtomicAttribute(datasetName,attributeName,data); }
-    inline void readAttribute(std::string datasetName, std::string attributeName, signed short &data)       
+    inline void readAttribute(std::string datasetName, std::string attributeName, signed short &data)
         { readAtomicAttribute(datasetName,attributeName,data); }
-    inline void readAttribute(std::string datasetName, std::string attributeName, signed int &data)       
+    inline void readAttribute(std::string datasetName, std::string attributeName, signed int &data)
         { readAtomicAttribute(datasetName,attributeName,data); }
-    inline void readAttribute(std::string datasetName, std::string attributeName, signed long &data)       
+    inline void readAttribute(std::string datasetName, std::string attributeName, signed long &data)
         { readAtomicAttribute(datasetName,attributeName,data); }
-    inline void readAttribute(std::string datasetName, std::string attributeName, signed long long &data)       
+    inline void readAttribute(std::string datasetName, std::string attributeName, signed long long &data)
         { readAtomicAttribute(datasetName,attributeName,data); }
-    inline void readAttribute(std::string datasetName, std::string attributeName, unsigned char &data)       
+    inline void readAttribute(std::string datasetName, std::string attributeName, unsigned char &data)
         { readAtomicAttribute(datasetName,attributeName,data); }
-    inline void readAttribute(std::string datasetName, std::string attributeName, unsigned short &data)      
+    inline void readAttribute(std::string datasetName, std::string attributeName, unsigned short &data)
         { readAtomicAttribute(datasetName,attributeName,data); }
-    inline void readAttribute(std::string datasetName, std::string attributeName, unsigned int &data)      
+    inline void readAttribute(std::string datasetName, std::string attributeName, unsigned int &data)
         { readAtomicAttribute(datasetName,attributeName,data); }
-    inline void readAttribute(std::string datasetName, std::string attributeName, unsigned long &data)      
+    inline void readAttribute(std::string datasetName, std::string attributeName, unsigned long &data)
         { readAtomicAttribute(datasetName,attributeName,data); }
-    inline void readAttribute(std::string datasetName, std::string attributeName, unsigned long long &data)      
+    inline void readAttribute(std::string datasetName, std::string attributeName, unsigned long long &data)
         { readAtomicAttribute(datasetName,attributeName,data); }
-    inline void readAttribute(std::string datasetName, std::string attributeName, float &data)       
+    inline void readAttribute(std::string datasetName, std::string attributeName, float &data)
         { readAtomicAttribute(datasetName,attributeName,data); }
-    inline void readAttribute(std::string datasetName, std::string attributeName, double &data)      
+    inline void readAttribute(std::string datasetName, std::string attributeName, double &data)
         { readAtomicAttribute(datasetName,attributeName,data); }
-    inline void readAttribute(std::string datasetName, std::string attributeName, long double &data) 
+    inline void readAttribute(std::string datasetName, std::string attributeName, long double &data)
         { readAtomicAttribute(datasetName,attributeName,data); }
-    inline void readAttribute(std::string datasetName, std::string attributeName, std::string &data) 
+    inline void readAttribute(std::string datasetName, std::string attributeName, std::string &data)
         { readAtomicAttribute(datasetName,attributeName,data); }
 
     // Writing data
 
         /** \brief Write multi arrays.
-          
-            Chunks can be activated by setting 
-            \code iChunkSize = size; //size \> 0 
+
+            Chunks can be activated by setting
+            \code iChunkSize = size; //size \> 0
             \endcode .
             The chunks will be hypercubes with edge length size. When <tt>iChunkSize == 0</tt>
             (default), the behavior depends on the <tt>compression</tt> setting: If no
@@ -1711,22 +1711,22 @@ class HDF5File
             chunking is required, and the chunk size is automatically selected such that
             each chunk contains about 300k pixels.
 
-            Compression can be activated by setting 
-            \code compression = parameter; // 0 \< parameter \<= 9 
+            Compression can be activated by setting
+            \code compression = parameter; // 0 \< parameter \<= 9
             \endcode
             where 0 stands for no compression and 9 for maximum compression.
 
             If the first character of datasetName is a "/", the path will be interpreted as absolute path,
             otherwise it will be interpreted as path relative to the current group.
 
-            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses 
+            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses
             Fortran-order, while HDF5 uses C-order. This means that a VIGRA MultiArray,
             whose indices represent the 'x'-, 'y'-, and 'z'-axis in that order, is reversed
-            upon writing to an HDF5 file, i.e. in the file the axis order is 'z', 'y', 'x'. 
+            upon writing to an HDF5 file, i.e. in the file the axis order is 'z', 'y', 'x'.
         */
     template<unsigned int N, class T, class Stride>
-    inline void write(std::string datasetName, 
-                      const MultiArrayView<N, T, Stride> & array, 
+    inline void write(std::string datasetName,
+                      const MultiArrayView<N, T, Stride> & array,
                       int iChunkSize = 0, int compression = 0)
     {
         // make datasetName clean
@@ -1743,22 +1743,22 @@ class HDF5File
             Chunks can be activated by providing a MultiArrayShape as chunkSize.
             chunkSize must have the same dimension as the array.
 
-            Compression can be activated by setting 
-            \code compression = parameter; // 0 \< parameter \<= 9 
+            Compression can be activated by setting
+            \code compression = parameter; // 0 \< parameter \<= 9
             \endcode
             where 0 stands for no compression and 9 for maximum compression.
 
             If the first character of datasetName is a "/", the path will be interpreted as absolute path,
             otherwise it will be interpreted as path relative to the current group.
 
-            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses 
+            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses
             Fortran-order, while HDF5 uses C-order. This means that a VIGRA MultiArray,
             whose indices represent the 'x'-, 'y'-, and 'z'-axis in that order, is reversed
-            upon writing to an HDF5 file, i.e. in the file the axis order is 'z', 'y', 'x'. 
+            upon writing to an HDF5 file, i.e. in the file the axis order is 'z', 'y', 'x'.
         */
     template<unsigned int N, class T, class Stride>
-    inline void write(std::string datasetName, 
-                      const MultiArrayView<N, T, Stride> & array, 
+    inline void write(std::string datasetName,
+                      const MultiArrayView<N, T, Stride> & array,
                       typename MultiArrayShape<N>::type chunkSize, int compression = 0)
     {
         // make datasetName clean
@@ -1773,37 +1773,37 @@ class HDF5File
             If the first character of datasetName is a "/", the path will be interpreted as absolute path,
             otherwise it will be interpreted as path relative to the current group.
 
-            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses 
+            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses
             Fortran-order, while HDF5 uses C-order. This means that a VIGRA MultiArray,
             whose indices represent the 'x'-, 'y'-, and 'z'-axis in that order, is reversed
-            upon writing to an HDF5 file, i.e. in the file the axis order is 'z', 'y', 'x'. 
+            upon writing to an HDF5 file, i.e. in the file the axis order is 'z', 'y', 'x'.
         */
     template<unsigned int N, class T, class Stride>
-    inline void writeBlock(std::string datasetName, 
-                           typename MultiArrayShape<N>::type blockOffset, 
+    inline void writeBlock(std::string datasetName,
+                           typename MultiArrayShape<N>::type blockOffset,
                            const MultiArrayView<N, T, Stride> & array)
     {
         // make datasetName clean
         datasetName = get_absolute_path(datasetName);
         typedef detail::HDF5TypeTraits<T> TypeTraits;
-        writeBlock_(datasetName, blockOffset, array, 
+        writeBlock_(datasetName, blockOffset, array,
                     TypeTraits::getH5DataType(), TypeTraits::numberOfBands());
     }
 
     template<unsigned int N, class T, class Stride>
-    inline herr_t writeBlock(HDF5HandleShared dataset, 
-                             typename MultiArrayShape<N>::type blockOffset, 
+    inline herr_t writeBlock(HDF5HandleShared dataset,
+                             typename MultiArrayShape<N>::type blockOffset,
                              const MultiArrayView<N, T, Stride> & array)
     {
         typedef detail::HDF5TypeTraits<T> TypeTraits;
-        return writeBlock_(dataset, blockOffset, array, 
+        return writeBlock_(dataset, blockOffset, array,
                            TypeTraits::getH5DataType(), TypeTraits::numberOfBands());
     }
 
     // non-scalar (TinyVector) and unstrided multi arrays
     template<unsigned int N, class T, int SIZE, class Stride>
-    inline void write(std::string datasetName, 
-                      const MultiArrayView<N, TinyVector<T, SIZE>, Stride> & array, 
+    inline void write(std::string datasetName,
+                      const MultiArrayView<N, TinyVector<T, SIZE>, Stride> & array,
                       int iChunkSize = 0, int compression = 0)
     {
         // make datasetName clean
@@ -1817,8 +1817,8 @@ class HDF5File
     }
 
     template<unsigned int N, class T, int SIZE, class Stride>
-    inline void write(std::string datasetName, 
-                      const MultiArrayView<N, TinyVector<T, SIZE>, Stride> & array, 
+    inline void write(std::string datasetName,
+                      const MultiArrayView<N, TinyVector<T, SIZE>, Stride> & array,
                       typename MultiArrayShape<N>::type chunkSize, int compression = 0)
     {
         // make datasetName clean
@@ -1828,9 +1828,9 @@ class HDF5File
     }
 
         /** \brief Write array vectors.
-          
-            Compression can be activated by setting 
-            \code compression = parameter; // 0 \< parameter \<= 9 
+
+            Compression can be activated by setting
+            \code compression = parameter; // 0 \< parameter \<= 9
             \endcode
             where 0 stands for no compression and 9 for maximum compression.
 
@@ -1850,8 +1850,8 @@ class HDF5File
 
     // non-scalar (RGBValue) and unstrided multi arrays
     template<unsigned int N, class T, class Stride>
-    inline void write(std::string datasetName, 
-                      const MultiArrayView<N, RGBValue<T>, Stride> & array, 
+    inline void write(std::string datasetName,
+                      const MultiArrayView<N, RGBValue<T>, Stride> & array,
                       int iChunkSize = 0, int compression = 0)
     {
         // make datasetName clean
@@ -1865,8 +1865,8 @@ class HDF5File
     }
 
     template<unsigned int N, class T, class Stride>
-    inline void write(std::string datasetName, 
-                      const MultiArrayView<N, RGBValue<T>, Stride> & array, 
+    inline void write(std::string datasetName,
+                      const MultiArrayView<N, RGBValue<T>, Stride> & array,
                       typename MultiArrayShape<N>::type chunkSize, int compression = 0)
     {
         // make datasetName clean
@@ -1896,15 +1896,15 @@ class HDF5File
     inline void write(std::string datasetName, std::string const & data) { writeAtomic(datasetName,data.c_str()); }
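
// A hedged sketch (not part of the upstream header): writing a volume with the
// chunking and compression parameters documented above, then reading it back;
// file and dataset names are hypothetical.
#include <vigra/hdf5impex.hxx>
#include <vigra/multi_array.hxx>

void write_read_example()
{
    using namespace vigra;
    HDF5File file("volume.h5", HDF5File::New);

    MultiArray<3, float> volume(Shape3(64, 64, 64), 1.0f);
    file.write("raw", volume, 32, 6);   // cubic chunks of edge length 32, compression level 6

    MultiArray<3, float> copy(volume.shape());
    file.read("raw", copy);             // target must already have the dataset's shape
}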
 
     // Reading data
-    
+
         /** \brief Read data into a multi array.
             If the first character of datasetName is a "/", the path will be interpreted as absolute path,
             otherwise it will be interpreted as path relative to the current group.
 
-            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses 
+            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses
             Fortran-order, while HDF5 uses C-order. This means that an HDF5 dataset,
             whose indices represent the 'z'-, 'y'-, and 'x'-axis in that order, is reversed
-            upon reading into a MultiArrayView, i.e. in the array axis order must be 'x', 'y', 'z'. 
+            upon reading into a MultiArrayView, i.e. in the array the axis order must be 'x', 'y', 'z'.
         */
     template<unsigned int N, class T, class Stride>
     inline void read(std::string datasetName, MultiArrayView<N, T, Stride> array)
@@ -1919,10 +1919,10 @@ class HDF5File
             If the first character of datasetName is a "/", the path will be interpreted as absolute path,
             otherwise it will be interpreted as path relative to the current group.
 
-            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses 
+            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses
             Fortran-order, while HDF5 uses C-order. This means that an HDF5 dataset,
             whose indices represent the 'z'-, 'y'-, and 'x'-axis in that order, is reversed
-            upon reading into a MultiArray, i.e. in the array axis order will be 'x', 'y', 'z'. 
+            upon reading into a MultiArray, i.e. in the array the axis order will be 'x', 'y', 'z'.
         */
     template<unsigned int N, class T, class Alloc>
     inline void readAndResize(std::string datasetName, MultiArray<N, T, Alloc> & array)
@@ -1996,32 +1996,32 @@ class HDF5File
             If the first character of datasetName is a "/", the path will be interpreted as absolute path,
             otherwise it will be interpreted as path relative to the current group.
 
-            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses 
+            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses
             Fortran-order, while HDF5 uses C-order. This means that an HDF5 dataset,
             whose indices represent the 'z'-, 'y'-, and 'x'-axis in that order, is reversed
-            upon reading into a MultiArray, i.e. in the array axis order will be 'x', 'y', 'z'. 
+            upon reading into a MultiArray, i.e. in the array the axis order will be 'x', 'y', 'z'.
         */
     template<unsigned int N, class T, class Stride>
-    inline void readBlock(std::string datasetName, 
-                          typename MultiArrayShape<N>::type blockOffset, 
-                          typename MultiArrayShape<N>::type blockShape, 
+    inline void readBlock(std::string datasetName,
+                          typename MultiArrayShape<N>::type blockOffset,
+                          typename MultiArrayShape<N>::type blockShape,
                           MultiArrayView<N, T, Stride> array)
     {
         // make datasetName clean
         datasetName = get_absolute_path(datasetName);
         typedef detail::HDF5TypeTraits<T> TypeTraits;
-        readBlock_(datasetName, blockOffset, blockShape, array, 
+        readBlock_(datasetName, blockOffset, blockShape, array,
                    TypeTraits::getH5DataType(), TypeTraits::numberOfBands());
     }
 
     template<unsigned int N, class T, class Stride>
-    inline herr_t readBlock(HDF5HandleShared dataset, 
-                          typename MultiArrayShape<N>::type blockOffset, 
-                          typename MultiArrayShape<N>::type blockShape, 
+    inline herr_t readBlock(HDF5HandleShared dataset,
+                          typename MultiArrayShape<N>::type blockOffset,
+                          typename MultiArrayShape<N>::type blockShape,
                           MultiArrayView<N, T, Stride> array)
     {
         typedef detail::HDF5TypeTraits<T> TypeTraits;
-        return readBlock_(dataset, blockOffset, blockShape, array, 
+        return readBlock_(dataset, blockOffset, blockShape, array,
                           TypeTraits::getH5DataType(), TypeTraits::numberOfBands());
     }
 
@@ -2049,7 +2049,7 @@ class HDF5File
         vigra_precondition((N+1) ==  MultiArrayIndex(dimshape.size()) &&
                            SIZE == dimshape[0], // the object in the HDF5 file must have one additional dimension which we interpret as the pixel type bands
             "HDF5File::readAndResize(): Array dimension disagrees with dataset dimension.");
-        
+
         // reshape target MultiArray
         typename MultiArrayShape<N>::type shape;
         for(int k=1; k < static_cast<int>(dimshape.size()); ++k)
@@ -2121,48 +2121,48 @@ class HDF5File
             Chunks can be activated by providing a MultiArrayShape as chunkSize.
             chunkSize must have the same dimension as the array.
 
-            Compression can be activated by setting 
-            \code compression = parameter; // 0 \< parameter \<= 9 
+            Compression can be activated by setting
+            \code compression = parameter; // 0 \< parameter \<= 9
             \endcode
-            where 0 stands for no compression and 9 for maximum compression. If 
+            where 0 stands for no compression and 9 for maximum compression. If
             a non-zero compression level is specified, but the chunk size is zero,
             a default chunk size will be chosen (compression always requires chunks).
 
             If the first character of datasetName is a "/", the path will be interpreted as absolute path,
             otherwise it will be interpreted as path relative to the current group.
 
-            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses 
+            Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses
             Fortran-order, while HDF5 uses C-order. This means that a VIGRA MultiArray,
             whose indices represent the 'x'-, 'y'-, and 'z'-axis in that order, is reversed
-            upon writing to an HDF5 file, i.e. in the file the axis order is 'z', 'y', 'x'. 
+            upon writing to an HDF5 file, i.e. in the file the axis order is 'z', 'y', 'x'.
         */
     template<int N, class T>
-    HDF5HandleShared 
-    createDataset(std::string datasetName, 
-                  TinyVector<MultiArrayIndex, N> const & shape, 
-                  typename detail::HDF5TypeTraits<T>::value_type init = 
-                                                     typename detail::HDF5TypeTraits<T>::value_type(), 
+    HDF5HandleShared
+    createDataset(std::string datasetName,
+                  TinyVector<MultiArrayIndex, N> const & shape,
+                  typename detail::HDF5TypeTraits<T>::value_type init =
+                                                     typename detail::HDF5TypeTraits<T>::value_type(),
 #ifdef _MSC_VER
-                  TinyVector<MultiArrayIndex, N> const & chunkSize = TinyVector<MultiArrayIndex, N>(), 
+                  TinyVector<MultiArrayIndex, N> const & chunkSize = TinyVector<MultiArrayIndex, N>(),
 #else
-                  TinyVector<MultiArrayIndex, N> const & chunkSize = (TinyVector<MultiArrayIndex, N>()), 
+                  TinyVector<MultiArrayIndex, N> const & chunkSize = (TinyVector<MultiArrayIndex, N>()),
 #endif
                   int compressionParameter = 0);
 
         // for backwards compatibility
     template<int N, class T>
-    HDF5HandleShared 
-    createDataset(std::string datasetName, 
-                  TinyVector<MultiArrayIndex, N> const & shape, 
-                  T init, 
-                  int iChunkSize, 
+    HDF5HandleShared
+    createDataset(std::string datasetName,
+                  TinyVector<MultiArrayIndex, N> const & shape,
+                  T init,
+                  int iChunkSize,
                   int compressionParameter = 0)
     {
         typename MultiArrayShape<N>::type chunkSize;
         for(int i = 0; i < N; i++){
             chunkSize[i] = iChunkSize;
         }
-        return this->template createDataset<N, T>(datasetName, shape, init, 
+        return this->template createDataset<N, T>(datasetName, shape, init,
                                                   chunkSize, compressionParameter);
     }
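
// A hedged sketch (not part of the upstream header): creating a chunked, compressed
// dataset up front and accessing it block by block; names and sizes are hypothetical.
#include <vigra/hdf5impex.hxx>
#include <vigra/multi_array.hxx>

void block_io_example()
{
    using namespace vigra;
    typedef MultiArrayShape<3>::type Shape;

    HDF5File file("blocks.h5", HDF5File::New);

    // 96^3 float dataset filled with 0, 32^3 chunks, compression level 4
    HDF5HandleShared dataset =
        file.createDataset<3, float>("volume", Shape(96, 96, 96), 0.0f,
                                     Shape(32, 32, 32), 4);

    MultiArray<3, float> block(Shape(32, 32, 32), 1.0f);
    file.writeBlock(dataset, Shape(0, 0, 0), block);                      // write one block
    file.readBlock(dataset, Shape(32, 0, 0), Shape(32, 32, 32), block);   // read a neighboring block
}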
 
@@ -2170,7 +2170,8 @@ class HDF5File
         */
     inline void flushToDisk()
     {
-        H5Fflush(fileHandle_, H5F_SCOPE_GLOBAL);
+        if(fileHandle_)
+            H5Fflush(fileHandle_, H5F_SCOPE_GLOBAL);
     }
 
   private:
@@ -2206,9 +2207,9 @@ class HDF5File
             return std::string(begin()+lastPos+1, end());
         }
     };
-    
+
     template <class Shape>
-    ArrayVector<hsize_t> 
+    ArrayVector<hsize_t>
     defineChunks(Shape chunks, Shape const & shape, int numBands, int compression = 0)
     {
         if(prod(chunks) > 0)
@@ -2237,7 +2238,7 @@ class HDF5File
 
         /** \brief takes any path and converts it into an absolute path
              in the current file.
-           
+
              Elements like "." and ".." are treated as expected.
              Links are not supported or resolved.
         */
@@ -2301,7 +2302,7 @@ class HDF5File
 
         return str;
     }
-    
+
   protected:
 
         /* checks if the given path is a relative path.
@@ -2353,10 +2354,10 @@ class HDF5File
                 "HDF5File::open(): cannot open non-existing file in read-only mode.");
             fileId = H5Fcreate(filePath.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
         }
-        else 
+        else
         {
             fclose(pFile);
-            if(mode == OpenReadOnly) 
+            if(mode == OpenReadOnly)
             {
                 fileId = H5Fopen(filePath.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
             }
@@ -2373,9 +2374,9 @@ class HDF5File
         return fileId;
     }
 
-        /* \brief Open a group. 
-        
-           A negative value is returned when the group does not exist or when opening 
+        /* \brief Open a group.
+
+           A negative value is returned when the group does not exist or when opening
            fails for other reasons.
          */
     hid_t openGroup_(std::string groupName) const
@@ -2383,12 +2384,12 @@ class HDF5File
         return const_cast<HDF5File *>(this)->openCreateGroup_(groupName, false);
     }
 
-        /* \brief Open or create a group. 
-        
-           If \a create is <tt>true</tt> and the group does not exist, it will be created, 
+        /* \brief Open or create a group.
+
+           If \a create is <tt>true</tt> and the group does not exist, it will be created,
            including all necessary parent groups. If group creation fails, a negative
            value is returned. Likewise, a negative value is returned when \a create
-           is <tt>false</tt> and the group does not exist or when opening of the group 
+           is <tt>false</tt> and the group does not exist or when opening of the group
            fails for other reasons.
          */
     hid_t openCreateGroup_(std::string groupName, bool create = true)
@@ -2425,8 +2426,8 @@ class HDF5File
                     parent = H5Gcreate(prevParent, group.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
                 else
                     parent = -1;
-            } 
-            else 
+            }
+            else
             {
                 parent = H5Gopen(prevParent, group.c_str(), H5P_DEFAULT);
             }
@@ -2510,10 +2511,10 @@ class HDF5File
         /* low-level write function to write vigra MultiArray data as an attribute
          */
     template<unsigned int N, class T, class Stride>
-    void write_attribute_(std::string name, 
+    void write_attribute_(std::string name,
                           const std::string & attribute_name,
                           const MultiArrayView<N, T, Stride> & array,
-                          const hid_t datatype, 
+                          const hid_t datatype,
                           const int numBandsOfType);
 
         /* Write single value attribute
@@ -2537,9 +2538,9 @@ class HDF5File
         /* low-level read function to read vigra MultiArray data from attributes
          */
     template<unsigned int N, class T, class Stride>
-    void read_attribute_(std::string datasetName, 
-                         std::string attributeName, 
-                         MultiArrayView<N, T, Stride> array, 
+    void read_attribute_(std::string datasetName,
+                         std::string attributeName,
+                         MultiArrayView<N, T, Stride> array,
                          const hid_t datatype, const int numBandsOfType);
 
         /* Read a single value attribute.
@@ -2571,11 +2572,11 @@ class HDF5File
         /* low-level write function to write vigra unstrided MultiArray data
         */
     template<unsigned int N, class T, class Stride>
-    void write_(std::string &datasetName, 
-                       const MultiArrayView<N, T, Stride> & array, 
-                       const hid_t datatype, 
-                       const int numBandsOfType, 
-                       typename MultiArrayShape<N>::type &chunkSize, 
+    void write_(std::string &datasetName,
+                       const MultiArrayView<N, T, Stride> & array,
+                       const hid_t datatype,
+                       const int numBandsOfType,
+                       typename MultiArrayShape<N>::type &chunkSize,
                        int compressionParameter = 0);
 
         /* Write single value as dataset.
@@ -2602,8 +2603,8 @@ class HDF5File
         /* low-level read function to read vigra unstrided MultiArray data
          */
     template<unsigned int N, class T, class Stride>
-    void read_(std::string datasetName, 
-                      MultiArrayView<N, T, Stride> array, 
+    void read_(std::string datasetName,
+                      MultiArrayView<N, T, Stride> array,
                       const hid_t datatype, const int numBandsOfType);
 
         /* Read a single value.
@@ -2638,10 +2639,10 @@ class HDF5File
        /* low-level write function to write vigra unstrided MultiArray data into a sub-block of a dataset
        */
     template<unsigned int N, class T, class Stride>
-    void writeBlock_(std::string datasetName, 
-                     typename MultiArrayShape<N>::type &blockOffset, 
-                     const MultiArrayView<N, T, Stride> & array, 
-                     const hid_t datatype, 
+    void writeBlock_(std::string datasetName,
+                     typename MultiArrayShape<N>::type &blockOffset,
+                     const MultiArrayView<N, T, Stride> & array,
+                     const hid_t datatype,
                      const int numBandsOfType)
     {
         // open dataset if it exists
@@ -2652,26 +2653,26 @@ class HDF5File
             "HDF5File::writeBlock(): write to dataset '" + datasetName + "' via H5Dwrite() failed.");
     }
 
-       /* low-level write function to write vigra unstrided MultiArray data into a 
+       /* low-level write function to write vigra unstrided MultiArray data into a
           sub-block of a dataset.  Returns the result of the internal call
            to <tt>H5Dwrite()</tt>.
        */
     template<unsigned int N, class T, class Stride>
-    herr_t writeBlock_(HDF5HandleShared dataset, 
-                       typename MultiArrayShape<N>::type &blockOffset, 
-                       const MultiArrayView<N, T, Stride> & array, 
-                       const hid_t datatype, 
+    herr_t writeBlock_(HDF5HandleShared dataset,
+                       typename MultiArrayShape<N>::type &blockOffset,
+                       const MultiArrayView<N, T, Stride> & array,
+                       const hid_t datatype,
                        const int numBandsOfType);
 
         /* low-level read function to read vigra unstrided MultiArray data from a sub-block of a dataset.
-        
+
            The array must have the same shape as the block.
         */
     template<unsigned int N, class T, class Stride>
-    void readBlock_(std::string datasetName, 
-                    typename MultiArrayShape<N>::type &blockOffset, 
-                    typename MultiArrayShape<N>::type &blockShape, 
-                    MultiArrayView<N, T, Stride> array, 
+    void readBlock_(std::string datasetName,
+                    typename MultiArrayShape<N>::type &blockOffset,
+                    typename MultiArrayShape<N>::type &blockShape,
+                    MultiArrayView<N, T, Stride> array,
                     const hid_t datatype, const int numBandsOfType)
     {
         std::string errorMessage ("HDF5File::readBlock(): Unable to open dataset '" + datasetName + "'.");
@@ -2682,31 +2683,31 @@ class HDF5File
     }
 
         /* low-level read function to read vigra unstrided MultiArray data from a sub-block of a dataset.
-        
+
            The array must have the same shape as the block. Returns the result of the internal call
            to <tt>H5Dread()</tt>.
         */
     template<unsigned int N, class T, class Stride>
-    herr_t readBlock_(HDF5HandleShared dataset, 
-                      typename MultiArrayShape<N>::type &blockOffset, 
-                      typename MultiArrayShape<N>::type &blockShape, 
-                      MultiArrayView<N, T, Stride> array, 
+    herr_t readBlock_(HDF5HandleShared dataset,
+                      typename MultiArrayShape<N>::type &blockOffset,
+                      typename MultiArrayShape<N>::type &blockShape,
+                      MultiArrayView<N, T, Stride> array,
                       const hid_t datatype, const int numBandsOfType);
 };  /* class HDF5File */
 
 /********************************************************************/
 
 template<int N, class T>
-HDF5HandleShared 
-HDF5File::createDataset(std::string datasetName, 
-                        TinyVector<MultiArrayIndex, N> const & shape, 
-                        typename detail::HDF5TypeTraits<T>::value_type init, 
-                         TinyVector<MultiArrayIndex, N> const & chunkSize, 
+HDF5HandleShared
+HDF5File::createDataset(std::string datasetName,
+                        TinyVector<MultiArrayIndex, N> const & shape,
+                        typename detail::HDF5TypeTraits<T>::value_type init,
+                         TinyVector<MultiArrayIndex, N> const & chunkSize,
                          int compressionParameter)
 {
     vigra_precondition(!isReadOnly(),
         "HDF5File::createDataset(): file is read-only.");
-    
+
     // make datasetName clean
     datasetName = get_absolute_path(datasetName);
 
@@ -2735,7 +2736,7 @@ HDF5File::createDataset(std::string datasetName,
         shape_inv[N-1-k] = shape[k];
 
     // create dataspace
-    HDF5Handle 
+    HDF5Handle
     dataspaceHandle = HDF5Handle(H5Screate_simple(shape_inv.size(), shape_inv.data(), NULL),
                                 &H5Sclose, "HDF5File::createDataset(): unable to create dataspace for scalar data.");
 
@@ -2761,30 +2762,30 @@ HDF5File::createDataset(std::string datasetName,
     }
 
     //create the dataset.
-    HDF5HandleShared datasetHandle(H5Dcreate(parent, setname.c_str(), 
-                                             TypeTraits::getH5DataType(), 
+    HDF5HandleShared datasetHandle(H5Dcreate(parent, setname.c_str(),
+                                             TypeTraits::getH5DataType(),
                                              dataspaceHandle, H5P_DEFAULT, plist, H5P_DEFAULT),
-                                   &H5Dclose, 
+                                   &H5Dclose,
                                    "HDF5File::createDataset(): unable to create dataset.");
     if(parent != cGroupHandle_)
         H5Gclose(parent);
-        
+
     return datasetHandle;
 }
 
 /********************************************************************/
 
 template<unsigned int N, class T, class Stride>
-void HDF5File::write_(std::string &datasetName, 
-                      const MultiArrayView<N, T, Stride> & array, 
-                      const hid_t datatype, 
-                      const int numBandsOfType, 
-                      typename MultiArrayShape<N>::type &chunkSize, 
+void HDF5File::write_(std::string &datasetName,
+                      const MultiArrayView<N, T, Stride> & array,
+                      const hid_t datatype,
+                      const int numBandsOfType,
+                      typename MultiArrayShape<N>::type &chunkSize,
                       int compressionParameter)
 {
     vigra_precondition(!isReadOnly(),
         "HDF5File::write(): file is read-only.");
-        
+
     std::string groupname = SplitString(datasetName).first();
     std::string setname = SplitString(datasetName).last();
 
@@ -2795,7 +2796,7 @@ void HDF5File::write_(std::string &datasetName,
     if(numBandsOfType > 1)
         shape.push_back(numBandsOfType);
 
-    HDF5Handle dataspace(H5Screate_simple(shape.size(), shape.begin(), NULL), &H5Sclose, 
+    HDF5Handle dataspace(H5Screate_simple(shape.size(), shape.begin(), NULL), &H5Sclose,
                          "HDF5File::write(): Can not create dataspace.");
 
     // create and open group:
@@ -2806,7 +2807,7 @@ void HDF5File::write_(std::string &datasetName,
     deleteDataset_(groupHandle, setname.c_str());
 
     // set up properties list
-    HDF5Handle plist(H5Pcreate(H5P_DATASET_CREATE), &H5Pclose, 
+    HDF5Handle plist(H5Pcreate(H5P_DATASET_CREATE), &H5Pclose,
                      "HDF5File::write(): unable to create property list." );
 
     // turn off time tagging of datasets by default.
@@ -2827,7 +2828,7 @@ void HDF5File::write_(std::string &datasetName,
     }
 
     // create dataset
-    HDF5Handle datasetHandle(H5Dcreate(groupHandle, setname.c_str(), datatype, dataspace,H5P_DEFAULT, plist, H5P_DEFAULT), 
+    HDF5Handle datasetHandle(H5Dcreate(groupHandle, setname.c_str(), datatype, dataspace,H5P_DEFAULT, plist, H5P_DEFAULT),
                              &H5Dclose, "HDF5File::write(): Can not create dataset.");
 
     herr_t status = 0;
@@ -2843,7 +2844,7 @@ void HDF5File::write_(std::string &datasetName,
         //        incomplete code for better solutions is below
         // MultiArray<N, T> buffer(array);
         // status = H5Dwrite(datasetHandle, datatype, H5S_ALL, H5S_ALL, H5P_DEFAULT, buffer.data());
-        
+
         int offset = numBandsOfType > 1 ? 1 : 0;
         std::reverse(shape.begin(), shape.end());
         if(chunks.size() > 0)
@@ -2869,9 +2870,9 @@ void HDF5File::write_(std::string &datasetName,
         ArrayVector<hsize_t> null(shape.size(), 0),
                              start(shape.size(), 0),
                              count(shape.size(), 1);
-        
+
         count[N-1-offset] = numBandsOfType;
-        
+
         typedef typename MultiArrayShape<N>::type Shape;
         Shape chunkCount, chunkMaxShape;
         for(unsigned int k=offset; k<chunks.size(); ++k)
@@ -2879,7 +2880,7 @@ void HDF5File::write_(std::string &datasetName,
             chunkMaxShape[k-offset] = chunks[k];
             chunkCount[k-offset] = static_cast<MultiArrayIndex>(std::ceil(double(shape[k]) / chunks[k]));
         }
-        
+
         typename CoupledIteratorType<N>::type chunkIter = createCoupledIterator(chunkCount),
                                               chunkEnd  = chunkIter.getEndIterator();
         for(; chunkIter != chunkEnd; ++chunkIter)
@@ -2887,7 +2888,7 @@ void HDF5File::write_(std::string &datasetName,
             Shape chunkStart(chunkIter.point() * chunkMaxShape),
                   chunkStop(min(chunkStart + chunkMaxShape, array.shape()));
             MultiArray<N, T> buffer(array.subarray(chunkStart, chunkStop));
-            
+
             for(unsigned int k=0; k<N; ++k)
             {
                 start[N-1-k] = chunkStart[k];
@@ -2903,13 +2904,13 @@ void HDF5File::write_(std::string &datasetName,
             status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start.data(), NULL, count.data(), NULL);
             if(status < 0)
                 break;
-                
+
             HDF5Handle dataspace2(H5Screate_simple(count.size(), count.data(), NULL),
-                                 &H5Sclose, "HDF5File::write(): unable to create hyperslabs."); 
+                                 &H5Sclose, "HDF5File::write(): unable to create hyperslabs.");
             status = H5Sselect_hyperslab(dataspace2, H5S_SELECT_SET, null.data(), NULL, count.data(), NULL);
             if(status < 0)
                 break;
-                
+
             status = H5Dwrite(datasetHandle, datatype, dataspace2, filespace, H5P_DEFAULT, buffer.data());
             if(status < 0)
                 break;
@@ -2922,15 +2923,15 @@ void HDF5File::write_(std::string &datasetName,
 /********************************************************************/
 
 template<unsigned int N, class T, class Stride>
-herr_t HDF5File::writeBlock_(HDF5HandleShared datasetHandle, 
-                             typename MultiArrayShape<N>::type &blockOffset, 
-                             const MultiArrayView<N, T, Stride> & array, 
-                             const hid_t datatype, 
+herr_t HDF5File::writeBlock_(HDF5HandleShared datasetHandle,
+                             typename MultiArrayShape<N>::type &blockOffset,
+                             const MultiArrayView<N, T, Stride> & array,
+                             const hid_t datatype,
                              const int numBandsOfType)
 {
     vigra_precondition(!isReadOnly(),
         "HDF5File::writeBlock(): file is read-only.");
-        
+
     ArrayVector<hsize_t> boffset, bshape, bones(N+1, 1);
     hssize_t dimensions = getDatasetDimensions_(datasetHandle);
     if(numBandsOfType > 1)
@@ -2964,7 +2965,7 @@ herr_t HDF5File::writeBlock_(HDF5HandleShared datasetHandle,
 
     // get file dataspace and select the desired block
     HDF5Handle dataspaceHandle (H5Dget_space(datasetHandle),&H5Sclose,"Unable to create target dataspace");
-    H5Sselect_hyperslab(dataspaceHandle, H5S_SELECT_SET, 
+    H5Sselect_hyperslab(dataspaceHandle, H5S_SELECT_SET,
                         boffset.data(), bones.data(), bones.data(), bshape.data());
 
     herr_t status = 0;
@@ -2985,15 +2986,15 @@ herr_t HDF5File::writeBlock_(HDF5HandleShared datasetHandle,
 /********************************************************************/
 
 template<unsigned int N, class T, class Stride>
-void HDF5File::write_attribute_(std::string name, 
+void HDF5File::write_attribute_(std::string name,
                                 const std::string & attribute_name,
                                 const MultiArrayView<N, T, Stride> & array,
-                                const hid_t datatype, 
+                                const hid_t datatype,
                                 const int numBandsOfType)
 {
     vigra_precondition(!isReadOnly(),
         "HDF5File::writeAttribute(): file is read-only.");
-        
+
     // shape of the array. Add one dimension, if array contains non-scalars.
     ArrayVector<hsize_t> shape(array.shape().begin(), array.shape().end());
     std::reverse(shape.begin(), shape.end());
@@ -3051,12 +3052,12 @@ void HDF5File::write_attribute_(std::string name,
     vigra_postcondition(status >= 0,
         "HDF5File::writeAttribute(): write to attribute '" + attribute_name + "' via H5Awrite() failed.");
 }
-    
+
 /********************************************************************/
 
 template<unsigned int N, class T, class Stride>
-void HDF5File::read_(std::string datasetName, 
-                     MultiArrayView<N, T, Stride> array, 
+void HDF5File::read_(std::string datasetName,
+                     MultiArrayView<N, T, Stride> array,
                      const hid_t datatype, const int numBandsOfType)
 {
     //Prepare to read without using HDF5ImportInfo
@@ -3065,13 +3066,13 @@ void HDF5File::read_(std::string datasetName,
     std::string errorMessage ("HDF5File::read(): Unable to open dataset '" + datasetName + "'.");
     HDF5Handle datasetHandle(getDatasetHandle_(datasetName), &H5Dclose, errorMessage.c_str());
 
-    // the object in the HDF5 file may have one additional dimension which we 
+    // the object in the HDF5 file may have one additional dimension which we
     // interpret as the pixel type's bands
     int offset = (numBandsOfType > 1)
                     ? 1
                     : 0;
 
-    vigra_precondition(MultiArrayIndex(N + offset) == MultiArrayIndex(dimshape.size()), 
+    vigra_precondition(MultiArrayIndex(N + offset) == MultiArrayIndex(dimshape.size()),
         "HDF5File::read(): Array dimension disagrees with dataset dimension.");
 
     typename MultiArrayShape<N>::type shape;
@@ -3120,9 +3121,9 @@ void HDF5File::read_(std::string datasetName,
                     break;
             }
         }
-        
+
         count[N-1-offset] = static_cast<hsize_t>(numBandsOfType);
-        
+
         typedef typename MultiArrayShape<N>::type Shape;
         Shape chunkCount, chunkMaxShape;
         for(unsigned int k=offset; k<chunks.size(); ++k)
@@ -3130,7 +3131,7 @@ void HDF5File::read_(std::string datasetName,
             chunkMaxShape[k-offset] = chunks[k];
             chunkCount[k-offset] = (MultiArrayIndex)std::ceil(double(dimshape[k]) / chunks[k]);
         }
-        
+
         typename CoupledIteratorType<N>::type chunkIter = createCoupledIterator(chunkCount),
                                               chunkEnd  = chunkIter.getEndIterator();
         for(; chunkIter != chunkEnd; ++chunkIter)
@@ -3138,7 +3139,7 @@ void HDF5File::read_(std::string datasetName,
             Shape chunkStart(chunkIter.point() * chunkMaxShape),
                   chunkStop(min(chunkStart + chunkMaxShape, array.shape()));
             MultiArray<N, T> buffer(chunkStop - chunkStart);
-            
+
             for(unsigned int k=0; k<N; ++k)
             {
                 start[N-1-k] = chunkStart[k];
@@ -3154,17 +3155,17 @@ void HDF5File::read_(std::string datasetName,
             status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start.data(), NULL, count.data(), NULL);
             if(status < 0)
                 break;
-                
+
             HDF5Handle dataspace(H5Screate_simple(count.size(), count.data(), NULL),
-                                 &H5Sclose, "HDF5File::read(): unable to create hyperslabs."); 
+                                 &H5Sclose, "HDF5File::read(): unable to create hyperslabs.");
             status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, null.data(), NULL, count.data(), NULL);
             if(status < 0)
                 break;
-                
+
             status = H5Dread(datasetHandle, datatype, dataspace, filespace, H5P_DEFAULT, buffer.data());
             if(status < 0)
                 break;
-                
+
             array.subarray(chunkStart, chunkStop) = buffer;
         }
     }
@@ -3175,10 +3176,10 @@ void HDF5File::read_(std::string datasetName,
 /********************************************************************/
 
 template<unsigned int N, class T, class Stride>
-herr_t HDF5File::readBlock_(HDF5HandleShared datasetHandle, 
-                            typename MultiArrayShape<N>::type &blockOffset, 
-                            typename MultiArrayShape<N>::type &blockShape, 
-                            MultiArrayView<N, T, Stride> array, 
+herr_t HDF5File::readBlock_(HDF5HandleShared datasetHandle,
+                            typename MultiArrayShape<N>::type &blockOffset,
+                            typename MultiArrayShape<N>::type &blockShape,
+                            MultiArrayView<N, T, Stride> array,
                             const hid_t datatype, const int numBandsOfType)
 {
     vigra_precondition(blockShape == array.shape(),
@@ -3216,9 +3217,9 @@ herr_t HDF5File::readBlock_(HDF5HandleShared datasetHandle,
                                "Unable to create target dataspace");
 
     // get file dataspace and select the desired block
-    HDF5Handle dataspaceHandle(H5Dget_space(datasetHandle), &H5Sclose, 
+    HDF5Handle dataspaceHandle(H5Dget_space(datasetHandle), &H5Sclose,
                                "Unable to get dataspace");
-    H5Sselect_hyperslab(dataspaceHandle, H5S_SELECT_SET, 
+    H5Sselect_hyperslab(dataspaceHandle, H5S_SELECT_SET,
                         boffset.data(), bones.data(), bones.data(), bshape.data());
 
     herr_t status = 0;
@@ -3242,9 +3243,9 @@ herr_t HDF5File::readBlock_(HDF5HandleShared datasetHandle,
 /********************************************************************/
 
 template<unsigned int N, class T, class Stride>
-void HDF5File::read_attribute_(std::string datasetName, 
-                               std::string attributeName, 
-                               MultiArrayView<N, T, Stride> array, 
+void HDF5File::read_attribute_(std::string datasetName,
+                               std::string attributeName,
+                               MultiArrayView<N, T, Stride> array,
                                const hid_t datatype, const int numBandsOfType)
 {
     std::string dataset_path = get_absolute_path(datasetName);
@@ -3264,7 +3265,7 @@ void HDF5File::read_attribute_(std::string datasetName,
         H5Sget_simple_extent_dims(attr_dataspace_handle, dimshape.data(), NULL);
     else
         dimshape[0] = 1;
-    
+
     // invert the dimensions to guarantee VIGRA-compatible order
     std::reverse(dimshape.begin(), dimshape.end());
 
@@ -3303,36 +3304,36 @@ void HDF5File::read_attribute_(std::string datasetName,
 
 /** \brief Read the data specified by the given \ref vigra::HDF5ImportInfo object
                 and write it into the given 'array'.
-                
-    The array must have the correct number of dimensions and shape for the dataset 
+
+    The array must have the correct number of dimensions and shape for the dataset
     represented by 'info'. When the element type of 'array' differs from the stored element
     type, HDF5 will convert the type on the fly (except when the HDF5 version is 1.6 or below,
     in which case an error will result). Multi-channel element types (i.e. \ref vigra::RGBValue,
     \ref vigra::TinyVector, and \ref vigra::FFTWComplex) are recognized and handled correctly.
-    
+
     <b> Declaration:</b>
-    
+
     \code
     namespace vigra {
         template<unsigned int N, class T, class StrideTag>
-        void 
+        void
         readHDF5(const HDF5ImportInfo &info, MultiArrayView<N, T, StrideTag> array);
     }
     \endcode
-    
+
     <b> Usage:</b>
-    
+
     <b>\#include</b> \<vigra/hdf5impex.hxx\><br>
     Namespace: vigra
-    
+
     \code
-    
+
     HDF5ImportInfo info(filename, dataset_name);
     vigra_precondition(info.numDimensions() == 3, "Dataset must be 3-dimensional.");
-    
+
     MultiArrayShape<3>::type shape(info.shape().begin());
     MultiArray<3, int> array(shape);
-    
+
     readHDF5(info, array);
     \endcode
 */
@@ -3355,7 +3356,7 @@ void readHDF5(const HDF5ImportInfo &info, MultiArrayView<N, T, StrideTag> array,
 inline hid_t openGroup(hid_t parent, std::string group_name)
 {
     //std::cout << group_name << std::endl;
-    size_t last_slash = group_name.find_last_of('/'); 
+    size_t last_slash = group_name.find_last_of('/');
     if (last_slash == std::string::npos || last_slash != group_name.size() - 1)
         group_name = group_name + '/';
     std::string::size_type begin = 0, end = group_name.find('/');
@@ -3363,16 +3364,16 @@ inline hid_t openGroup(hid_t parent, std::string group_name)
     while (end != std::string::npos)
     {
         std::string group(group_name.begin()+begin, group_name.begin()+end);
-        hid_t prev_parent = parent; 
+        hid_t prev_parent = parent;
         parent = H5Gopen(prev_parent, group.c_str(), H5P_DEFAULT);
 
         if(ii != 0)     H5Gclose(prev_parent);
         if(parent < 0)  return parent;
-        ++ii; 
+        ++ii;
         begin = end + 1;
         end = group_name.find('/', begin);
     }
-    return parent; 
+    return parent;
 }
 
 inline hid_t createGroup(hid_t parent, std::string group_name)
@@ -3381,14 +3382,14 @@ inline hid_t createGroup(hid_t parent, std::string group_name)
         group_name = group_name + '/';
     if(group_name == "/")
         return H5Gopen(parent, group_name.c_str(), H5P_DEFAULT);
-    
+
     std::string::size_type begin = 0, end = group_name.find('/');
     int ii =  0;
     while (end != std::string::npos)
     {
         std::string group(group_name.begin()+begin, group_name.begin()+end);
-        hid_t prev_parent = parent; 
-        
+        hid_t prev_parent = parent;
+
         if(H5LTfind_dataset(parent, group.c_str()) == 0)
         {
             parent = H5Gcreate(prev_parent, group.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
@@ -3398,11 +3399,11 @@ inline hid_t createGroup(hid_t parent, std::string group_name)
 
         if(ii != 0)     H5Gclose(prev_parent);
         if(parent < 0)  return parent;
-        ++ii; 
+        ++ii;
         begin = end + 1;
         end = group_name.find('/', begin);
     }
-    return parent; 
+    return parent;
 }
 
 inline void deleteDataset(hid_t parent, std::string dataset_name)
@@ -3422,18 +3423,18 @@ inline void deleteDataset(hid_t parent, std::string dataset_name)
             vigra_postcondition(false, "createDataset(): Unable to delete existing data.");
         }
 #endif
-    } 
+    }
 }
 
 inline hid_t createFile(std::string filePath, bool append_ = true)
 {
     FILE * pFile;
     pFile = fopen ( filePath.c_str(), "r" );
-    hid_t file_id; 
+    hid_t file_id;
     if ( pFile == NULL )
     {
         file_id = H5Fcreate(filePath.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-    } 
+    }
     else if(append_)
     {
         fclose( pFile );
@@ -3445,7 +3446,7 @@ inline hid_t createFile(std::string filePath, bool append_ = true)
         std::remove(filePath.c_str());
         file_id = H5Fcreate(filePath.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
     }
-    return file_id; 
+    return file_id;
 }
 
 template<unsigned int N, class T, class Tag>
@@ -3453,9 +3454,9 @@ void createDataset(const char* filePath, const char* pathInFile, const MultiArra
 {
     std::string path_name(pathInFile), group_name, data_set_name, message;
     std::string::size_type delimiter = path_name.rfind('/');
-    
+
     //create or open file
-    file_handle = HDF5Handle(createFile(filePath), &H5Fclose, 
+    file_handle = HDF5Handle(createFile(filePath), &H5Fclose,
                        "createDataset(): unable to open output file.");
 
     // get the groupname and the filename
@@ -3471,7 +3472,7 @@ void createDataset(const char* filePath, const char* pathInFile, const MultiArra
     }
 
     // create all groups
-    HDF5Handle group(createGroup(file_handle, group_name), &H5Gclose, 
+    HDF5Handle group(createGroup(file_handle, group_name), &H5Gclose,
                      "createDataset(): Unable to create and open group. generic v");
 
     // delete the dataset if it already exists
@@ -3503,11 +3504,11 @@ void createDataset(const char* filePath, const char* pathInFile, const MultiArra
                                     &H5Sclose, "createDataset(): unable to create dataspace for scalar data.");
     }
 
-    //alloc memory for dataset. 
-    dataset_handle = HDF5Handle(H5Dcreate(group, 
-                                        data_set_name.c_str(), 
-                                        datatype, 
-                                        dataspace_handle, 
+    //alloc memory for dataset.
+    dataset_handle = HDF5Handle(H5Dcreate(group,
+                                        data_set_name.c_str(),
+                                        datatype,
+                                        dataspace_handle,
                                         H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT),
                               &H5Dclose, "createDataset(): unable to create dataset.");
 }
@@ -3516,37 +3517,37 @@ void createDataset(const char* filePath, const char* pathInFile, const MultiArra
 
 
 /** \brief Store array data in an HDF5 file.
-                
-    The number of dimensions, shape and element type of the stored dataset is automatically 
+
+    The number of dimensions, shape and element type of the stored dataset are automatically
     determined from the properties of the given \a array. Strided arrays are stored in an
-    unstrided way, i.e. in contiguous scan-order. Multi-channel element types 
+    unstrided way, i.e. in contiguous scan-order. Multi-channel element types
     (i.e. \ref vigra::RGBValue, \ref vigra::TinyVector and \ref vigra::FFTWComplex)
     are recognized and handled correctly
     (in particular, they will form the innermost dimension of the stored dataset).
-    \a pathInFile may contain '/'-separated group names, but must end with the name 
+    \a pathInFile may contain '/'-separated group names, but must end with the name
     of the dataset to be created.
-    
+
     <b> Declaration:</b>
-    
+
     \code
     namespace vigra {
         template<unsigned int N, class T, class StrideTag>
-        void 
-        writeHDF5(const char* filePath, const char* pathInFile, 
+        void
+        writeHDF5(const char* filePath, const char* pathInFile,
                   MultiArrayView<N, T, StrideTag>const  & array);
     }
     \endcode
-    
+
     <b> Usage:</b>
-    
+
     <b>\#include</b> \<vigra/hdf5impex.hxx\><br>
     Namespace: vigra
-    
+
     \code
     MultiArrayShape<3>::type shape(100, 200, 20);
     MultiArray<3, int> array(shape);
     ... // fill array with data
-    
+
     writeHDF5("mydata.h5", "/group1/my_dataset", array);
     \endcode
 */
@@ -3580,7 +3581,7 @@ struct MaxSizeFnc
 
     void operator()(std::string const & in)
     {
-        size = in.size() > size ? 
+        size = in.size() > size ?
                     in.size() :
                     size;
     }
@@ -3589,30 +3590,30 @@ struct MaxSizeFnc
 
 
 #if (H5_VERS_MAJOR == 1 && H5_VERS_MINOR == 8) || DOXYGEN
-/** Write a numeric MultiArray as an attribute with name \a name 
-    of the dataset specified by the handle \a loc. 
+/** Write a numeric MultiArray as an attribute with name \a name
+    of the dataset specified by the handle \a loc.
 
     <b>\#include</b> \<vigra/hdf5impex.hxx\><br>
     Namespace: vigra
 */
 template<size_t N, class T, class C>
-void writeHDF5Attr(hid_t loc, 
-                   const char* name, 
+void writeHDF5Attr(hid_t loc,
+                   const char* name,
                    MultiArrayView<N, T, C> const & array)
 {
     if(H5Aexists(loc, name) > 0)
         H5Adelete(loc, name);
-    
-    ArrayVector<hsize_t> shape(array.shape().begin(), 
+
+    ArrayVector<hsize_t> shape(array.shape().begin(),
                                array.shape().end());
-    HDF5Handle 
+    HDF5Handle
         dataspace_handle(H5Screate_simple(N, shape.data(), NULL),
-                         &H5Sclose, 
+                         &H5Sclose,
                          "writeToHDF5File(): unable to create dataspace.");
-    
-    HDF5Handle attr(H5Acreate(loc, 
-                              name, 
-                              detail::getH5DataType<T>(), 
+
+    HDF5Handle attr(H5Acreate(loc,
+                              name,
+                              detail::getH5DataType<T>(),
                               dataspace_handle,
                               H5P_DEFAULT ,H5P_DEFAULT ),
                     &H5Aclose,
@@ -3625,43 +3626,43 @@ void writeHDF5Attr(hid_t loc,
     H5Awrite(attr, detail::getH5DataType<T>(), buffer.data());
 }
 
-/** Write a string MultiArray as an attribute with name \a name 
-    of the dataset specified by the handle \a loc. 
+/** Write a string MultiArray as an attribute with name \a name
+    of the dataset specified by the handle \a loc.
 
     <b>\#include</b> \<vigra/hdf5impex.hxx\><br>
     Namespace: vigra
 */
 template<size_t N, class C>
-void writeHDF5Attr(hid_t loc, 
-                   const char* name, 
+void writeHDF5Attr(hid_t loc,
+                   const char* name,
                    MultiArrayView<N, std::string, C> const & array)
 {
     if(H5Aexists(loc, name) > 0)
         H5Adelete(loc, name);
-    
-    ArrayVector<hsize_t> shape(array.shape().begin(), 
+
+    ArrayVector<hsize_t> shape(array.shape().begin(),
                                array.shape().end());
-    HDF5Handle 
+    HDF5Handle
         dataspace_handle(H5Screate_simple(N, shape.data(), NULL),
-                         &H5Sclose, 
+                         &H5Sclose,
                          "writeToHDF5File(): unable to create dataspace.");
-    
-    HDF5Handle atype(H5Tcopy (H5T_C_S1), 
-                     &H5Tclose, 
+
+    HDF5Handle atype(H5Tcopy (H5T_C_S1),
+                     &H5Tclose,
                      "writeToHDF5File(): unable to create type.");
 
     detail::MaxSizeFnc max_size;
     max_size = std::for_each(array.data(),array.data()+ array.size(), max_size);
     H5Tset_size (atype, max_size.size);
-    
-    HDF5Handle attr(H5Acreate(loc, 
-                              name, 
-                              atype, 
+
+    HDF5Handle attr(H5Acreate(loc,
+                              name,
+                              atype,
                               dataspace_handle,
                               H5P_DEFAULT ,H5P_DEFAULT ),
                     &H5Aclose,
                     "writeHDF5Attr: unable to create Attribute");
-    
+
     std::string buf ="";
     for(int ii = 0; ii < array.size(); ++ii)
     {
@@ -3671,8 +3672,8 @@ void writeHDF5Attr(hid_t loc,
     H5Awrite(attr, atype, buf.c_str());
 }
 
-/** Write a numeric ArrayVectorView as an attribute with name \a name 
-    of the dataset specified by the handle \a loc. 
+/** Write a numeric ArrayVectorView as an attribute with name \a name
+    of the dataset specified by the handle \a loc.
 
     <b>\#include</b> \<vigra/hdf5impex.hxx\><br>
     Namespace: vigra
@@ -3682,13 +3683,13 @@ inline void writeHDF5Attr(  hid_t loc,
                             const char* name,
                             ArrayVectorView<T>  & array)
 {
-    writeHDF5Attr(loc, name, 
+    writeHDF5Attr(loc, name,
                   MultiArrayView<1, T>(MultiArrayShape<1>::type(array.size()),
                                        array.data()));
 }
 
 /** Write an attribute given a file and a path in the file.
-    the path in the file should have the format 
+    The path in the file should have the format
     [attribute] or /[subgroups/]dataset.attribute or
     /[subgroups/]group.attribute.
     The attribute is written to the root group, a dataset or a subgroup
@@ -3701,9 +3702,9 @@ inline void writeHDF5Attr(  std::string filePath,
 {
     std::string path_name(pathInFile), group_name, data_set_name, message, attr_name;
     std::string::size_type delimiter = path_name.rfind('/');
-    
+
     //create or open file
-    HDF5Handle file_id(createFile(filePath), &H5Fclose, 
+    HDF5Handle file_id(createFile(filePath), &H5Fclose,
                        "writeToHDF5File(): unable to open output file.");
 
     // get the groupname and the filename
@@ -3729,8 +3730,8 @@ inline void writeHDF5Attr(  std::string filePath,
         attr_name = std::string(data_set_name.begin()+delimiter+1, data_set_name.end());
         data_set_name = std::string(data_set_name.begin(), data_set_name.begin()+delimiter);
     }
-    
-    HDF5Handle group(openGroup(file_id, group_name), &H5Gclose, 
+
+    HDF5Handle group(openGroup(file_id, group_name), &H5Gclose,
                      "writeToHDF5File(): Unable to create and open group. attr ver");
 
     if(data_set_name != "/")
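
The low-level write_/writeBlock_/read_/readBlock_ helpers reformatted above back the
public HDF5File interface. A minimal usage sketch of that interface (file name, dataset
name, shapes, and the open mode below are illustrative assumptions, not part of the patch):

    #include <vigra/hdf5impex.hxx>
    #include <vigra/multi_array.hxx>

    void blockIoSketch()
    {
        using namespace vigra;

        HDF5File file("data.h5", HDF5File::New);             // assumed open mode

        MultiArray<3, float> volume(Shape3(64, 64, 64));
        file.write("volume", volume);                         // whole dataset

        // sub-block I/O: the in-memory array must have the same shape as the block
        MultiArray<3, float> block(Shape3(16, 16, 16));
        Shape3 offset(8, 8, 8);
        file.writeBlock("volume", offset, block);
        file.readBlock("volume", offset, block.shape(), block);
    }
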
diff --git a/include/vigra/multi_blocking.hxx b/include/vigra/multi_blocking.hxx
index 6559ceb..4424d34 100644
--- a/include/vigra/multi_blocking.hxx
+++ b/include/vigra/multi_blocking.hxx
@@ -304,7 +304,7 @@ namespace vigra{
                 }
                 ++i;
             }
-            return std::move(iBlocks);
+            return iBlocks;
         }
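
The change above removes a pessimizing move: returning a local by value already permits
copy elision (NRVO) and otherwise falls back to an implicit move, whereas
return std::move(x) suppresses elision and draws -Wpessimizing-move warnings from recent
compilers. Stand-alone illustration (not from the patch):

    #include <vector>

    std::vector<int> makeBlocks()
    {
        std::vector<int> blocks(1000, 0);
        // ... fill 'blocks' ...
        return blocks;   // NRVO where possible, otherwise moved implicitly;
                         // 'return std::move(blocks);' would only inhibit elision
    }
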
 
 
diff --git a/include/vigra/multi_convolution.hxx b/include/vigra/multi_convolution.hxx
index 2e3ad9d..73feee9 100644
--- a/include/vigra/multi_convolution.hxx
+++ b/include/vigra/multi_convolution.hxx
@@ -134,20 +134,23 @@ struct WrapDoubleIteratorTriple
              vigra_precondition(false, function_name + msg);
         }
     }
-    double sigma_scaled(const char *const function_name = "unknown function ") const
+    double sigma_scaled(const char *const function_name = "unknown function ",
+                        bool allow_zero = false) const
     {
         sigma_precondition(sigma_eff(), function_name);
         sigma_precondition(sigma_d(), function_name);
         double sigma_squared = sq(sigma_eff()) - sq(sigma_d());
-        if (sigma_squared > 0.0)
+        if (sigma_squared > 0.0 || (allow_zero && sigma_squared == 0.0))
         {
             return std::sqrt(sigma_squared) / step_size();
         }
         else
         {
-             std::string msg = "(): Scale would be imaginary or zero.";
-             vigra_precondition(false, function_name + msg);
-             return 0;
+            std::string msg = "(): Scale would be imaginary";
+            if(!allow_zero)
+                msg += " or zero";
+            vigra_precondition(false, function_name + msg + ".");
+            return 0;
         }
     }
 };
@@ -1308,7 +1311,8 @@ gaussianSmoothMultiArray( SrcIterator s, SrcShape const & shape, SrcAccessor src
     ArrayVector<Kernel1D<double> > kernels(N);
 
     for (int dim = 0; dim < N; ++dim, ++params)
-        kernels[dim].initGaussian(params.sigma_scaled(function_name), 1.0, opt.window_ratio);
+        kernels[dim].initGaussian(params.sigma_scaled(function_name, true),
+                                  1.0, opt.window_ratio);
 
     separableConvolveMultiArray(s, shape, src, d, dest, kernels.begin(), opt.from_point, opt.to_point);
 }
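
With the new allow_zero flag, gaussianSmoothMultiArray() now accepts the degenerate case
where the requested scale equals the data's intrinsic resolution, i.e. the residual filter
scale is exactly zero. A stand-alone sketch of the arithmetic that sigma_scaled() checks
(not the library code itself):

    #include <cmath>
    #include <stdexcept>

    double residualSigma(double sigma_eff, double sigma_d, double step_size,
                         bool allow_zero)
    {
        double sigma_squared = sigma_eff * sigma_eff - sigma_d * sigma_d;
        if (sigma_squared > 0.0 || (allow_zero && sigma_squared == 0.0))
            return std::sqrt(sigma_squared) / step_size;
        throw std::runtime_error(allow_zero ? "scale would be imaginary"
                                            : "scale would be imaginary or zero");
    }

    // residualSigma(2.0, 2.0, 1.0, true)  == 0.0   -- accepted after this change
    // residualSigma(2.0, 2.0, 1.0, false) throws   -- the previous behaviour
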
diff --git a/include/vigra/multi_histogram.hxx b/include/vigra/multi_histogram.hxx
index e8796f9..416d1ff 100644
--- a/include/vigra/multi_histogram.hxx
+++ b/include/vigra/multi_histogram.hxx
@@ -210,6 +210,7 @@ namespace vigra{
         typedef MultiArray<DIM+1, U> OutType;
         typedef typename OutType::difference_type OutCoord;
 
+		// FIXME: crashes on Python3
 
         HistCoord histShape;
         std::copy(image.shape().begin(), image.shape().end(), histShape.begin());
diff --git a/include/vigra/numpy_array.hxx b/include/vigra/numpy_array.hxx
index f94f73d..c82ba3a 100644
--- a/include/vigra/numpy_array.hxx
+++ b/include/vigra/numpy_array.hxx
@@ -38,7 +38,7 @@
 
 #ifndef NPY_NO_DEPRECATED_API
 # define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
-#endif 
+#endif
 
 #include <Python.h>
 #include <string>
@@ -61,11 +61,11 @@ static inline void import_vigranumpy()
     if(_import_array() < 0)
         pythonToCppException(0);
 
-    // Import vigra to activate the numpy array converters, but ensure that 
+    // Import vigra to activate the numpy array converters, but ensure that
     // cyclic imports (from within vigra itself) are avoided.
-    char const * load_vigra = 
+    char const * load_vigra =
         "import sys\n"
-        "if not sys.modules.has_key('vigra.vigranumpycore'):\n"
+        "if 'vigra.vigranumpycore' not in sys.modules:\n"
         "    import vigra\n";
     pythonToCppException(PyRun_SimpleString(load_vigra) == 0);
 }
@@ -163,7 +163,7 @@ void numpyParseSlicing(Shape const & shape, PyObject * idx, Shape & start, Shape
         start[k] = 0;
         stop[k] = shape[k];
     }
-    
+
     python_ptr index(idx);
     if(!PySequence_Check(index))
     {
@@ -186,9 +186,15 @@ void numpyParseSlicing(Shape const & shape, PyObject * idx, Shape & start, Shape
     for(int k=0; k < N; ++k)
     {
         PyObject * item = PyTuple_GET_ITEM((PyTupleObject *)index.ptr(), kindex);
+#if PY_MAJOR_VERSION < 3
         if(PyInt_Check(item))
         {
             MultiArrayIndex i = PyInt_AsLong(item);
+#else
+        if(PyLong_Check(item))
+        {
+            MultiArrayIndex i = PyLong_AsLong(item);
+#endif
             start[k] = i;
             if(start[k] < 0)
                 start[k] += shape[k];
@@ -198,7 +204,11 @@ void numpyParseSlicing(Shape const & shape, PyObject * idx, Shape & start, Shape
         else if(PySlice_Check(item))
         {
             Py_ssize_t sstart, sstop, step;
+#if PY_MAJOR_VERSION < 3
             if(PySlice_GetIndices((PySliceObject *)item, shape[k], &sstart, &sstop, &step) != 0)
+#else
+            if(PySlice_GetIndices(item, shape[k], &sstart, &sstop, &step) != 0)
+#endif
                 pythonToCppException(0);
             vigra_precondition(step == 1,
                 "numpyParseSlicing(): only unit steps are supported.");
@@ -325,7 +335,7 @@ class NumpyAnyArray
                 "NumpyArray::operator=(): Cannot assign from empty array.");
 
             python_ptr arraytype = getArrayTypeObject();
-            python_ptr f(PyString_FromString("_copyValuesImpl"), python_ptr::keep_count);
+            python_ptr f(pythonFromData("_copyValuesImpl"));
             if(PyObject_HasAttr(arraytype, f))
             {
                 python_ptr res(PyObject_CallMethodObjArgs(arraytype, f.get(),
@@ -470,7 +480,7 @@ class NumpyAnyArray
             return PyArray_DESCR(pyArray())->type_num;
         return -1;
     }
-    
+
         /**
          Constructs a slicing from the given shape objects and calls '__getitem__'.
          */
@@ -481,9 +491,9 @@ class NumpyAnyArray
         unsigned int size = ndim();
         vigra_precondition(start.size() == size && stop.size() == size,
             "NumpyAnyArray::getitem(): shape has wrong dimension.");
-        
+
         difference_type s(this->shape());
-        
+
         python_ptr index(PyTuple_New(size), python_ptr::new_nonzero_reference);
         for(unsigned int k=0; k<size; ++k)
         {
@@ -496,19 +506,18 @@ class NumpyAnyArray
             PyObject * item = 0;
             if(start[k] == stop[k])
             {
-                item = PyInt_FromLong(start[k]);
+                item = pythonFromData(start[k]);
             }
             else
             {
-                python_ptr s0(PyInt_FromLong(start[k]), python_ptr::new_nonzero_reference);
-                python_ptr s1(PyInt_FromLong(stop[k]), python_ptr::new_nonzero_reference);
+                python_ptr s0(pythonFromData(start[k]));
+                python_ptr s1(pythonFromData(stop[k]));
                 item = PySlice_New(s0, s1, 0);
             }
             pythonToCppException(item);
             PyTuple_SET_ITEM((PyTupleObject *)index.ptr(), k, item); // steals reference to item
         }
-        
-        python_ptr func(PyString_FromString("__getitem__"), python_ptr::new_nonzero_reference);
+        python_ptr func(pythonFromData("__getitem__"));
         python_ptr res(PyObject_CallMethodObjArgs(pyObject(), func.ptr(), index.ptr(), NULL),
                        python_ptr::new_nonzero_reference);
         return NumpyAnyArray(res.ptr());
@@ -524,7 +533,7 @@ class NumpyAnyArray
         python_ptr axistags;
         if(pyObject())
         {
-            python_ptr key(PyString_FromString("axistags"), python_ptr::keep_count);
+            python_ptr key(pythonFromData("axistags"));
             axistags.reset(PyObject_GetAttr(pyObject(), key), python_ptr::keep_count);
             if(!axistags)
                 PyErr_Clear();
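
The hunks above replace Python-2-only constructs (PyInt_*, PyString_FromString,
dict.has_key) with code that also builds against Python 3. A condensed sketch of the
portable integer handling used in numpyParseSlicing(); the helper name is illustrative:

    #include <Python.h>

    // True if 'item' is a Python integer; stores its value in 'index'.
    // Python 2 still distinguishes int from long, Python 3 only has PyLong.
    static bool asIndex(PyObject * item, long & index)
    {
    #if PY_MAJOR_VERSION < 3
        if(PyInt_Check(item))  { index = PyInt_AsLong(item);  return true; }
    #else
        if(PyLong_Check(item)) { index = PyLong_AsLong(item); return true; }
    #endif
        return false;
    }
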
diff --git a/include/vigra/numpy_array_converters.hxx b/include/vigra/numpy_array_converters.hxx
index 02825b1..c44b8b5 100644
--- a/include/vigra/numpy_array_converters.hxx
+++ b/include/vigra/numpy_array_converters.hxx
@@ -40,7 +40,9 @@
 #include "metaprogramming.hxx"
 #include <boost/python.hpp>
 #include <boost/python/to_python_converter.hpp>
+#include <boost/python/raw_function.hpp>
 #include <set>
+#include <type_traits>
 
 namespace vigra {
 
@@ -169,45 +171,174 @@ FN registerConverters(FN f)
     return f;
 }
 
+namespace detail {
+
+template <class T>
+struct TypeName;
+
+template <class T>
+struct TypeName<Singleband<T>>
+: public TypeName<T>
+{};
+
+template <class T>
+struct TypeName<Multiband<T>>
+: public TypeName<T>
+{};
+
+template <class T, int N>
+struct TypeName<TinyVector<T, N>>
+: public TypeName<T>
+{};
+
+template <>
+struct TypeName<void>
+{
+    static std::string name() {
+        return std::string("void");
+    }
+    static std::string sized_name() {
+        return std::string("void");
+    }
+};
+
+template <>
+struct TypeName<bool>
+{
+    static std::string name() {
+        return std::string("bool");
+    }
+    static std::string sized_name() {
+        return std::string("bool8");
+    }
+};
+
+#define VIGRA_SIGNED_INT_NAME(type) \
+template <> \
+struct TypeName<type> \
+{ \
+    static std::string name() { \
+        return std::string(#type); \
+    } \
+    static std::string sized_name() { \
+        return std::string("int") + std::to_string(sizeof(type)*8); \
+    } \
+};
+
+VIGRA_SIGNED_INT_NAME(signed char)
+VIGRA_SIGNED_INT_NAME(short)
+VIGRA_SIGNED_INT_NAME(int)
+VIGRA_SIGNED_INT_NAME(long)
+VIGRA_SIGNED_INT_NAME(long long)
+
+#define VIGRA_UNSIGNED_INT_NAME(type) \
+template <> \
+struct TypeName<type> \
+{ \
+    static std::string name() { \
+        return std::string(#type); \
+    } \
+    static std::string sized_name() { \
+        return std::string("uint") + std::to_string(sizeof(type)*8); \
+    } \
+};
+
+VIGRA_UNSIGNED_INT_NAME(unsigned char)
+VIGRA_UNSIGNED_INT_NAME(unsigned short)
+VIGRA_UNSIGNED_INT_NAME(unsigned int)
+VIGRA_UNSIGNED_INT_NAME(unsigned long)
+VIGRA_UNSIGNED_INT_NAME(unsigned long long)
+
+#define VIGRA_FLOAT_NAME(type) \
+template <> \
+struct TypeName<type> \
+{ \
+    static std::string name() { \
+        return std::string(#type); \
+    } \
+    static std::string sized_name() { \
+        return std::string("float") + std::to_string(sizeof(type)*8); \
+    } \
+};
+
+VIGRA_FLOAT_NAME(float)
+VIGRA_FLOAT_NAME(double)
+VIGRA_FLOAT_NAME(long double)
+
+#undef VIGRA_SIGNED_INT_NAME
+#undef VIGRA_UNSIGNED_INT_NAME
+#undef VIGRA_FLOAT_NAME
+
+template <class T = void>
+struct ExportDoc
+{
+    static char const * exec(char const *) { return 0; }
+};
+
+template <>
+struct ExportDoc<void>
+{
+    static char const * exec(char const * h) { return h; }
+};
+
+} // namespace detail
 
 } // namespace vigra
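
The TypeName traits added here map C++ element types to numpy-style names for the
fallback error message assembled further below; sized_name() appends the bit width.
Expected behaviour, as a small assert-based sketch (illustration only):

    #include <cassert>
    #include <vigra/numpy_array_converters.hxx>

    void typeNameSketch()
    {
        using namespace vigra;
        assert(detail::TypeName<unsigned char>::sized_name() == "uint8");
        assert(detail::TypeName<float>::sized_name() == "float32");
        // Singleband/Multiband/TinyVector wrappers forward to their element type:
        assert(detail::TypeName<Singleband<int> >::sized_name()
               == detail::TypeName<int>::sized_name());
    }
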
 
 namespace boost { namespace python {
 
+// Note: Due to a bug in boost::python::docstring_options,
+//       the documentation must always be associated with the
+//       *last* overload, making the functors defined below a
+//       bit more complicated.
+
 #define VIGRA_PYTHON_MULTITYPE_FUNCTOR(functor_name, function) \
 template <class T> \
 struct functor_name##Impl \
 { \
-    typedef functor_name##Impl type; \
-     \
     static void def(const char * pythonName) \
     { \
+        boost::python::docstring_options doc(false); \
         boost::python::def(pythonName, vigra::registerConverters(&function<T>)); \
     } \
-     \
-    template <class A1> \
-    static void def(const char * pythonName, A1 const & a1) \
+    \
+    template <class Args> \
+    static void def(const char * pythonName, Args const & args) \
     { \
-        boost::python::def(pythonName, vigra::registerConverters(&function<T>), a1); \
+        boost::python::docstring_options doc(false); \
+        boost::python::def(pythonName, vigra::registerConverters(&function<T>), args); \
     } \
-     \
-    template <class A1, class A2> \
-    static void def(const char * pythonName, A1 const & a1, A2 const & a2) \
+    \
+    static void def(const char * pythonName, char const * help) \
     { \
-        boost::python::def(pythonName, vigra::registerConverters(&function<T>), a1, a2); \
+        if(help) \
+            boost::python::def(pythonName, \
+                         vigra::registerConverters(&function<T>), help); \
+        else \
+            def(pythonName); \
     } \
-     \
-    template <class A1, class A2, class A3> \
-    static void def(const char * pythonName, A1 const & a1, A2 const & a2, A3 const & a3) \
+    \
+    template <class Args> \
+    static void def(const char * pythonName, Args const & args, char const * help) \
     { \
-        boost::python::def(pythonName, vigra::registerConverters(&function<T>), a1, a2, a3); \
+        if(help) \
+            boost::python::def(pythonName, \
+                         vigra::registerConverters(&function<T>), args, help); \
+        else \
+            def(pythonName, args); \
     } \
 }; \
  \
 template <> \
 struct functor_name##Impl<void> \
 { \
-    typedef void type; \
+    static void def(const char *) {} \
+     \
+    template <class A1> \
+    static void def(const char *, A1 const &) {} \
+     \
+    template <class A1, class A2> \
+    static void def(const char *, A1 const &, A2 const &) {} \
 }; \
  \
 template <class T1, \
@@ -223,58 +354,217 @@ template <class T1, \
           class T11 = void, \
           class T12 = void> \
 struct functor_name \
-: public boost::python::TypeList<typename functor_name##Impl<T1>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T2>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T3>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T4>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T5>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T6>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T7>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T8>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T9>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T10>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T11>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T12>::type, \
-         boost::python::TypeList<void, void> > > > > > > > > > > > > \
-{};
+: public boost::python::PythonMultidefFunctor \
+{ \
+    bool install_fallback_, show_python_signature_; \
+    \
+    functor_name() \
+    : install_fallback_(false) \
+    , show_python_signature_(true) \
+    {} \
+    \
+    functor_name & installFallback() \
+    { \
+        install_fallback_ = true; \
+        return *this; \
+    } \
+    \
+    functor_name & noPythonSignature() \
+    { \
+        show_python_signature_ = false; \
+        return *this; \
+    } \
+    \
+    typedef boost::python::ArgumentMismatchMessage\
+        <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Message; \
+    typedef functor_name##Impl<T1 > F1; \
+    typedef functor_name##Impl<T2 > F2; \
+    typedef functor_name##Impl<T3 > F3; \
+    typedef functor_name##Impl<T4 > F4; \
+    typedef functor_name##Impl<T5 > F5; \
+    typedef functor_name##Impl<T6 > F6; \
+    typedef functor_name##Impl<T7 > F7; \
+    typedef functor_name##Impl<T8 > F8; \
+    typedef functor_name##Impl<T9 > F9; \
+    typedef functor_name##Impl<T10> F10; \
+    typedef functor_name##Impl<T11> F11; \
+    typedef functor_name##Impl<T12> F12; \
+    \
+    void def(const char * pythonName) const \
+    { \
+        boost::python::docstring_options doc(false, false, false); \
+        if(install_fallback_) \
+            Message::def(pythonName); \
+        F1 ::def(pythonName); \
+        F2 ::def(pythonName); \
+        F3 ::def(pythonName); \
+        F4 ::def(pythonName); \
+        F5 ::def(pythonName); \
+        F6 ::def(pythonName); \
+        F7 ::def(pythonName); \
+        F8 ::def(pythonName); \
+        F9 ::def(pythonName); \
+        F10::def(pythonName); \
+        F11::def(pythonName); \
+        F12::def(pythonName); \
+    } \
+    \
+    template <class Args> \
+    void def(const char * pythonName, Args const & args) const \
+    { \
+        boost::python::docstring_options doc(false, false, false); \
+        if(install_fallback_) \
+            Message::def(pythonName); \
+        F1 ::def(pythonName, args); \
+        F2 ::def(pythonName, args); \
+        F3 ::def(pythonName, args); \
+        F4 ::def(pythonName, args); \
+        F5 ::def(pythonName, args); \
+        F6 ::def(pythonName, args); \
+        F7 ::def(pythonName, args); \
+        F8 ::def(pythonName, args); \
+        F9 ::def(pythonName, args); \
+        F10::def(pythonName, args); \
+        F11::def(pythonName, args); \
+        F12::def(pythonName, args); \
+    } \
+    \
+    void def(const char * pythonName, const char * help) const \
+    { \
+        if(install_fallback_) \
+            Message::def(pythonName); \
+        boost::python::docstring_options doc(true, show_python_signature_, false); \
+        F1 ::def(pythonName, detail::ExportDoc<T2 >::exec(help)); \
+        F2 ::def(pythonName, detail::ExportDoc<T3 >::exec(help)); \
+        F3 ::def(pythonName, detail::ExportDoc<T4 >::exec(help)); \
+        F4 ::def(pythonName, detail::ExportDoc<T5 >::exec(help)); \
+        F5 ::def(pythonName, detail::ExportDoc<T6 >::exec(help)); \
+        F6 ::def(pythonName, detail::ExportDoc<T7 >::exec(help)); \
+        F7 ::def(pythonName, detail::ExportDoc<T8 >::exec(help)); \
+        F8 ::def(pythonName, detail::ExportDoc<T9 >::exec(help)); \
+        F9 ::def(pythonName, detail::ExportDoc<T10>::exec(help)); \
+        F10::def(pythonName, detail::ExportDoc<T11>::exec(help)); \
+        F11::def(pythonName, detail::ExportDoc<T12>::exec(help)); \
+        F12::def(pythonName, detail::ExportDoc<   >::exec(help)); \
+    } \
+    \
+    template <class Args> \
+    void def(const char * pythonName, Args const & args, char const * help) const \
+    { \
+        if(install_fallback_) \
+            Message::def(pythonName); \
+        boost::python::docstring_options doc(true, show_python_signature_, false); \
+        F1 ::def(pythonName, args, detail::ExportDoc<T2 >::exec(help)); \
+        F2 ::def(pythonName, args, detail::ExportDoc<T3 >::exec(help)); \
+        F3 ::def(pythonName, args, detail::ExportDoc<T4 >::exec(help)); \
+        F4 ::def(pythonName, args, detail::ExportDoc<T5 >::exec(help)); \
+        F5 ::def(pythonName, args, detail::ExportDoc<T6 >::exec(help)); \
+        F6 ::def(pythonName, args, detail::ExportDoc<T7 >::exec(help)); \
+        F7 ::def(pythonName, args, detail::ExportDoc<T8 >::exec(help)); \
+        F8 ::def(pythonName, args, detail::ExportDoc<T9 >::exec(help)); \
+        F9 ::def(pythonName, args, detail::ExportDoc<T10>::exec(help)); \
+        F10::def(pythonName, args, detail::ExportDoc<T11>::exec(help)); \
+        F11::def(pythonName, args, detail::ExportDoc<T12>::exec(help)); \
+        F12::def(pythonName, args, detail::ExportDoc<   >::exec(help)); \
+    } \
+};
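
A hedged sketch of how a functor generated by this macro is meant to be used when
registering a vigranumpy function; the wrapper, module, and argument names below are
placeholders, not taken from the patch:

    #include <vigra/numpy_array.hxx>
    #include <vigra/numpy_array_converters.hxx>
    #include <boost/python.hpp>

    // Hypothetical C++ wrapper to be exported for several pixel types.
    template <class PixelType>
    vigra::NumpyAnyArray
    pythonMyFilter(vigra::NumpyArray<2, vigra::Singleband<PixelType> > image, double sigma)
    {
        return image;   // placeholder body
    }

    VIGRA_PYTHON_MULTITYPE_FUNCTOR(pyMyFilter, pythonMyFilter)

    BOOST_PYTHON_MODULE(my_module)
    {
        using namespace boost::python;
        vigra::import_vigranumpy();

        pyMyFilter<npy_uint8, npy_float32>()
            .installFallback()       // also register the argument-mismatch fallback
            .noPythonSignature()     // keep the auto-generated signature out of the docstring
            .def("myFilter",
                 (arg("image"), arg("sigma") = 1.0),
                 "Docstring; attached to the last overload only (see the note above).");
    }
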
 
 #define VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(functor_name, function) \
-template <class T, int N> \
+template <class T, int FROM, int TO> \
 struct functor_name##Impl \
 { \
     typedef functor_name##Impl type; \
      \
     static void def(const char * pythonName) \
     { \
+        functor_name##Impl<T, FROM, FROM>::def(pythonName); \
+        functor_name##Impl<T, FROM+1, TO>::def(pythonName); \
+    } \
+    \
+    template <class Args> \
+    static void def(const char * pythonName, Args const & args) \
+    { \
+        functor_name##Impl<T, FROM, FROM>::def(pythonName, args); \
+        functor_name##Impl<T, FROM+1, TO>::def(pythonName, args); \
+    } \
+    \
+    static void def(const char * pythonName, char const * help) \
+    { \
+        functor_name##Impl<T, FROM, FROM>::def(pythonName); \
+        functor_name##Impl<T, FROM+1, TO>::def(pythonName, help); \
+    } \
+    \
+    template <class Args> \
+    static void def(const char * pythonName, Args const & args, char const * help) \
+    { \
+        functor_name##Impl<T, FROM, FROM>::def(pythonName, args); \
+        functor_name##Impl<T, FROM+1, TO>::def(pythonName, args, help); \
+    } \
+}; \
+\
+template <class T, int N> \
+struct functor_name##Impl<T, N, N> \
+{ \
+    typedef functor_name##Impl type; \
+    \
+    static void def(const char * pythonName) \
+    { \
+        boost::python::docstring_options doc(false); \
         boost::python::def(pythonName, vigra::registerConverters(&function<T, N>)); \
     } \
-     \
-    template <class A1> \
-    static void def(const char * pythonName, A1 const & a1) \
+    \
+    template <class Args> \
+    static void def(const char * pythonName, Args const & args) \
     { \
-        boost::python::def(pythonName, vigra::registerConverters(&function<T, N>), a1); \
+        boost::python::docstring_options doc(false); \
+        boost::python::def(pythonName, vigra::registerConverters(&function<T, N>), args); \
     } \
-     \
-    template <class A1, class A2> \
-    static void def(const char * pythonName, A1 const & a1, A2 const & a2) \
+    \
+    static void def(const char * pythonName, char const * help) \
     { \
-        boost::python::def(pythonName, vigra::registerConverters(&function<T, N>), a1, a2); \
+        if(help) \
+            boost::python::def(pythonName, \
+                         vigra::registerConverters(&function<T, N>), help); \
+        else \
+            def(pythonName); \
     } \
-     \
-    template <class A1, class A2, class A3> \
-    static void def(const char * pythonName, A1 const & a1, A2 const & a2, A3 const & a3) \
+    \
+    template <class Args> \
+    static void def(const char * pythonName, Args const & args, char const * help) \
     { \
-        boost::python::def(pythonName, vigra::registerConverters(&function<T, N>), a1, a2, a3); \
+        if(help) \
+            boost::python::def(pythonName, \
+                         vigra::registerConverters(&function<T, N>), args, help); \
+        else \
+            def(pythonName, args); \
     } \
 }; \
- \
+\
+template <int FROM, int TO> \
+struct functor_name##Impl<void, FROM, TO> \
+{ \
+    static void def(const char *) {} \
+    \
+    template <class A1> \
+    static void def(const char *, A1 const &) {} \
+    \
+    template <class A1, class A2> \
+    static void def(const char *, A1 const &, A2 const &) {} \
+}; \
+\
 template <int N> \
-struct functor_name##Impl<void, N> \
+struct functor_name##Impl<void, N, N> \
 { \
-    typedef void type; \
+    static void def(const char *) {} \
+    \
+    template <class A1> \
+    static void def(const char *, A1 const &) {} \
+    \
+    template <class A1, class A2> \
+    static void def(const char *, A1 const &, A2 const &) {} \
 }; \
- \
-template <int N, \
+\
+template <int FROM, int TO, \
           class T1, \
           class T2 = void, \
           class T3 = void, \
@@ -288,80 +578,267 @@ template <int N, \
           class T11 = void, \
           class T12 = void> \
 struct functor_name \
-: public boost::python::TypeList<typename functor_name##Impl<T1, N>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T2, N>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T3, N>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T4, N>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T5, N>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T6, N>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T7, N>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T8, N>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T9, N>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T10, N>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T11, N>::type, \
-         boost::python::TypeList<typename functor_name##Impl<T12, N>::type, \
-         boost::python::TypeList<void, void> > > > > > > > > > > > > \
-{};
+: public boost::python::PythonMultidefFunctor \
+{ \
+    bool install_fallback_, show_python_signature_; \
+    \
+    functor_name() \
+    : install_fallback_(false) \
+    , show_python_signature_(true) \
+    { \
+        static_assert(FROM <= TO, #functor_name ": dimension range empty (FROM > TO)"); \
+    } \
+    \
+    functor_name & installFallback() \
+    { \
+        install_fallback_ = true; \
+        return *this; \
+    } \
+    \
+    functor_name & noPythonSignature() \
+    { \
+        show_python_signature_ = false; \
+        return *this; \
+    } \
+    \
+    typedef boost::python::ArgumentMismatchMessage\
+        <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Message; \
+    typedef functor_name##Impl<T1 , FROM, TO> F1; \
+    typedef functor_name##Impl<T2 , FROM, TO> F2; \
+    typedef functor_name##Impl<T3 , FROM, TO> F3; \
+    typedef functor_name##Impl<T4 , FROM, TO> F4; \
+    typedef functor_name##Impl<T5 , FROM, TO> F5; \
+    typedef functor_name##Impl<T6 , FROM, TO> F6; \
+    typedef functor_name##Impl<T7 , FROM, TO> F7; \
+    typedef functor_name##Impl<T8 , FROM, TO> F8; \
+    typedef functor_name##Impl<T9 , FROM, TO> F9; \
+    typedef functor_name##Impl<T10, FROM, TO> F10; \
+    typedef functor_name##Impl<T11, FROM, TO> F11; \
+    typedef functor_name##Impl<T12, FROM, TO> F12; \
+    \
+    void def(const char * pythonName) const \
+    { \
+        boost::python::docstring_options doc(false, false, false); \
+        if(install_fallback_) \
+            Message::def(pythonName); \
+        F1 ::def(pythonName); \
+        F2 ::def(pythonName); \
+        F3 ::def(pythonName); \
+        F4 ::def(pythonName); \
+        F5 ::def(pythonName); \
+        F6 ::def(pythonName); \
+        F7 ::def(pythonName); \
+        F8 ::def(pythonName); \
+        F9 ::def(pythonName); \
+        F10::def(pythonName); \
+        F11::def(pythonName); \
+        F12::def(pythonName); \
+    } \
+    \
+    template <class Args> \
+    void def(const char * pythonName, Args const & args) const \
+    { \
+        boost::python::docstring_options doc(false, false, false); \
+        if(install_fallback_) \
+            Message::def(pythonName); \
+        F1 ::def(pythonName, args); \
+        F2 ::def(pythonName, args); \
+        F3 ::def(pythonName, args); \
+        F4 ::def(pythonName, args); \
+        F5 ::def(pythonName, args); \
+        F6 ::def(pythonName, args); \
+        F7 ::def(pythonName, args); \
+        F8 ::def(pythonName, args); \
+        F9 ::def(pythonName, args); \
+        F10::def(pythonName, args); \
+        F11::def(pythonName, args); \
+        F12::def(pythonName, args); \
+    } \
+    \
+    void def(const char * pythonName, const char * help) const \
+    { \
+        if(install_fallback_) \
+            Message::def(pythonName); \
+        boost::python::docstring_options doc(true, show_python_signature_, false); \
+        F1 ::def(pythonName, detail::ExportDoc<T2 >::exec(help)); \
+        F2 ::def(pythonName, detail::ExportDoc<T3 >::exec(help)); \
+        F3 ::def(pythonName, detail::ExportDoc<T4 >::exec(help)); \
+        F4 ::def(pythonName, detail::ExportDoc<T5 >::exec(help)); \
+        F5 ::def(pythonName, detail::ExportDoc<T6 >::exec(help)); \
+        F6 ::def(pythonName, detail::ExportDoc<T7 >::exec(help)); \
+        F7 ::def(pythonName, detail::ExportDoc<T8 >::exec(help)); \
+        F8 ::def(pythonName, detail::ExportDoc<T9 >::exec(help)); \
+        F9 ::def(pythonName, detail::ExportDoc<T10>::exec(help)); \
+        F10::def(pythonName, detail::ExportDoc<T11>::exec(help)); \
+        F11::def(pythonName, detail::ExportDoc<T12>::exec(help)); \
+        F12::def(pythonName, detail::ExportDoc<   >::exec(help)); \
+    } \
+    \
+    template <class Args> \
+    void def(const char * pythonName, Args const & args, char const * help) const \
+    { \
+        if(install_fallback_) \
+            Message::def(pythonName); \
+        boost::python::docstring_options doc(true, show_python_signature_, false); \
+        F1 ::def(pythonName, args, detail::ExportDoc<T2 >::exec(help)); \
+        F2 ::def(pythonName, args, detail::ExportDoc<T3 >::exec(help)); \
+        F3 ::def(pythonName, args, detail::ExportDoc<T4 >::exec(help)); \
+        F4 ::def(pythonName, args, detail::ExportDoc<T5 >::exec(help)); \
+        F5 ::def(pythonName, args, detail::ExportDoc<T6 >::exec(help)); \
+        F6 ::def(pythonName, args, detail::ExportDoc<T7 >::exec(help)); \
+        F7 ::def(pythonName, args, detail::ExportDoc<T8 >::exec(help)); \
+        F8 ::def(pythonName, args, detail::ExportDoc<T9 >::exec(help)); \
+        F9 ::def(pythonName, args, detail::ExportDoc<T10>::exec(help)); \
+        F10::def(pythonName, args, detail::ExportDoc<T11>::exec(help)); \
+        F11::def(pythonName, args, detail::ExportDoc<T12>::exec(help)); \
+        F12::def(pythonName, args, detail::ExportDoc<   >::exec(help)); \
+    } \
+};
 
-template <class Head, class Tail>
-struct TypeList
+struct PythonMultidefFunctor {};
+
+template <class T1,
+          class T2 = void,
+          class T3 = void,
+          class T4 = void,
+          class T5 = void,
+          class T6 = void,
+          class T7 = void,
+          class T8 = void,
+          class T9 = void,
+          class T10 = void,
+          class T11 = void,
+          class T12 = void>
+struct ArgumentMismatchMessage
 {
-    typedef Head head;
-    typedef Tail tail;
+    static std::string message()
+    {
+        std::string res(
+            "No C++ overload matches the arguments. This can have three reasons:\n\n"
+            " * The array arguments may have an unsupported element type. You may need\n"
+            "   to convert your array(s) to another element type using 'array.astype(...)'.\n"
+            "   The function currently supports the following types:\n\n     ");
+        res += vigra::detail::TypeName<T1>::sized_name();
+
+        if(vigra::detail::TypeName<T2>::sized_name() != "void")
+            res += ", " + vigra::detail::TypeName<T2>::sized_name();
+        if(vigra::detail::TypeName<T3>::sized_name() != "void")
+            res += ", " + vigra::detail::TypeName<T3>::sized_name();
+        if(vigra::detail::TypeName<T4>::sized_name() != "void")
+            res += ", " + vigra::detail::TypeName<T4>::sized_name();
+        if(vigra::detail::TypeName<T5>::sized_name() != "void")
+            res += ", " + vigra::detail::TypeName<T5>::sized_name();
+        if(vigra::detail::TypeName<T6>::sized_name() != "void")
+            res += ", " + vigra::detail::TypeName<T6>::sized_name();
+        if(vigra::detail::TypeName<T7>::sized_name() != "void")
+            res += ", " + vigra::detail::TypeName<T7>::sized_name();
+        if(vigra::detail::TypeName<T8>::sized_name() != "void")
+            res += ", " + vigra::detail::TypeName<T8>::sized_name();
+        if(vigra::detail::TypeName<T9>::sized_name() != "void")
+            res += ", " + vigra::detail::TypeName<T9>::sized_name();
+        if(vigra::detail::TypeName<T10>::sized_name() != "void")
+            res += ", " + vigra::detail::TypeName<T10>::sized_name();
+        if(vigra::detail::TypeName<T11>::sized_name() != "void")
+            res += ", " + vigra::detail::TypeName<T11>::sized_name();
+        if(vigra::detail::TypeName<T12>::sized_name() != "void")
+            res += ", " + vigra::detail::TypeName<T12>::sized_name();
+
+        res +=
+            "\n\n"
+            " * The dimension of your array(s) is currently unsupported (consult the\n"
+            "   function's documentation for information about supported dimensions).\n\n"
+            " * You provided an unrecognized argument, or an argument with incorrect type\n"
+            "   (consult the documentation for valid function signatures).\n\n"
+            "Additional overloads can easily be added in the vigranumpy C++ sources.\n"
+            "Please submit an issue at http://github.com/ukoethe/vigra/ to let us know\n"
+            "what you need (or a pull request if you solved it on your own :-).\n\n";
+
+        return res;
+    }
+
+    static void def(const char * pythonName)
+    {
+        docstring_options doc(false, false, false);
+        std::string msg    = message(),
+                    module = extract<std::string>(scope().attr("__name__"))() + ".";
+        msg += "Type 'help(" + module + pythonName + ")' to get full documentation.\n";
+        boost::python::def(pythonName,
+            raw_function([msg](tuple, dict) -> object {
+                throw std::invalid_argument(msg);
+                return object();
+            }, 0));
+    }
 };
 
 // in the sequel, the doc string is only registered with the last
 // overload, so that it shows up only once
-template <class Head, class Tail>
-inline void multidef(char const* functor_name, TypeList<Head, Tail>)
+template <class Functor>
+inline typename std::enable_if<std::is_base_of<PythonMultidefFunctor, Functor>::value,
+                               void>::type
+multidef(char const* python_name, Functor const & f)
 {
-    Head::def(functor_name);
-    multidef(functor_name, Tail());
+    f.def(python_name);
 }
 
-template <class Head, class Tail>
-inline void multidef(char const* functor_name, TypeList<Head, Tail>, const char * help)
+template <class Functor, class Args>
+inline typename std::enable_if<std::is_base_of<PythonMultidefFunctor, Functor>::value,
+                               void>::type
+multidef(char const* python_name, Functor const & f, Args const& args)
 {
-    Head::def(functor_name);
-    multidef(functor_name, Tail(), help);
+    f.def(python_name, args);
 }
 
-template <class Head, class Tail, class Args>
-inline void multidef(char const* functor_name, TypeList<Head, Tail>, Args const& args)
+template <class Functor>
+inline typename std::enable_if<std::is_base_of<PythonMultidefFunctor, Functor>::value,
+                               void>::type
+multidef(char const* python_name, Functor const & f, const char * help)
 {
-    Head::def(functor_name, args);
-    multidef(functor_name, Tail(), args);
+    f.def(python_name, help);
 }
 
-template <class Head, class Tail, class Args>
-inline void multidef(char const* functor_name, TypeList<Head, Tail>, Args const& args, char const * help)
+template <class Functor, class Args>
+inline typename std::enable_if<std::is_base_of<PythonMultidefFunctor, Functor>::value,
+                               void>::type
+multidef(char const* python_name, Functor const & f, Args const& args, const char * help)
 {
-    Head::def(functor_name, args);
-    multidef(functor_name, Tail(), args, help);
+    f.def(python_name, args, help);
 }
 
-template <class Head, class Tail>
-inline void multidef(char const* functor_name, TypeList<Head, TypeList<void, Tail> >)
+// overload def() such that it advises to use multidef() instead
+template <class Functor>
+inline typename std::enable_if<std::is_base_of<PythonMultidefFunctor, Functor>::value,
+                               void>::type
+def(char const* python_name, Functor const & f)
 {
-    Head::def(functor_name);
+    static_assert(!std::is_base_of<PythonMultidefFunctor, Functor>::value, 
+                  "def(): use multidef() to export multiple overloads.");
 }
 
-template <class Head, class Tail, class Args>
-inline void multidef(char const* functor_name, TypeList<Head, TypeList<void, Tail> >, Args const& args)
+template <class Functor, class Args>
+inline typename std::enable_if<std::is_base_of<PythonMultidefFunctor, Functor>::value,
+                               void>::type
+def(char const* python_name, Functor const & f, Args const& args)
 {
-    Head::def(functor_name, args);
+    static_assert(!std::is_base_of<PythonMultidefFunctor, Functor>::value,
+                  "def(): use multidef() to export multiple overloads.");
 }
 
-template <class Head, class Tail>
-inline void multidef(char const* functor_name, TypeList<Head, TypeList<void, Tail> >, const char * help)
+template <class Functor>
+inline typename std::enable_if<std::is_base_of<PythonMultidefFunctor, Functor>::value,
+                               void>::type
+def(char const* python_name, Functor const & f, const char * help)
 {
-    Head::def(functor_name, help);
+    static_assert(!std::is_base_of<PythonMultidefFunctor, Functor>::value,
+                  "def(): use multidef() to export multiple overloads.");
 }
 
-template <class Head, class Tail, class Args>
-inline void multidef(char const* functor_name, TypeList<Head, TypeList<void, Tail> >, Args const& args, const char * help)
+template <class Functor, class Args>
+inline typename std::enable_if<std::is_base_of<PythonMultidefFunctor, Functor>::value,
+                               void>::type
+def(char const* python_name, Functor const & f, Args const& args, const char * help)
 {
-    Head::def(functor_name, args, help);
+    static_assert(!std::is_base_of<PythonMultidefFunctor, Functor>::value,
+                  "def(): use multidef() to export multiple overloads.");
 }
 
 }} // namespace boost::python
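
For orientation, the contract behind the new registration scheme is small: multidef()
accepts any functor that derives from boost::python::PythonMultidefFunctor and provides
matching def() overloads; the macro above generates such functors for up to twelve
element types and can optionally install a catch-all overload that raises the explanatory
ArgumentMismatchMessage error. A minimal hand-written sketch of that contract
(exportSquare and squareImpl are invented names for illustration only, and the
definitions above are assumed to be in scope):

    #include <boost/python.hpp>

    namespace bp = boost::python;

    template <class T>
    T squareImpl(T v) { return v * v; }                // two C++ overloads to export

    struct exportSquare : public bp::PythonMultidefFunctor
    {
        void def(const char * pythonName) const
        {
            bp::def(pythonName, &squareImpl<int>);     // first overload
            bp::def(pythonName, &squareImpl<double>);  // second overload
        }
    };

    BOOST_PYTHON_MODULE(example)
    {
        bp::multidef("square", exportSquare());        // forwards to exportSquare::def("square")
    }
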
diff --git a/include/vigra/numpy_array_taggedshape.hxx b/include/vigra/numpy_array_taggedshape.hxx
index 44a9ae0..e3dfcce 100644
--- a/include/vigra/numpy_array_taggedshape.hxx
+++ b/include/vigra/numpy_array_taggedshape.hxx
@@ -38,7 +38,7 @@
 
 #ifndef NPY_NO_DEPRECATED_API
 # define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
-#endif 
+#endif
 
 #include <string>
 #include "array_vector.hxx"
@@ -59,23 +59,23 @@ python_ptr getArrayTypeObject()
     return pythonGetAttr(vigra, "standardArrayType", arraytype);
 }
 
-inline 
+inline
 std::string defaultOrder(std::string defaultValue = "C")
 {
     python_ptr arraytype = getArrayTypeObject();
     return pythonGetAttr(arraytype, "defaultOrder", defaultValue);
 }
 
-inline 
+inline
 python_ptr defaultAxistags(int ndim, std::string order = "")
 {
     if(order == "")
         order = defaultOrder();
     python_ptr arraytype = getArrayTypeObject();
-    python_ptr func(PyString_FromString("defaultAxistags"), python_ptr::keep_count);
-    python_ptr d(PyInt_FromLong(ndim), python_ptr::keep_count);
-    python_ptr o(PyString_FromString(order.c_str()), python_ptr::keep_count);
-    python_ptr axistags(PyObject_CallMethodObjArgs(arraytype, func.get(), d.get(), o.get(), NULL),
+    python_ptr func(pythonFromData("defaultAxistags"));
+    python_ptr d(pythonFromData(ndim));
+    python_ptr o(pythonFromData(order));
+    python_ptr axistags(PyObject_CallMethodObjArgs(arraytype, func.get(), d.get(), o.get(), NULL),
                         python_ptr::keep_count);
     if(axistags)
         return axistags;
@@ -83,13 +83,13 @@ python_ptr defaultAxistags(int ndim, std::string order = "")
     return python_ptr();
 }
 
-inline 
+inline
 python_ptr emptyAxistags(int ndim)
 {
     python_ptr arraytype = getArrayTypeObject();
-    python_ptr func(PyString_FromString("_empty_axistags"), python_ptr::keep_count);
-    python_ptr d(PyInt_FromLong(ndim), python_ptr::keep_count);
-    python_ptr axistags(PyObject_CallMethodObjArgs(arraytype, func.get(), d.get(), NULL),
+    python_ptr func(pythonFromData("_empty_axistags"));
+    python_ptr d(pythonFromData(ndim));
+    python_ptr axistags(PyObject_CallMethodObjArgs(arraytype, func.get(), d.get(), NULL),
                         python_ptr::keep_count);
     if(axistags)
         return axistags;
@@ -97,15 +97,15 @@ python_ptr emptyAxistags(int ndim)
     return python_ptr();
 }
 
-inline 
+inline
 void
 getAxisPermutationImpl(ArrayVector<npy_intp> & permute,
-                       python_ptr object, const char * name, 
+                       python_ptr object, const char * name,
                        AxisInfo::AxisType type, bool ignoreErrors)
 {
-    python_ptr func(PyString_FromString(name), python_ptr::keep_count);
-    python_ptr t(PyInt_FromLong((long)type), python_ptr::keep_count);
-    python_ptr permutation(PyObject_CallMethodObjArgs(object, func.get(), t.get(), NULL), 
+    python_ptr func(pythonFromData(name));
+    python_ptr t(pythonFromData((long)type));
+    python_ptr permutation(PyObject_CallMethodObjArgs(object, func.get(), t.get(), NULL),
                            python_ptr::keep_count);
     if(!permutation && ignoreErrors)
     {
@@ -113,7 +113,7 @@ getAxisPermutationImpl(ArrayVector<npy_intp> & permute,
         return;
     }
     pythonToCppException(permutation);
-    
+
     if(!PySequence_Check(permutation))
     {
         if(ignoreErrors)
@@ -122,25 +122,33 @@ getAxisPermutationImpl(ArrayVector<npy_intp> & permute,
         PyErr_SetString(PyExc_ValueError, message.c_str());
         pythonToCppException(false);
     }
-        
+
     ArrayVector<npy_intp> res(PySequence_Length(permutation));
     for(int k=0; k<(int)res.size(); ++k)
     {
         python_ptr i(PySequence_GetItem(permutation, k), python_ptr::keep_count);
-        if(!PyInt_Check(i))
-        {
+#if PY_MAJOR_VERSION < 3
+        if(!PyInt_Check(i))
+#else
+        if(!PyLong_Check(i))
+#endif
+        {
             if(ignoreErrors)
                 return;
             std::string message = std::string(name) + "() did not return a sequence of int.";
             PyErr_SetString(PyExc_ValueError, message.c_str());
             pythonToCppException(false);
         }
-        res[k] = PyInt_AsLong(i);
-    }
+#if PY_MAJOR_VERSION < 3
+        res[k] = PyInt_AsLong(i);
+#else
+        res[k] = PyLong_AsLong(i);
+#endif
+    }
     res.swap(permute);
 }
 
-inline 
+inline
 void
 getAxisPermutationImpl(ArrayVector<npy_intp> & permute,
                        python_ptr object, const char * name, bool ignoreErrors)
@@ -157,7 +165,7 @@ getAxisPermutationImpl(ArrayVector<npy_intp> & permute,
 /********************************************************/
 
 // FIXME: right now, we implement this class using the standard
-//        Python C-API only. It would be easier and more efficient 
+//        Python C-API only. It would be easier and more efficient
 //        to use boost::python here, but it would cause NumpyArray
 //        to depend on boost, making it more difficult to use
 //        NumpyArray in connection with other glue code generators.
@@ -165,9 +173,9 @@ class PyAxisTags
 {
   public:
     typedef PyObject * pointer;
-    
+
     python_ptr axistags;
-    
+
     PyAxisTags(python_ptr tags = python_ptr(), bool createCopy = false)
     {
         if(!tags)
@@ -175,7 +183,7 @@ class PyAxisTags
         // FIXME: do a more elaborate type check here?
         if(!PySequence_Check(tags))
         {
-            PyErr_SetString(PyExc_TypeError, 
+            PyErr_SetString(PyExc_TypeError,
                            "PyAxisTags(tags): tags argument must have type 'AxisTags'.");
             pythonToCppException(false);
         }
@@ -183,11 +191,11 @@ class PyAxisTags
         {
             return;
         }
-        
+
         if(createCopy)
         {
-            python_ptr func(PyString_FromString("__copy__"), python_ptr::keep_count);
-            axistags = python_ptr(PyObject_CallMethodObjArgs(tags, func.get(), NULL), 
+            python_ptr func(pythonFromData("__copy__"));
+            axistags = python_ptr(PyObject_CallMethodObjArgs(tags, func.get(), NULL),
                                   python_ptr::keep_count);
         }
         else
@@ -195,15 +203,15 @@ class PyAxisTags
             axistags = tags;
         }
     }
-    
+
     PyAxisTags(PyAxisTags const & other, bool createCopy = false)
     {
         if(!other.axistags)
             return;
         if(createCopy)
         {
-            python_ptr func(PyString_FromString("__copy__"), python_ptr::keep_count);
-            axistags = python_ptr(PyObject_CallMethodObjArgs(other.axistags, func.get(), NULL), 
+            python_ptr func(pythonFromData("__copy__"));
+            axistags = python_ptr(PyObject_CallMethodObjArgs(other.axistags, func.get(), NULL),
                                   python_ptr::keep_count);
         }
         else
@@ -211,7 +219,7 @@ class PyAxisTags
             axistags = other.axistags;
         }
     }
-    
+
     PyAxisTags(int ndim, std::string const & order = "")
     {
         if(order != "")
@@ -219,14 +227,14 @@ class PyAxisTags
         else
             axistags = detail::emptyAxistags(ndim);
     }
-    
+
     long size() const
     {
         return axistags
                    ? PySequence_Length(axistags)
                    : 0;
     }
-    
+
     long channelIndex(long defaultVal) const
     {
         return pythonGetAttr(axistags, "channelIndex", defaultVal);
@@ -241,7 +249,7 @@ class PyAxisTags
     {
         return channelIndex() != size();
     }
-    
+
     long innerNonchannelIndex(long defaultVal) const
     {
         return pythonGetAttr(axistags, "innerNonchannelIndex", defaultVal);
@@ -256,9 +264,9 @@ class PyAxisTags
     {
         if(!axistags)
             return;
-        python_ptr d(PyString_FromString(description.c_str()), python_ptr::keep_count);
-        python_ptr func(PyString_FromString("setChannelDescription"), python_ptr::keep_count);
-        python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), d.get(), NULL), 
+        python_ptr d(pythonFromData(description));
+        python_ptr func(pythonFromData("setChannelDescription"));
+        python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), d.get(), NULL),
                        python_ptr::keep_count);
         pythonToCppException(res);
     }
@@ -267,9 +275,9 @@ class PyAxisTags
     {
         if(!axistags)
             return 0.0;
-        python_ptr func(PyString_FromString("resolution"), python_ptr::keep_count);
-        python_ptr i(PyInt_FromLong(index), python_ptr::keep_count);
-        python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), i.get(), NULL), 
+        python_ptr func(pythonFromData("resolution"));
+        python_ptr i(pythonFromData(index));
+        python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), i.get(), NULL),
                        python_ptr::keep_count);
         pythonToCppException(res);
         if(!PyFloat_Check(res))
@@ -279,52 +287,51 @@ class PyAxisTags
         }
         return PyFloat_AsDouble(res);
     }
- 
+
     void setResolution(long index, double resolution)
     {
         if(!axistags)
             return;
-        python_ptr func(PyString_FromString("setResolution"), python_ptr::keep_count);
-        python_ptr i(PyInt_FromLong(index), python_ptr::keep_count);
-        python_ptr r(PyFloat_FromDouble(resolution), python_ptr::keep_count);
-        python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), i.get(), r.get(), NULL), 
+        python_ptr func(pythonFromData("setResolution"));
+        python_ptr i(pythonFromData(index));
+        python_ptr r(PyFloat_FromDouble(resolution), python_ptr::keep_count);
+        python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), i.get(), r.get(), NULL),
                        python_ptr::keep_count);
         pythonToCppException(res);
     }
- 
+
     void scaleResolution(long index, double factor)
     {
         if(!axistags)
             return;
-        python_ptr func(PyString_FromString("scaleResolution"), python_ptr::keep_count);
-        python_ptr i(PyInt_FromLong(index), python_ptr::keep_count);
-        python_ptr f(PyFloat_FromDouble(factor), python_ptr::keep_count);
-        python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), i.get(), f.get(), NULL), 
+        python_ptr func(pythonFromData("scaleResolution"));
+        python_ptr i(pythonFromData(index));
+        python_ptr f(PyFloat_FromDouble(factor), python_ptr::keep_count);
+        python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), i.get(), f.get(), NULL),
                        python_ptr::keep_count);
         pythonToCppException(res);
     }
- 
+
     void toFrequencyDomain(long index, int size, int sign = 1)
     {
         if(!axistags)
             return;
         python_ptr func(sign == 1
-                           ? PyString_FromString("toFrequencyDomain")
-                           : PyString_FromString("fromFrequencyDomain"), 
-                        python_ptr::keep_count);
-        python_ptr i(PyInt_FromLong(index), python_ptr::keep_count);
-        python_ptr s(PyInt_FromLong(size), python_ptr::keep_count);
-        python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), i.get(), s.get(), NULL), 
+                           ? pythonFromData("toFrequencyDomain")
+                           : pythonFromData("fromFrequencyDomain"));
+        python_ptr i(pythonFromData(index));
+        python_ptr s(pythonFromData(size));
+        python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), i.get(), s.get(), NULL),
                        python_ptr::keep_count);
         pythonToCppException(res);
     }
- 
+
     void fromFrequencyDomain(long index, int size)
     {
         toFrequencyDomain(index, size, -1);
     }
- 
-    ArrayVector<npy_intp> 
+
+    ArrayVector<npy_intp>
     permutationToNormalOrder(bool ignoreErrors = false) const
     {
         ArrayVector<npy_intp> permute;
@@ -332,55 +339,53 @@ class PyAxisTags
         return permute;
     }
 
-    ArrayVector<npy_intp> 
+    ArrayVector<npy_intp>
     permutationToNormalOrder(AxisInfo::AxisType types, bool ignoreErrors = false) const
     {
         ArrayVector<npy_intp> permute;
-        detail::getAxisPermutationImpl(permute, axistags, 
+        detail::getAxisPermutationImpl(permute, axistags,
                                             "permutationToNormalOrder", types, ignoreErrors);
         return permute;
     }
 
-    ArrayVector<npy_intp> 
+    ArrayVector<npy_intp>
     permutationFromNormalOrder(bool ignoreErrors = false) const
     {
         ArrayVector<npy_intp> permute;
-        detail::getAxisPermutationImpl(permute, axistags, 
+        detail::getAxisPermutationImpl(permute, axistags,
                                        "permutationFromNormalOrder", ignoreErrors);
         return permute;
     }
-    
-    ArrayVector<npy_intp> 
+
+    ArrayVector<npy_intp>
     permutationFromNormalOrder(AxisInfo::AxisType types, bool ignoreErrors = false) const
     {
         ArrayVector<npy_intp> permute;
-        detail::getAxisPermutationImpl(permute, axistags, 
+        detail::getAxisPermutationImpl(permute, axistags,
                                        "permutationFromNormalOrder", types, ignoreErrors);
         return permute;
     }
-    
+
     void dropChannelAxis()
     {
         if(!axistags)
             return;
-        python_ptr func(PyString_FromString("dropChannelAxis"), 
-                               python_ptr::keep_count);
-        python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), NULL), 
-                       python_ptr::keep_count);
+        python_ptr func(pythonFromData("dropChannelAxis"));
+        python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), NULL),
+                       python_ptr::keep_count);
         pythonToCppException(res);
     }
-    
+
     void insertChannelAxis()
     {
         if(!axistags)
             return;
-        python_ptr func(PyString_FromString("insertChannelAxis"), 
-                               python_ptr::keep_count);
-        python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), NULL), 
+        python_ptr func(pythonFromData("insertChannelAxis"));
+        python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), NULL),
                        python_ptr::keep_count);
         pythonToCppException(res);
     }
-    
+
     operator pointer()
     {
         return axistags.get();
@@ -402,18 +407,18 @@ class TaggedShape
 {
   public:
     enum ChannelAxis { first, last, none };
-    
+
     ArrayVector<npy_intp> shape, original_shape;
     PyAxisTags axistags;
     ChannelAxis channelAxis;
     std::string channelDescription;
-    
+
     explicit TaggedShape(MultiArrayIndex size)
     : shape(size),
       axistags(size),
       channelAxis(none)
     {}
-    
+
     template <class U, int N>
     TaggedShape(TinyVector<U, N> const & sh, PyAxisTags tags)
     : shape(sh.begin(), sh.end()),
@@ -421,7 +426,7 @@ class TaggedShape
       axistags(tags),
       channelAxis(none)
     {}
-    
+
     template <class T>
     TaggedShape(ArrayVector<T> const & sh, PyAxisTags tags)
     : shape(sh.begin(), sh.end()),
@@ -429,132 +434,132 @@ class TaggedShape
       axistags(tags),
       channelAxis(none)
     {}
-    
+
     template <class U, int N>
     explicit TaggedShape(TinyVector<U, N> const & sh)
     : shape(sh.begin(), sh.end()),
       original_shape(sh.begin(), sh.end()),
       channelAxis(none)
     {}
-    
+
     template <class T>
     explicit TaggedShape(ArrayVector<T> const & sh)
     : shape(sh.begin(), sh.end()),
       original_shape(sh.begin(), sh.end()),
       channelAxis(none)
     {}
-    
+
     template <class U, int N>
     TaggedShape & resize(TinyVector<U, N> const & sh)
     {
         int start = channelAxis == first
                         ? 1
-                        : 0, 
+                        : 0,
             stop = channelAxis == last
                         ? (int)size()-1
                         : (int)size();
-                        
+
         vigra_precondition(N == stop - start || size() == 0,
              "TaggedShape.resize(): size mismatch.");
-             
+
         if(size() == 0)
             shape.resize(N);
-        
+
         for(int k=0; k<N; ++k)
             shape[k+start] = sh[k];
-            
+
         return *this;
     }
-    
+
     TaggedShape & resize(MultiArrayIndex v1)
     {
         return resize(TinyVector<MultiArrayIndex, 1>(v1));
     }
-    
+
     TaggedShape & resize(MultiArrayIndex v1, MultiArrayIndex v2)
     {
         return resize(TinyVector<MultiArrayIndex, 2>(v1, v2));
     }
-    
+
     TaggedShape & resize(MultiArrayIndex v1, MultiArrayIndex v2, MultiArrayIndex v3)
     {
         return resize(TinyVector<MultiArrayIndex, 3>(v1, v2, v3));
     }
-    
-    TaggedShape & resize(MultiArrayIndex v1, MultiArrayIndex v2, 
+
+    TaggedShape & resize(MultiArrayIndex v1, MultiArrayIndex v2,
                          MultiArrayIndex v3, MultiArrayIndex v4)
     {
         return resize(TinyVector<MultiArrayIndex, 4>(v1, v2, v3, v4));
     }
-    
+
     npy_intp & operator[](int i)
     {
         return shape[i];
     }
-    
+
     npy_intp operator[](int i) const
     {
         return shape[i];
     }
-    
+
     unsigned int size() const
     {
         return shape.size();
     }
-    
+
     TaggedShape & operator+=(int v)
     {
         int start = channelAxis == first
                         ? 1
-                        : 0, 
+                        : 0,
             stop = channelAxis == last
                         ? (int)size()-1
                         : (int)size();
         for(int k=start; k<stop; ++k)
             shape[k] += v;
-            
+
         return *this;
     }
-    
+
     TaggedShape & operator-=(int v)
     {
         return operator+=(-v);
     }
-    
+
     TaggedShape & operator*=(int factor)
     {
         int start = channelAxis == first
                         ? 1
-                        : 0, 
+                        : 0,
             stop = channelAxis == last
                         ? (int)size()-1
                         : (int)size();
         for(int k=start; k<stop; ++k)
             shape[k] *= factor;
-            
+
         return *this;
     }
-    
+
     void rotateToNormalOrder()
     {
         if(axistags && channelAxis == last)
         {
             int ndim = (int)size();
-            
-            npy_intp channelCount = shape[ndim-1];            
+
+            npy_intp channelCount = shape[ndim-1];
             for(int k=ndim-1; k>0; --k)
                 shape[k] = shape[k-1];
             shape[0] = channelCount;
-            
-            channelCount = original_shape[ndim-1];            
+
+            channelCount = original_shape[ndim-1];
             for(int k=ndim-1; k>0; --k)
                 original_shape[k] = original_shape[k-1];
             original_shape[0] = channelCount;
-            
+
             channelAxis = first;
         }
     }
-    
+
     TaggedShape & setChannelDescription(std::string const & description)
     {
         // we only remember the description here, and will actually set
@@ -562,14 +567,14 @@ class TaggedShape
         channelDescription = description;
         return *this;
     }
-    
+
     TaggedShape & setChannelIndexLast()
     {
         // FIXME: add some checks?
         channelAxis = last;
         return *this;
     }
-    
+
     // transposeShape() means: only shape and resolution are transposed, not the axis keys
     template <class U, int N>
     TaggedShape & transposeShape(TinyVector<U, N> const & p)
@@ -578,7 +583,7 @@ class TaggedShape
         {
             int ntags = axistags.size();
             ArrayVector<npy_intp> permute = axistags.permutationToNormalOrder();
-            
+
             int tstart = (axistags.channelIndex(ntags) < ntags)
                             ? 1
                             : 0;
@@ -589,7 +594,7 @@ class TaggedShape
 
             vigra_precondition(N == ndim,
                  "TaggedShape.transposeShape(): size mismatch.");
-                 
+
             PyAxisTags newAxistags(axistags.axistags); // force copy
             for(int k=0; k<ndim; ++k)
             {
@@ -606,7 +611,7 @@ class TaggedShape
             }
         }
         shape = original_shape;
-        
+
         return *this;
     }
 
@@ -615,9 +620,9 @@ class TaggedShape
         if(axistags)
         {
             int ntags = axistags.size();
-            
+
             ArrayVector<npy_intp> permute = axistags.permutationToNormalOrder();
-            
+
             int tstart = (axistags.channelIndex(ntags) < ntags)
                             ? 1
                             : 0;
@@ -628,7 +633,7 @@ class TaggedShape
                             ? (int)size()-1
                             : (int)size();
             int size = send - sstart;
-            
+
             for(int k=0; k<size; ++k)
             {
                 axistags.toFrequencyDomain(permute[k+tstart], shape[k+sstart], sign);
@@ -646,35 +651,35 @@ class TaggedShape
     {
         return toFrequencyDomain(-1);
     }
-    
+
     bool compatible(TaggedShape const & other) const
     {
         if(channelCount() != other.channelCount())
             return false;
-            
+
         int start = channelAxis == first
                         ? 1
-                        : 0, 
+                        : 0,
             stop = channelAxis == last
                         ? (int)size()-1
                         : (int)size();
         int ostart = other.channelAxis == first
                         ? 1
-                        : 0, 
+                        : 0,
             ostop = other.channelAxis == last
                         ? (int)other.size()-1
                         : (int)other.size();
-                        
+
         int len = stop - start;
         if(len != ostop - ostart)
             return false;
-        
+
         for(int k=0; k<len; ++k)
             if(shape[k+start] != other.shape[k+ostart])
                 return false;
         return true;
     }
-    
+
     TaggedShape & setChannelCount(int count)
     {
         switch(channelAxis)
@@ -714,7 +719,7 @@ class TaggedShape
         }
         return *this;
     }
-    
+
     int channelCount() const
     {
         switch(channelAxis)
@@ -729,16 +734,16 @@ class TaggedShape
     }
 };
 
-inline 
+inline
 void scaleAxisResolution(TaggedShape & tagged_shape)
 {
     if(tagged_shape.size() != tagged_shape.original_shape.size())
         return;
-    
+
     int ntags = tagged_shape.axistags.size();
-    
+
     ArrayVector<npy_intp> permute = tagged_shape.axistags.permutationToNormalOrder();
-    
+
     int tstart = (tagged_shape.axistags.channelIndex(ntags) < ntags)
                     ? 1
                     : 0;
@@ -746,7 +751,7 @@ void scaleAxisResolution(TaggedShape & tagged_shape)
                     ? 1
                     : 0;
     int size = (int)tagged_shape.size() - sstart;
-    
+
     for(int k=0; k<size; ++k)
     {
         int sk = k + sstart;
@@ -757,7 +762,7 @@ void scaleAxisResolution(TaggedShape & tagged_shape)
     }
 }
 
-inline 
+inline
 void unifyTaggedShapeSize(TaggedShape & tagged_shape)
 {
     PyAxisTags axistags = tagged_shape.axistags;
@@ -765,7 +770,7 @@ void unifyTaggedShapeSize(TaggedShape & tagged_shape)
 
     int ndim = (int)shape.size();
     int ntags = axistags.size();
-    
+
     long channelIndex = axistags.channelIndex();
 
     if(tagged_shape.channelAxis == TaggedShape::none)
@@ -804,7 +809,7 @@ void unifyTaggedShapeSize(TaggedShape & tagged_shape)
             // axistags have no channel axis => should be one element shorter
             vigra_precondition(ndim == ntags+1,
                  "constructArray(): size mismatch between shape and axistags.");
-                 
+
             if(shape[0] == 1)
             {
                 // std::cerr << "   drop channel axis\n";
@@ -835,15 +840,15 @@ ArrayVector<npy_intp> finalizeTaggedShape(TaggedShape & tagged_shape)
     if(tagged_shape.axistags)
     {
         tagged_shape.rotateToNormalOrder();
-    
+
         // we assume here that the axistag object belongs to the array to be created
         // so that we can freely edit it
         scaleAxisResolution(tagged_shape);
-            
-        // this must be after scaleAxisResolution(), because the latter requires 
+
+        // this must be after scaleAxisResolution(), because the latter requires
         // shape and original_shape to be still in sync
         unifyTaggedShapeSize(tagged_shape);
-                
+
         if(tagged_shape.channelDescription != "")
             tagged_shape.axistags.setChannelDescription(tagged_shape.channelDescription);
     }
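
All call sites in this header now follow the same version-neutral pattern: build the
method name and scalar arguments with pythonFromData(), invoke the method through
PyObject_CallMethodObjArgs(), and turn failures into C++ exceptions. A condensed
restatement of that pattern (illustration only; 'axistags' stands for any Python object
exposing a setResolution method):

    #include <vigra/python_utility.hxx>   // python_ptr, pythonFromData, pythonToCppException

    void callSetResolution(vigra::python_ptr axistags, long index, double resolution)
    {
        using namespace vigra;
        python_ptr func(pythonFromData("setResolution"));    // str on Python 2, unicode on Python 3
        python_ptr i(pythonFromData(index));                  // int on Python 2, long on Python 3
        python_ptr r(pythonFromData(resolution));             // float in both versions
        python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(),
                                                  i.get(), r.get(), NULL),
                       python_ptr::keep_count);
        pythonToCppException(res);                            // re-raise in C++ if the call failed
    }
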
diff --git a/include/vigra/python_utility.hxx b/include/vigra/python_utility.hxx
index 3ca8eef..52311c3 100644
--- a/include/vigra/python_utility.hxx
+++ b/include/vigra/python_utility.hxx
@@ -44,6 +44,14 @@
 
 namespace vigra {
 
+/****************************************************************/
+/*                                                              */
+/*                     exception conversion                     */
+/*                                                              */
+/****************************************************************/
+
+inline std::string dataFromPython(PyObject * data, const char * defaultVal);
+
 template <class PYOBJECT_PTR>
 void pythonToCppException(PYOBJECT_PTR obj)
 {
@@ -54,11 +62,7 @@ void pythonToCppException(PYOBJECT_PTR obj)
     if(type == 0)
         return;
     std::string message(((PyTypeObject *)type)->tp_name);
-    if(PyString_Check(value))
-    {
-        message += std::string(": ") + PyString_AS_STRING(value);
-    }
-
+    message += ": " + dataFromPython(value, "<no error message>");
     Py_XDECREF(type);
     Py_XDECREF(value);
     Py_XDECREF(trace);
@@ -218,83 +222,81 @@ inline void swap(python_ptr & a, python_ptr & b)
 }
 
 /****************************************************************/
+/*                                                              */
+/*                   data conversion to python                  */
+/*                                                              */
+/****************************************************************/
 
-inline python_ptr 
-makePythonDictionary(char const * k1 = 0, PyObject * a1 = 0,
-                    char const * k2 = 0, PyObject * a2 = 0,
-                    char const * k3 = 0, PyObject * a3 = 0)
+inline python_ptr pythonFromData(bool t)
 {
-    python_ptr dict(PyDict_New(), python_ptr::keep_count);
-    pythonToCppException(dict);
-    if(k1 && a1)
-        PyDict_SetItemString(dict, k1, a1);
-    if(k2 && a2)
-        PyDict_SetItemString(dict, k2, a2);
-    if(k3 && a3)
-        PyDict_SetItemString(dict, k3, a3);
-    return dict;
+    return python_ptr(PyBool_FromLong(t ? 1 : 0), python_ptr::new_nonzero_reference);
 }
 
-/****************************************************************/
-
-inline python_ptr pythonFromData(bool t)
+inline python_ptr pythonFromData(char const * str)
 {
-    python_ptr res(PyBool_FromLong(t ? 1 : 0), python_ptr::keep_count);
-    pythonToCppException(res);
-    return res;
+#if PY_MAJOR_VERSION < 3
+    return python_ptr(PyString_FromString(str), python_ptr::new_nonzero_reference);
+#else
+    return python_ptr(PyUnicode_FromString(str), python_ptr::new_nonzero_reference);
+#endif
 }
 
-inline python_ptr pythonFromData(std::string const & s)
+inline python_ptr pythonFromData(std::string const & str)
 {
-    python_ptr res(PyString_FromString(s.c_str()), python_ptr::keep_count);
-    pythonToCppException(res);
-    return res;
+    return pythonFromData(str.c_str());
 }
 
+#define VIGRA_PYTHON_FROM_DATA(type, fct, cast_type) \
+inline python_ptr pythonFromData(type t) \
+{ \
+    return python_ptr(fct((cast_type)t), python_ptr::new_nonzero_reference); \
+}
+
+#if PY_MAJOR_VERSION < 3
+    VIGRA_PYTHON_FROM_DATA(signed char, PyInt_FromLong, long)
+    VIGRA_PYTHON_FROM_DATA(unsigned char, PyInt_FromLong, long)
+    VIGRA_PYTHON_FROM_DATA(short, PyInt_FromLong, long)
+    VIGRA_PYTHON_FROM_DATA(unsigned short, PyInt_FromLong, long)
+    VIGRA_PYTHON_FROM_DATA(long, PyInt_FromLong, long)
+    VIGRA_PYTHON_FROM_DATA(unsigned long, PyInt_FromSize_t, size_t)
+    VIGRA_PYTHON_FROM_DATA(int, PyInt_FromSsize_t, Py_ssize_t)
+    VIGRA_PYTHON_FROM_DATA(unsigned int, PyInt_FromSize_t, size_t)
+    VIGRA_PYTHON_FROM_DATA(float, PyFloat_FromDouble, double)
+    VIGRA_PYTHON_FROM_DATA(double, PyFloat_FromDouble, double)
+#else
+    VIGRA_PYTHON_FROM_DATA(signed char, PyLong_FromLong, long)
+    VIGRA_PYTHON_FROM_DATA(unsigned char, PyLong_FromLong, long)
+    VIGRA_PYTHON_FROM_DATA(short, PyLong_FromLong, long)
+    VIGRA_PYTHON_FROM_DATA(unsigned short, PyLong_FromLong, long)
+    VIGRA_PYTHON_FROM_DATA(long, PyLong_FromLong, long)
+    VIGRA_PYTHON_FROM_DATA(unsigned long, PyLong_FromSize_t, size_t)
+    VIGRA_PYTHON_FROM_DATA(int, PyLong_FromSsize_t, Py_ssize_t)
+    VIGRA_PYTHON_FROM_DATA(unsigned int, PyLong_FromSize_t, size_t)
+    VIGRA_PYTHON_FROM_DATA(float, PyFloat_FromDouble, double)
+    VIGRA_PYTHON_FROM_DATA(double, PyFloat_FromDouble, double)
+#endif
+#undef VIGRA_PYTHON_FROM_DATA
+
 inline python_ptr pythonFromData(long long t)
 {
-    python_ptr res;
     if(t > (long long)NumericTraits<long>::max() || t < (long long)NumericTraits<long>::min())
-        res = python_ptr(PyLong_FromLongLong(t), python_ptr::keep_count);
+        return python_ptr(PyLong_FromLongLong(t), python_ptr::new_nonzero_reference);
     else
-        res = python_ptr(PyInt_FromLong((long)t), python_ptr::keep_count);
-    pythonToCppException(res);
-    return res;
+        return pythonFromData((long)t);
 }
 
 inline python_ptr pythonFromData(unsigned long long t)
 {
-    python_ptr res;
     if(t > (unsigned long long)NumericTraits<long>::max())
-        res = python_ptr(PyLong_FromUnsignedLongLong(t), python_ptr::keep_count);
+        return python_ptr(PyLong_FromUnsignedLongLong(t), python_ptr::new_nonzero_reference);
     else
-        res = python_ptr(PyInt_FromLong((long)t), python_ptr::keep_count);
-    pythonToCppException(res);
-    return res;
-}
-
-#define VIGRA_PYTHON_FROM_DATA(type, fct, cast_type) \
-inline python_ptr pythonFromData(type t) \
-{ \
-    python_ptr res(fct((cast_type)t), python_ptr::keep_count); \
-    pythonToCppException(res); \
-    return res; \
+        return pythonFromData((long)t);
 }
 
-VIGRA_PYTHON_FROM_DATA(signed char, PyInt_FromLong, long)
-VIGRA_PYTHON_FROM_DATA(unsigned char, PyInt_FromLong, long)
-VIGRA_PYTHON_FROM_DATA(short, PyInt_FromLong, long)
-VIGRA_PYTHON_FROM_DATA(unsigned short, PyInt_FromLong, long)
-VIGRA_PYTHON_FROM_DATA(long, PyInt_FromLong, long)
-VIGRA_PYTHON_FROM_DATA(unsigned long, PyInt_FromSize_t, size_t)
-VIGRA_PYTHON_FROM_DATA(int, PyInt_FromSsize_t, Py_ssize_t)
-VIGRA_PYTHON_FROM_DATA(unsigned int, PyInt_FromSize_t, size_t)
-VIGRA_PYTHON_FROM_DATA(float, PyFloat_FromDouble, double)
-VIGRA_PYTHON_FROM_DATA(double, PyFloat_FromDouble, double)
-VIGRA_PYTHON_FROM_DATA(char const *, PyString_FromString, char const *)
-
-#undef VIGRA_PYTHON_FROM_DATA
-
+/****************************************************************/
+/*                                                              */
+/*                 data conversion from python                  */
+/*                                                              */
 /****************************************************************/
 
 #define VIGRA_DATA_FROM_PYTHON(type, check, extract) \
@@ -305,51 +307,79 @@ inline type dataFromPython(PyObject * data, type const & defaultVal) \
              : defaultVal; \
 }
 
-VIGRA_DATA_FROM_PYTHON(signed char, PyInt_Check, PyInt_AsLong)
-VIGRA_DATA_FROM_PYTHON(unsigned char, PyInt_Check, PyInt_AsLong)
-VIGRA_DATA_FROM_PYTHON(short, PyInt_Check, PyInt_AsLong)
-VIGRA_DATA_FROM_PYTHON(unsigned short, PyInt_Check, PyInt_AsLong)
-VIGRA_DATA_FROM_PYTHON(long, PyInt_Check, PyInt_AsLong)
-VIGRA_DATA_FROM_PYTHON(unsigned long, PyInt_Check, PyInt_AsUnsignedLongMask)
-VIGRA_DATA_FROM_PYTHON(int, PyInt_Check, PyInt_AsLong)
-VIGRA_DATA_FROM_PYTHON(unsigned int, PyInt_Check, PyInt_AsUnsignedLongMask)
-VIGRA_DATA_FROM_PYTHON(long long, PyInt_Check, PyInt_AsSsize_t)
-VIGRA_DATA_FROM_PYTHON(unsigned long long, PyInt_Check, PyInt_AsUnsignedLongLongMask)
+#if PY_MAJOR_VERSION < 3
+    VIGRA_DATA_FROM_PYTHON(signed char, PyInt_Check, PyInt_AsLong)
+    VIGRA_DATA_FROM_PYTHON(unsigned char, PyInt_Check, PyInt_AsLong)
+    VIGRA_DATA_FROM_PYTHON(short, PyInt_Check, PyInt_AsLong)
+    VIGRA_DATA_FROM_PYTHON(unsigned short, PyInt_Check, PyInt_AsLong)
+    VIGRA_DATA_FROM_PYTHON(long, PyInt_Check, PyInt_AsLong)
+    VIGRA_DATA_FROM_PYTHON(unsigned long, PyInt_Check, PyInt_AsUnsignedLongMask)
+    VIGRA_DATA_FROM_PYTHON(int, PyInt_Check, PyInt_AsLong)
+    VIGRA_DATA_FROM_PYTHON(unsigned int, PyInt_Check, PyInt_AsUnsignedLongMask)
+    VIGRA_DATA_FROM_PYTHON(long long, PyInt_Check, PyInt_AsSsize_t)
+    VIGRA_DATA_FROM_PYTHON(unsigned long long, PyInt_Check, PyInt_AsUnsignedLongLongMask)
+#else
+    VIGRA_DATA_FROM_PYTHON(signed char, PyLong_Check, PyLong_AsLong)
+    VIGRA_DATA_FROM_PYTHON(unsigned char, PyLong_Check, PyLong_AsLong)
+    VIGRA_DATA_FROM_PYTHON(short, PyLong_Check, PyLong_AsLong)
+    VIGRA_DATA_FROM_PYTHON(unsigned short, PyLong_Check, PyLong_AsLong)
+    VIGRA_DATA_FROM_PYTHON(long, PyLong_Check, PyLong_AsLong)
+    VIGRA_DATA_FROM_PYTHON(unsigned long, PyLong_Check, PyLong_AsUnsignedLongMask)
+    VIGRA_DATA_FROM_PYTHON(int, PyLong_Check, PyLong_AsLong)
+    VIGRA_DATA_FROM_PYTHON(unsigned int, PyLong_Check, PyLong_AsUnsignedLongMask)
+    VIGRA_DATA_FROM_PYTHON(long long, PyLong_Check, PyLong_AsSsize_t)
+    VIGRA_DATA_FROM_PYTHON(unsigned long long, PyLong_Check, PyLong_AsUnsignedLongLongMask)
+#endif
 VIGRA_DATA_FROM_PYTHON(float, PyFloat_Check, PyFloat_AsDouble)
 VIGRA_DATA_FROM_PYTHON(double, PyFloat_Check, PyFloat_AsDouble)
 
-inline std::string dataFromPython(PyObject * data, const char * defaultVal) 
-{ 
-    return data && PyString_Check(data) 
-             ? std::string(PyString_AsString(data)) 
-             : std::string(defaultVal); 
+inline std::string dataFromPython(PyObject * data, const char * defaultVal)
+{
+#if PY_MAJOR_VERSION < 3
+    return data && PyString_Check(data)
+        ? std::string(PyString_AsString(data))
+#else
+    python_ptr ascii(data && PyUnicode_Check(data)
+                         ? PyUnicode_AsASCIIString(data)
+                         : 0, python_ptr::keep_count);
+    return ascii && PyBytes_Check(ascii)
+        ? std::string(PyBytes_AsString(ascii))
+#endif
+        : std::string(defaultVal);
 }
 
-inline std::string dataFromPython(PyObject * data, std::string const & defaultVal) 
-{ 
-    return data && PyString_Check(data) 
-             ? std::string(PyString_AsString(data)) 
-             : defaultVal; 
+inline std::string dataFromPython(PyObject * data, std::string const & defaultVal)
+{
+#if PY_MAJOR_VERSION < 3
+    return data && PyString_Check(data)
+        ? std::string(PyString_AsString(data))
+#else
+    python_ptr ascii(data && PyUnicode_Check(data)
+                         ? PyUnicode_AsASCIIString(data)
+                         : 0, python_ptr::keep_count);
+    return ascii && PyBytes_Check(ascii)
+        ? std::string(PyBytes_AsString(ascii))
+#endif
+        : defaultVal;
 }
 
-inline python_ptr dataFromPython(PyObject * data, python_ptr defaultVal) 
-{ 
+inline python_ptr dataFromPython(PyObject * data, python_ptr defaultVal)
+{
     return data
-             ? python_ptr(data) 
-             : defaultVal; 
+             ? python_ptr(data)
+             : defaultVal;
 }
 
 #undef VIGRA_DATA_FROM_PYTHON
 
 /****************************************************************/
+/*                                                              */
+/*         access utilities and factory functions               */
+/*                                                              */
+/****************************************************************/
 
 template <class T>
 T pythonGetAttr(PyObject * obj, const char * key, T defaultValue)
 {
     if(!obj)
         return defaultValue;
-        
-    python_ptr k(PyString_FromString(key), python_ptr::keep_count);
+    python_ptr k(pythonFromData(key));
     pythonToCppException(k);
     python_ptr pres(PyObject_GetAttr(obj, k), python_ptr::keep_count);
     if(!pres)
@@ -357,13 +387,12 @@ T pythonGetAttr(PyObject * obj, const char * key, T defaultValue)
     return dataFromPython(pres, defaultValue);
 }
 
-inline std::string 
+inline std::string
 pythonGetAttr(PyObject * obj, const char * key, const char * defaultValue)
 {
     if(!obj)
         return std::string(defaultValue);
-        
-    python_ptr k(PyString_FromString(key), python_ptr::keep_count);
+    python_ptr k(pythonFromData(key));
     pythonToCppException(k);
     python_ptr pres(PyObject_GetAttr(obj, k), python_ptr::keep_count);
     if(!pres)
@@ -373,6 +402,23 @@ pythonGetAttr(PyObject * obj, const char * key, const char * defaultValue)
 
 /****************************************************************/
 
+inline python_ptr
+makePythonDictionary(char const * k1 = 0, PyObject * a1 = 0,
+                    char const * k2 = 0, PyObject * a2 = 0,
+                    char const * k3 = 0, PyObject * a3 = 0)
+{
+    python_ptr dict(PyDict_New(), python_ptr::new_nonzero_reference);
+    if(k1 && a1)
+        PyDict_SetItemString(dict, k1, a1);
+    if(k2 && a2)
+        PyDict_SetItemString(dict, k2, a2);
+    if(k3 && a3)
+        PyDict_SetItemString(dict, k3, a3);
+    return dict;
+}
+
+/****************************************************************/
+
 template <class T, int N>
 python_ptr shapeToPythonTuple(TinyVector<T, N> const & shape)
 {
@@ -402,16 +448,16 @@ python_ptr shapeToPythonTuple(ArrayVectorView<T> const & shape)
 class PyAllowThreads
 {
     PyThreadState * save_;
-    
+
     // make it non-copyable
     PyAllowThreads(PyAllowThreads const &);
     PyAllowThreads & operator=(PyAllowThreads const &);
-  
+
   public:
     PyAllowThreads()
     : save_(PyEval_SaveThread())
     {}
-    
+
     ~PyAllowThreads()
     {
         PyEval_RestoreThread(save_);
diff --git a/include/vigra/threading.hxx b/include/vigra/threading.hxx
index fa371ed..b0181b8 100644
--- a/include/vigra/threading.hxx
+++ b/include/vigra/threading.hxx
@@ -29,7 +29,7 @@
 /*    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,      */
 /*    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING      */
 /*    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR     */
-/*    OTHER DEALINGS IN THE SOFTWARE.                                   */                
+/*    OTHER DEALINGS IN THE SOFTWARE.                                   */
 /*                                                                      */
 /************************************************************************/
 
@@ -43,7 +43,7 @@
     // ignore all threading if VIGRA_SINGLE_THREADED is defined
 #ifndef VIGRA_SINGLE_THREADED
 
-#ifndef VIGRA_NO_STD_THREADING 
+#ifndef VIGRA_NO_STD_THREADING
 # if defined(__clang__)
 #  if (!__has_include(<thread>) || !__has_include(<mutex>) || !__has_include(<atomic>))
 #    define VIGRA_NO_STD_THREADING
@@ -60,8 +60,12 @@
 #endif
 
 #ifdef USE_BOOST_THREAD
+// Use the latest API version for Boost.Thread.
+#define BOOST_THREAD_VERSION 4
 #  include <boost/thread.hpp>
 #  if BOOST_VERSION >= 105300
+     // At the moment, we only need the atomic headers, not the library.
+#    define BOOST_ATOMIC_NO_LIB 1
 #    include <boost/atomic.hpp>
 #    define VIGRA_HAS_ATOMIC 1
 #  endif
@@ -69,6 +73,8 @@
 #elif defined(VIGRA_NO_STD_THREADING)
 #  error "Your compiler does not support std::thread. If the boost libraries are available, consider running cmake with -DWITH_BOOST_THREAD=1"
 #else
+#  include <condition_variable>
+#  include <future>
 #  include <thread>
 #  include <mutex>
 // #  include <shared_mutex>  // C++14
@@ -99,9 +105,22 @@ using VIGRA_THREADING_NAMESPACE::this_thread::sleep_until;
 // contents of <mutex>
 
 using VIGRA_THREADING_NAMESPACE::mutex;
-using VIGRA_THREADING_NAMESPACE::timed_mutex;
 using VIGRA_THREADING_NAMESPACE::recursive_mutex;
-using VIGRA_THREADING_NAMESPACE::recursive_timed_mutex;
+
+#ifdef __APPLE__
+#  ifdef __GNUC__
+#    ifdef USE_BOOST_THREAD
+       using VIGRA_THREADING_NAMESPACE::timed_mutex;
+       using VIGRA_THREADING_NAMESPACE::recursive_timed_mutex;
+#    endif
+#  else
+       using VIGRA_THREADING_NAMESPACE::timed_mutex;
+       using VIGRA_THREADING_NAMESPACE::recursive_timed_mutex;
+#  endif
+#else
+       using VIGRA_THREADING_NAMESPACE::timed_mutex;
+       using VIGRA_THREADING_NAMESPACE::recursive_timed_mutex;
+#endif
 
 using VIGRA_THREADING_NAMESPACE::lock_guard;
 using VIGRA_THREADING_NAMESPACE::unique_lock;
@@ -125,6 +144,18 @@ using VIGRA_THREADING_NAMESPACE::call_once;
 // using VIGRA_THREADING_NAMESPACE::shared_mutex;   // C++14
 // using VIGRA_THREADING_NAMESPACE::shared_lock;  // C++14
 
+// Futures.
+
+using VIGRA_THREADING_NAMESPACE::future;
+
+// Condition variables.
+
+using VIGRA_THREADING_NAMESPACE::condition_variable;
+
+// Packaged task.
+
+using VIGRA_THREADING_NAMESPACE::packaged_task;
+
 #ifdef VIGRA_HAS_ATOMIC
 
 // contents of <atomic>
@@ -197,7 +228,7 @@ using VIGRA_THREADING_NAMESPACE::atomic_signal_fence;
 // using VIGRA_THREADING_NAMESPACE::atomic_flag_clearatomic_flag_clear_explicit;
 // using VIGRA_THREADING_NAMESPACE::atomic_init;
 // using VIGRA_THREADING_NAMESPACE::kill_dependency;
-    
+
 #else  // VIGRA_HAS_ATOMIC not defined
 
 enum memory_order {
@@ -215,30 +246,30 @@ template <int SIZE=4>
 struct atomic_long_impl
 {
     typedef LONG value_type;
-    
+
     static long load(value_type const & val)
     {
         long res = val;
         MemoryBarrier();
         return res;
     }
-    
+
     static void store(value_type & dest, long val)
     {
         MemoryBarrier();
         dest = val;
     }
-    
+
     static long add(value_type & dest, long val)
     {
         return InterlockedExchangeAdd(&dest, val);
     }
-    
+
     static long sub(value_type & dest, long val)
     {
         return InterlockedExchangeAdd(&dest, -val);
     }
-    
+
     static bool compare_exchange(value_type & dest, long & old_val, long new_val)
     {
         long check_val = old_val;
@@ -251,30 +282,30 @@ template <>
 struct atomic_long_impl<8>
 {
     typedef LONGLONG value_type;
-    
+
     static long load(value_type const & val)
     {
         long res = val;
         MemoryBarrier();
         return res;
     }
-    
+
     static void store(value_type & dest, long val)
     {
         MemoryBarrier();
         dest = val;
     }
-    
+
     static long add(value_type & dest, long val)
     {
         return InterlockedExchangeAdd64(&dest, val);
     }
-    
+
     static long sub(value_type & dest, long val)
     {
         return InterlockedExchangeAdd64(&dest, -val);
     }
-    
+
     static bool compare_exchange(value_type & dest, long & old_val, long new_val)
     {
         long check_val = old_val;
@@ -289,30 +320,30 @@ template <int SIZE=4>
 struct atomic_long_impl
 {
     typedef long value_type;
-    
+
     static long load(value_type const & val)
     {
         long res = val;
         __sync_synchronize();
         return res;
     }
-    
+
     static void store(value_type & dest, long val)
     {
         __sync_synchronize();
         dest = val;
     }
-    
+
     static long add(value_type & dest, long val)
     {
         return __sync_fetch_and_add(&dest, val);
     }
-    
+
     static long sub(value_type & dest, long val)
     {
         return __sync_fetch_and_sub(&dest, val);
     }
-    
+
     static bool compare_exchange(value_type & dest, long & old_val, long new_val)
     {
         long check_val = old_val;
@@ -326,42 +357,57 @@ struct atomic_long_impl
 struct atomic_long
 {
     typedef atomic_long_impl<sizeof(long)>::value_type value_type;
-    
+
     atomic_long(long v = 0)
     : value_(v)
     {}
-    
+
     atomic_long & operator=(long val)
     {
         store(val);
         return *this;
     }
-    
+
+    bool operator==(long val) const
+    {
+        return load() == val;
+    }
+
+    void operator++()
+    {
+        fetch_add(1);
+    }
+
+    void operator--()
+    {
+        fetch_sub(1);
+    }
+
     long load(memory_order = memory_order_seq_cst) const
     {
         return atomic_long_impl<sizeof(long)>::load(value_);
     }
-    
+
     void store(long v, memory_order = memory_order_seq_cst)
     {
         atomic_long_impl<sizeof(long)>::store(value_, v);
     }
-    
+
     long fetch_add(long v, memory_order = memory_order_seq_cst)
     {
         return atomic_long_impl<sizeof(long)>::add(value_, v);
     }
-    
+
     long fetch_sub(long v, memory_order = memory_order_seq_cst)
     {
         return atomic_long_impl<sizeof(long)>::sub(value_, v);
     }
-    
+
     bool compare_exchange_strong(long & old_val, long new_val, memory_order = memory_order_seq_cst)
     {
         return atomic_long_impl<sizeof(long)>::compare_exchange(value_, old_val, new_val);
     }
-    
+
     bool compare_exchange_weak(long & old_val, long new_val, memory_order = memory_order_seq_cst)
     {
         return atomic_long_impl<sizeof(long)>::compare_exchange(value_, old_val, new_val);
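
The fallback above implements just the subset of std::atomic<long> that the thread pool
relies on (load/store, fetch_add/fetch_sub, compare-and-swap, plus the new ==, ++ and --).
A short sketch of how the compare-and-swap member is typically used (illustration only;
assumes vigra::threading::atomic_long is available, as in the ThreadPool members below):

    #include <vigra/threading.hxx>

    // Atomically increment 'counter', but never beyond 'limit'; returns the resulting value.
    long incrementClamped(vigra::threading::atomic_long & counter, long limit)
    {
        long observed = counter.load();
        while(observed < limit)
        {
            // succeeds only if 'counter' still holds 'observed', otherwise retry
            if(counter.compare_exchange_strong(observed, observed + 1))
                return observed + 1;
            observed = counter.load();   // re-read the current value and try again
        }
        return observed;                 // already at (or above) the limit
    }
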
diff --git a/include/vigra/threadpool.hxx b/include/vigra/threadpool.hxx
index e65545b..acda85c 100644
--- a/include/vigra/threadpool.hxx
+++ b/include/vigra/threadpool.hxx
@@ -36,18 +36,13 @@
 #ifndef VIGRA_THREADPOOL_HXX
 #define VIGRA_THREADPOOL_HXX
 
-#include <functional>
-#include <thread>
-#include <atomic>
 #include <vector>
-#include <future>
-#include <mutex>
 #include <queue>
-#include <condition_variable>
 #include <stdexcept>
 #include <cmath>
 #include "mathutil.hxx"
 #include "counting_iterator.hxx"
+#include "threading.hxx"
 
 
 namespace vigra
@@ -70,7 +65,7 @@ class ParallelOptions
         /** Constants for special settings.
         */
     enum {
-        Auto       = -1, ///< Determine number of threads automatically (from <tt>std::thread::hardware_concurrency()</tt>)
+        Auto       = -1, ///< Determine number of threads automatically (from <tt>threading::thread::hardware_concurrency()</tt>)
         Nice       = -2, ///< Use half as many threads as <tt>Auto</tt> would.
         NoThreads  =  0  ///< Switch off multi-threading (i.e. execute tasks sequentially)
     };
@@ -130,8 +125,8 @@ class ParallelOptions
             return userNThreads >= 0
                        ? userNThreads
                        : userNThreads == Nice
-                               ? std::thread::hardware_concurrency() / 2
-                               : std::thread::hardware_concurrency();
+                               ? threading::thread::hardware_concurrency() / 2
+                               : threading::thread::hardware_concurrency();
         #endif
     }
 
@@ -159,16 +154,14 @@ class ThreadPool
         in the present thread.
      */
     ThreadPool(const ParallelOptions & options)
-    :   stop(false),
-        busy(0),
-        processed(0)
+    :   stop(false)
     {
         init(options);
     }
 
     /** Create a thread pool with n threads. The constructor just launches
         the desired number of workers. If \arg n is <tt>ParallelOptions::Auto</tt>,
-        the number of threads is determined by <tt>std::thread::hardware_concurrency()</tt>.
+        the number of threads is determined by <tt>threading::thread::hardware_concurrency()</tt>.
         <tt>ParallelOptions::Nice</tt> will create half as many threads.
         If <tt>n = 0</tt>, no workers are started, and all tasks will be executed
         synchronously in the present thread. If the preprocessor flag
@@ -177,9 +170,7 @@ class ThreadPool
         is useful for debugging.
      */
     ThreadPool(const int n)
-    :   stop(false),
-        busy(0),
-        processed(0)
+    :   stop(false)
     {
         init(ParallelOptions().numThreads(n));
     }
@@ -195,7 +186,7 @@ class ThreadPool
      * If the task throws an exception, it will be raised on the call to get().
      */
     template<class F>
-    std::future<typename std::result_of<F(int)>::type>  enqueueReturning(F&& f) ;
+    auto enqueueReturning(F&& f) -> threading::future<decltype(f(0))>;
 
     /**
      * Enqueue function for tasks without return value.
@@ -203,14 +194,14 @@ class ThreadPool
      * some compilers fail on <tt>std::result_of<F(int)>::type</tt> for void(int) functions.
      */
     template<class F>
-    std::future<void> enqueue(F&& f) ;
+    threading::future<void> enqueue(F&& f) ;
 
     /**
      * Block until all tasks are finished.
      */
     void waitFinished()
     {
-        std::unique_lock<std::mutex> lock(queue_mutex);
+        threading::unique_lock<threading::mutex> lock(queue_mutex);
         finish_condition.wait(lock, [this](){ return tasks.empty() && (busy == 0); });
     }
 
@@ -228,21 +219,24 @@ private:
     void init(const ParallelOptions & options);
 
     // need to keep track of threads so we can join them
-    std::vector<std::thread> workers;
+    std::vector<threading::thread> workers;
 
     // the task queue
     std::queue<std::function<void(int)> > tasks;
 
     // synchronization
-    std::mutex queue_mutex;
-    std::condition_variable worker_condition;
-    std::condition_variable finish_condition;
+    threading::mutex queue_mutex;
+    threading::condition_variable worker_condition;
+    threading::condition_variable finish_condition;
     bool stop;
-    std::atomic<unsigned int> busy, processed;
+    threading::atomic_long busy, processed;
 };
 
 inline void ThreadPool::init(const ParallelOptions & options)
 {
+    busy.store(0);
+    processed.store(0);
+
     const size_t actualNThreads = options.getNumThreads();
     for(size_t ti = 0; ti<actualNThreads; ++ti)
     {
@@ -253,7 +247,7 @@ inline void ThreadPool::init(const ParallelOptions & options)
                 {
                     std::function<void(int)> task;
                     {
-                        std::unique_lock<std::mutex> lock(this->queue_mutex);
+                        threading::unique_lock<threading::mutex> lock(this->queue_mutex);
 
                         // will wait if : stop == false  AND queue is empty
                         // if stop == true AND queue is empty thread function will return later
@@ -286,27 +280,27 @@ inline void ThreadPool::init(const ParallelOptions & options)
 inline ThreadPool::~ThreadPool()
 {
     {
-        std::unique_lock<std::mutex> lock(queue_mutex);
+        threading::unique_lock<threading::mutex> lock(queue_mutex);
         stop = true;
     }
     worker_condition.notify_all();
-    for(std::thread &worker: workers)
+    for(threading::thread &worker: workers)
         worker.join();
 }
 
 template<class F>
-inline std::future<typename std::result_of<F(int)>::type>
-ThreadPool::enqueueReturning(F&& f)
+inline auto
+ThreadPool::enqueueReturning(F&& f) -> threading::future<decltype(f(0))>
 {
-    typedef typename std::result_of<F(int)>::type result_type;
-    typedef std::packaged_task<result_type(int)> PackageType;
+    typedef decltype(f(0)) result_type;
+    typedef threading::packaged_task<result_type(int)> PackageType;
 
     auto task = std::make_shared<PackageType>(f);
     auto res = task->get_future();
 
     if(workers.size()>0){
         {
-            std::unique_lock<std::mutex> lock(queue_mutex);
+            threading::unique_lock<threading::mutex> lock(queue_mutex);
 
             // don't allow enqueueing after stopping the pool
             if(stop)
@@ -315,7 +309,7 @@ ThreadPool::enqueueReturning(F&& f)
             tasks.emplace(
                 [task](int tid)
                 {
-                    (*task)(tid);
+                    (*task)(std::move(tid));
                 }
             );
         }
@@ -329,32 +323,54 @@ ThreadPool::enqueueReturning(F&& f)
 }
 
 template<class F>
-inline std::future<void>
+inline threading::future<void>
 ThreadPool::enqueue(F&& f)
 {
-    typedef std::packaged_task<void(int)> PackageType;
-
+#if defined(USE_BOOST_THREAD) && \
+    !defined(BOOST_THREAD_PROVIDES_VARIADIC_THREAD)
+    // Without variadic templates, Boost.Thread's packaged_task only
+    // supports the signature 'R()' (functions with no arguments).
+    // We bind the thread_id parameter to 0, so this parameter
+    // must NOT be used in function f (fortunately, this is the case
+    // for the blockwise versions of convolution, labeling and
+    // watersheds).
+    typedef threading::packaged_task<void()> PackageType;
+    auto task = std::make_shared<PackageType>(std::bind(f, 0));
+#else
+    typedef threading::packaged_task<void(int)> PackageType;
     auto task = std::make_shared<PackageType>(f);
+#endif
+
     auto res = task->get_future();
     if(workers.size()>0){
         {
-            std::unique_lock<std::mutex> lock(queue_mutex);
+            threading::unique_lock<threading::mutex> lock(queue_mutex);
 
             // don't allow enqueueing after stopping the pool
             if(stop)
                 throw std::runtime_error("enqueue on stopped ThreadPool");
 
             tasks.emplace(
-                [task](int tid)
-                {
-                    (*task)(tid);
-                }
+               [task](int tid)
+               {
+#if defined(USE_BOOST_THREAD) && \
+    !defined(BOOST_THREAD_PROVIDES_VARIADIC_THREAD)
+                    (*task)();
+#else
+                    (*task)(std::move(tid));
+#endif
+               }
             );
         }
         worker_condition.notify_one();
     }
     else{
+#if defined(USE_BOOST_THREAD) && \
+    !defined(BOOST_THREAD_PROVIDES_VARIADIC_THREAD)
+        (*task)();
+#else
         (*task)(0);
+#endif
     }
     return res;
 }
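
The std::bind(f, 0) branch above is the usual way to squeeze a one-argument callable into the nullary signature that a non-variadic packaged_task can hold; the thread id the task would normally receive is frozen to 0, which is why the comment warns that the parameter must not be used. A stand-alone sketch of that adaptation (illustrative, not part of the patch):

    #include <functional>

    void task(int /* thread_id */) { /* ... */ }   // hypothetical task

    // Bind the id to a dummy value so the callable takes no arguments.
    std::function<void()> nullary = std::bind(task, 0);
    // nullary() is now equivalent to task(0).
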
@@ -380,7 +396,7 @@ inline void parallel_foreach_impl(
     const float workPerThread = float(workload)/pool.nThreads();
     const std::ptrdiff_t chunkedWorkPerThread = std::max<std::ptrdiff_t>(roundi(workPerThread/3.0), 1);
 
-    std::vector<std::future<void> > futures;
+    std::vector<threading::future<void> > futures;
     for( ;iter<end; iter+=chunkedWorkPerThread)
     {
         const size_t lc = std::min(workload, chunkedWorkPerThread);
@@ -421,7 +437,7 @@ inline void parallel_foreach_impl(
     const float workPerThread = float(workload)/pool.nThreads();
     const std::ptrdiff_t chunkedWorkPerThread = std::max<std::ptrdiff_t>(roundi(workPerThread/3.0), 1);
 
-    std::vector<std::future<void> > futures;
+    std::vector<threading::future<void> > futures;
     for(;;)
     {
         const size_t lc = std::min(chunkedWorkPerThread, workload);
@@ -468,7 +484,7 @@ inline void parallel_foreach_impl(
     std::input_iterator_tag
 ){
     size_t num_items = 0;
-    std::vector<std::future<void> > futures;
+    std::vector<threading::future<void> > futures;
     for (; iter != end; ++iter)
     {
         auto item = *iter;
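
The chunked overloads above hand each worker pieces of roughly a third of its per-thread share, so faster threads can pick up extra chunks. A worked example with assumed numbers (not taken from the patch):

    // workload = 1000 items, pool.nThreads() = 4   (assumed numbers)
    // workPerThread        = 1000 / 4              = 250
    // chunkedWorkPerThread = max(roundi(250/3), 1) =  83
    // queued tasks         = 12 x 83 + 1 x 4 items =  13
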
diff --git a/src/impex/CMakeLists.txt b/src/impex/CMakeLists.txt
index da9a76c..c3516d7 100644
--- a/src/impex/CMakeLists.txt
+++ b/src/impex/CMakeLists.txt
@@ -32,7 +32,7 @@ IF (MSVC OR MINGW)
     IF(NOT VIGRA_STATIC_LIB)
         ADD_DEFINITIONS(-DVIGRA_DLL)
     ENDIF()
-ELSEIF(CMAKE_COMPILER_IS_GNUCXX)
+ELSEIF(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANGXX)
     IF(VIGRA_STATIC_LIB)
         ADD_DEFINITIONS(-fPIC -DPIC)
     ENDIF()
@@ -66,11 +66,11 @@ ADD_LIBRARY(vigraimpex ${LIBTYPE}
 
 set(SOVERSION 6)  # increment this after changing the vigraimpex library
 IF(MACOSX)
-    SET_TARGET_PROPERTIES(vigraimpex PROPERTIES VERSION ${SOVERSION}.${vigra_version} 
+    SET_TARGET_PROPERTIES(vigraimpex PROPERTIES VERSION ${SOVERSION}.${vigra_version}
                          SOVERSION ${SOVERSION} INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib${LIB_SUFFIX}")
 ELSE()
     SET_TARGET_PROPERTIES(vigraimpex PROPERTIES VERSION ${SOVERSION}.${vigra_version} SOVERSION ${SOVERSION})
-ENDIF() 
+ENDIF()
 
 IF(JPEG_FOUND)
   TARGET_LINK_LIBRARIES(vigraimpex ${JPEG_LIBRARIES})
@@ -99,6 +99,6 @@ ENDIF(ZLIB_FOUND)
 
 INSTALL(TARGETS vigraimpex
         EXPORT vigra-targets
-        RUNTIME DESTINATION bin 
-        LIBRARY DESTINATION lib${LIB_SUFFIX} 
+        RUNTIME DESTINATION bin
+        LIBRARY DESTINATION lib${LIB_SUFFIX}
         ARCHIVE DESTINATION lib${LIB_SUFFIX})
diff --git a/src/impex/viff.cxx b/src/impex/viff.cxx
index 03dbbf1..01df292 100644
--- a/src/impex/viff.cxx
+++ b/src/impex/viff.cxx
@@ -29,7 +29,7 @@
 /*    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,      */
 /*    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING      */
 /*    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR     */
-/*    OTHER DEALINGS IN THE SOFTWARE.                                   */                
+/*    OTHER DEALINGS IN THE SOFTWARE.                                   */
 /*                                                                      */
 /************************************************************************/
 
@@ -72,7 +72,7 @@
 /* definitions for data storage type,
    unsigned long data_storage_type; */
 #define VFF_TYP_BIT     0   /* pixels are on or off (binary image)*/
-                                        /* Note: This is an X11 XBitmap 
+                                        /* Note: This is an X11 XBitmap
                        with bits packed into a byte and
                        padded to a byte */
 #define VFF_TYP_1_BYTE      1   /* pixels are byte (unsigned char) */
@@ -114,13 +114,13 @@
                        by groups of maps_per_cycle, allowing
                        "rotating the color map" */
 #define VFF_MS_SHARED       3   /* All data band share the same map */
-#define VFF_MS_GROUP        4   /* All data bands are "grouped" 
+#define VFF_MS_GROUP        4   /* All data bands are "grouped"
                        together to point into one map */
 /* definitions for enabling the map,
    unsigned long map_enable; */
 #define VFF_MAP_OPTIONAL    1   /* The data is valid without being
                        sent thru the color map. If a
-                       map is defined, the data may 
+                       map is defined, the data may
                        optionally be sent thru it. */
 #define VFF_MAP_FORCE       2   /* The data MUST be sent thru the map
                        to be interpreted */
@@ -138,8 +138,8 @@
     HSV:  hue, saturation, value
     IHS:  intensity, hue, saturation
     XYZ:
-    UVW:  
-    SOW:  
+    UVW:
+    SOW:
     Lab:
     Luv:
 
@@ -176,7 +176,7 @@
                         or the vectors is explicit */
 
 namespace vigra {
-    
+
     template< class T1, class T2 >
     class colormap
     {
@@ -262,7 +262,7 @@ namespace vigra {
     {
         vigra_precondition(src_bands == 1u,
                "map_multiband(): Source image must have one band.");
-        
+
         typedef colormap< storage_type, map_storage_type > colormap_type;
         const unsigned int num_pixels = src_width * src_height;
 
@@ -321,7 +321,7 @@ namespace vigra {
 
         desc.bandNumbers.resize(1);
         desc.bandNumbers[0] = 0;
-        
+
         return desc;
     }
 
@@ -426,7 +426,7 @@ namespace vigra {
             read_field( stream, bo, map_row_size );
             read_field( stream, bo, map_col_size );
         }
-            
+
         // seek behind the header. (skip colorspace and pointers)
         stream.seekg( 1024, std::ios::beg );
     }
@@ -443,7 +443,7 @@ namespace vigra {
 
         // magic number
         stream.put((VIGRA_STREAM_CHAR_TYPE)0xAB);
-            
+
         // file type
         stream.put((VIGRA_STREAM_CHAR_TYPE)0x01);
 
@@ -459,7 +459,7 @@ namespace vigra {
             bo.set("big endian" );
             stream.put(VFF_DEP_BIGENDIAN);
         }
-        else 
+        else
         {
             bo.set("little endian" );
             stream.put(VFF_DEP_LITENDIAN);
@@ -537,7 +537,7 @@ namespace vigra {
 #else
         std::ifstream stream( filename.c_str() );
 #endif
-        
+
         if(!stream.good())
         {
             std::string msg("Unable to open file '");
@@ -634,7 +634,7 @@ namespace vigra {
     void ViffDecoderImpl::color_map()
     {
         void_vector_base temp_bands;
-        unsigned int temp_num_bands;
+        unsigned int temp_num_bands = 0;
 
         if ( header.map_storage_type == VFF_MAPTYP_1_BYTE ) {
             typedef UInt8 map_storage_type;
@@ -794,7 +794,7 @@ namespace vigra {
                                components, width, height,
                                static_cast< const maps_type & >(maps),
                                num_maps, map_width, map_height );
- 
+
             } else
                 vigra_precondition( false, "storage type unsupported" );
 
@@ -905,9 +905,9 @@ namespace vigra {
 
         ViffEncoderImpl( const std::string & filename )
 #ifdef VIGRA_NEED_BIN_STREAMS
-            : stream( filename.c_str(), std::ios::binary ), 
+            : stream( filename.c_str(), std::ios::binary ),
 #else
-            : stream( filename.c_str() ), 
+            : stream( filename.c_str() ),
 #endif
               bo( "big endian" ),
               pixelType("undefined"), current_scanline(0), finalized(false)
diff --git a/test/blockwisealgorithms/CMakeLists.txt b/test/blockwisealgorithms/CMakeLists.txt
index c2a751d..ded6753 100644
--- a/test/blockwisealgorithms/CMakeLists.txt
+++ b/test/blockwisealgorithms/CMakeLists.txt
@@ -1,14 +1,12 @@
 VIGRA_CONFIGURE_THREADING()
-if(NOT THREADING_FOUND)
-    MESSAGE(STATUS "** WARNING: Your compiler does not support C++ threading.")
-    MESSAGE(STATUS "**          test_blockwiselabeling will not be executed on this platform.")
-    if(NOT WITH_BOOST_THREAD)
-        MESSAGE(STATUS "**          Try to run cmake with '-DWITH_BOOST_THREAD=1' to use boost threading.")
-    endif()
-    
+
+if(THREADING_FOUND)
+    VIGRA_ADD_TEST(test_blockwiselabeling test_labeling.cxx LIBRARIES ${THREADING_LIBRARIES})
+    VIGRA_ADD_TEST(test_blockwisewatersheds test_watersheds.cxx LIBRARIES ${THREADING_LIBRARIES})
+    VIGRA_ADD_TEST(test_blockwiseconvolution test_convolution.cxx LIBRARIES ${THREADING_LIBRARIES})
 else()
-    SET(MULTIARRAY_CHUNKED_LIBRARIES ${THREADING_LIBRARIES})
-    VIGRA_ADD_TEST(test_blockwiselabeling test_labeling.cxx LIBRARIES ${MULTIARRAY_CHUNKED_LIBRARIES})
-    VIGRA_ADD_TEST(test_blockwisewatersheds test_watersheds.cxx LIBRARIES ${MULTIARRAY_CHUNKED_LIBRARIES})
-    VIGRA_ADD_TEST(test_blockwiseconvolution test_convolution.cxx LIBRARIES ${MULTIARRAY_CHUNKED_LIBRARIES})
+    MESSAGE(STATUS "** WARNING: No threading implementation found.")
+    MESSAGE(STATUS "**          test_blockwiselabeling will not be executed on this platform.")
+    MESSAGE(STATUS "**          test_blockwisewatersheds will not be executed on this platform.")
+    MESSAGE(STATUS "**          test_blockwiseconvolution will not be executed on this platform.")
 endif()
\ No newline at end of file
diff --git a/test/checkUnitTests.py b/test/checkUnitTests.py
index 9b46c53..6c22613 100644
--- a/test/checkUnitTests.py
+++ b/test/checkUnitTests.py
@@ -3,7 +3,7 @@ def hook(ui, repo, **args):
     repoPath = repo.url()[5:] + '/' # cut-off 'file:' prefix
     testSuccessFile = repoPath + 'test/testSuccess'
     if not os.path.exists(testSuccessFile):
-        print "File 'test/testSuccess' is missing. Run the test suite before committing."
+        print("File 'test/testSuccess' is missing. Run the test suite before committing.")
         return True
     testTime = os.path.getmtime(testSuccessFile)
     stat = repo.status()
@@ -16,9 +16,9 @@ def hook(ui, repo, **args):
         if fileTime > testTime:
             modified.append(file)
     if len(modified) > 0:
-        print "Run the test suite before committing. The following files are untested:" 
+        print("Run the test suite before committing. The following files are untested:")
         for file in modified:
-            print '   ',file
+            print('   ',file)
         return True
     return False
     
diff --git a/test/correlation/CMakeLists.txt b/test/correlation/CMakeLists.txt
index 8c515b0..628169b 100644
--- a/test/correlation/CMakeLists.txt
+++ b/test/correlation/CMakeLists.txt
@@ -2,8 +2,8 @@ if(FFTW3_FOUND)
     INCLUDE_DIRECTORIES(${FFTW3_INCLUDE_DIR})
 
     VIGRA_CONFIGURE_THREADING()
-      
+
     VIGRA_ADD_TEST(test_correlation test.cxx LIBRARIES ${FFTW3_LIBRARIES} ${FFTW3F_LIBRARIES} ${THREADING_LIBRARIES})
 else()
-    MESSAGE(STATUS "** WARNING: test_correlation will not be executed")
+    MESSAGE(STATUS "** WARNING: fftw not found. test_correlation will not be executed")
 endif()
diff --git a/test/multiarray/test_chunked.cxx b/test/multiarray/test_chunked.cxx
index 236b4d2..7adf241 100644
--- a/test/multiarray/test_chunked.cxx
+++ b/test/multiarray/test_chunked.cxx
@@ -29,10 +29,11 @@
 /*    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,      */
 /*    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING      */
 /*    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR     */
-/*    OTHER DEALINGS IN THE SOFTWARE.                                   */                
+/*    OTHER DEALINGS IN THE SOFTWARE.                                   */
 /*                                                                      */
 /************************************************************************/
 
+#include <functional>
 #include <stdio.h>
 
 #include "vigra/unittest.hxx"
@@ -70,15 +71,15 @@ public:
     typedef ChunkedArray<3, T> BaseArray;
     typedef VIGRA_UNIQUE_PTR<BaseArray> ArrayPtr;
     typedef typename BaseArray::iterator Iterator;
-    
-    static const int channelCount = NumericTraits<T>::isScalar::value 
+
+    static const int channelCount = NumericTraits<T>::isScalar::value
                                        ? 1
                                        : 3;
-    
+
     Shape3 shape, chunk_shape;
     ArrayPtr empty_array, array;
     PlainArray ref;
-    
+
     static const int fill_value = 42;
 
     ChunkedMultiArrayTest ()
@@ -92,92 +93,92 @@ public:
         array = createArray(shape, chunk_shape, (Array *)0);
         linearSequence(array->begin(), array->end());
     }
-    
-    static ArrayPtr createArray(Shape3 const & shape, 
+
+    static ArrayPtr createArray(Shape3 const & shape,
                                 Shape3 const & chunk_shape,
                                 ChunkedArrayFull<3, T> *,
                                 std::string const & = "chunked_test.h5")
     {
         return ArrayPtr(new ChunkedArrayFull<3, T>(shape, ChunkedArrayOptions().fillValue(fill_value)));
     }
-    
-    static ArrayPtr createArray(Shape3 const & shape, 
+
+    static ArrayPtr createArray(Shape3 const & shape,
                                 Shape3 const & chunk_shape,
                                 ChunkedArrayLazy<3, T> *,
                                 std::string const & = "chunked_test.h5")
     {
-        return ArrayPtr(new ChunkedArrayLazy<3, T>(shape, chunk_shape, 
+        return ArrayPtr(new ChunkedArrayLazy<3, T>(shape, chunk_shape,
                                                    ChunkedArrayOptions().fillValue(fill_value)));
     }
-    
-    static ArrayPtr createArray(Shape3 const & shape, 
+
+    static ArrayPtr createArray(Shape3 const & shape,
                                 Shape3 const & chunk_shape,
                                 ChunkedArrayCompressed<3, T> *,
                                 std::string const & = "chunked_test.h5")
     {
-        return ArrayPtr(new ChunkedArrayCompressed<3, T>(shape, chunk_shape, 
+        return ArrayPtr(new ChunkedArrayCompressed<3, T>(shape, chunk_shape,
                                                          ChunkedArrayOptions().fillValue(fill_value)
                                                                               .compression(LZ4)));
     }
-    
+
 #ifdef HasHDF5
-    static ArrayPtr createArray(Shape3 const & shape, 
+    static ArrayPtr createArray(Shape3 const & shape,
                                 Shape3 const & chunk_shape,
                                 ChunkedArrayHDF5<3, T> *,
                                 std::string const & name = "chunked_test.h5")
     {
         HDF5File hdf5_file(name, HDF5File::New);
-        return ArrayPtr(new ChunkedArrayHDF5<3, T>(hdf5_file, "test", HDF5File::New, 
-                                                   shape, chunk_shape, 
+        return ArrayPtr(new ChunkedArrayHDF5<3, T>(hdf5_file, "test", HDF5File::New,
+                                                   shape, chunk_shape,
                                                    ChunkedArrayOptions().fillValue(fill_value)));
     }
 #endif
-    
-    static ArrayPtr createArray(Shape3 const & shape, 
+
+    static ArrayPtr createArray(Shape3 const & shape,
                                 Shape3 const & chunk_shape,
                                 ChunkedArrayTmpFile<3, T> *,
                                 std::string const & = "chunked_test.h5")
     {
-        return ArrayPtr(new ChunkedArrayTmpFile<3, T>(shape, chunk_shape, 
+        return ArrayPtr(new ChunkedArrayTmpFile<3, T>(shape, chunk_shape,
                                                       ChunkedArrayOptions().fillValue(fill_value), ""));
     }
-    
+
     void test_construction ()
     {
         bool isFullArray = IsSameType<Array, ChunkedArrayFull<3, T> >::value;
-        
+
         should(array->isInside(Shape3(1,2,3)));
         should(!array->isInside(Shape3(1,23,3)));
         should(!array->isInside(Shape3(1,2,-3)));
-        
+
         shouldEqual(array->shape(), ref.shape());
         shouldEqual(array->shape(0), ref.shape(0));
         shouldEqual(array->shape(1), ref.shape(1));
         shouldEqual(array->shape(2), ref.shape(2));
-        
+
         if(isFullArray)
             shouldEqual(array->chunkArrayShape(), Shape3(1));
         else
             shouldEqual(array->chunkArrayShape(), Shape3(3));
-        
+
         shouldEqualSequence(array->begin(), array->end(), ref.begin());
         shouldEqualSequence(array->cbegin(), array->cend(), ref.begin());
-        
+
         should(*array == ref);
         should(*array != ref.subarray(Shape3(1),ref.shape()));
-        
+
         shouldEqual(array->getItem(Shape3(1,8,17)), ref[Shape3(1,8,17)]);
-        
+
         ref[ref.size()-1] = ref[ref.size()-1] + T(1);
         should(*array != ref);
         array->setItem(ref.shape()-Shape3(1), ref[ref.size()-1]);
         should(*array == ref);
-        
+
         if(isFullArray)
             shouldEqual(empty_array->dataBytes(), ref.size()*sizeof(T));
         else
             shouldEqual(empty_array->dataBytes(), 0);
-            
+
         PlainArray empty(shape, T(fill_value));
         // const_iterator should simply use the fill_value_chunk_
         shouldEqualSequence(empty_array->cbegin(), empty_array->cend(), empty.begin());
@@ -185,14 +186,14 @@ public:
             shouldEqual(empty_array->dataBytes(), ref.size()*sizeof(T));
         else
             shouldEqual(empty_array->dataBytes(), 0);
-            
+
         // non-const iterator should allocate the array and initialize with fill_value_
         shouldEqualSequence(empty_array->begin(), empty_array->end(), empty.begin());
         if(IsSameType<Array, ChunkedArrayTmpFile<3, T> >::value)
             should(empty_array->dataBytes() >= ref.size()*sizeof(T)); // must pad to a full memory page
         else
             shouldEqual(empty_array->dataBytes(), ref.size()*sizeof(T));
-        
+
         // make sure the central chunk is loaded, so that releaseChunks() will have an effect
         array->getItem(Shape3(10,10,10));
         int dataBytesBefore = array->dataBytes();
@@ -208,7 +209,7 @@ public:
         shouldEqualSequence(array->cbegin(), array->cend(), ref.begin());
 
         // FIXME: test copy construction?
-        
+
         // should(array3 != array3.subarray(Shape(1,1,1), Shape(2,2,2)));
         // should(array3.subarray(Shape(0,0,0), Shape(10,1,1)) != array3.subarray(Shape(0,1,0), Shape(10,2,1)));
 
@@ -228,34 +229,34 @@ public:
     void test_assignment()
     {
         MultiArrayView <3, T, ChunkedArrayTag> v;
-        should(!v.hasData());    
-        
+        should(!v.hasData());
+
         v = array->subarray(Shape3(), ref.shape());
-        should(v.hasData());        
-        
+        should(v.hasData());
+
         MultiArrayView <3, T, ChunkedArrayTag> vc;
         should(!vc.hasData());
-        
+
         vc = v;
         should(vc.hasData());
         shouldEqual(vc.shape(), ref.shape());
         shouldEqualSequence(vc.begin(), vc.end(), ref.begin());
-        
+
         vc = T(7);
         std::vector<T> v7ref(vc.size(), T(7));
         should(vc.hasData());
         shouldEqual(vc.shape(), ref.shape());
         shouldEqualSequence(vc.begin(), vc.end(), v7ref.begin());
         shouldEqualSequence(v.begin(), v.end(), v7ref.begin());
-        
+
         vc = ref;
         should(vc.hasData());
         shouldEqual(vc.shape(), ref.shape());
         shouldEqualSequence(vc.begin(), vc.end(), ref.begin());
-        
+
         MultiArrayView <3, T, ChunkedArrayTag> vs(array->subarray(Shape3(), Shape3(4)));
         should(vs.hasData());
-        
+
         try
         {
             vc = vs;
@@ -267,47 +268,47 @@ public:
                         actual(e.what());
             shouldEqual(actual.substr(0, expected.size()), expected);
         }
-        
+
         vc += T(1);
         ref += T(1);
         shouldEqual(vc.shape(), ref.shape());
         shouldEqualSequence(vc.begin(), vc.end(), ref.begin());
-        
+
         vc += v;
         ref *= T(2);
         shouldEqual(vc.shape(), ref.shape());
         shouldEqualSequence(vc.begin(), vc.end(), ref.begin());
-         
+
         vc += T(42);
         ref += T(42);
         shouldEqual(vc.shape(), ref.shape());
         shouldEqualSequence(vc.begin(), vc.end(), ref.begin());
-        
+
         vc -= T(42);
         ref -= T(42);
         shouldEqual(vc.shape(), ref.shape());
         shouldEqualSequence(vc.begin(), vc.end(), ref.begin());
-       
+
         ref /= T(2);
         vc -= ref;
         shouldEqual(vc.shape(), ref.shape());
         shouldEqualSequence(vc.begin(), vc.end(), ref.begin());
-        
+
         vc *= v;
         ref *= ref;
         shouldEqual(vc.shape(), ref.shape());
         shouldEqualSequence(vc.begin(), vc.end(), ref.begin());
-        
+
         vc *= T(4);
         ref *= T(4);
         shouldEqual(vc.shape(), ref.shape());
         shouldEqualSequence(vc.begin(), vc.end(), ref.begin());
-         
+
         vc /= T(4);
         ref /= T(4);
         shouldEqual(vc.shape(), ref.shape());
         shouldEqualSequence(vc.begin(), vc.end(), ref.begin());
-       
+
         vc /= PlainArray(ref.shape(), T(1));
         shouldEqual(vc.shape(), ref.shape());
         shouldEqualSequence(vc.begin(), vc.end(), ref.begin());
@@ -318,69 +319,69 @@ public:
         MultiArrayView <2, T, ChunkedArrayTag> v = array->bindAt (1, 4);
         MultiArrayView <2, T, ChunkedArrayTag> vv = array->template bind<1>(4);
         MultiArrayView <2, T, StridedArrayTag> vr = ref.bindAt (1, 4);
-        
+
         shouldEqual(v.shape(), vr.shape());
         shouldEqual(vv.shape(), vr.shape());
         should(v == vr);
         should(vv == vr);
         shouldEqualSequence(v.begin(), v.end(), vr.begin());
         shouldEqualIndexing(2, v, vr);
-        
+
         MultiArrayView <2, T, ChunkedArrayTag> vt = v.transpose();
         MultiArrayView <2, T, StridedArrayTag> vtr = vr.transpose();
-        
+
         shouldEqual(vt.shape(), vtr.shape());
         should(vt == vtr);
         shouldEqualSequence(vt.begin(), vt.end(), vtr.begin());
         shouldEqualIndexing(2, vt, vtr);
-        
+
         MultiArrayView <1, T, ChunkedArrayTag> v1 = v.bindAt (0, 11);
         MultiArrayView <1, T, StridedArrayTag> v1r = vr.bindAt (0, 11);
-        
+
         shouldEqual(v1.shape(), v1r.shape());
         should(v1 == v1r);
         shouldEqualSequence(v1.begin(), v1.end(), v1r.begin());
         shouldEqualIndexing(1, v1, v1r);
-        
+
         MultiArrayView <1, T, ChunkedArrayTag> v1t = v1.transpose();
-        
+
         shouldEqual(v1t.shape(), v1r.shape());
         should(v1t == v1r);
         shouldEqualSequence(v1t.begin(), v1t.end(), v1r.begin());
         shouldEqualIndexing(1, v1t, v1r);
     }
-    
+
     void test_bindInner ()
     {
         MultiArrayView <2, T, ChunkedArrayTag> v = array->bindInner(2);
         MultiArrayView <2, T, StridedArrayTag> vr = ref.bindInner(2);
         shouldEqual(v.shape(), vr.shape());
         should(v == vr);
-        
+
         TinyVector <int, 2> inner_indices (2, 5);
         MultiArrayView <1, T, ChunkedArrayTag> v1 = array->bindInner(inner_indices);
         MultiArrayView <1, T, StridedArrayTag> v1r = ref.bindInner(inner_indices);
         shouldEqual(v1.shape(), v1r.shape());
         should(v1 == v1r);
-        
+
         MultiArrayView <1, T, ChunkedArrayTag> v21 = v.bindInner(5);
         shouldEqual(v21.shape(), v1r.shape());
         should(v21 == v1r);
     }
-    
+
     void test_bindOuter ()
     {
         MultiArrayView <2, T, ChunkedArrayTag> v = array->bindOuter(2);
         MultiArrayView <2, T, StridedArrayTag> vr = ref.bindOuter(2);
         shouldEqual(v.shape(), vr.shape());
         should(v == vr);
-        
+
         TinyVector <int, 2> inner_indices (5, 2);
         MultiArrayView <1, T, ChunkedArrayTag> v1 = array->bindOuter(inner_indices);
         MultiArrayView <1, T, StridedArrayTag> v1r = ref.bindOuter(inner_indices);
         shouldEqual(v1.shape(), v1r.shape());
         should(v1 == v1r);
-        
+
         MultiArrayView <1, T, ChunkedArrayTag> v21 = v.bindOuter(5);
         shouldEqual(v21.shape(), v1r.shape());
         should(v21 == v1r);
@@ -391,21 +392,21 @@ public:
         {
             Shape3 start, stop(ref.shape());  // empty array
             bool isFullArray = IsSameType<Array, ChunkedArrayFull<3, T> >::value;
-        
+
             MultiArrayView <3, T const, ChunkedArrayTag> vc(empty_array->const_subarray(start, stop));
 
             MultiArray <3, T> c(stop-start);
             empty_array->checkoutSubarray(start, c);
-            
+
             if(isFullArray)
                 shouldEqual(empty_array->dataBytes(), ref.size()*sizeof(T));
             else
                 shouldEqual(empty_array->dataBytes(), 0);
-                
+
             PlainArray empty(shape, T(fill_value));
             shouldEqualSequence(vc.begin(), vc.end(), empty.begin());
             shouldEqualSequence(c.begin(), c.end(), empty.begin());
-            
+
             MultiArrayView <3, T, ChunkedArrayTag> v(empty_array->subarray(start, stop));
             if(IsSameType<Array, ChunkedArrayTmpFile<3, T> >::value)
                 should(empty_array->dataBytes() >= ref.size()*sizeof(T)); // must pad to a full memory page
@@ -413,7 +414,7 @@ public:
                 shouldEqual(empty_array->dataBytes(), ref.size()*sizeof(T));
             shouldEqualSequence(v.begin(), v.end(), empty.begin());
         }
-        
+
         {
             Shape3 start, stop(ref.shape());  // whole array
             MultiArrayView <3, T, ChunkedArrayTag> v(array->subarray(start, stop));
@@ -421,26 +422,26 @@ public:
 
             MultiArray <3, T> c(stop-start);
             array->checkoutSubarray(start, c);
-            
+
             MultiArrayView <3, T, StridedArrayTag> vr = ref.subarray(start, stop);
             MultiArrayView <3, T, StridedArrayTag> vtr = vr.transpose();
-            
+
             shouldEqual(v.shape(), vr.shape());
             should(v == vr);
             shouldEqualSequence(v.begin(), v.end(), vr.begin());
             shouldEqualIndexing(3, v, vr);
-            
+
             shouldEqual(vt.shape(), vtr.shape());
             should(vt == vtr);
             shouldEqualSequence(vt.begin(), vt.end(), vtr.begin());
             shouldEqualIndexing(3, vt, vtr);
-            
+
             shouldEqual(c.shape(), vr.shape());
             should(c == vr);
             shouldEqualSequence(c.begin(), c.end(), vr.begin());
             shouldEqualIndexing(3, c, vr);
         }
-        
+
         {
             Shape3 start(3,2,1), stop(4,5,6);  // single chunk
             MultiArrayView <3, T, ChunkedArrayTag> v(array->subarray(start, stop));
@@ -448,48 +449,48 @@ public:
 
             MultiArray <3, T> c(stop-start);
             array->checkoutSubarray(start, c);
-            
+
             MultiArrayView <3, T, StridedArrayTag> vr = ref.subarray(start, stop);
             MultiArrayView <3, T, StridedArrayTag> vtr = vr.transpose();
-            
+
             shouldEqual(v.shape(), vr.shape());
             should(v == vr);
             shouldEqualSequence(v.begin(), v.end(), vr.begin());
             shouldEqualIndexing(3, v, vr);
-            
+
             shouldEqual(vt.shape(), vtr.shape());
             should(vt == vtr);
             shouldEqualSequence(vt.begin(), vt.end(), vtr.begin());
             shouldEqualIndexing(3, vt, vtr);
-            
+
             shouldEqual(c.shape(), vr.shape());
             should(c == vr);
             shouldEqualSequence(c.begin(), c.end(), vr.begin());
             shouldEqualIndexing(3, c, vr);
         }
-        
+
         {
             Shape3 start(7,6,5), stop(9,10,11); // across chunk borders
             MultiArrayView <3, T, ChunkedArrayTag> v(array->subarray(start, stop));
             MultiArrayView <3, T, ChunkedArrayTag> vt(v.transpose());
-            
+
             MultiArray <3, T> c(stop-start);
             array->checkoutSubarray(start, c);
-            
+
             MultiArrayView <3, T, StridedArrayTag> vr = ref.subarray(start, stop);
-            
+
             shouldEqual(v.shape(), vr.shape());
             should(v == vr);
             shouldEqualSequence(v.begin(), v.end(), vr.begin());
             shouldEqualIndexing(3, v, vr);
-            
+
             shouldEqual(c.shape(), vr.shape());
             should(c == vr);
             shouldEqualSequence(c.begin(), c.end(), vr.begin());
             shouldEqualIndexing(3, c, vr);
         }
     }
-    
+
     void test_iterator ()
     {
         Shape3 s(ref.shape());
@@ -503,7 +504,7 @@ public:
         should(i1.isValid() && !i1.atEnd());
         should(!iend.isValid() && iend.atEnd());
         should(iend.getEndIterator() == iend);
-        
+
         shouldEqual(i1.point(), *c);
         shouldEqual((i1+0).point(), c[0]);
         shouldEqual((i1+1).point(), c[1]);
@@ -553,7 +554,7 @@ public:
         shouldEqual(&iend[-2], &v[Shape3(18,20,21)]);
         shouldEqual(&iend[-10], &v[Shape3(10,20,21)]);
         shouldEqual(&iend[-s[0]-1], &v[Shape3(19,19,21)]);
-        
+
         Iterator i2;
         i2 = iend;
         should(i2 == iend);
@@ -594,16 +595,16 @@ public:
         Iterator i5 = array->begin();
         Iterator i6 = array->begin();
 
-        for (p[2]=0, i3.resetDim(2), i4.setDim(2, 0), i5.template dim<2>() = 0, i6.resetDim(2); 
-                i3.point(2) != s[2]; 
-                i3.incDim(2), i4.addDim(2, 1), ++i5.template dim<2>(), i6.template dim<2>() += 1, ++p[2]) 
+        for (p[2]=0, i3.resetDim(2), i4.setDim(2, 0), i5.template dim<2>() = 0, i6.resetDim(2);
+                i3.point(2) != s[2];
+                i3.incDim(2), i4.addDim(2, 1), ++i5.template dim<2>(), i6.template dim<2>() += 1, ++p[2])
         {
-            for (p[1]=0, i3.resetDim(1), i4.setDim(1, 0), i5.template dim<1>() = 0, i6.resetDim(1); 
-                    i3.point(1) != s[1]; 
-                    i3.incDim(1), i4.addDim(1, 1), ++i5.template dim<1>(), i6.template dim<1>() += 1, ++p[1]) 
+            for (p[1]=0, i3.resetDim(1), i4.setDim(1, 0), i5.template dim<1>() = 0, i6.resetDim(1);
+                    i3.point(1) != s[1];
+                    i3.incDim(1), i4.addDim(1, 1), ++i5.template dim<1>(), i6.template dim<1>() += 1, ++p[1])
             {
-                for (p[0]=0, i3.resetDim(0), i4.setDim(0, 0), i5.template dim<0>() = 0, i6.resetDim(0); 
-                        i3.point(0) != s[0]; 
+                for (p[0]=0, i3.resetDim(0), i4.setDim(0, 0), i5.template dim<0>() = 0, i6.resetDim(0);
+                        i3.point(0) != s[0];
                         i3.incDim(0), i4.addDim(0, 1), ++i5.template dim<0>(), i6.template dim<0>() += 1, ++p[0], ++i1, ++c, i2 += 1, ++count)
                 {
                     shouldEqual(&*i1, &v[p]);
@@ -698,15 +699,15 @@ public:
         shouldEqual(&*i1, &v[Shape3(19,20,21)]);
         shouldEqual(&*i2, &v[Shape3(19,20,21)]);
     }
-    
+
     void testChunkIterator()
     {
         Shape3 start(5,0,3), stop(shape[0], shape[1], shape[2]-3);
         MultiArrayView <3, T, ChunkedArrayTag> v(array->subarray(Shape3(), shape));
-        
+
         typename Array::chunk_iterator i = array->chunk_begin(start, stop),
                                        end = array->chunk_end(start, stop);
-        typename MultiArrayView <3, T, ChunkedArrayTag>::chunk_const_iterator 
+        typename MultiArrayView <3, T, ChunkedArrayTag>::chunk_const_iterator
                                        vi = v.chunk_cbegin(start, stop),
                                        vend = v.chunk_cend(start, stop);
         int count = -1;
@@ -714,14 +715,14 @@ public:
         {
             shouldEqual(i->data(), i[0].data());
             shouldEqual(i->data(), vi->data());
-            
+
             *i = T(count);
             ref.subarray(i.chunkStart(), i.chunkStop()) = T(count);
             should(*vi == ref.subarray(i.chunkStart(), i.chunkStop()));
         }
         should(vi == vend);
         shouldEqualSequence(array->cbegin(), array->cend(), ref.begin());
-        
+
         for(;;)
         {
             --i;
@@ -737,15 +738,16 @@ public:
         ref.subarray(start, stop) = T(fill_value);
         shouldEqualSequence(array->cbegin(), array->cend(), ref.begin());
     }
-    
-    static void testMultiThreadedRun(BaseArray * v, int startIndex, int d, int * go)
+
+    static void testMultiThreadedRun(BaseArray * v, int startIndex, int d,
+                                     threading::atomic_long * go)
     {
-        while(*go == 0)
+        while(go->load() == 0)
             threading::this_thread::yield();
-            
+
         Shape3 s = v->shape();
         int sliceSize = s[0]*s[1];
-        
+
         Iterator bi(v->begin());
         T count(startIndex*sliceSize), start((d-1)*sliceSize), inc(1);
         for(bi.setDim(2,startIndex); bi.coord(2) < s[2]; bi.addDim(2, d), count += start)
@@ -760,27 +762,28 @@ public:
     {
         array.reset(0); // close the file if backend is HDF5
         ArrayPtr a = createArray(Shape3(200, 201, 202), Shape3(), (Array *)0);
-    
-        int go = 0;
-        
-        threading::thread t1(testMultiThreadedRun, a.get(), 0, 4, &go);
-        threading::thread t2(testMultiThreadedRun, a.get(), 1, 4, &go);
-        threading::thread t3(testMultiThreadedRun, a.get(), 2, 4, &go);
-        threading::thread t4(testMultiThreadedRun, a.get(), 3, 4, &go);
-     
-        go = 1;
-     
+
+        threading::atomic_long go;
+        go.store(0);
+
+        threading::thread t1(std::bind(testMultiThreadedRun,a.get(),0,4,&go));
+        threading::thread t2(std::bind(testMultiThreadedRun,a.get(),1,4,&go));
+        threading::thread t3(std::bind(testMultiThreadedRun,a.get(),2,4,&go));
+        threading::thread t4(std::bind(testMultiThreadedRun,a.get(),3,4,&go));
+
+        go.store(1);
+
         t4.join();
         t3.join();
         t2.join();
         t1.join();
-        
+
         PlainArray ref(a->shape());
         linearSequence(ref.begin(), ref.end());
-        
+
         shouldEqualSequence(a->begin(), a->end(), ref.begin());
     }
-        
+
     // void testIsUnstrided()
     // {
         // typedef difference3_type Shape;
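
The switch to std::bind in testMultiThreaded above presumably keeps the test independent of whether the threading::thread backend supports variadic constructor arguments (std::thread does, a non-variadic boost build may not), and the plain int flag becomes a threading::atomic_long so the start signal is a proper atomic handshake. A reduced sketch of the same pattern (the worker and function names are illustrative stand-ins):

    #include <functional>
    #include <vigra/threading.hxx>

    void worker(int id, vigra::threading::atomic_long * go)    // stand-in for testMultiThreadedRun
    {
        while(go->load() == 0)
            vigra::threading::this_thread::yield();             // spin until the launcher says go
        // ... process slice 'id' ...
    }

    void launch()
    {
        vigra::threading::atomic_long go;
        go.store(0);
        vigra::threading::thread t(std::bind(worker, 0, &go));  // nullary callable, backend-agnostic
        go.store(1);
        t.join();
    }
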
@@ -804,12 +807,12 @@ public:
     // void testMethods ()
     // {
         // shouldEqual(array3.squaredNorm(), 332833500);
-        
+
         // shouldEqual(array3.norm(), std::sqrt(332833500.0));
         // shouldEqual(array3.norm(0), 999.0);
         // shouldEqual(array3.norm(1), 499500.0);
         // shouldEqualTolerance(array3.norm(2, false), std::sqrt(332833500.0), 1e-14);
-        
+
         // difference3_type first(0,0,0), last(1,1,1);
         // shouldEqual(array3.subarray(first, last).norm(), 0.0);
         // shouldEqual(array3.subarray(first, last).norm(0), 0.0);
@@ -930,7 +933,7 @@ public:
     // void testCopy()
     // {
         // Image3D res(img.shape(), 1.0), res1(img.shape(), 1.0);
-        
+
         // copyMultiArray(srcMultiArrayRange(img), destMultiArray(res));
         // copyMultiArray(img, res1);
 
@@ -943,7 +946,7 @@ public:
         // Image3D res(img.shape());
 
         // copyMultiArray(img.subarray(Size3(0,0,0), Size3(5,1,1)), res);
-        
+
         // int x,y,z;
         // for(z=0; z<img.shape(2); ++z)
             // for(y=0; y<img.shape(1); ++y)
@@ -956,7 +959,7 @@ public:
         // Image3D res(img.shape());
 
         // copyMultiArray(img.subarray(Size3(0,0,0), Size3(1,1,3)), res);
-        
+
         // int x,y,z;
         // for(z=0; z<img.shape(2); ++z)
             // for(y=0; y<img.shape(1); ++y)
@@ -970,7 +973,7 @@ public:
         // transformMultiArray(srcMultiArrayRange(img), destMultiArray(res),
                             // Arg1() + Arg1());
         // transformMultiArray(img, res1, Arg1() + Arg1());
-        
+
         // using namespace multi_math;
         // should(all(2.0*img == res));
         // should(all(2.0*img == res1));
@@ -981,7 +984,7 @@ public:
         // Image3D res(img.shape());
         // transformMultiArray(img.subarray(Size3(0,0,0), Size3(5,1,1)), res,
                             // Arg1() + Arg1());
-        
+
         // int x,y,z;
         // for(z=0; z<img.shape(2); ++z)
             // for(y=0; y<img.shape(1); ++y)
@@ -995,7 +998,7 @@ public:
 
         // transformMultiArray(img.subarray(Size3(0,0,0), Size3(1,1,3)), res,
                             // Arg1() + Arg1());
-        
+
         // int x,y,z;
         // for(z=0; z<img.shape(2); ++z)
             // for(y=0; y<img.shape(1); ++y)
@@ -1008,7 +1011,7 @@ public:
         // Image3D res(Size3(5,1,1));
 
         // transformMultiArray(img, res, reduceFunctor(Arg1() + Arg2(), 0.0));
-        
+
         // int x,y,z;
         // for(x=0; x<img.shape(0); ++x)
         // {
@@ -1018,19 +1021,19 @@ public:
                     // sum += img(x,y,z);
             // shouldEqual(res(x,0,0), sum);
         // }
-        
+
         // Image1D res1(Size1(5));
         // MultiArrayView<3,PixelType> res3 = res1.insertSingletonDimension(1).insertSingletonDimension(2);
         // transformMultiArray(img, res3, FindSum<PixelType>());
-        // shouldEqualSequenceTolerance(res1.data(), res1.data()+5, res.data(), 1e-7);       
+        // shouldEqualSequenceTolerance(res1.data(), res1.data()+5, res.data(), 1e-7);
     // }
 
     // void testTransformInnerReduce()
     // {
         // Image3D res(Size3(1,1,3));
-        
+
         // transformMultiArray(img, res, reduceFunctor(Arg1() + Arg2(), 0.0));
-        
+
         // int x,y,z;
         // for(z=0; z<img.shape(2); ++z)
         // {
@@ -1040,22 +1043,22 @@ public:
                     // sum += img(x,y,z);
             // shouldEqual(res(0,0,z), sum);
         // }
-        
+
         // Image1D res1(Size1(3));
         // MultiArrayView<3,PixelType> res3 = res1.insertSingletonDimension(0).insertSingletonDimension(0);
         // transformMultiArray(img, res3, FindSum<PixelType>());
-        // shouldEqualSequenceTolerance(res1.data(), res1.data()+3, res.data(), 1e-6);       
+        // shouldEqualSequenceTolerance(res1.data(), res1.data()+3, res.data(), 1e-6);
     // }
 
     // void testCombine2()
     // {
         // Image3D res(img.shape()), res1(img.shape());
-        
-        // combineTwoMultiArrays(srcMultiArrayRange(img), srcMultiArray(img), 
+
+        // combineTwoMultiArrays(srcMultiArrayRange(img), srcMultiArray(img),
                               // destMultiArray(res),
                               // Arg1() + Arg2());
         // combineTwoMultiArrays(img, img, res1, Arg1() + Arg2());
-        
+
         // using namespace multi_math;
         // should(all(2.0*img == res));
         // should(all(2.0*img == res1));
@@ -1064,9 +1067,9 @@ public:
     // void testCombine2OuterExpand()
     // {
         // Image3D res(img.shape());
-        
+
         // combineTwoMultiArrays(img.subarray(Size3(0,0,0), Size3(5,1,1)), img, res,
-                              // Arg1() + Param(2.0)*Arg2());       
+                              // Arg1() + Param(2.0)*Arg2());
         // int x,y,z;
         // for(z=0; z<img.shape(2); ++z)
             // for(y=0; y<img.shape(1); ++y)
@@ -1074,16 +1077,16 @@ public:
                     // shouldEqual(res(x,y,z), 2.0*img(x,y,z) + img(x,0,0));
 
         // combineTwoMultiArrays(img, img.subarray(Size3(0,0,0), Size3(5,1,1)), res,
-                              // Arg1() + Param(2.0)*Arg2());       
+                              // Arg1() + Param(2.0)*Arg2());
         // for(z=0; z<img.shape(2); ++z)
             // for(y=0; y<img.shape(1); ++y)
                 // for(x=0; x<img.shape(0); ++x)
                     // shouldEqual(res(x,y,z), img(x,y,z) + 2.0*img(x,0,0));
 
         // View3D view = img.subarray(Size3(0,0,0), Size3(5,1,1));
-        // combineTwoMultiArrays(srcMultiArrayRange(view), srcMultiArrayRange(view), 
+        // combineTwoMultiArrays(srcMultiArrayRange(view), srcMultiArrayRange(view),
                               // destMultiArrayRange(res),
-                              // Arg1() + Param(2.0)*Arg2());       
+                              // Arg1() + Param(2.0)*Arg2());
         // for(z=0; z<img.shape(2); ++z)
             // for(y=0; y<img.shape(1); ++y)
                 // for(x=0; x<img.shape(0); ++x)
@@ -1093,10 +1096,10 @@ public:
     // void testCombine2InnerExpand()
     // {
         // Image3D res(img.shape());
-        
+
         // View3D view = img.subarray(Size3(0,0,0), Size3(1,1,3));
         // combineTwoMultiArrays(view, img, res,
-                              // Arg1() + Param(2.0)*Arg2());       
+                              // Arg1() + Param(2.0)*Arg2());
         // int x,y,z;
         // for(z=0; z<img.shape(2); ++z)
             // for(y=0; y<img.shape(1); ++y)
@@ -1104,15 +1107,15 @@ public:
                     // shouldEqual(res(x,y,z), 2.0*img(x,y,z) + img(0,0,z));
 
         // combineTwoMultiArrays(img, view, res,
-                              // Arg1() + Param(2.0)*Arg2());       
+                              // Arg1() + Param(2.0)*Arg2());
         // for(z=0; z<img.shape(2); ++z)
             // for(y=0; y<img.shape(1); ++y)
                 // for(x=0; x<img.shape(0); ++x)
                     // shouldEqual(res(x,y,z), img(x,y,z) + 2.0*img(0,0,z));
 
-        // combineTwoMultiArrays(srcMultiArrayRange(view), srcMultiArrayRange(view), 
+        // combineTwoMultiArrays(srcMultiArrayRange(view), srcMultiArrayRange(view),
                               // destMultiArrayRange(res),
-                              // Arg1() + Param(2.0)*Arg2());       
+                              // Arg1() + Param(2.0)*Arg2());
         // for(z=0; z<img.shape(2); ++z)
             // for(y=0; y<img.shape(1); ++y)
                 // for(x=0; x<img.shape(0); ++x)
@@ -1122,10 +1125,10 @@ public:
     // void testCombine2OuterReduce()
     // {
         // Image3D res(Size3(5,1,1));
-        
+
         // combineTwoMultiArrays(img, img, res,
                               // reduceFunctor(Arg1() + Arg2() + Arg3(), 0.0));
-        
+
         // int x,y,z;
         // for(x=0; x<img.shape(0); ++x)
         // {
@@ -1140,10 +1143,10 @@ public:
     // void testCombine2InnerReduce()
     // {
         // Image3D res(Size3(1,1,3));
-        
+
         // combineTwoMultiArrays(img, img, res,
                               // reduceFunctor(Arg1() + Arg2() + Arg3(), 0.0));
-        
+
         // int x,y,z;
         // for(z=0; z<img.shape(2); ++z)
         // {
@@ -1158,9 +1161,9 @@ public:
     // void testCombine3()
     // {
         // Image3D res(img.shape()), res1(img.shape());
-        
-        // combineThreeMultiArrays(srcMultiArrayRange(img), 
-                                // srcMultiArray(img), srcMultiArray(img), 
+
+        // combineThreeMultiArrays(srcMultiArrayRange(img),
+                                // srcMultiArray(img), srcMultiArray(img),
                                 // destMultiArray(res),
                                 // Arg1() + Arg2() + Arg3());
         // combineThreeMultiArrays(img, img, img, res1,
@@ -1175,12 +1178,12 @@ public:
                     // shouldEqual(res1(x,y,z), 3.0*img(x,y,z));
                 // }
     // }
-    
+
     // void testInitMultiArrayBorder(){
         // typedef vigra::MultiArray<1,int> IntLine;
         // typedef vigra::MultiArray<2,int> IntImage;
         // typedef vigra::MultiArray<3,int> IntVolume;
-        
+
         // const int desired_vol[] ={  0, 0, 0, 0, 0, 0,
                                     // 0, 0, 0, 0, 0, 0,
                                     // 0, 0, 0, 0, 0, 0,
@@ -1235,32 +1238,32 @@ public:
         // const int desired_vol2[] ={  0, 0,
                                      // 0, 0,
 
-                                     // 0, 0, 
+                                     // 0, 0,
                                      // 0, 0};
 
         // IntVolume vol(IntVolume::difference_type(6,6,6));
-        
+
         // for(IntVolume::iterator iter=vol.begin(); iter!=vol.end(); ++iter)
             // *iter=5;
         // initMultiArrayBorder(destMultiArrayRange(vol),2,0);
         // shouldEqualSequence(vol.begin(), vol.end(), desired_vol);
 
         // IntImage img(IntImage::difference_type(6,6));
-        
+
         // for(IntImage::iterator iter=img.begin(); iter!=img.end(); ++iter)
             // *iter=5;
         // initMultiArrayBorder(destMultiArrayRange(img),1,0);
         // shouldEqualSequence(img.begin(), img.end(), desired_img);
 
         // IntLine lin(IntLine::difference_type(7));
-        
+
         // for(IntLine::iterator iter=lin.begin(); iter!=lin.end(); ++iter)
             // *iter=5;
         // initMultiArrayBorder(destMultiArrayRange(lin),3,0);
         // shouldEqualSequence(lin.begin(), lin.end(), desired_lin);
 
         // IntVolume vol2(IntVolume::difference_type(2,2,2));
-        
+
         // for(IntVolume::iterator iter=vol2.begin(); iter!=vol2.end(); ++iter)
             // *iter=5;
         // initMultiArrayBorder(vol2, 9, 0);
@@ -1292,17 +1295,17 @@ public:
         // shouldEqual(stats[1].min, 1.1f);
         // shouldEqual(stats[1].max, 58.1f);
     // }
-    
+
     // void testTensorUtilities()
     // {
         // MultiArrayShape<2>::type shape(3,4);
         // int size = shape[0]*shape[1];
-        
+
         // MultiArray<2, TinyVector<double, 2> > vector(shape), rvector(shape);
         // MultiArray<2, TinyVector<double, 3> > tensor1(shape), tensor2(shape), rtensor(shape);
         // MultiArray<2, double > trace(shape), rtrace(shape);
         // MultiArray<2, double > determinant(shape), rdet(shape);
-        
+
         // for(int k=0; k<size; ++k)
         // {
             // for(int l=0; l<2; ++l)
@@ -1311,27 +1314,27 @@ public:
                 // tensor1[k][l] = randomMT19937().uniform();
             // rdet[k] = tensor1[k][0]*tensor1[k][2] - sq(tensor1[k][1]);
         // }
-        
+
         // vectorToTensor(srcImageRange(vector), destImage(rtensor));
         // vectorToTensorMultiArray(srcMultiArrayRange(vector), destMultiArray(tensor2));
         // shouldEqualSequence(tensor2.data(), tensor2.data()+size, rtensor.data());
         // tensor2.init(TinyVector<double, 3>());
         // vectorToTensorMultiArray(vector, tensor2);
         // shouldEqualSequence(tensor2.data(), tensor2.data()+size, rtensor.data());
-                
+
         // tensorTrace(srcImageRange(tensor1), destImage(rtrace));
         // tensorTraceMultiArray(srcMultiArrayRange(tensor1), destMultiArray(trace));
         // shouldEqualSequence(trace.data(), trace.data()+size, rtrace.data());
         // trace = 0;
         // tensorTraceMultiArray(tensor1, trace);
         // shouldEqualSequence(trace.data(), trace.data()+size, rtrace.data());
-                
+
         // tensorDeterminantMultiArray(srcMultiArrayRange(tensor1), destMultiArray(determinant));
         // shouldEqualSequence(determinant.data(), determinant.data()+size, rdet.data());
         // determinant = 0;
         // tensorDeterminantMultiArray(tensor1, determinant);
         // shouldEqualSequence(determinant.data(), determinant.data()+size, rdet.data());
-                
+
         // determinant = 1000.0;
         // tensorDeterminantMultiArray(srcMultiArrayRange(tensor2), destMultiArray(determinant));
         // shouldEqualTolerance(norm(determinant), 0.0, 1e-14);
@@ -1355,7 +1358,7 @@ public:
     typedef ChunkedArray<3, T> BaseArray;
     typedef VIGRA_UNIQUE_PTR<BaseArray> ArrayPtr;
     typedef typename BaseArray::iterator Iterator;
-    
+
     Shape3 shape;
     ArrayPtr array;
 
@@ -1364,44 +1367,44 @@ public:
     {
         array = createArray(shape, (Array *)0);
         linearSequence(array->begin(), array->end());
-        std::cerr << "chunked multi array test for type " << typeid(Array).name() << ": \n";        
+        std::cerr << "chunked multi array test for type " << typeid(Array).name() << ": \n";
     }
-    
-    static ArrayPtr createArray(Shape3 const & shape, 
+
+    static ArrayPtr createArray(Shape3 const & shape,
                                 ChunkedArrayFull<3, T> *)
     {
         return ArrayPtr(new ChunkedArrayFull<3, T>(shape));
     }
-    
-    static ArrayPtr createArray(Shape3 const & shape, 
+
+    static ArrayPtr createArray(Shape3 const & shape,
                                 ChunkedArrayLazy<3, T> *)
     {
         return ArrayPtr(new ChunkedArrayLazy<3, T>(shape));
     }
-    
-    static ArrayPtr createArray(Shape3 const & shape, 
+
+    static ArrayPtr createArray(Shape3 const & shape,
                                 ChunkedArrayCompressed<3, T> *)
     {
         return ArrayPtr(new ChunkedArrayCompressed<3, T>(shape));
     }
-    
+
 #ifdef HasHDF5
-    static ArrayPtr createArray(Shape3 const & shape, 
+    static ArrayPtr createArray(Shape3 const & shape,
                                 ChunkedArrayHDF5<3, T> *)
     {
         HDF5File hdf5_file("chunked_test.h5", HDF5File::New);
-        return ArrayPtr(new ChunkedArrayHDF5<3, T>(hdf5_file, "test", HDF5File::New, 
-                                                   shape, Shape3(), 
+        return ArrayPtr(new ChunkedArrayHDF5<3, T>(hdf5_file, "test", HDF5File::New,
+                                                   shape, Shape3(),
                                                    ChunkedArrayOptions().compression(NO_COMPRESSION)));
     }
 #endif
-    
-    static ArrayPtr createArray(Shape3 const & shape, 
+
+    static ArrayPtr createArray(Shape3 const & shape,
                                 ChunkedArrayTmpFile<3, T> *)
     {
         return ArrayPtr(new ChunkedArrayTmpFile<3, T>(shape));
     }
-    
+
     void testBaselineSpeed()
     {
         std::cerr << "############ chunked iterator speed #############\n";
@@ -1421,7 +1424,7 @@ public:
         std::string t = TOCS;
         std::cerr << "    baseline:  " << t << "\n";
     }
-    
+
     void testIteratorSpeed()
     {
         Iterator i   = array->begin(),
@@ -1439,7 +1442,7 @@ public:
         std::string t = TOCS;
         std::cerr << "    read time: " << t << " (cache: " << array->cacheSize() << ")\n";
     }
-    
+
     void testNestedLoopSpeed()
     {
         Iterator i   = array->begin(),
@@ -1459,7 +1462,7 @@ public:
         std::string t = TOCS;
         std::cerr << "    loop time: " << t << " (cache: " << array->cacheSize() << ")\n";
     }
-    
+
     void testIteratorSpeed_LargeCache()
     {
         array.reset(0);
@@ -1468,7 +1471,7 @@ public:
         linearSequence(array->begin(), array->end());
         testIteratorSpeed();
     }
-    
+
     void testIndexingBaselineSpeed()
     {
         std::cerr << "################## indexing speed ####################\n";
@@ -1488,7 +1491,7 @@ public:
         std::string t = TOCS;
         std::cerr << "    baseline:  " << t << "\n";
     }
-    
+
     void testIndexingSpeed()
     {
         MultiArrayView<3, T, ChunkedArrayTag> sub(array->subarray(Shape3(), shape));
@@ -1525,7 +1528,7 @@ struct ChunkedMultiArrayTestSuite
         add( testCase( &ChunkedMultiArrayTest<Array>::testChunkIterator ) );
         add( testCase( &ChunkedMultiArrayTest<Array>::testMultiThreaded ) );
     }
-    
+
     template <class T>
     void testSpeedImpl()
     {
@@ -1541,7 +1544,7 @@ struct ChunkedMultiArrayTestSuite
         add( testCase( (&ChunkedMultiArraySpeedTest<ChunkedArrayHDF5<3, T> >::testIteratorSpeed_LargeCache )));
 #endif
     }
-    
+
     template <class T>
     void testIndexingSpeedImpl()
     {
@@ -1554,7 +1557,7 @@ struct ChunkedMultiArrayTestSuite
         add( testCase( (&ChunkedMultiArraySpeedTest<ChunkedArrayHDF5<3, T> >::testIndexingSpeed )));
 #endif
     }
-    
+
     ChunkedMultiArrayTestSuite()
     : vigra::test_suite("ChunkedMultiArrayTestSuite")
     {
@@ -1565,7 +1568,7 @@ struct ChunkedMultiArrayTestSuite
 #ifdef HasHDF5
         testImpl<ChunkedArrayHDF5<3, float> >();
 #endif
-        
+
         testImpl<ChunkedArrayFull<3, TinyVector<float, 3> > >();
         testImpl<ChunkedArrayLazy<3, TinyVector<float, 3> > >();
         testImpl<ChunkedArrayCompressed<3, TinyVector<float, 3> > >();
@@ -1573,15 +1576,15 @@ struct ChunkedMultiArrayTestSuite
 #ifdef HasHDF5
         testImpl<ChunkedArrayHDF5<3, TinyVector<float, 3> > >();
 #endif
-        
+
         testSpeedImpl<unsigned char>();
         testSpeedImpl<float>();
         testSpeedImpl<double>();
-        
+
         testIndexingSpeedImpl<unsigned char>();
         testIndexingSpeedImpl<float>();
         testIndexingSpeedImpl<double>();
-        
+
         //add( testCase( &MultiArrayPointoperatorsTest::testInit ) );
         //add( testCase( &MultiArrayPointoperatorsTest::testCopy ) );
         //add( testCase( &MultiArrayPointoperatorsTest::testCopyOuterExpansion ) );
@@ -1611,7 +1614,7 @@ int main(int argc, char ** argv)
     ChunkedMultiArrayTestSuite test0;
     failed += test0.run(vigra::testsToBeExecuted(argc, argv));
     std::cout << test0.report() << std::endl;
-    
+
     return (failed != 0);
 }
 
diff --git a/test/registration/CMakeLists.txt b/test/registration/CMakeLists.txt
index a89495b..989c560 100644
--- a/test/registration/CMakeLists.txt
+++ b/test/registration/CMakeLists.txt
@@ -6,7 +6,7 @@ if(FFTW3_FOUND)
 
     VIGRA_ADD_TEST(test_registration test.cxx LIBRARIES ${FFTW3_LIBRARIES} ${FFTW3F_LIBRARIES} ${THREADING_LIBRARIES} vigraimpex)
 else()
-    MESSAGE(STATUS "** WARNING: tests of correlation-based registration will not be executed")
+    MESSAGE(STATUS "** WARNING: fftw not found. Fourier-domain registration tests will not be executed")
 
     VIGRA_ADD_TEST(test_registration test.cxx LIBRARIES ${THREADING_LIBRARIES} vigraimpex)
 endif()
diff --git a/test/threadpool/CMakeLists.txt b/test/threadpool/CMakeLists.txt
index 2e9deb1..33c5236 100644
--- a/test/threadpool/CMakeLists.txt
+++ b/test/threadpool/CMakeLists.txt
@@ -1,2 +1,8 @@
-VIGRA_ADD_TEST(test_threadpool test.cxx)
+VIGRA_CONFIGURE_THREADING()
 
+if(THREADING_FOUND)
+    VIGRA_ADD_TEST(test_threadpool test.cxx LIBRARIES ${THREADING_LIBRARIES})
+else()
+    MESSAGE(STATUS "** WARNING: No threading implementation found.")
+    MESSAGE(STATUS "**          test_threadpool will not be executed on this platform.")
+endif()
diff --git a/test/threadpool/test.cxx b/test/threadpool/test.cxx
index dbf4234..a20fc62 100644
--- a/test/threadpool/test.cxx
+++ b/test/threadpool/test.cxx
@@ -32,7 +32,9 @@
 /*    OTHER DEALINGS IN THE SOFTWARE.                                   */
 /*                                                                      */
 /************************************************************************/
+
 #include <vigra/unittest.hxx>
+#include <vigra/threading.hxx>
 #include <vigra/threadpool.hxx>
 #include <vigra/timing.hxx>
 #include <numeric>
@@ -74,14 +76,14 @@ struct ThreadPoolTests
         std::string exception_string = "the test exception";
         std::vector<int> v(10000);
         ThreadPool pool(4);
-        std::vector<std::future<void> > futures;
+        std::vector<threading::future<void> > futures;
         for (size_t i = 0; i < v.size(); ++i)
         {
             futures.emplace_back(
                 pool.enqueue(
                     [&v, &exception_string, i](size_t thread_id)
                     {
-                        v[i] = thread_id;
+                        v[i] = 1;
                         if (i == 5000)
                             throw std::runtime_error(exception_string);
                     }
@@ -164,7 +166,7 @@ struct ThreadPoolTests
         );
 
         size_t const sum = std::accumulate(results.begin(), results.end(), 0);
-        should(sum == (n*(n-1))/2);
+        shouldEqual(sum, (n*(n-1))/2);
     }
 
     void test_parallel_foreach_sum_serial()
@@ -183,7 +185,7 @@ struct ThreadPoolTests
         );
 
         size_t const sum = std::accumulate(results.begin(), results.end(), 0);
-        should(sum == (n*(n-1))/2);
+        shouldEqual(sum, (n*(n-1))/2);
     }
 
     void test_parallel_foreach_sum_auto()
@@ -204,7 +206,7 @@ struct ThreadPoolTests
         );
 
         size_t const sum = std::accumulate(results.begin(), results.end(), 0);
-        should(sum == (n*(n-1))/2);
+        shouldEqual(sum, (n*(n-1))/2);
     }
 
     void test_parallel_foreach_timing()
@@ -227,7 +229,7 @@ struct ThreadPoolTests
         std::cout << "parallel_foreach took " << TOCS << std::endl;
 
         size_t const sum = std::accumulate(results.begin(), results.end(), 0);
-        should(sum == n);
+        shouldEqual(sum, n);
     }
 };
 
@@ -241,10 +243,13 @@ struct ThreadPoolTestSuite : public test_suite
         add(testCase(&ThreadPoolTests::test_threadpool_exception));
         add(testCase(&ThreadPoolTests::test_parallel_foreach));
         add(testCase(&ThreadPoolTests::test_parallel_foreach_exception));
-        add(testCase(&ThreadPoolTests::test_parallel_foreach_sum));
         add(testCase(&ThreadPoolTests::test_parallel_foreach_sum_serial));
+#if !defined(USE_BOOST_THREAD) || \
+    defined(BOOST_THREAD_PROVIDES_VARIADIC_THREAD)
+        add(testCase(&ThreadPoolTests::test_parallel_foreach_sum));
         add(testCase(&ThreadPoolTests::test_parallel_foreach_sum_auto));
         add(testCase(&ThreadPoolTests::test_parallel_foreach_timing));
+#endif
     }
 };
 
diff --git a/vigranumpy/docsrc/CMakeLists.txt b/vigranumpy/docsrc/CMakeLists.txt
index cf98661..d53d9af 100644
--- a/vigranumpy/docsrc/CMakeLists.txt
+++ b/vigranumpy/docsrc/CMakeLists.txt
@@ -57,7 +57,19 @@ IF(PYTHON_SPHINX)
         SET(VIGRA_CONFIGURATION)
     ENDIF()
 
-    ADD_CUSTOM_TARGET (doc_python
+    # just rerun sphinx (useful to debug the vigranumpy docu)
+    ADD_CUSTOM_TARGET (doc_sphinx
+        COMMAND ${CMAKE_COMMAND} -E make_directory
+            ${VIGRANUMPY_DOCDIR}
+        COMMAND ${PYTHON_SPHINX} -b html
+            ${SPHINX_ALLOPTS}
+            ${VIGRA_CONFIGURATION}
+            ${CMAKE_CURRENT_SOURCE_DIR}
+            ${VIGRANUMPY_DOCDIR}
+        COMMENT "Generating vigranumpy documentation")
+
+    # create vigranumpy documentation after updating C++ docu and vigranumpy modules
+    ADD_CUSTOM_TARGET(doc_python
         COMMAND ${CMAKE_COMMAND} -E make_directory
             ${VIGRANUMPY_DOCDIR}
         COMMAND ${PYTHON_SPHINX} -b html
@@ -66,6 +78,8 @@ IF(PYTHON_SPHINX)
             ${CMAKE_CURRENT_SOURCE_DIR}
             ${VIGRANUMPY_DOCDIR}
         COMMENT "Generating vigranumpy documentation")
+    ADD_DEPENDENCIES(doc_python vigranumpy)
+    ADD_DEPENDENCIES(doc_python doc_cpp)
 
 ELSE(PYTHON_SPHINX)
     # no vigranumpy documentation if sphinx not available
@@ -77,6 +91,4 @@ ELSE(PYTHON_SPHINX)
 
 ENDIF(PYTHON_SPHINX)
 
-ADD_DEPENDENCIES(doc_python vigranumpy)
-ADD_DEPENDENCIES(doc_python doc_cpp)
 ADD_DEPENDENCIES(doc doc_python)
diff --git a/vigranumpy/docsrc/conf.py.cmake2.in b/vigranumpy/docsrc/conf.py.cmake2.in
index 679b8fa..6fa8da1 100644
--- a/vigranumpy/docsrc/conf.py.cmake2.in
+++ b/vigranumpy/docsrc/conf.py.cmake2.in
@@ -11,6 +11,8 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.
 
+from __future__ import division, print_function
+
 import sys, os
 
 # silent lots of 'arg is not a Python function' warnings
@@ -33,7 +35,7 @@ _getargspec_workaround.__module__ = 'inspect'
 # set the PATH of the current build, so that we don't create
 # documentation for a possibly outdated installation
 vigranumpy_path=r'@VIGRANUMPY_TMP_PATH@'
-print "Working directory:", vigranumpy_path
+print("Working directory:", vigranumpy_path)
 sys.path.insert(0,vigranumpy_path)
 
 # find the build configuration ('release' or 'debug' on Windows, nothing on Linux)
@@ -44,7 +46,7 @@ for a in sys.argv:
         outdir = '/' + a[len(match):]
 
 vigraimpex_path=r'@VIGRAIMPEX_PATH@%s' % outdir
-print "vigraimpex path:", vigraimpex_path
+print("vigraimpex path:", vigraimpex_path)
 os.environ['PATH'] = os.pathsep.join([vigraimpex_path, os.environ['PATH']])
 
 
diff --git a/vigranumpy/docsrc/conf.py.in b/vigranumpy/docsrc/conf.py.in
index 003e342..02f3c4d 100644
--- a/vigranumpy/docsrc/conf.py.in
+++ b/vigranumpy/docsrc/conf.py.in
@@ -11,6 +11,7 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.
 
+from __future__ import division, print_function
 import sys, os, re
 
 # silent lots of 'arg is not a Python function' warnings
@@ -33,7 +34,7 @@ _getargspec_workaround.__module__ = 'inspect'
 # set the PATH of the current build, so that we don't create
 # documentation for a possibly outdated installation
 vigranumpy_path=r'@VIGRANUMPY_TMP_PATH@'
-print "Working directory:", vigranumpy_path
+print("Working directory:", vigranumpy_path)
 sys.path.insert(0,vigranumpy_path)
 
 # find the build configuration ('release' or 'debug' on Windows, nothing on Linux)
@@ -44,7 +45,7 @@ for a in sys.argv:
         outdir = '/' + a[len(match):]
 
 vigraimpex_path = re.sub('/Release$', outdir, r'$<TARGET_FILE_DIR:vigraimpex>')
-print "vigraimpex path:", vigraimpex_path
+print("vigraimpex path:", vigraimpex_path)
 os.environ['PATH'] = os.pathsep.join([vigraimpex_path.replace('/', os.sep), os.environ['PATH']])
 
 
diff --git a/vigranumpy/docsrc/index.rst b/vigranumpy/docsrc/index.rst
index 3ca8378..f949049 100644
--- a/vigranumpy/docsrc/index.rst
+++ b/vigranumpy/docsrc/index.rst
@@ -25,63 +25,63 @@ Basic calling syntax is similar to C++, with one important difference: Arguments
 
     # allocate new result image
     >>> smoothImage = vigra.gaussianSmoothing(inputImage, scale)
-    
+
     # reuse and overwrite existing result image
     >>> smoothImage = vigra.gaussianSmoothing(inputImage, scale, out=smoothImage)
-    
+
 Unless otherwise noted, all functions expect and create arrays with dtype=numpy.float32.
 
 Another important concern is the interpretation and ordering of the array axes. Numpy does not provide any means to attach semantics to axes, but relies purely on the convention that the most important axis is last, as in ``array[y, x]`` or ``array[z, y, x]`` ("C-order"). However, there is no way to enforce this convention in a program, since arrays can be transposed outside of the user's control (e.g. when saving data to a file). Moreover, many imaging libraries (e.g. `Image Magick <htt [...]
 
 To solve these ambiguities in a clean way, vigranumpy introduces the concept of **axistags** which is realized in class :class:`vigra.AxisTags`. Every :class:`~vigra.VigraArray` (which is a subclass of numpy.ndarray) gets a new property ``array.axistags`` that describes axis semantics, and all vigranumpy functions account for and preserve axistag information. Unfortunately, this functionality cannot easily be retrofitted to numpy.ndarray itself. Therefore, we employ the following convers [...]
 
-* When the Python array has **no** ``array.axistags`` property, it is mapped to the C++ NumpyArray 
-  **without** any change in axis ordering. Since most VIGRA functions can work on arbitrarily 
-  transposed arrays, you will get correct results, but execution may be slower because the 
+* When the Python array has **no** ``array.axistags`` property, it is mapped to the C++ NumpyArray
+  **without** any change in axis ordering. Since most VIGRA functions can work on arbitrarily
+  transposed arrays, you will get correct results, but execution may be slower because the
   processor cache is poorly utilized in certain axis orders.
-  
-  Moreover, this may lead to overload resolution ambiguities. For example, when the array has shape 
+
+  Moreover, this may lead to overload resolution ambiguities. For example, when the array has shape
   ``(3, 60, 40)``, vigranumpy has no way to decide if this is a 2-dimensional RGB image or
-  a 3-dimensional array that happens to have only 3 slices. Thus, vigranumpy may not always 
+  a 3-dimensional array that happens to have only 3 slices. Thus, vigranumpy may not always
   execute the function you actually intended to call.
-  
-* When the Python array **has** the ``array.axistags`` property, it is transposed into a 
-  **canonical** axis ordering before vigranumpy executes a function, and the results are 
+
+* When the Python array **has** the ``array.axistags`` property, it is transposed into a
+  **canonical** axis ordering before vigranumpy executes a function, and the results are
   transposed back into the original ordering. Likewise, functions that change axis ordering
   (such as ``array.swapaxes(0,1)``) or reduce the number of axes (such as ``array.max(axis=1)``)
-  as well as array arithmetic operations preserve axistags (see section :ref:`sec-dtype-coercion`). 
-  Thus, you can work in any desired axis order without loosing control. Overload ambiguities 
+  as well as array arithmetic operations preserve axistags (see section :ref:`sec-dtype-coercion`).
+  Thus, you can work in any desired axis order without losing control. Overload ambiguities
   can no longer occur because a function cannot be called when the axistags are unsuitable.
 
 Detailed information about the use of axistags is given in section :ref:`sec-vigraarray` below. Section :ref:`sec-own-modules` describes how you can take advantage of the axistags mechanism in your own C++ code.
 
 .. _sec-vigraarray:
-    
+
 Axistags and the VigraArray Data Structure
 ------------------------------------------
 
 While vigranumpy can directly work on numpy.ndarrays, this would not give us the advantages of axistags as described above. Therefore, vigranumpy introduces its own array class :class:`~vigra.VigraArray` which is a subclass of numpy.ndarray, but re-implements many of its methods so that axistags are respected. Arrays with a conforming ``axistags`` property are most easily constructed by one of the predefined :ref:`array factories <subsec-array-factories>`. A **view with axistags** can be [...]
 
     >>> width, height, depth = 300, 200, 3
-    
+
     # create a 2-dimensional RGB image
     >>> rgb = vigra.RGBImage((width, height))
     >>> rgb.shape
     (300, 200, 3)
     >>> rgb.axistags             # short output: only axis keys
     x y c
-    >>> print rgb.axistags       # long output
+    >>> print(rgb.axistags)      # long output
     AxisInfo: 'x' (type: Space)
     AxisInfo: 'y' (type: Space)
     AxisInfo: 'c' (type: Channels) RGB
-    
+
     # create a 3-dimensional scalar volume
     >>> volume = vigra.ScalarVolume((width, height, depth))
     >>> volume.shape
     (300, 200, 3)        # same shape as before
     >>> volume.axistags
     x y z                # but different semantic interpretation
-    >>> print volume.axistags
+    >>> print(volume.axistags)
     AxisInfo: 'x' (type: Space)
     AxisInfo: 'y' (type: Space)
     AxisInfo: 'z' (type: Space)
@@ -91,14 +91,14 @@ It is also possible to attach additional information to the axistags, in particu
     >>> rgb.axistags['x'].resolution = 1.2  # in some unit of length
     >>> rgb.axistags['y'].resolution = 1.4  # in some unit of length
     >>> rgb.axistags['c'].description = 'fluorescence microscopy, DAPI and GFP staining'
-    >>> print rgb.axistags
+    >>> print(rgb.axistags)
     AxisInfo: 'x' (type: Space, resolution=1.2)
     AxisInfo: 'y' (type: Space, resolution=1.4)
     AxisInfo: 'c' (type: Channels) fluorescence microscopy, DAPI and GFP staining
-    
+
     # interpolate the image to twice its original size
     >>> rgb2 = vigra.sampling.resize(rgb, shape=(2*width-1, 2*height-1))
-    >>> print rgb2.axistags
+    >>> print(rgb2.axistags)
     AxisInfo: 'x' (type: Space, resolution=0.6)
     AxisInfo: 'y' (type: Space, resolution=0.7)
     AxisInfo: 'c' (type: Channels) fluorescence microscopy, DAPI and GFP staining
@@ -111,19 +111,19 @@ When the array is transposed, the axistags are transposed accordingly. When axes
     (3, 200, 300)
     >>> transposed_volume.axistags
     z y x
-    
+
     # get a view to the first slice (z == 0)
     >>> first_slice = volume[..., 0]
     >>> first_slice.shape
     (300, 200)
     >>> first_slice.axistags
     x y
-    
+
     # get the maximum of each slice
     >>> volume.max(axis=0).max(axis=0)
     VigraArray(shape=(3,), axistags=z, dtype=float32, data=
     [ 0.  0.  0.])
-    
+
     # likewise, but specify axes by their keys
     >>> volume.max(axis='x').max(axis='y')
     VigraArray(shape=(3,), axistags=z, dtype=float32, data=
@@ -134,26 +134,26 @@ The initial ordering of the axes is controlled by the argument ``order`` that ca
 .. _array-order-parameter:
 
     'C' order:
-        Both strides and axes are arranged in descending order, as in a 
-        plain numpy.ndarray. For example, axistags will be 'y x c' or 
+        Both strides and axes are arranged in descending order, as in a
+        plain numpy.ndarray. For example, axistags will be 'y x c' or
         'z y x c'. array.flags['C_CONTIGUOUS'] will be true.
 
     'F' order:
-        Both strides and axes are arranged in ascending order, i.e. 
-        opposite to 'C' order. For example, axistags will be 'c x y' 
+        Both strides and axes are arranged in ascending order, i.e.
+        opposite to 'C' order. For example, axistags will be 'c x y'
         or 'c x y z'. array.flags['F_CONTIGUOUS'] will be true.
 
     'V' order:
-        VIGRA-order is an interleaved memory layout that simulates 
-        vector-valued pixels or voxels: Channels will be the last axis 
-        and have the smallest stride, whereas all other axes are arranged 
-        in ascending order. For example, axistags will be 'x y c' or 
+        VIGRA-order is an interleaved memory layout that simulates
+        vector-valued pixels or voxels: Channels will be the last axis
+        and have the smallest stride, whereas all other axes are arranged
+        in ascending order. For example, axistags will be 'x y c' or
         'x y z c'.
 
     'A' order:
         Defaults to 'V' when a new array is created, and means
         'preserve order' when an existing array is copied.
-        
+
 The meaning of 'ascending' or 'descending' order is determined by two rules: the primary order is according to axis type (see :class:`vigra.AxisType`), where ``Channels < Space < Angle < Time < Frequency < Unknown``. The secondary order (between axes of the same type) is lexicographic, such that 'x' < 'y' < 'z'. Usage examples::
 
     >>> rgbv = vigra.RGBImage((width, height), order='V')
@@ -161,19 +161,19 @@ The meaning of 'ascending' or 'descending' order is determined by two rules: the
     (300, 200, 3)
     >>> rgbv.axistags
     x y c
-    
+
     >>> rgbc = vigra.RGBImage((width, height), order='C')
     >>> rgbc.shape
     (200, 300, 3)
     >>> rgbc.axistags
     y x c
-    
+
     >>> rgbf = vigra.RGBImage((width, height), order='F')
     >>> rgbf.shape
     (3, 300, 200)
     >>> rgbf.axistags
     c x y
-    
+
 Functions that reduce the array to a one-dimensional shape (``flatten()``, ``flat``, ``ravel()``, ``take()``) always transpose the array into 'C' order before flattening.
 
 Axistags are stored in a list-like class :class:`vigra.AxisTags`, whose individual entries are of type :class:`vigra.AxisInfo`. The simplest way to attach axistags to a plain numpy.ndarray (by creating a view of type VigraArray) is via the convenience function :func:`vigra.taggedView`.
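+
+For illustration, a minimal sketch (assuming the axistags are passed as a string of
+axis keys; other accepted forms are described in the function documentation)::
+
+    >>> import numpy, vigra
+    >>> plain = numpy.zeros((300, 200, 3), dtype=numpy.float32)
+    >>> tagged = vigra.taggedView(plain, 'xyc')
+    >>> tagged.axistags
+    x y c
+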
@@ -231,7 +231,7 @@ where the color values of each pixel are consecutive in memory, or in "banded fo
     B B B ...
     B B B ...
     :
-    
+
 where we have a separate scalar image for each color. In Fortran, interleaved and banded images must be indexed as ``f(color, x, y)`` and ``f(x, y, color)`` respectively, whereas in C we must use ``c[y][x][color]`` or ``c[color][y][x]``.
 
 VIGRA and numpy
@@ -239,7 +239,7 @@ VIGRA and numpy
 
 From the beginning, VIGRA adopted Fortran conventions, i.e. its default behavior is according to FM and FI (this is possible because VIGRA uses array classes, where the mapping from indices to memory is encapsulated in the appropriate way).
 
-In contrast, numpy adopted C conventions, i.e. its default behavior is CM and CI. 
+In contrast, numpy adopted C conventions, i.e. its default behavior is CM and CI.
 
 In addition, both packages provide array views which keep the memory layout intact, but change the index order. Thus, VIGRA also supports the CI convention, and numpy also supports FI. Note that changing the index order is only allowed for images. Matrices always use the fixed index order dictated by mathematics where transpose(m) is a well-defined mathematical operation (which just happens to revert the index order). Therefore, the existence of array views does not imply that VIGRA supp [...]
 
@@ -256,22 +256,22 @@ This is precisely where axistags enter: They attach information to array views t
 
     width  = image.width    # this works for any axis order!
     height = image.height
-    
+
 Now suppose we want to execute a numpy algorithm which expects the [y, x] ordering. We simply transpose the array before calling the algorithm like this::
 
     # adjust the axis order
     numpy_image = image.transposeToNumpyOrder()
-    
+
     # execute the algorithm
     for y in xrange(height):
         for x in xrange(width):
             numpy_image[y, x] = ...   # note the index order
-            
+
 When we want to execute a VIGRA algorithm which expects the [x, y] ordering, we do::
 
     # adjust the axis order
     vigra_image = image.transposeToVigraOrder()
-    
+
     # execute the algorithm
     for y in xrange(height):
         for x in xrange(width):
@@ -287,17 +287,17 @@ To handle axis meaning in a well-defined way, vigranumpy adopts the following co
 1. When the array represents a matrix, no axistags are allowed because the index order has a fixed semantic meaning and must not be messed around with. In vigranumpy, this requirement is enforced by an assertion::
 
     vigra_precondition( !matrix.axistags(), "matrix must not have axistags");
-    
+
    in the C++ gluecode functions. This applies, for example, to the feature matrices passed to a random forest and to unsupervised learning algorithms. If desired, we can introduce additional axistags for features and samples in the future because this is a common use case.
 
 2. When arrays represent image data with up to five dimensions, axistags should be used. To sort indices according to the requirements of the next algorithm to be executed, the appropriate convenience function should be called (many more convenience functions are documented in :py:class:`vigra.VigraArray`)::
 
-    numpy_array   = array.transposeToNumpyOrder()    # gives 'yx', 'zyx' etc. 
+    numpy_array   = array.transposeToNumpyOrder()    # gives 'yx', 'zyx' etc.
     vigra_array   = array.transposeToVigraOrder()    # gives 'xy', 'xyz' etc.
     ilastik_array = array.view5D()                   # gives 'txyzc' (inserts singleton axes if necessary)
     user_defined  = array.withAxes('y', 'x', 't')    # specify order explicitly (inserts singleton axes if necessary)
-    
-   Algorithms with special order requirements can then check for the correct order in an assertion. 
+
+   Algorithms with special order requirements can then check for the correct order in an assertion.
 
 3. The function ``vigra.taggedView()`` allows you to attach axistags to an array very conveniently. For example, when you know from the context that the axes of a given array are to be interpreted as 'xyzt' in that order, you can make this knowledge explicit by calling::
 
@@ -309,7 +309,7 @@ To handle axis meaning in a well-defined way, vigranumpy adopts the following co
 
 6. When vigranumpy writes arrays to a file, it will always order the axes such that the memory order conforms to the established file conventions (e.g. values along the x-axis are consecutive). In particular, when you use ``vigra.impex.writeHDF5()`` to create HDF5 datasets, ``array.transposeToNumpyOrder()`` will be called before writing the data (this is a consequence of item 5, because ``writeHDF5()`` eventually forwards the actual work to h5py). In addition, the axistags (in numpy orde [...]
 
-7. When vigranumpy reads data from a file, it preserves the file's memory order and attaches the appropriate axistags. In case of images, the axis order follows from the usual file conventions. If you call ``vigra.impex.readHDF5()`` to read HDF5, the axistags will be read from the attribute ``axistags`` (if present). Upon return, the read functions automatically call ``array.transposeToVigraOrder()``, but this only changes the index order, not the memory layout. This latter convention wa [...]
+7. When vigranumpy reads data from a file, it preserves the file's memory order and attaches the appropriate axistags. In case of images, the axis order follows from the usual file conventions. If you call ``vigra.impex.readHDF5()`` to read HDF5, the axistags will be read from the attribute ``axistags`` (if present). Upon return, the read functions automatically call ``array.transposeToVigraOrder()``, but this only changes the index order, not the memory layout. This latter convention wa [...]
 
 8. When you display an image via ``image.imshow()`` or ``image.show()``, the axes are re-ordered automatically such that the image is displayed upright (i.e. x goes to the right, y goes down). If you want to override this (i.e. want to enforce transposed display), you can remove the axistags by calling ``image.view(numpy.ndarray)``.
 
@@ -328,7 +328,7 @@ Axistag Reference
 
 .. autoclass:: vigra.AxisInfo
     :members: key, typeFlags, resolution, description, isSpatial, isTemporal, isChannel, isFrequency, isAngular, isType, compatible
-    
+
 ----------------
 
 .. autoclass:: vigra.AxisTags
@@ -340,13 +340,13 @@ VigraArray Reference
 .. autoclass:: vigra.VigraArray
     :show-inheritance:
     :members: defaultAxistags, channelIndex, innerNonchannelIndex, channels, spatialDimensions, width, height, depth, duration, dropChannelAxis, insertChannelAxis, withAxes, view5D, asRGB, __getitem__, subarray, bindAxis, channelIter, sliceIter, spaceIter, timeIter, copyValues, swapaxes, transpose, T, transposeToOrder, transposeToDefaultOrder, transposeToNormalOrder, transposeToNumpyOrder, transposeToVigraOrder, permutationToOrder, permutationToNormalOrder, permutationFromNormalOrder, pe [...]
-    
+
     .. attribute:: VigraArray.axistags
-    
-      The :class:`~vigra.AxisTags` object of this array. 
+
+      The :class:`~vigra.AxisTags` object of this array.
 
     .. attribute:: VigraArray.defaultOrder
-    
+
       Get the default axis ordering, currently 'V' (:ref:`VIGRA order <array-order-parameter>`).
 
 -------------
@@ -375,7 +375,45 @@ VigraArray Reference
 .. autofunction:: vigra.Vector4Volume
 .. autofunction:: vigra.Vector6Volume
 .. autofunction:: vigra.RGBVolume
-   
+
+
+Chunked Arrays and Data Bigger than RAM
+---------------------------------------
+
+Chunked arrays make it possible to allocate big data lazily, i.e. one chunk
+(rectangular block) at a time. Chunks which are currently not needed can be
+compressed or written to disk in order to free memory. This effectively allows
+VIGRA to work on data bigger than RAM.
+
+Classes
+^^^^^^^
+
+.. autoclass:: vigra.vigranumpycore.ChunkedArrayBase
+   :special-members:
+   :members:
+
+-------------
+
+.. autoclass:: vigra.vigranumpycore.ChunkedArrayHDF5Base
+   :show-inheritance:
+   :members:
+
+-------------
+
+.. autoclass:: vigra.Compression
+   :members:
+.. autoclass:: vigra.HDF5Mode
+   :members:
+
+Factory Functions
+^^^^^^^^^^^^^^^^^
+
+.. autofunction:: vigra.ChunkedArrayHDF5
+.. autofunction:: vigra.ChunkedArrayLazy
+.. autofunction:: vigra.ChunkedArrayCompressed
+.. autofunction:: vigra.ChunkedArrayTmpFile
+.. autofunction:: vigra.ChunkedArrayFull
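+
+For illustration, a minimal sketch (this assumes the factory accepts a shape tuple
+and a numpy dtype, and that blocks are exchanged via ``checkoutSubarray()`` and
+``commitSubarray()``; check the class and function reference above for the exact
+signatures)::
+
+    >>> import numpy, vigra
+    >>> # allocate a large volume lazily; chunks are only created when written
+    >>> vol = vigra.ChunkedArrayLazy((2000, 2000, 2000), dtype=numpy.float32)
+    >>> # commit one block and check it out again
+    >>> block = numpy.ones((100, 100, 100), dtype=numpy.float32)
+    >>> vol.commitSubarray((0, 0, 0), block)
+    >>> roi = vol.checkoutSubarray((0, 0, 0), block.shape)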
+
 
 Import and Export Functions
 ---------------------------
@@ -387,14 +425,14 @@ you may call 'vigra.readImage(...)' instead of 'vigra.impex.readImage(...)' etc.
 .. automodule:: vigra.impex
    :members:
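+
+For illustration, a minimal sketch (``readImage`` is also used in the VigraGraphs
+example notebook; ``writeImage`` chooses the output format from the file name
+extension)::
+
+    >>> img = vigra.impex.readImage('12003.jpg')
+    >>> vigra.impex.writeImage(img, 'copy.png')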
 
-   
+
 .. _sec-dtype-coercion:
 
 Mathematical Functions and Type Coercion
 ----------------------------------------
 
-vigranumpy supports all arithmetic and algebraic functions defined in  
-`numpy.ufunc <http://docs.scipy.org/doc/numpy/reference/ufuncs.html#available-ufuncs>`_, but re-implements them in module `vigra.ufunc` to take full advantage of axistags. 
+vigranumpy supports all arithmetic and algebraic functions defined in
+`numpy.ufunc <http://docs.scipy.org/doc/numpy/reference/ufuncs.html#available-ufuncs>`_, but re-implements them in module `vigra.ufunc` to take full advantage of axistags.
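+
+For illustration, a minimal sketch::
+
+    >>> rgb = vigra.RGBImage((300, 200))
+    >>> result = 2.0 * rgb + 1.0      # the re-implemented ufuncs keep the axistags
+    >>> result.axistags
+    x y c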
 
 .. automodule:: vigra.ufunc
 
@@ -402,8 +440,8 @@ vigranumpy supports all arithmetic and algebraic functions defined in
 Color and Intensity Manipulation
 --------------------------------
 
-The module vigra.colors provides functions to adjust image brightness and contrast, 
-and to transform between different color spaces. 
+The module vigra.colors provides functions to adjust image brightness and contrast,
+and to transform between different color spaces.
 See `Color Conversions <../vigra/group__ColorConversions.html>`_ in the C++ documentation
 for more information.
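+
+For illustration, a minimal sketch (``transform_RGB2Lab`` is also used in the
+VigraGraphs example notebook)::
+
+    >>> rgb = vigra.RGBImage((300, 200))
+    >>> lab = vigra.colors.transform_RGB2Lab(rgb)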
 
@@ -416,7 +454,7 @@ Filters
 
 The module vigra.filters provides operators that consider a window around each pixel, compute
 one or several numbers from the values in the window, and store the results in the
-corresponding pixel of the output image. This includes convolution, non-linear diffusion, 
+corresponding pixel of the output image. This includes convolution, non-linear diffusion,
 morphological operators, feature detectors (such as the structure tensor) etc.
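+
+For illustration, a minimal sketch (``gaussianGradientMagnitude`` is also used in
+the VigraGraphs example notebook)::
+
+    >>> img = vigra.readImage('12003.jpg')
+    >>> smoothed = vigra.filters.gaussianSmoothing(img, 2.0)
+    >>> gradmag = vigra.filters.gaussianGradientMagnitude(img, 2.0)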
 
 .. automodule:: vigra.filters
@@ -431,10 +469,10 @@ the image sampling points, such as resizing, rotation, and interpolation.
 
 .. automodule:: vigra.sampling
    :members:
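+
+For illustration, a minimal sketch (``resize`` with a ``shape`` argument is also
+shown in the axistags examples above)::
+
+    >>> img = vigra.RGBImage((300, 200))
+    >>> big = vigra.sampling.resize(img, shape=(599, 399))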
-   
+
 ---------------------------------------------
 
-Spline image views implement an interpolated view for an image which can be accessed 
+Spline image views implement an interpolated view for an image which can be accessed
 at real-valued coordinates (in contrast to the plain image, which can only be
 accessed at integer coordinates). Module vigra.sampling defines::
 
@@ -444,9 +482,9 @@ accessed at integer coordinates). Module vigra.sampling defines::
     SplineImageView3
     SplineImageView4
     SplineImageView5
-    
-The number denotes the spline interpolation order of the respective classes. 
-Below, we describe SplineImageView3 in detail, but the other classes work 
+
+The number denotes the spline interpolation order of the respective classes.
+Below, we describe SplineImageView3 in detail, but the other classes work
 analogously. See SplineImageView_ in the C++ documentation for more detailed information.
 
 .. autoclass:: vigra.sampling.SplineImageView3
@@ -462,23 +500,23 @@ analogously. See SplineImageView_ in the C++ documentation for more detailed inf
 Fourier Transforms
 ------------------
 
-The module vigra.fourier contains functions for Fourier transforms, Cosine/Sine 
+The module vigra.fourier contains functions for Fourier transforms, Cosine/Sine
 transforms, and Fourier-domain filters.
 
 .. automodule:: vigra.fourier
    :members:
 
-   
+
 Image Analysis
 --------------
 
-The module vigra.analysis contains segmentation algorithms (e.g. watershed), edge and 
+The module vigra.analysis contains segmentation algorithms (e.g. watershed), edge and
 corner detection, localization of maxima and minima etc.
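+
+For illustration, a minimal sketch (``slicSuperpixels`` and ``labelImage`` are also
+used in the VigraGraphs example notebook)::
+
+    >>> img = vigra.readImage('12003.jpg')
+    >>> imgLab = vigra.colors.transform_RGB2Lab(img)
+    >>> superpixels, nseg = vigra.analysis.slicSuperpixels(imgLab, 10.0, 20)
+    >>> superpixels = vigra.analysis.labelImage(superpixels)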
 
 .. automodule:: vigra.analysis
    :members:
 
-   
+
 Geometry
 --------
 
@@ -500,7 +538,7 @@ The module vigra.optimization provides functions for constrained and unconstrain
 Machine Learning
 ----------------
 
-The module vigra.learning will eventually provide a wide range of machine learning 
+The module vigra.learning will eventually provide a wide range of machine learning
 tools. Right now, it only contains an implementation of the random forest classifier
 and probabilistic latent semantic analysis (pLSA) as an example for unsupervised learning.
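+
+For illustration, a minimal sketch (this assumes a float32 feature matrix without
+axistags, uint32 labels in a column vector, and a ``treeCount`` constructor
+argument; check the class reference below for the exact signatures)::
+
+    >>> import numpy, vigra
+    >>> features = numpy.random.rand(100, 5).astype(numpy.float32)
+    >>> labels = (features[:, :1] > 0.5).astype(numpy.uint32)
+    >>> rf = vigra.learning.RandomForest(treeCount=10)
+    >>> oob = rf.learnRF(features, labels)
+    >>> probabilities = rf.predictProbabilities(features)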
 
@@ -509,7 +547,7 @@ and probabilistic latent semantic analysis (pLSA) as an example for unsupervised
 
 .. autoclass:: vigra.learning.RandomForest
    :members:
-   
+
 For more information, refer to RandomForest_ in the C++ documentation.
 
 .. autoclass:: vigra.learning.RandomForestOld
@@ -535,6 +573,7 @@ The module vigra.histogram provides histograms and channel representation
    :members:
 
 
+.. _sec-graph-algorithms:
 
 Graphs and Algorithms on Graphs
 -------------------------------
@@ -551,21 +590,19 @@ The module vigra.graphs provides graphs and graph algorithms
    :members:
 
 .. autoclass:: vigra.graphs.AdjacencyListGraph
-    :members:
+   :members:
 
 .. autoclass:: vigra.graphs.RegionAdjacencyGraph
-    :members:
+   :members:
 
 .. autoclass:: vigra.graphs.GridRegionAdjacencyGraph
-    :members:
-
+   :members:
 
 .. autoclass:: vigra.graphs.ShortestPathPathDijkstra
-    :members:
-
+   :members:
 
 
-Utilities 
+Utilities
 ----------------------------------
 
 The module vigra.utilities provides utilities and tools
@@ -573,10 +610,6 @@ like priority queues with changeable priorities
 
 .. automodule:: vigra.utilities
    :members:
-    
-
-
-
 
 
 .. _sec-own-modules:
@@ -596,21 +629,21 @@ When you want to write your own vigranumpy extension modules, first make sure th
         #include <boost/python.hpp>
         #include <vigra/numpy_array.hxx>
         #include <vigra/numpy_array_converters.hxx>
-        
+
         ... // your includes
-        
+
         ... // implementation of your wrapper functions and classes
-        
+
         using namespace boost::python;
-        
+
         // the argument of the init macro must be the module name
         BOOST_PYTHON_MODULE_INIT(my_module)
         {
             // initialize numpy and vigranumpy
             vigra::import_vigranumpy();
-            
+
             // export a function
-            def("my_function", &my_function, 
+            def("my_function", &my_function,
                 (arg("arg1"), arg("arg2"), ...),
                 "Documentation");
 
@@ -621,10 +654,10 @@ When you want to write your own vigranumpy extension modules, first make sure th
                      (arg("arg1"), arg("arg2"), ...),
                      "Documentation")
             ;
-                     
+
             ... // more module functionality (refer to boost_python documentation)
         }
-    
+
 2. When your module uses additional C++ source files, they should start with the following defines::
 
         // this must define the same symbol as the main module file (numpy requirement)
@@ -641,71 +674,71 @@ When you want to write your own vigranumpy extension modules, first make sure th
             // Returning NumpyAnyArray is always safe, because at that point
             // C++ no longer cares about the particular type of the array.
         NumpyAnyArray foo(NumpyAnyArray array);
-        
-            // Accept a 3-dimensional float32 array and transpose it 
+
+            // Accept a 3-dimensional float32 array and transpose it
             // into ascending axis order ('F' order).
         void foo(NumpyArray<3, float> array);
-        
+
             // Accept a 2-dimensional float32 array with an arbitrary number of channels and
             // transpose the axes into VIGRA ('V') order (channels are last, other axes ascending).
             // Note that the NumpyArray dimension is 3 to account for the channel dimension.
             // If the original numpy array has no channel axis, vigranumpy will automatically
             // insert a singleton axis.
         void foo(NumpyArray<3, Multiband<float> > array);
-        
+
             // Accept a 2-dimensional float32 array that has only a single channel
             // (that is, 'array.channels == 1' must hold on the Python side).
             // Non-channel axes are transposed into ascending order.
             // Note that the NumpyArray dimension is now 2.
         void foo(NumpyArray<2, Singleband<float> > array);
-        
-            // Accept a float32 array that has 2 non-channel dimensions and 
-            // exactly 3 channels (i.e. 'array.channels == 3' on the Python side). 
+
+            // Accept a float32 array that has 2 non-channel dimensions and
+            // exactly 3 channels (i.e. 'array.channels == 3' on the Python side).
             // Non-channel axes are transposed into ascending order.
-            // Note that the NumpyArray dimension is again 2, but the pixel type is 
+            // Note that the NumpyArray dimension is again 2, but the pixel type is
             // now a vector.
             // The conversion will only succeed if the channel axis is unstrided on
             // the Python side (that is, the following expression is True:
             //      array.strides[array.channelIndex] == array.dtype.itemsize).
         void foo(NumpyArray<2, TinyVector<float, 3> > array);
         void foo(NumpyArray<2, RGBValue<float> > array);
-    
-   Or course, these functions can also be templated. 
-   
+
+   Of course, these functions can also be templated.
+
    When your functions return newly allocated arrays, it is usually desirable to transfer the input's axistags to the output (otherwise, vigranumpy will use :meth:`~vigra.VigraArray.defaultAxistags` as a fallback). There is a standard vigranumpy idiom for this task which assumes that the wrapped function has an optional parameter 'output' for a possibly pre-allocated output array. The axistags are then transferred by reshaping the output array with a ``taggedShape()`` (which is a combina [...]
-   
+
         NumpyAnyArray
-        foo(NumpyArray<3, Multiband<float32> > input, 
+        foo(NumpyArray<3, Multiband<float32> > input,
             NumpyArray<3, Multiband<float32> > output = boost::python::object())
         {
             // Reshape only if the output array was not explicitly passed in.
             // Otherwise, use the output array as is.
-            output.reshapeIfEmpty(input.taggedShape(), 
+            output.reshapeIfEmpty(input.taggedShape(),
                       "error message when shape is unsuitable.");
-                      
+
             ... // your algorithm
         }
-        
+
    It is also possible to modify the tagged shape before it is applied to the output array::
-   
+
         input.taggedShape()
              .resize(Shape2(new_width, new_height))
              .setChannelCount(new_channel_count)
              .setChannelDescription("a description")
-             
+
    The C++ code can be multi-threaded when you unlock Python's global interpreter lock. After unlocking, your wrapper code must not call any Python functions, so the unlock statement should go after ``output.reshapeIfEmpty()``::
-   
+
         NumpyAnyArray
-        foo(NumpyArray<3, Multiband<float32> > input, 
+        foo(NumpyArray<3, Multiband<float32> > input,
             NumpyArray<3, Multiband<float32> > output = boost::python::object())
         {
             output.reshapeIfEmpty(input.taggedShape(), "Message.");
-            
+
                 // Allow parallelization from here on. The destructor of
                 // _pythread will automatically regain the global interpreter lock
                 // just before this function returns to Python.
             PyAllowThreads _pythread;
-            
+
             ... // your algorithm
         }
 
@@ -715,7 +748,7 @@ When you want to write your own vigranumpy extension modules, first make sure th
         def("my_function", vigra::registerConverters(&my_function),
            (arg("arg1"), ...),
            "Documentation");
-           
+
 If you need more information, it is always a good idea to look at the source code of the existing vigranumpy modules.
 
 
diff --git a/vigranumpy/examples/VigraGraphs.ipynb b/vigranumpy/examples/VigraGraphs.ipynb
index 6305736..3dab898 100644
--- a/vigranumpy/examples/VigraGraphs.ipynb
+++ b/vigranumpy/examples/VigraGraphs.ipynb
@@ -1,309 +1,370 @@
-     {
- "metadata": {
-  "name": "Vigra Graphs"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
+{
+ "cells": [
   {
-   "cells": [
-    {
-     "cell_type": "heading",
-     "level": 1,
-     "metadata": {},
-     "source": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "# to make matplotlib figures nice\n",
-      "import matplotlib\n",
-      "matplotlib.rcParams['savefig.dpi'] = 2 * matplotlib.rcParams['savefig.dpi']"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": [],
-     "prompt_number": 9
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Import vigra and graph submodule, and import numpy from vigra"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import vigra\n",
-      "from vigra import graphs\n",
-      "from vigra import numpy"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": [],
-     "prompt_number": 10
-    },
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# to make matplotlib figures nice\n",
+    "import matplotlib\n",
+    "%matplotlib inline\n",
+    "matplotlib.rcParams['savefig.dpi'] = 2 * matplotlib.rcParams['savefig.dpi']"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Import vigra and graph submodule, and import numpy from vigra"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "import vigra\n",
+    "from vigra import graphs\n",
+    "import numpy\n",
+    "import pylab"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Read in an image"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
     {
-     "cell_type": "markdown",
+     "data": {
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAuUAAAH/CAYAAADwn6OeAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAWJQAAFiUBSVIk8AAAIABJREFUeJzsvUusbVuSnvVFjDHmXGvvfR73mXnzUVmV9ciyXbaEVaiQ\nCgmswgZBC0s0kUq0kDGCLiAhFRIdOkbIQm7RootEA9EwErSwLGhYRhhnPdOV6cy8r/PYj7XWnHOM\nEUEjxj43ZSpF4izVrYvnL52zpb3OnnuuucaI8ccff8QRd2fHjh07duzYsWPHjh2fH/TzvoEdO3bs\n2LFjx44dO/5Zx07Kd+zYsWPHjh07duz4nLGT8h07duzYsWPHjh07PmfspHzHjh07duzYsWPHjs8Z\nOynfsWPHjh07duzYseNzxk7Kd+zYsWPHjh07duz4nLGT8h07duzYsWPHjh07PmfspHzHjh07duzY\nsWPH [...]
+      "text/plain": [
+       "<matplotlib.figure.Figure at 0xd9f923390>"
+      ]
+     },
      "metadata": {},
-     "source": [
-      "Read in an image"
-     ]
-    },
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "# load image and convert to LAB\n",
+    "filepath = '12003.jpg'\n",
+    "img = vigra.impex.readImage(filepath)\n",
+    "imgLab = vigra.colors.transform_RGB2Lab(img)\n",
+    "vigra.imshow(img,show=False)\n",
+    "vigra.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Compute gradient magnitude on image which shape*2-1"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
     {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "# load image and convert to LAB\n",
-      "filepath = '12003.jpg'\n",
-      "img = vigra.impex.readImage(filepath)\n",
-      "imgLab = vigra.colors.transform_RGB2Lab(img)\n",
-      "vigra.imshow(img,show=False)\n",
-      "vigra.show()"
-     ],
-     "language": "python",
+     "data": {
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAuYAAAH/CAYAAAAbqBidAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAWJQAAFiUBSVIk8AAAIABJREFUeJzsvUuPZFmWnbeuPd3N3NzM3Nz8FeERkYyszqxuokCyBFIA\nJyJECYKG+gkaStAfkACCFKAxBUEQNNJv0JgTDQiRIggS1Y3qzszIqIzIePnb3F7u5va8Gnh925fd\njIK6M5tdWcq7gUBEuJvde+45+7H22vucm6RpqlxyySWXXHLJJZdccsnl9yuF3/cAcskll1xyySWX\nXHLJJZccmOeSSy655JJLLrnkksuPQnJgnksuueSSSy655JJLLj8CyYF5LrnkkksuueSSSy65/Agk\nB+a55JJLLrnkkksuueTyI5AcmOeSSy655JJLLrnkksuPQHJgnksuueSSSy655JJLLj8CyYF5Lrnk\nkksu [...]
+      "text/plain": [
+       "<matplotlib.figure.Figure at 0xd98c33a90>"
+      ]
+     },
      "metadata": {},
-     "outputs": [
-      {
-       "output_type": "display_data",
-       "png": "iVBORw0KGgoAAAANSUhEUgAAAuUAAAH3CAYAAAAczCHzAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAWJQAAFiUBSVIk8AAAIABJREFUeJzsvUmsbVtWpveNWay19znnVq/ixYs6IIhIhNNW4gKBbAkk\nIjvZwI3sWbKFZAshjImmhUS4Ax03EbhHz13csIQQCEMnZMJCslLpxARBUEX57nvv3nuKvfdasxjD\njTH3uQFJKkMBzhco1/90Gu8Ue6+91pxz/OMf/xhXzMzYsGHDhg0bNmzYsGHD+4bwfl/Ahg0bNmzY\nsGHDhg3/vmMj5Rs2bNiwYcOGDRs2vM/YSPmGDRs2bNiwYcOGDe8zNlK+YcOGDRs2bNiwYcP7jI2U\nb9iwYcOGDRs2bNjwPmMj5Rs2bNiwYcOGDRs2vM/YSPmGDRs2bNiwYcOGDe8zNlK+YcOGDRs2bNiw\nYcP7jI2Ub [...]
-      }
-     ],
-     "prompt_number": 11
-    },
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "# compute gradient on interpolated image\n",
+    "sigmaGradMag = 2.0 \n",
+    "imgLabBig = vigra.resize(imgLab, [imgLab.shape[0]*2-1, imgLab.shape[1]*2-1])\n",
+    "gradMag = vigra.filters.gaussianGradientMagnitude(imgLabBig, sigmaGradMag)\n",
+    "vigra.imshow(gradMag,show=False)\n",
+    "vigra.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Get oversegmentation"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
     {
-     "cell_type": "markdown",
+     "data": {
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAuUAAAH/CAYAAADwn6OeAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAWJQAAFiUBSVIk8AAAIABJREFUeJzsvXeQHNl95/l5L015094AaKAbHgNgMBacwfgZjiMpihSN\nSJGSqJO0Mqs9XewpYrUbd3G7GxurC0XI7GolrbSUVqelKFKkRFJDM4ZjOCTGAwNgBt402qB9d3mT\n7t0f1dPVjTaoRjuY/PzTXZlZma+qMt/7vt/7GaGUwsfHx8fHx8fHx8dn7ZBr3QAfHx8fHx8fHx+f\nmx1flPv4+Pj4+Pj4+PisMb4o9/Hx8fHx8fHx8VljfFHu4+Pj4+Pj4+Pjs8b4otzHx8fHx8fHx8dn\njfFFuY+Pj4+Pj4+Pj88a44tyHx8fHx8fHx8fnzXGF+U+Pj4+Pj4+Pj4+a4wvyn18fHx8fHx8fHzW\nGF+U [...]
+      "text/plain": [
+       "<matplotlib.figure.Figure at 0xda0343cf8>"
+      ]
+     },
      "metadata": {},
-     "source": [
-      "Compute gradient magnitude on image which shape*2-1"
-     ]
+     "output_type": "display_data"
     },
     {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "# compute gradient on interpolated image\n",
-      "sigmaGradMag = 2.0 \n",
-      "imgLabBig = vigra.resize(imgLab, [imgLab.shape[0]*2-1, imgLab.shape[1]*2-1])\n",
-      "gradMag = vigra.filters.gaussianGradientMagnitude(imgLabBig, sigmaGradMag)\n",
-      "vigra.imshow(gradMag,show=False)\n",
-      "vigra.show()"
-     ],
-     "language": "python",
+     "data": {
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAuYAAAH/CAYAAAAbqBidAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAWJQAAFiUBSVIk8AAAIABJREFUeJzsvHm0bVlV5vmbc6219zn33tdF84iWiICIoO8UBQRECkWg\nNMsmVVKzkDI1TQvRYWqhosNEGWXakIpShQ6xz1TUskFFBCwVQUA6E0m6MICIgCCI9r1337un2Xut\nNWf9sfZ9ODKTUVaSKRGV+zdGxB3jNfeed85ea33zm99c4u7MzMzMzMzMzMzMzHxm0c/0C5iZmZmZ\nmZmZmZmZmYX5zMzMzMzMzMzMzL2CWZjPzMzMzMzMzMzM3AuYhfnMzMzMzMzMzMzMvYBZmM/MzMzM\nzMzMzMzcC5iF+czMzMzMzMzMzMy9gFmYz8zMzMzMzMzMzNwLmIX5zMzMzMzMzMzMzL2AWZjPzMzM\nzMzM [...]
+      "text/plain": [
+       "<matplotlib.figure.Figure at 0xda04a7d68>"
+      ]
+     },
      "metadata": {},
-     "outputs": [
-      {
-       "output_type": "display_data",
-       "png": "iVBORw0KGgoAAAANSUhEUgAAAuYAAAH3CAYAAAD3+5rwAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAWJQAAFiUBSVIk8AAAIABJREFUeJzsnUmPpNlZtu8YMoaMMSNyzqyq7mr3YGMQiI1lVoDEmt/A\nCiFAeA3CK/gDCJb8BJYIYVlmYwlLLMCGHtxdU3dlVkZmRmbMkTF/i9R18o5TaT7czfe5rX6PFKrK\nGN73vOc8w/3cz3POSa1Wq5WSlrSkJS1pSUta0pKWtKT9Qlv6F92BpCUtaUlLWtKSlrSkJS1pCTBP\nWtKSlrSkJS1pSUta0r4ULQHmSUta0pKWtKQlLWlJS9qXoCXAPGlJS1rSkpa0pCUtaUn7ErQEmCct\naUlLWtKSlrSkJS1pX4KWAPOkJS1pSUta0pKWtKQl7UvQEmCetKQlLWlJS1rSkpa0pH0JWgLMk5a0\npCUtaUlLW [...]
-      }
-     ],
-     "prompt_number": 12
-    },
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "# get super-pixels with slic on LAB image\n",
+    "superpixelDiameter = 20 # super-pixel size\n",
+    "slicWeight = 10.0       # SLIC color - spatial weight\n",
+    "imgLab = vigra.colors.transform_RGB2Lab(img)\n",
+    "labels, nseg = vigra.analysis.slicSuperpixels(imgLab, slicWeight,\n",
+    "                                              superpixelDiameter)\n",
+    "labels = vigra.analysis.labelImage(labels)\n",
+    "# A random colormap for matplotlib\n",
+    "cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( 256,3))\n",
+    "pylab.imshow ( labels.squeeze().swapaxes(0,1), cmap = cmap)\n",
+    "pylab.show()\n",
+    "\n",
+    "vigra.segShow(img,labels,alpha=0.0)\n",
+    "vigra.show()\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Get a 2d grid graph"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
     {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Get oversegmentation"
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Nodes: 154401 Edges: 308000 maxNodeId: 154400 maxEdgeId: 308801\n"
      ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "# get super-pixels with slic on LAB image\n",
-      "superpixelDiameter = 20 # super-pixel size\n",
-      "slicWeight = 10.0       # SLIC color - spatial weight\n",
-      "imgLab = vigra.colors.transform_RGB2Lab(img)\n",
-      "labels, nseg = vigra.analysis.slicSuperpixels(imgLab, slicWeight,\n",
-      "                                              superpixelDiameter)\n",
-      "labels = vigra.analysis.labelImage(labels)\n",
-      "# A random colormap for matplotlib\n",
-      "cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( 256,3))\n",
-      "pylab.imshow ( labels.squeeze().swapaxes(0,1), cmap = cmap)\n",
-      "pylab.show()\n",
-      "\n",
-      "vigra.segShow(img,labels,alpha=0.0)\n",
-      "vigra.show()\n"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": [
-      {
-       "output_type": "display_data",
-       "png": "iVBORw0KGgoAAAANSUhEUgAAAuUAAAH3CAYAAAAczCHzAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAWJQAAFiUBSVIk8AAAIABJREFUeJzsvXl0XNd95/m5b6u9sO8AQRJcRVISN1GLJVmLZdmyvMaO\n246TiRMnHneStsfTM+mkTzJ9Mk7OTCY96UnbPcl0ZKc7TjvtJY4tW7tkLZREiZK4byBAEiD2rQq1\nvvXOH0UCBLGDBRRIvs85OuLD225VvXfv9/7u736vkFJKfHx8fHx8fHx8fHxKhlLqAvj4+Pj4+Pj4\n+Pjc7Pii3MfHx8fHx8fHx6fE+KLcx8fHx8fHx8fHp8T4otzHx8fHx8fHx8enxPii3MfHx8fHx8fH\nx6fE+KLcx8fHx8fHx8fHp8T4otzHx8fHx8fHx8enxPii3MfHx8fHx8fHx6fE+KLcx8fHx8fHx8fH\np8T4otzHx [...]
-      },
-      {
-       "output_type": "display_data",
-       "png": "iVBORw0KGgoAAAANSUhEUgAAAuYAAAH3CAYAAAD3+5rwAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAWJQAAFiUBSVIk8AAAIABJREFUeJzsvHm0NllV5vnb55yIeN977zfmPA/kRIJAAsU8CGqioEiL\nQqNS2JQlqIgithZqicOSUlTERrCrbShUWnGisG0xBROQQUlMi3lIICEzgY8cv+kOb0Scc/auP3bc\nm2hbq1xiF0mveNa6K/O7w/vGG3HOPs9+9rO3mJkxY8aMGTNmzJgxY8aMLyvCl/sCZsyYMWPGjBkz\nZsyYMRPzGTNmzJgxY8aMGTPuEZiJ+YwZM2bMmDFjxowZ9wDMxHzGjBkzZsyYMWPGjHsAZmI+Y8aM\nGTNmzJgxY8Y9ADMxnzFjxowZM2bMmDHjHoCZmM+YMWPGjBkzZsyYcQ/ATMxnzJgxY8aMGTNmzLgH\nYCbmM2bMm [...]
-      }
-     ],
-     "prompt_number": 13
-    },
+    }
+   ],
+   "source": [
+    "gridGraph = graphs.gridGraph(img.shape[0:2])\n",
+    "print(gridGraph)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Compute a region adjacency graph from oversegmentation labeling and grid graph"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
     {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Get a 2d grid graph"
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Nodes: 764 Edges: 1963 maxNodeId: 764 maxEdgeId: 1962\n"
      ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "gridGraph = graphs.gridGraph(img.shape[0:2])\n",
-      "print gridGraph"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": [
-      {
-       "output_type": "stream",
-       "stream": "stdout",
-       "text": [
-        "Nodes: 154401 Edges: 308000 maxNodeId: 154400 maxEdgeId: 308801\n"
-       ]
-      }
-     ],
-     "prompt_number": 15
-    },
+    }
+   ],
+   "source": [
+    "# get region adjacency graph from super-pixel labels\n",
+    "rag = graphs.regionAdjacencyGraph(gridGraph, labels)\n",
+    "print(rag)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Get edge weight for grid graph"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
     {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Compute a region adjacency graph from oversegmentation labeling and grid graph"
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "(481, 321, 2)\n"
      ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "# get region adjacency graph from super-pixel labels\n",
-      "rag = graphs.regionAdjacencyGraph(gridGraph, labels)\n",
-      "print rag"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": [
-      {
-       "output_type": "stream",
-       "stream": "stdout",
-       "text": [
-        "Nodes: 702 Edges: 1799 maxNodeId: 702 maxEdgeId: 1798\n"
-       ]
-      }
-     ],
-     "prompt_number": 16
-    },
+    }
+   ],
+   "source": [
+    "gridGraphEdgeIndicator = graphs.edgeFeaturesFromInterpolatedImage(gridGraph,\n",
+    "                                                                  gradMag)\n",
+    "\n",
+    "print(gridGraphEdgeIndicator.shape)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Get edge weight and node features accumulated from grid graph edge weights and node features"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
     {
-     "cell_type": "raw",
-     "metadata": {},
-     "source": [
-      "Get edge weight for grid graph"
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "(1963,)\n",
+      "(765, 3)\n"
      ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "gridGraphEdgeIndicator = graphs.edgeFeaturesFromInterpolatedImage(gridGraph,\n",
-      "                                                                  gradMag)\n",
-      "\n",
-      "print gridGraphEdgeIndicator.shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": [
-      {
-       "output_type": "stream",
-       "stream": "stdout",
-       "text": [
-        "(481, 321, 2)\n"
-       ]
-      }
-     ],
-     "prompt_number": 18
-    },
+    }
+   ],
+   "source": [
+    "# accumulate edge weights grid graph edge weights\n",
+    "edgeWeights = rag.accumulateEdgeFeatures(gridGraphEdgeIndicator)\n",
+    "\n",
+    "# accumulate node features from grid graph node map\n",
+    "# which is just a plain image (with channels)\n",
+    "nodeFeatures = rag.accumulateNodeFeatures(imgLab)\n",
+    "\n",
+    "print(edgeWeights.shape)\n",
+    "print(nodeFeatures.shape)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
     {
-     "cell_type": "raw",
-     "metadata": {},
-     "source": [
-      "Get edge weight and node features accumulated from grid graph edge weights and node features"
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "prepare \n",
+      "get edge length\n",
+      "get node size\n",
+      "nodeLabels  (765,) uint32\n"
      ]
     },
     {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "# accumulate edge weights grid graph edge weights\n",
-      "edgeWeights = rag.accumulateEdgeFeatures(gridGraphEdgeIndicator)\n",
-      "\n",
-      "# accumulate node features from grid graph node map\n",
-      "# which is just a plain image (with channels)\n",
-      "nodeFeatures = rag.accumulateNodeFeatures(imgLab)\n",
-      "\n",
-      "print edgeWeights.shape\n",
-      "print nodeFeatures.shape\n"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": [
-      {
-       "output_type": "stream",
-       "stream": "stdout",
-       "text": [
-        "(1799,)\n",
-        "(703, 3)\n"
-       ]
-      }
-     ],
-     "prompt_number": 19
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "# do agglomerativeClustering\n",
-      "beta = 0.5              # node vs edge weight\n",
-      "nodeNumStop = 50        # desired num. nodes in result\n",
-      "labels = graphs.agglomerativeClustering(graph=rag, edgeWeights=edgeWeights,\n",
-      "                                        beta=beta, nodeFeatures=nodeFeatures,\n",
-      "                                        nodeNumStop=nodeNumStop)\n",
-      "# show result \n",
-      "imgLabels =rag.projectLabelsToBaseGraph(labels)\n",
-      "cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( 256,3))\n",
-      "pylab.imshow ( imgLabels.squeeze().swapaxes(0,1), cmap = cmap)\n",
-      "pylab.show()\n",
-      "\n",
-      "rag.show(img,labels)\n",
-      "vigra.show()\n"
-     ],
-     "language": "python",
+     "data": {
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAuUAAAH/CAYAAADwn6OeAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAWJQAAFiUBSVIk8AAAIABJREFUeJzs3XeUZNd92PnvfalyVec8OWIGg0gSIMAMkZIIkqJISpS0\n67VNy3soWzo+OvJZB2m9ktc+OisHyl57JUsyJStYgQqkJBAESRAkGAAi58m5c6ocXrz7R/V0T+ie\nrp6p3PdzDk6jql6995sO9X7vvt/9XSGlRFEURVEURVGU1tFaHYCiKIqiKIqibHcqKVcURVEURVGU\nFlNJuaIoiqIoiqK0mErKFUVRFEVRFKXFVFKuKIqiKIqiKC2mknJFURRFURRFaTGVlCuKoiiKoihK\ni6mkXFEURVEURVFaTCXliqIoiqIoitJiKilXFEVRFEVRlBZTSbmiKIqiKIqitJhKyhVFURRFURSl\nxVRS [...]
+      "text/plain": [
+       "<matplotlib.figure.Figure at 0xd99151be0>"
+      ]
+     },
      "metadata": {},
-     "outputs": [
-      {
-       "output_type": "display_data",
-       "png": "iVBORw0KGgoAAAANSUhEUgAAAuUAAAH3CAYAAAAczCHzAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAWJQAAFiUBSVIk8AAAIABJREFUeJzs3XmU3Fd16Pvv+f1qruq5W92tWbJmWbKw5NmWJ8xoYwcM\nhhAIITe5JA94OCSX+1ay4N61IOsmeTzWfSGYBxl4eWG4DAYMBmNjjAdZNh4lj5Kseeh5qvk3nvdH\ntbrVVre6uru6a+j9WauXVFW/YZe6VLXr/PbZR2mtNUIIIYQQQoiyMcodgBBCCCGEEIudJOVCCCGE\nEEKUmSTlQgghhBBClJkk5UIIIYQQQpSZJOVCCCGEEEKUmSTlQgghhBBClJkk5UIIIYQQQpSZJOVC\nCCGEEEKUmSTlQgghhBBClJkk5UIIIYQQQpSZJOVCCCGEEEKUmSTlQgghhBBClJkk5UIIIYQQQpRZ\n1STlp06d4 [...]
-      },
-      {
-       "output_type": "display_data",
-       "png": "iVBORw0KGgoAAAANSUhEUgAAAuYAAAH3CAYAAAD3+5rwAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAWJQAAFiUBSVIk8AAAIABJREFUeJzsvVuMbdl1nveNeVlr76o699MXtrpJdlNNtiSHulsMFceg\nHBGOHcCSkzgviRFLfhAEQRGNXIAoEJMAEhIkL4kgvSQRbCAX2RDkh9iyLJomI4mxaDMSQ8WkaKol\nNS/Nbp7uc6pO1d57rTXnHCMPY1a1pNCwICpmM1k/cHC6T51TtWqtueb8x///Y5SYmbFixYoVK1as\nWLFixYovK8KX+wJWrFixYsWKFStWrFixEvMVK1asWLFixYoVK14XWIn5ihUrVqxYsWLFihWvA6zE\nfMWKFStWrFixYsWK1wFWYr5ixYoVK1asWLFixesAKzFfsWLFihUrVqxYseJ1gJWYr1ixYsWKFStW\nrFjxOsBKz [...]
-      }
-     ],
-     "prompt_number": 28
+     "output_type": "display_data"
     },
     {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [],
-     "language": "python",
+     "data": {
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAuYAAAH/CAYAAAAbqBidAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAWJQAAFiUBSVIk8AAAIABJREFUeJzsvFusbVt2nvW11nsfY8611r6dy65T93KV7/cEg2OsQCwr\njokIhChCCBAoRkhAFAQC5SE8QCIhIfkhEUERPAQRCRSs8BApMjiOZEc2DpYdm6SwYxc2tsuuqlNV\n55x9WXutOcfol9Z4aGPtikIsTCpxnSLjl5aWtC5zjjlGv/zt///Wxd3ZsWPHjh07duzYsWPHlxb6\npb6AHTt27NixY8eOHTt27MR8x44dO3bs2LFjx453BXZivmPHjh07duzYsWPHuwA7Md+xY8eOHTt2\n7Nix412AnZjv2LFjx44dO3bs2PEuwE7Md+zYsWPHjh07dux4F2An5jt27NixY8eOHTt2vAuwE/Md\nO3bs [...]
+      "text/plain": [
+       "<matplotlib.figure.Figure at 0xd99017860>"
+      ]
+     },
      "metadata": {},
-     "outputs": []
+     "output_type": "display_data"
     }
    ],
-   "metadata": {}
+   "source": [
+    "# do agglomerativeClustering\n",
+    "beta = 0.5              # node vs edge weight\n",
+    "nodeNumStop = 50        # desired num. nodes in result\n",
+    "labels = graphs.agglomerativeClustering(graph=rag, edgeWeights=edgeWeights,\n",
+    "                                        beta=beta, nodeFeatures=nodeFeatures,\n",
+    "                                        nodeNumStop=nodeNumStop)\n",
+    "# show result \n",
+    "imgLabels =rag.projectLabelsToBaseGraph(labels)\n",
+    "cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( 256,3))\n",
+    "pylab.imshow ( imgLabels.squeeze().swapaxes(0,1), cmap = cmap)\n",
+    "pylab.show()\n",
+    "\n",
+    "rag.show(img,labels)\n",
+    "vigra.show()\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": []
   }
- ]
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.5.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
 }
diff --git a/vigranumpy/examples/blocking.py b/vigranumpy/examples/blocking.py
index b3860fd..aca469a 100644
--- a/vigranumpy/examples/blocking.py
+++ b/vigranumpy/examples/blocking.py
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
 import vigra
 from vigra import graphs
 from vigra import numpy
@@ -5,8 +7,6 @@ from vigra import Timer
 from vigra import blockwise as bw
 
 
-
-
 numpy.random.seed(42)
 
 # input
@@ -14,21 +14,21 @@ shape = (500, 500, 500)
 
 data = numpy.random.rand(*shape).astype('float32')
 
-print "make options object"
+print("make options object")
 options = bw.BlockwiseConvolutionOptions3D()
-print type(options)
+print(type(options))
 
 sigma = 1.0
 options.stdDev = (sigma, )*3
 options.blockShape = (128, )*3
 
-print "stddev",options.stdDev
-print "call blockwise filter"
+print("stddev",options.stdDev)
+print("call blockwise filter")
 
 with vigra.Timer("AllThread"):
-	res = bw.gaussianSmooth(data, options)
+    res = bw.gaussianSmooth(data, options)
 with vigra.Timer("1thread"):
-	resRef = vigra.gaussianSmoothing(data, sigma)
+    resRef = vigra.gaussianSmoothing(data, sigma)
 
 
-print numpy.sum(numpy.abs(res-resRef))
+print(numpy.sum(numpy.abs(res-resRef)))
diff --git a/vigranumpy/examples/gaussian_rank.py b/vigranumpy/examples/gaussian_rank.py
deleted file mode 100644
index 3a6c2f3..0000000
--- a/vigranumpy/examples/gaussian_rank.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import vigra
-from vigra import graphs
-
-filepath = '12003.jpg' 
-img = vigra.impex.readImage(filepath).astype('float32')
-imgM = img.copy()
-
-sigmaS = 1.0
-sigmaB = 5.0
-
-with vigra.Timer("compute rank"):
-    for c in range(3):
-        print "channel",c
-        imgC = img[:, :, c].squeeze()
-        imgM[:,:,c] = vigra.histogram.gaussianRankOrder(imgC,sigmas=(sigmaS, sigmaS, sigmaB), ranks=(0.5,), bins=100).squeeze()
-vigra.imshow(vigra.taggedView(imgM,'xyc'))
-vigra.show()
diff --git a/vigranumpy/examples/graph_agglomerative_clustering.py b/vigranumpy/examples/graph_agglomerative_clustering.py
index 22c727d..ed8af4d 100644
--- a/vigranumpy/examples/graph_agglomerative_clustering.py
+++ b/vigranumpy/examples/graph_agglomerative_clustering.py
@@ -2,6 +2,7 @@ import vigra
 from vigra import graphs
 from vigra import numpy
 import pylab
+
 # parameter
 filepath = '12003.jpg'  # input image path
 sigmaGradMag = 5.0      # sigma Gaussian gradient
diff --git a/vigranumpy/examples/graph_watersheds.py b/vigranumpy/examples/graph_watersheds.py
index 1858a2f..e3a18b0 100644
--- a/vigranumpy/examples/graph_watersheds.py
+++ b/vigranumpy/examples/graph_watersheds.py
@@ -51,11 +51,11 @@ labelsEdgeWeighted  = graphs.edgeWeightedWatersheds(rag, ragEdgeWeights, seeds)
 
 
 f = pylab.figure()
-ax0 = f.add_subplot(1, 2, 0)
+ax0 = f.add_subplot(1, 2, 1)
 rag.showNested(img, labelsNodeWeighted)
 ax0.set_title("node weighted")
 
-ax1 = f.add_subplot(1, 2, 1)
+ax1 = f.add_subplot(1, 2, 2)
 rag.showNested(img, labelsEdgeWeighted)
 ax1.set_title("edge weighted")
 pylab.show()
diff --git a/vigranumpy/examples/grid_graph_shortestpath.py b/vigranumpy/examples/grid_graph_shortestpath.py
index 2e24be1..c1001ec 100644
--- a/vigranumpy/examples/grid_graph_shortestpath.py
+++ b/vigranumpy/examples/grid_graph_shortestpath.py
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
 import vigra
 import vigra.graphs as vigraph
 import pylab
@@ -11,8 +13,8 @@ from matplotlib.widgets import Slider, Button, RadioButtons
 
 def makeWeights(gamma):
     global hessian,gradmag,gridGraph
-    print "hessian",hessian.min(),hessian.max()
-    print "raw ",raw.min(),raw.max()
+    print("hessian",hessian.min(),hessian.max())
+    print("raw ",raw.min(),raw.max())
     wImg= numpy.exp((gradmag**0.5)*gamma*-1.0)#**0.5
     wImg = numpy.array(wImg).astype(numpy.float32)
     w=vigra.graphs.implicitMeanEdgeMap(gridGraph,wImg)
@@ -40,7 +42,7 @@ f       = '69015.jpg'
 img     = vigra.impex.readImage(f)
 
 
-print img.shape
+print(img.shape)
 
 if(img.shape[2]==1):
     img    = numpy.concatenate([img]*3,axis=2)
@@ -59,10 +61,10 @@ img-=img.min()
 img/=img.max()
 img*=255
 
-print imgLab.shape
+print(imgLab.shape)
 
 
-print "interpolate image"
+print("interpolate image")
 imgLabSmall = imgLab
 
 # make a few edge weights
@@ -106,7 +108,7 @@ def onclick(event):
     if event.xdata != None and event.ydata != None:
         xRaw,yRaw = event.xdata,event.ydata
         if not frozen and xRaw >=0.0 and yRaw>=0.0 and xRaw<img.shape[0] and yRaw<img.shape[1]:
-            x,y = long(math.floor(event.xdata)),long(math.floor(event.ydata))
+            x,y = int(math.floor(event.xdata)),int(math.floor(event.ydata))
             clickList.append((x,y))
             if len(clickList)==2:
                 source = gridGraph.coordinateToNode(clickList[0])
@@ -131,9 +133,9 @@ def unfreeze(event):
 def onslide(event):
     global img,gradmag,weights,clickList,sgamma
     weights  = makeWeights(sgamma.val)
-    print "onslide",clickList
+    print("onslide",clickList)
     if len(clickList)>=2:
-        print "we have  path"
+        print("we have  path")
         source = gridGraph.coordinateToNode(clickList[0])
         target = gridGraph.coordinateToNode(clickList[1])
         path = pathFinder.run(weights, source,target).path(pathType='coordinates')
diff --git a/vigranumpy/examples/merge_graph.py b/vigranumpy/examples/merge_graph.py
index 578b5de..dba13b6 100644
--- a/vigranumpy/examples/merge_graph.py
+++ b/vigranumpy/examples/merge_graph.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import vigra
 from vigra import graphs
 from vigra import numpy
@@ -27,26 +28,20 @@ rag = graphs.regionAdjacencyGraph(gridGraph, labels)
 # get the merge graph
 mg = graphs.mergeGraph(rag)
 
-
-
-
-
-
 # do n runs where we erase k edges in each run
-
 n = 3
 k = 200
 for r in range(n):
 
-    erased = 0 
+    erased = 0
 
     while(erased<k):
 
         # get a random edge
         randEdgeId = numpy.random.randint(rag.edgeNum)
 
-        print "random edge:",randEdgeId
-        # edge could be gone 
+        print("random edge:",randEdgeId)
+        # edge could be gone
         # -since we could have merged it already
         # - or due to transitivity of other merges
         if mg.hasEdgeId(randEdgeId):
@@ -64,5 +59,3 @@ for r in range(n):
     # get the result as pixels wise labeling
     asImage = rag.projectLabelsToGridGraph(labels)
     asImage = vigra.taggedView(asImage, "xy")
-    
-    
diff --git a/vigranumpy/examples/non_local_mean_2d_color.py b/vigranumpy/examples/non_local_mean_2d_color.py
index d1ed9b7..5388377 100644
--- a/vigranumpy/examples/non_local_mean_2d_color.py
+++ b/vigranumpy/examples/non_local_mean_2d_color.py
@@ -1,19 +1,57 @@
+from __future__ import print_function
+
 import vigra
 from vigra import numpy
 from matplotlib import pylab
 from time import time
 import multiprocessing
 
-
+path = "69015.jpg"
+#path = "12074.jpg"
+path = "100075.jpg" 
 path = "12003.jpg"
 data = vigra.impex.readImage(path).astype(numpy.float32)
-data = vigra.taggedView(100*numpy.random.rand(*data.shape),'xyc').astype('float32') + data
-data /= 2.0
-vigra.imshow(data)
-vigra.show()
+
 cpus = multiprocessing.cpu_count()
-policy = vigra.filters.NormPolicy(sigma=10.0, meanDist=300.7, varRatio=0.9)
-res = vigra.filters.nonLocalMean2d(data,policy=policy,searchRadius=8,patchRadius=2,nThreads=cpus+1,stepSize=1,verbose=True,sigmaMean=1.0)
+
+print("nCpus",cpus)
+
+t0 =time()
+
+#for c in range(3):
+#    cimg=data[:,:,c]
+#    cimg-=cimg.min()
+#    cimg/=cimg.max()
+
+
+iters = 10
+
+#policy = vigra.filters.RatioPolicy(sigma=10.0, meanRatio=0.95, varRatio=0.5)
+policy = vigra.filters.NormPolicy(sigma=50.0, meanDist=50, varRatio=0.5)
+#data-=100.0
+res = vigra.filters.nonLocalMean2d(data,policy=policy,searchRadius=5,patchRadius=1,nThreads=cpus+1,stepSize=2,verbose=True,sigmaMean=10.0)
+for i in range(iters-1):
+    res = vigra.filters.nonLocalMean2d(res,policy=policy,searchRadius=5,patchRadius=2,nThreads=cpus+1,stepSize=2,verbose=True,sigmaMean=10.0)
+t1 = time()
+
 res = vigra.taggedView(res,'xyc')
-vigra.imshow(res)
-vigra.show()
+gma = vigra.filters.gaussianGradientMagnitude(res,4.0)
+gmb = vigra.filters.gaussianGradientMagnitude(data,4.0)
+#data+=100.0
+print(t1-t0)
+imgs  = [data,res,gma,gmb]
+
+for img in imgs:
+    for c in range(img.shape[2]):
+        cimg=img[:,:,c]
+        cimg-=cimg.min()
+        cimg/=cimg.max()
+
+f = pylab.figure()
+for n, arr in enumerate(imgs):
+    arr = arr.squeeze()
+    f.add_subplot(1, len(imgs), n+1)
+    pylab.imshow(arr.swapaxes(0,1))
+
+pylab.title('denoised')
+pylab.show()
diff --git a/vigranumpy/examples/rag_features.py b/vigranumpy/examples/rag_features.py
deleted file mode 100644
index cbf4123..0000000
--- a/vigranumpy/examples/rag_features.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from vigra import *
-
-
-
-
- 
-
-def computeFeatures():
-    pass
-
-
-
-class FilterList(object):
-    def __init__(self, shape):
-        pass
-
-
-
-filterList = FilterList()
diff --git a/vigranumpy/examples/shock_filter.py b/vigranumpy/examples/shock_filter.py
index 2da5fe7..c95b050 100644
--- a/vigranumpy/examples/shock_filter.py
+++ b/vigranumpy/examples/shock_filter.py
@@ -1,25 +1,21 @@
 import vigra
 from vigra import graphs
 
-filepath = '12003.jpg' 
+filepath = '12003.jpg'
 img = vigra.impex.readImage(filepath).astype('float32')[:,:,0]
 
-
-
 res = vigra.filters.shockFilter(img,sigma=1.5, rho=10.0, updwindFactorH=1.0, iterations=5)
 res = res.squeeze()
 
-
 import numpy as np
 import pylab
 import matplotlib.cm as cm
-import Image
 
 f = pylab.figure()
 for n, arr in enumerate([img,res]):
-    arr= arr.squeeze()
+    arr= arr.squeeze().T
     #f.add_subplot(2, 1, n)  # this line outputs images on top of each other
-    f.add_subplot(1, 2, n)  # this line outputs images side-by-side
+    f.add_subplot(1, 2, n+1)  # this line outputs images side-by-side
     pylab.imshow(arr,cmap=cm.Greys_r)
 pylab.title('( III x) image')
 pylab.show()
diff --git a/vigranumpy/lib/CMakeLists.txt b/vigranumpy/lib/CMakeLists.txt
index 65729a4..109085c 100644
--- a/vigranumpy/lib/CMakeLists.txt
+++ b/vigranumpy/lib/CMakeLists.txt
@@ -11,7 +11,22 @@ SET(PYSOURCES
 
 INSTALL(FILES ${PYSOURCES} DESTINATION ${VIGRANUMPY_INSTALL_DIR}/vigra)
 
-ADD_CUSTOM_TARGET(vigranumpy_lib)
+SET(TIMESTAMP "${CMAKE_CURRENT_BINARY_DIR}/TIMESTAMP.cxx")
+FILE(GLOB TIMESTAMP_FOUND ${TIMESTAMP})
+IF(NOT TIMESTAMP_FOUND)
+    FILE(WRITE ${TIMESTAMP}
+     "// auto-generated dummy file to ensure dependency on Python files. 
+// ADD_CUSTOM_TARGET(vigranumpy_lib SOURCES 'dollarsign'{PYSOURCES}) does not seem to be sufficient.
+")
+ENDIF()
+
+add_custom_command(
+    OUTPUT ${TIMESTAMP}
+    DEPENDS ${PYSOURCES}
+    COMMAND ${CMAKE_COMMAND}
+    ARGS -E touch ${TIMESTAMP})
+
+ADD_CUSTOM_TARGET(vigranumpy_lib SOURCES ${TIMESTAMP} ${PYSOURCES})
 ADD_DEPENDENCIES(vigranumpy vigranumpy_lib)
 
 FOREACH(lib_file ${PYSOURCES})
diff --git a/vigranumpy/lib/__init__.py b/vigranumpy/lib/__init__.py
index 8d1772c..dd4ddbc 100644
--- a/vigranumpy/lib/__init__.py
+++ b/vigranumpy/lib/__init__.py
@@ -1,4 +1,4 @@
-#######################################################################
+#######################################################################
 #
 #         Copyright 2009-2010 by Ullrich Koethe
 #
@@ -33,12 +33,14 @@
 #
 #######################################################################
 
+from __future__ import division, print_function
 import sys, os, time, math
+
 from numbers import Number
 from multiprocessing import cpu_count
 try:
     import pylab
-except Exception, e:
+except Exception as e:
     pass
 
 
@@ -107,26 +109,35 @@ The following sub-modules group related functionality:
 * utilities  (priority queues)
 ''' % _vigra_doc_path
 
-from __version__ import version
-import vigranumpycore
-import arraytypes
-import impex
-import sampling
-import filters
-import analysis
-import learning
-import colors
-import noise
-import geometry
-import optimization
-import histogram
-import graphs
-import utilities
-import blockwise
+from .__version__ import version
+import vigra.vigranumpycore as vigranumpycore
+import vigra.arraytypes as arraytypes
+import vigra.impex as impex
+import vigra.sampling as sampling
+import vigra.filters as filters
+import vigra.analysis as analysis
+import vigra.learning as learning
+import vigra.colors as colors
+import vigra.noise as noise
+import vigra.geometry as geometry
+import vigra.optimization as optimization
+import vigra.histogram as histogram
+import vigra.graphs as graphs
+import vigra.utilities as utilities
+import vigra.blockwise as blockwise
 
 sampling.ImagePyramid = arraytypes.ImagePyramid
 
+try:
+    import vigra.fourier as fourier
+except Exception as e:
+    _fallbackModule('fourier',
+    '''
+    %s
 
+    Make sure that the fftw3 libraries are found during compilation and import.
+    They may be downloaded at http://www.fftw.org/.''' % str(e))
+    import fourier
 
 class Timer:
     def __init__(self, name, verbose=True):
@@ -135,7 +146,7 @@ class Timer:
 
     def __enter__(self):
         if self.verbose:
-            print self.name, "..."
+            print(self.name, "...")
         self.start = time.time()
         return self
 
@@ -143,38 +154,32 @@ class Timer:
         self.end = time.time()
         self.interval = self.end - self.start
         if self.verbose  :
-            print "... took ", self.interval, "sec"
-
-
-
+            print("... took ", self.interval, "sec")
 
+# portable way to inject a metaclass (taken from six.py)
+def _with_metaclass(meta, *bases):
+    """Create a base class with a metaclass."""
+    # This requires a bit of explanation: the basic idea is to make a dummy
+    # metaclass for one level of class instantiation that replaces itself with
+    # the actual metaclass.
+    class metaclass(meta):
+        def __new__(cls, name, this_bases, d):
+            return meta(name, bases, d)
+    return type.__new__(metaclass, 'temporary_class', (), {})
 
 
-
-try:
-    import fourier
-except Exception, e:
-    _fallbackModule('vigra.fourier',
-    '''
-    %s
-
-    Make sure that the fftw3 libraries are found during compilation and import.
-    They may be downloaded at http://www.fftw.org/.''' % str(e))
-    import fourier
-
 # import most frequently used functions
-from arraytypes import *
+from vigra.arraytypes import *
 standardArrayType = arraytypes.VigraArray
 defaultAxistags = arraytypes.VigraArray.defaultAxistags
 
-from vigranumpycore import ChunkedArrayFull, ChunkedArrayLazy, ChunkedArrayCompressed, ChunkedArrayTmpFile, Compression
+from vigra.vigranumpycore import ChunkedArrayFull, ChunkedArrayLazy, ChunkedArrayCompressed, ChunkedArrayTmpFile, Compression
 try:
-    from vigranumpycore import ChunkedArrayHDF5, HDF5Mode
+    from vigra.vigranumpycore import ChunkedArrayHDF5, HDF5Mode
 except:
     pass
 
-
-from impex import readImage, readVolume
+from vigra.impex import readImage, readVolume
 
 def readHDF5(filenameOrGroup, pathInFile, order=None):
     '''Read an array from an HDF5 file.
@@ -293,8 +298,8 @@ readHDF5.__module__ = 'vigra.impex'
 impex.writeHDF5 = writeHDF5
 writeHDF5.__module__ = 'vigra.impex'
 
-from filters import convolve, gaussianSmoothing
-from sampling import resize
+from .filters import convolve, gaussianSmoothing
+from .sampling import resize
 
 def gaussianDerivative(array, sigma, orders, out=None, window_size=0.0):
     '''
@@ -306,14 +311,17 @@ def gaussianDerivative(array, sigma, orders, out=None, window_size=0.0):
 
         'window_size' specifies the ratio between the filter scale and the size of
         the filter window. Use values around 2.0 to speed-up the computation for the
-        price of increased cut-off error, and values >= 4.0 for vary accurate results.
+        price of increased cut-off error, and values >= 4.0 for very accurate results.
         The window size is automatically determined for the default value 0.0.
+
+        For the first and second derivatives, you can also use :func:`gaussianGradient`
+        and :func:`hessianOfGaussian`.
     '''
     if hasattr(array, 'dropChannelAxis'):
         if array.dropChannelAxis().ndim != len(orders):
             raise RuntimeError("gaussianDerivative(): len(orders) doesn't match array dimension.")
     else:
-        if array.ndim == len(orders):
+        if array.ndim != len(orders):
             raise RuntimeError("gaussianDerivative(): len(orders) doesn't match array dimension.")
     try:
         len(sigma)
@@ -343,7 +351,7 @@ def searchfor(searchstring):
       contents = dir(_selfdict[attr])
       for cont in contents:
          if ( cont.upper().find(searchstring.upper()) ) >= 0:
-            print attr+"."+cont
+            print(attr+"."+cont)
 
 # FIXME: use axistags here
 def imshow(image,show=True, **kwargs):
@@ -413,7 +421,7 @@ def segShow(img,labels,edgeColor=(0,0,0),alpha=0.3,show=False,returnImg=False,r=
 
     labels = numpy.squeeze(labels)
     crackedEdges = analysis.regionImageToCrackEdgeImage(labels+1).squeeze()
-    #print "cracked shape",crackedEdges.shape
+    #print("cracked shape",crackedEdges.shape)
     whereEdge    =  numpy.where(crackedEdges==0)
     whereNoEdge  =  numpy.where(crackedEdges!=0)
     crackedEdges[whereEdge] = 1
@@ -511,7 +519,7 @@ def _genKernelFactories(name):
 %(newName)s.__doc__ = filters.%(name)s.%(oldName)s.__doc__
 filters.%(newName)s=%(newName)s
 ''' % {'oldName': oldName, 'newName': newName, 'name': name}
-        exec code
+        exec(code)
 
 _genKernelFactories('Kernel1D')
 _genKernelFactories('Kernel2D')
@@ -554,7 +562,7 @@ def _genWatershedsReoptimization():
         #  pylab.show()
 
 
-        seeds=analysis.segToSeeds(labels,long(shrinkN))
+        seeds=analysis.segToSeeds(labels,int(shrinkN))
 
         if visu :
           import matplotlib,numpy
@@ -628,32 +636,6 @@ _genTensorConvenienceFunctions()
 del _genTensorConvenienceFunctions
 
 
-
-
-
-# define tensor convenience functions
-def _genDistanceTransformFunctions():
-
-    def distanceTransform(array,background=True,norm=2,pixel_pitch=None, out=None):
-        if array.squeeze().ndim == 2:
-            return filters.distanceTransform2D(array,background=background,norm=norm,
-                                               pixel_pitch=pixel_pitch, out=out)
-        elif array.squeeze().ndim == 3:
-            return filters.distanceTransform3D(array.astype('float32'),background=background,norm=2)
-        else:
-            raise RuntimeError("distanceTransform is only implemented for 2D and 3D arrays")
-
-    distanceTransform.__module__ = 'vigra.filters'
-    filters.distanceTransform = distanceTransform
-
-
-
-_genDistanceTransformFunctions()
-del _genDistanceTransformFunctions
-
-
-
-
 # define feature convenience functions
 def _genFeaturConvenienceFunctions():
     def supportedFeatures(array):
@@ -662,7 +644,7 @@ def _genFeaturConvenienceFunctions():
            just the first two features in the list, use::
 
                 f = vigra.analysis.supportedFeatures(array)
-                print "Computing features:", f[:2]
+                print("Computing features:", f[:2])
                 r = vigra.analysis.extractFeatures(array, features=f[:2])
         '''
 
@@ -678,7 +660,7 @@ def _genFeaturConvenienceFunctions():
            list, use::
 
                 f = vigra.analysis.supportedRegionFeatures(array, labels)
-                print "Computing features:", f[:2]
+                print("Computing features:", f[:2])
                 r = vigra.analysis.extractRegionFeatures(array, labels, features=f[:2])
         '''
         return analysis.extractRegionFeatures(array, labels, None).supportedFeatures()
@@ -693,7 +675,7 @@ def _genFeaturConvenienceFunctions():
            list, use::
 
                 f = vigra.analysis.supportedConvexHullFeatures(labels)
-                print "Computing Convex Hull features:", f[:2]
+                print("Computing Convex Hull features:", f[:2])
                 r = vigra.analysis.extractConvexHullFeatures(labels, features=f[:2])
         '''
         try:
@@ -711,7 +693,7 @@ def _genFeaturConvenienceFunctions():
            list, use::
 
                 f = vigra.analysis.supportedSkeletonFeatures(labels)
-                print "Computing Skeleton features:", f[:2]
+                print("Computing Skeleton features:", f[:2])
                 r = vigra.analysis.extractSkeletonFeatures(labels, features=f[:2])
         '''
         try:
@@ -727,15 +709,23 @@ def _genFeaturConvenienceFunctions():
         return len(self.keys())
     def __iter__(self):
         return self.keys().__iter__()
-    def has_key(self, key):
+    def __contains__(self, key):
         try:
             return self.isActive(key)
         except:
             return False
-    def values(self):
-        return [self[k] for k in self.keys()]
-    def items(self):
-        return [(k, self[k]) for k in self.keys()]
+    def has_key(self, key):
+        return self.__contains__(key)
+    if sys.version_info[0] < 3:
+        def values(self):
+            return [self[k] for k in self.keys()]
+        def items(self):
+            return [(k, self[k]) for k in self.keys()]
+    else:
+        def values(self):
+            return self.itervalues()
+        def items(self):
+            return self.iteritems()
     def iterkeys(self):
         return self.keys().__iter__()
     def itervalues(self):
@@ -745,7 +735,7 @@ def _genFeaturConvenienceFunctions():
         for k in self.keys():
             yield (k, self[k])
 
-    for k in ['__len__', '__iter__', 'has_key', 'values', 'items', 'iterkeys', 'itervalues', 'iteritems']:
+    for k in ['__len__', '__iter__', '__contains__', 'has_key', 'values', 'items', 'iterkeys', 'itervalues', 'iteritems']:
         setattr(analysis.FeatureAccumulator, k, eval(k))
         setattr(analysis.RegionFeatureAccumulator, k, eval(k))
 
@@ -795,14 +785,16 @@ def _genGridGraphConvenienceFunctions():
 
         metaCls = cls.__class__
 
-        class gridGraphInjector(object):
-            class __metaclass__(metaCls):
-                def __init__(self, name, bases, dict):
-                    for b in bases:
-                        if type(b) not in (self, type):
-                            for k,v in dict.items():
-                                setattr(b,k,v)
-                    return type.__init__(self, name, bases, dict)
+        class gridGraphInjectorMeta(metaCls):
+            def __init__(self, name, bases, dict):
+                for b in bases:
+                    if type(b) not in (self, type):
+                        for k,v in dict.items():
+                            setattr(b,k,v)
+                return type.__init__(self, name, bases, dict)
+
+        class gridGraphInjector(_with_metaclass(gridGraphInjectorMeta, object)):
+            pass
 
         ##inject some methods in the point foo
         class moreGridGraph(gridGraphInjector, cls):
@@ -1156,7 +1148,7 @@ def _genRegionAdjacencyGraphConvenienceFunctions():
 
             """
             if(graph is not None and labels is not None):
-                super(RegionAdjacencyGraph,self).__init__(long(labels.max()+1),long(reserveEdges))
+                super(RegionAdjacencyGraph,self).__init__(int(labels.max()+1),int(reserveEdges))
 
                 if ignoreLabel is None and isDense is not None and isDense == True:
                     if ignoreLabel is None:
@@ -1222,7 +1214,7 @@ def _genRegionAdjacencyGraphConvenienceFunctions():
                     raise RuntimeError("self.edgeNum == 0  => cannot accumulate edge features")
                 if acc == 'mean':
                     weights = self.baseGraph.edgeLengths()
-                    #print "Weights",weights
+                    #print("Weights",weights)
                 else:
                     weights = graphs.graphMap(self.baseGraph,'edge',dtype=numpy.float32)
                     weights[:] = 1
@@ -1251,9 +1243,9 @@ def _genRegionAdjacencyGraphConvenienceFunctions():
             labels = self.baseGraphLabels
             ignoreLabel = self.ignoreLabel
             if acc == 'mean':
-              #print "get node size..."
+              #print("get node size...")
               weights = self.baseGraph.nodeSize()
-              #print "weights == ", weights
+              #print("weights == ", weights)
             else :
               weights = graphs.graphMap(self.baseGraph,'node',dtype=numpy.float32)
               weights[:]=1
@@ -1279,7 +1271,7 @@ def _genRegionAdjacencyGraphConvenienceFunctions():
                 ignoreLabel=self.ignoreLabel,
                 out=out
             )
-            #print "out",out.shape,out.dtype
+            #print("out",out.shape,out.dtype)
             return out
 
         def projectLabelsBack(self,steps,labels=None,_current=0):
@@ -1335,7 +1327,7 @@ def _genRegionAdjacencyGraphConvenienceFunctions():
 
             affEdges = self.affiliatedEdges
             uvCoords = affEdges.getUVCoordinates(self.baseGraph, ei)
-            dim = uvCoords.shape[1]/2
+            dim = uvCoords.shape[1] // 2
             uCoords = uvCoords[:,0:dim]
             vCoords = uvCoords[:,dim:2*dim]
             return (uCoords,vCoords)
@@ -1502,7 +1494,7 @@ def _genRegionAdjacencyGraphConvenienceFunctions():
                 ma = edgeFeatureShow.max()
                 cm = matplotlib.cm.ScalarMappable(cmap=cmap)
                 rgb = cm.to_rgba(edgeFeatureShow)[:,0:3]
-                print rgb.shape
+                print(rgb.shape)
 
                 if(ma > mi):
                     edgeFeatureShow -=mi
@@ -1526,7 +1518,7 @@ def _genRegionAdjacencyGraphConvenienceFunctions():
 
                 imgOut[u[:,0],u[:,1],:] = showVal
                 imgOut[v[:,0],v[:,1],:] = showVal
-                #print u.shape
+                #print(u.shape)
             if returnImg:
                 return imgOut
             imshow(imgOut)
@@ -1594,7 +1586,7 @@ def _genRegionAdjacencyGraphConvenienceFunctions():
             self.edgeRag2dToRag = None
             self.edgeRagToRag2d = None
             if self.dim == 3:
-                self.zOffset = self.img.shape[2]/2
+                self.zOffset = self.img.shape[2]//2
 
             self.visuImg = numpy.array(img, dtype=numpy.float32)
             self.visuImg -= self.visuImg.min()
@@ -1697,7 +1689,7 @@ def _genRegionAdjacencyGraphConvenienceFunctions():
                 self.edgeLabels2d = self.edgeLabels
 
             else:
-                print 'warning: bad dimension!'
+                print('warning: bad dimension!')
 
 
         def scroll(self, event):
@@ -1718,7 +1710,7 @@ def _genRegionAdjacencyGraphConvenienceFunctions():
             if self.press is None:
                 return
 
-            print event.xdata, event.ydata
+            print(event.xdata, event.ydata)
             self.handle_click(event)
 
         def on_release(self, event):
@@ -1726,7 +1718,7 @@ def _genRegionAdjacencyGraphConvenienceFunctions():
 
         def onclick(self, event):
             self.press = event.xdata, event.ydata
-            print event.xdata, event.ydata
+            print(event.xdata, event.ydata)
             try:
                 self.handle_click(event)
             except:
@@ -1749,9 +1741,9 @@ def _genRegionAdjacencyGraphConvenienceFunctions():
             if event.xdata != None and event.ydata != None:
                 xRaw,yRaw = event.xdata,event.ydata
                 if xRaw >=0.0 and yRaw>=0.0 and xRaw<img.shape[0] and yRaw<img.shape[1]:
-                    x,y = long(math.floor(event.xdata)),long(math.floor(event.ydata))
+                    x,y = int(math.floor(event.xdata)),int(math.floor(event.ydata))
 
-                    #print "X,Y",x,y
+                    #print("X,Y",x,y)
                     l = labels[x,y]
                     others  = []
 
@@ -1765,8 +1757,8 @@ def _genRegionAdjacencyGraphConvenienceFunctions():
                                     yy >=0 and yy<shape[0]:
                                     otherLabel = labels[xx, yy]
                                     if l != otherLabel:
-                                        edge = rag.findEdge(long(l), long(otherLabel))
-                                    #print edge
+                                        edge = rag.findEdge(int(l), int(otherLabel))
+                                    #print(edge)
                                         others.append((xx,yy,edge))
                                         #break
                         #if other is not None:
@@ -1797,27 +1789,27 @@ def _genRegionAdjacencyGraphConvenienceFunctions():
 
     def loadGridRagHDF5(filename , dset):
 
-        #print "load labels and make grid graph"
+        #print("load labels and make grid graph")
         labels = readHDF5(filename,  dset+'/labels')
         shape = labels.shape
         gridGraph = graphs.gridGraph(shape)
-        #print gridGraph
+        #print(gridGraph)
 
 
-        #print "load graph serialization"
+        #print("load graph serialization")
         graphSerialization = readHDF5(filename, dset+'/graph')
 
-        #print "make empty grid rag"
+        #print("make empty grid rag")
         gridRag = GridRegionAdjacencyGraph()
 
-        #print "deserialize"
+        #print("deserialize")
         gridRag.deserialize(graphSerialization)
 
 
-        #print "load affiliatedEdges"
+        #print("load affiliatedEdges")
         affEdgeSerialization = readHDF5(filename, dset+'/affiliated_edges')
 
-        #print "deserialize"
+        #print("deserialize")
         affiliatedEdges = graphs._deserialzieGridGraphAffiliatedEdges(gridGraph, gridRag, affEdgeSerialization)
 
 
@@ -2289,29 +2281,29 @@ def _genGraphSegmentationFunctions():
 
         assert edgeWeights is not None or nodeFeatures is not None
 
-        #print "prepare "
+        print("prepare ")
 
         if nodeNumStop is None:
-            nodeNumStop = max(graph.nodeNum/2,min(graph.nodeNum,2))
+            nodeNumStop = max(graph.nodeNum//2,min(graph.nodeNum,2))
 
 
         if edgeLengths is None :
-            #print "get edge length"
+            print("get edge length")
             edgeLengths = graphs.getEdgeLengths(graph)
 
 
         if nodeSizes is None:
-            #print "get node size"
+            print("get node size")
             nodeSizes = graphs.getNodeSizes(graph)
 
 
         if edgeWeights is None :
-            #print "get wegihts length"
+            print("get wegihts length")
             edgeWeights = graphs.graphMap(graph,'edge')
             edgeWeights[:]=0
 
         if nodeFeatures is None :
-            #print "get node feat"
+            print("get node feat")
             nodeFeatures = graphs.graphMap(graph,'node',addChannelDim=True)
             nodeFeatures[:]=0
 
@@ -2321,15 +2313,15 @@ def _genGraphSegmentationFunctions():
 
 
         #import sys
-        #print "graph refcout", sys.getrefcount(graph)
+        #print("graph refcout", sys.getrefcount(graph))
         mg = graphs.mergeGraph(graph)
-        #print "graph refcout", sys.getrefcount(graph)
+        #print("graph refcout", sys.getrefcount(graph))
         #mg = []
         #del mg
         #import gc
         #gc.collect()
 
-        #print "graph refcout", sys.getrefcount(graph)
+        #print("graph refcout", sys.getrefcount(graph))
         #sys.exit(0)
 
 
@@ -2398,7 +2390,7 @@ def _genGraphSegmentationFunctions():
                 raise RuntimeError("'%s' is not a supported distance type"%str(metric))
 
             # call unsave c++ function and make it sav
-            print "nodeLabels ",nodeLabels.shape, nodeLabels.dtype
+            print("nodeLabels ",nodeLabels.shape, nodeLabels.dtype)
             op = graphs.__minEdgeWeightNodeDistOperator(mergeGraph,edgeWeights,edgeLengths,nodeFeatures,nodeSizes,outWeight,nodeLabels,
                 float(beta),nd,float(wardness),float(gamma))
 
@@ -2429,7 +2421,7 @@ def _genGraphSegmentationFunctions():
 
     def hierarchicalClustering(clusterOperator,nodeNumStopCond,buildMergeTreeEncoding=True):
         # call unsave c++ function and make it save
-        hc = graphs.__hierarchicalClustering(clusterOperator,long(nodeNumStopCond),bool(buildMergeTreeEncoding))
+        hc = graphs.__hierarchicalClustering(clusterOperator,int(nodeNumStopCond),bool(buildMergeTreeEncoding))
         #hc.__dict__['__base_object__']=clusterOperator
         hc.__base_object__ = clusterOperator
         return hc
@@ -2468,6 +2460,7 @@ def _genHistogram():
     def gaussianRankOrder(image, minVal=None, maxVal=None,
                      bins=20, sigmas=None, ranks=[0.1,0.25,0.5,0.75,0.9],
                      out=None):
+        # FIXME: crashes on Python3
         image = numpy.require(image.squeeze(),dtype='float32')
         nDim = image.ndim
         if sigmas is None:
@@ -2485,9 +2478,9 @@ def _genHistogram():
         if maxVal is None :
             maxVal = image.max()
 
-        #print "image",image.shape,image.dtype
-        #print "ranks",ranks.shape,ranks.dtype
-        #print "sigmas",sigmas
+        #print("image",image.shape,image.dtype)
+        #print("ranks",ranks.shape,ranks.dtype)
+        #print("sigmas",sigmas)
         return histogram._gaussianRankOrder(image=image,
                                             minVal=float(minVal),
                                             maxVal=float(maxVal),
@@ -2667,10 +2660,10 @@ def loadBSDGt(filename):
         gt =  matContents['groundTruth'][0][gti][0]['Segmentation'][0]
         gt = numpy.swapaxes(gt,0,1)
         gt = gt.astype(numpy.uint32)
-        print gt.min(),gt.max()
+        print(gt.min(),gt.max())
         gts.append(gt[:,:,None])
     gtArray = numpy.concatenate(gts,axis=2)
-    print gtArray.shape
+    print(gtArray.shape)
     return gtArray
 
 
diff --git a/vigranumpy/lib/arraytypes.py b/vigranumpy/lib/arraytypes.py
index 3e1e0ec..5a81853 100644
--- a/vigranumpy/lib/arraytypes.py
+++ b/vigranumpy/lib/arraytypes.py
@@ -1,4 +1,4 @@
-#######################################################################
+#######################################################################
 #
 #         Copyright 2009-2011 by Ullrich Koethe
 #
@@ -32,15 +32,21 @@
 #    OTHER DEALINGS IN THE SOFTWARE.
 #
 #######################################################################
+from __future__ import print_function
+from functools import reduce
 
 import sys
 import copy
 import numpy
-import ufunc
+import vigra.ufunc as ufunc
 import collections
-import vigranumpycore
+import vigra.vigranumpycore as vigranumpycore
+
+from vigra.vigranumpycore import AxisType, AxisInfo, AxisTags
 
-from vigranumpycore import AxisType, AxisInfo, AxisTags
+if sys.version_info[0] > 2:
+    buffer = memoryview
+    xrange = range
 
 def _preserve_doc(f):
     npy_doc = eval('numpy.ndarray.%s.__doc__' % f.__name__)
@@ -636,7 +642,7 @@ class VigraArray(numpy.ndarray):
             permutation = self.permutationFromNumpyOrder()
         )
         socket.send_json(metadata, flags|zmq.SNDMORE)
-        socket.send(self.axistags.toJSON(), flags|zmq.SNDMORE)
+        socket.send(self.axistags.toJSON().encode('ascii'), flags|zmq.SNDMORE)
         return socket.send(transposed, flags, copy=copy, track=track)
 
     def imshow(self):
@@ -710,7 +716,7 @@ class VigraArray(numpy.ndarray):
         '''
         try:
             import qimage2ndarray
-        except Exception, e:
+        except Exception as e:
             from vigra import _fallbackModule
             _fallbackModule('qimage2ndarray',
             '''
@@ -770,7 +776,7 @@ class VigraArray(numpy.ndarray):
                 clip = False
             if m == M:
                 return res
-            f = 255.0 / (M - m)
+            f = 255.0 // (M - m)
             img = f * (img - m)
             if clip:
                 img = numpy.minimum(255.0, numpy.maximum(0.0, img))
@@ -926,7 +932,7 @@ class VigraArray(numpy.ndarray):
             >>> s = vigra.ScalarImage((2,2))
             >>> s.ravel()[...] = range(4)
             >>> for p in s.spaceIter():
-            ....    print p
+            ....    print(p)
             0.0
             1.0
             2.0
@@ -1252,8 +1258,9 @@ class VigraArray(numpy.ndarray):
         except:
             if not isinstance(index, collections.Iterable):
                 raise
-            res = numpy.ndarray.__getitem__(self,
-                     map(lambda x: None if isinstance(x, AxisInfo) else x, index))
+            # create temporary index without AxisInfo in order to use np.ndarray.__getitem__
+            tmpindex = [None if isinstance(x, AxisInfo) else x for x in index]
+            res = numpy.ndarray.__getitem__(self, tmpindex)
         if res is not self and hasattr(res, 'axistags'):
             if res.base is self or res.base is self.base:
                 res.axistags = res._transform_axistags(index)
@@ -2002,7 +2009,7 @@ class ImagePyramid(list):
         self[level][...] = image[...]
 
     def expandImpl(self, src, dest, centerValue):
-        import filters
+        import vigra.filters as filters
 
         ss, ds = src.shape, dest.shape
         s = [ss[k] if 2*ss[k] == ds[k] else -1 for k in range(len(ss))]
@@ -2023,7 +2030,7 @@ class ImagePyramid(list):
         '''
         # FIXME: This should be implemented in C++
         # FIXME: This should be implemented for arbitrary dimensions
-        import filters
+        import vigra.filters as filters
 
         if srcLevel > destLevel:
             raise RuntimeError("ImagePyramid::reduce(): srcLevel <= destLevel required.")
@@ -2062,7 +2069,7 @@ class ImagePyramid(list):
         '''
         # FIXME: This should be implemented in C++
         # FIXME: This should be implemented for arbitrary dimensions
-        import filters
+        import vigra.filters as filters
 
         if srcLevel > destLevel:
             raise RuntimeError("ImagePyramid::reduceLaplacian(): srcLevel <= destLevel required.")
@@ -2086,7 +2093,7 @@ class ImagePyramid(list):
         '''
         # FIXME: This should be implemented in C++
         # FIXME: This should be implemented for arbitrary dimensions
-        import filters
+        import vigra.filters as filters
 
         if srcLevel < destLevel:
             raise RuntimeError("ImagePyramid::expandLaplacian(): srcLevel >= destLevel required.")
@@ -2111,7 +2118,7 @@ class ImagePyramid(list):
         if level > self.highestLevel:
             image = list.__getitem__(self, -1)
             for i in range(self.highestLevel, level):
-                newShape = [int((k + 1) / 2) for k in image.shape]
+                newShape = [int((k + 1) // 2) for k in image.shape]
                 if hasChannels:
                     newShape[channelIndex] = image.shape[channelIndex]
                 if axistags:
diff --git a/vigranumpy/lib/axistags.py b/vigranumpy/lib/axistags.py
index 54131e0..5c564ea 100644
--- a/vigranumpy/lib/axistags.py
+++ b/vigranumpy/lib/axistags.py
@@ -1,5 +1,9 @@
+import sys
 import numpy as np
 
+if sys.version_info[0] > 2:
+    xrange = range
+
 class TaggedArray(np.ndarray):
 
     def __new__(subtype, shape, dtype=float, buffer=None, offset=0,
@@ -33,29 +37,29 @@ class TaggedArray(np.ndarray):
         if axis is not None:
             del res.axistags[axis]
         return res
-        
+
     def argmin(self, axis=None, out=None):
         res = np.ndarray.argmin(self, axis, out)
         if axis is not None:
             del res.axistags[axis]
         return res
-    
+
     def cumsum(self, axis=None, dtype=None, out=None):
         res = np.ndarray.cumsum(self, axis, dtype, out)
         if res.ndim != self.ndim:
             res.axistags = [None]*res.ndim
-        return res        
+        return res
 
     def cumprod(self, axis=None, dtype=None, out=None):
         res = np.ndarray.cumprod(self, axis, dtype, out)
         if res.ndim != self.ndim:
             res.axistags = [None]*res.ndim
-        return res        
+        return res
 
     def flatten(self, order='C'):
         res = np.ndarray.flatten(self, order)
         res.axistags = [None]
-        return res        
+        return res
 
     def max(self, axis=None, out=None):
         res = np.ndarray.max(self, axis, out)
@@ -68,13 +72,13 @@ class TaggedArray(np.ndarray):
         if axis is not None:
             del res.axistags[axis]
         return res
-    
+
     def min(self, axis=None, out=None):
         res = np.ndarray.min(self, axis, out)
         if axis is not None:
             del res.axistags[axis]
         return res
-    
+
     def nonzero(self):
         res = np.ndarray.nonzero(self)
         for k in xrange(len(res)):
@@ -96,30 +100,30 @@ class TaggedArray(np.ndarray):
     def ravel(self, order='C'):
         res = np.ndarray.ravel(self, order)
         res.axistags = [None]
-        return res        
+        return res
 
     def repeat(self, repeats, axis=None):
         res = np.ndarray.repeat(self, repeats, axis)
         if axis is None:
             res.axistags = [None]*res.ndim
-        return res        
+        return res
 
     def reshape(self, shape, order='C'):
         res = np.ndarray.reshape(self, shape, order)
         res.axistags = [None]*res.ndim
-        return res        
+        return res
 
     def resize(self, new_shape, refcheck=True, order=False):
         res = np.ndarray.reshape(self, new_shape, refcheck, order)
         res.axistags = [None]*res.ndim
-        return res        
-            
+        return res
+
     def squeeze(self):
         res = np.ndarray.squeeze(self)
         for k in xrange(self.ndim-1, -1, -1):
             if self.shape[k] == 1:
                 del res.axistags[k]
-        return res        
+        return res
 
     def std(self, axis=None, dtype=None, out=None, ddof=0):
         res = np.ndarray.std(self, axis, dtype, out, ddof)
@@ -134,19 +138,19 @@ class TaggedArray(np.ndarray):
         if axis is not None:
             del res.axistags[axis]
         return res
-            
+
     def swapaxes(self, i, j):
         res = np.ndarray.swapaxes(self, i, j)
         res.axistags[i] = self.axistags[j]
         res.axistags[j] = self.axistags[i]
-        return res        
- 
+        return res
+
     def take(self, indices, axis=None, out=None, mode='raise'):
         res = np.ndarray.take(self, indices, axis, out, mode)
         if axis is None:
             res.axistags = [None]*res.ndim
-        return res        
-           
+        return res
+
     def transpose(self, *axes):
         res = np.ndarray.transpose(self, *axes)
         if len(axes) == 1:
@@ -164,14 +168,14 @@ class TaggedArray(np.ndarray):
         if len(res.shape) == 0:
             res = res.item()
         return res
-    
+
     @property
     def T(self):
         return self.transpose()
 
     def __getitem__(self, index):
         '''x.__getitem__(y) <==> x[y]
-         
+
            In addition to the usual indexing functionality, this function
            also updates the axistags of the result array. There are three cases:
              * getitem creates a value => no axistags are required
@@ -198,15 +202,15 @@ class TaggedArray(np.ndarray):
                 if lindex < self.ndim and index.count(Ellipsis) == 0:
                     index += (Ellipsis,)
                     lindex += 1
-                
+
                 # how many missing axes are represented by an Ellipsis ?
                 lellipsis = self.ndim - lindex
-                
+
                 knew, kold, kindex = 0, 0, 0
                 while knew < lnew:
                     try:
                         # if index[kindex] is int, the dimension is bound => drop this axis
-                        int(index[kindex]) 
+                        int(index[kindex])
                         kold += 1
                         kindex += 1
                     except:
@@ -224,27 +228,28 @@ class TaggedArray(np.ndarray):
                         else:
                             kindex += 1
         return res
-    
-    for k in ['all', 'any', 'argmax', 'argmin', 'cumsum', 'cumprod', 'flatten', 
-               'max', 'mean', 'min', 'nonzero', 'prod', 'ptp', 'ravel', 'repeat', 
-               'reshape', 'resize', 'squeeze', 'std', 'sum', 'swapaxes', 'take', 
+
+    for k in ['all', 'any', 'argmax', 'argmin', 'cumsum', 'cumprod', 'flatten',
+               'max', 'mean', 'min', 'nonzero', 'prod', 'ptp', 'ravel', 'repeat',
+               'reshape', 'resize', 'squeeze', 'std', 'sum', 'swapaxes', 'take',
                'transpose', 'var']:
-        exec k + '.__doc__ = np.ndarray.' + k + '.__doc__'
+        exec(k + '.__doc__ = np.ndarray.' + k + '.__doc__')
+
 
-        
 def benchmark(expression):
     '''transfer of axistags causes a slowdown by a factor of about 10,
        when getitem returns a value, the slowdown is about 3 (due to Python calls)
     '''
     import timeit, axistags
+    from imp import reload
     reload(axistags)
     repetitions = 100000
-    t1 = timeit.Timer(expression, 
+    t1 = timeit.Timer(expression,
          "import numpy, axistags\na = axistags.TaggedArray((2,3,4), axistags='zyx', dtype=numpy.uint8)")
-    t2 = timeit.Timer(expression, 
+    t2 = timeit.Timer(expression,
          "import numpy, axistags\na = numpy.ndarray((2,3,4), dtype=numpy.uint8)")
-    t3 = timeit.Timer(expression, 
+    t3 = timeit.Timer(expression,
          "import numpy, axistags\na = axistags.TaggedArray((2,3,4), axistags='zyx', dtype=numpy.uint8).view(numpy.ndarray)")
-    print "TaggedArray:", t1.timeit(repetitions)/repetitions*1e6,"musec"
-    print "ndarray:", t2.timeit(repetitions)/repetitions*1e6,"musec"
-    print "TaggedArray as ndarray:", t3.timeit(repetitions)/repetitions*1e6,"musec"
+    print("TaggedArray:", t1.timeit(repetitions)/repetitions*1e6,"musec")
+    print("ndarray:", t2.timeit(repetitions)/repetitions*1e6,"musec")
+    print("TaggedArray as ndarray:", t3.timeit(repetitions)/repetitions*1e6,"musec")
diff --git a/vigranumpy/lib/pyqt/imagewindow.py b/vigranumpy/lib/pyqt/imagewindow.py
index 449030a..31deb90 100644
--- a/vigranumpy/lib/pyqt/imagewindow.py
+++ b/vigranumpy/lib/pyqt/imagewindow.py
@@ -32,8 +32,9 @@
 #    OTHER DEALINGS IN THE SOFTWARE.
 #
 #######################################################################
+from __future__ import print_function
 
-import math, os, numpy, PyQt4
+import math, os, sys, numpy, PyQt4
 
 import PyQt4.QtCore as qcore
 import PyQt4.QtGui  as qt
@@ -42,9 +43,12 @@ from PyQt4.QtCore import SIGNAL
 import vigra
 import vigra.ufunc
 
+if sys.version_info[0] > 2:
+    xrange = range
+
 try:
     from VigraQt import OverlayViewer, ImageCursor
-except Exception, e:
+except Exception as e:
     vigra._fallbackModule('VigraQt',
     '''
     %s
@@ -174,15 +178,15 @@ class ImageViewer(OverlayViewer):
                 OverlayViewer.removeOverlay(self, self.overlays[overlay])
                 self.overlays.pop(overlay)
                 self.update()
-            except IndexError, e:
-                print "No such overlay."
+            except IndexError as e:
+                print("No such overlay.")
         else:
             try:
                 self.overlays.remove(overlay)
                 OverlayViewer.removeOverlay(self, overlay)
                 self.update()
-            except ValueError, e:
-                print "No such overlay."
+            except ValueError as e:
+                print("No such overlay.")
 
     def _slideAfterZoom(self, shift):
         if self.zoomLevel() > 0:
@@ -299,7 +303,7 @@ class ImageViewer(OverlayViewer):
 
         try:
             image, normalized = self.getDisplayedImage()
-        except Exception, e:
+        except Exception as e:
             qt.QMessageBox.critical(self, "Error Applying Expression", str(e))
             return
 
@@ -319,10 +323,10 @@ class ImageViewer(OverlayViewer):
             image = self.image
         else:
             for f in vigra.ufunc.__all__:
-                exec 'from vigra.ufunc import %s' % f
+                exec('from vigra.ufunc import %s' % f)
             for f in dir(vigra.colors):
                 if not f.startswith('__'):
-                    exec 'from vigra.colors import %s' % f
+                    exec('from vigra.colors import %s' % f)
             x = self.image
             image = eval(self._savedExpression)
 
@@ -370,7 +374,7 @@ class ImageViewer(OverlayViewer):
                     image = self.getDisplay()[0]
                 try:
                     image.writeImage(filename, pixelType)
-                except RuntimeError, e:
+                except RuntimeError as e:
                     qt.QMessageBox.critical(self, "Error", str(e))
                 else:
                     return
@@ -426,7 +430,7 @@ class ImageViewer(OverlayViewer):
                     viewer2svg.viewer2svg(self, basename, not allOVs)
                 else:
                     viewer2svg.viewer2svg(self, filename, not allOVs)
-            except RuntimeError, e:
+            except RuntimeError as e:
                 qt.QMessageBox.critical(self, "Error", str(e))
             return
 
diff --git a/vigranumpy/lib/pyqt/overlays.py b/vigranumpy/lib/pyqt/overlays.py
index 9bd8a9f..5adbf4c 100644
--- a/vigranumpy/lib/pyqt/overlays.py
+++ b/vigranumpy/lib/pyqt/overlays.py
@@ -155,8 +155,7 @@ class TextOverlay(Overlay):
         if self.coordinateSystem() & VigraQt.Overlay.Scaled:
             position = QtCore.QPointF(*self.pos)
         else:
-            position = QtCore.QPointF(*map(
-              lambda x: (x+0.5) * self.parent().zoomFactor(), self.pos))
+            position = QtCore.QPointF(*[(x+0.5) * self.parent().zoomFactor() for x in self.pos])
 
         self._setupPainter(p)
         if self.pointsize:
diff --git a/vigranumpy/lib/pyqt/viewer2svg.py b/vigranumpy/lib/pyqt/viewer2svg.py
index 445455a..26a0215 100644
--- a/vigranumpy/lib/pyqt/viewer2svg.py
+++ b/vigranumpy/lib/pyqt/viewer2svg.py
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
 import os 
 from PyQt4 import QtCore, QtGui
 
@@ -66,7 +68,7 @@ def viewer2svg(viewer, basepath, onlyVisible = False, moveBy = QtCore.QPointF(0.
                 else:
                     outvec.extend(writeText(text = element[0], position = element[1]))
         else:
-            print str(overlay[0]) + " not supported yet.\n"
+            print(str(overlay[0]) + " not supported yet.\n")
 
     outvec.append('\n</g>\n')
     outvec.append('</svg>\n')
diff --git a/vigranumpy/lib/tagged_array.py b/vigranumpy/lib/tagged_array.py
index ee920f2..b5aa954 100644
--- a/vigranumpy/lib/tagged_array.py
+++ b/vigranumpy/lib/tagged_array.py
@@ -33,23 +33,26 @@
 #
 #######################################################################
 
-import copy
+import copy, sys
 import numpy
-    
+
+if sys.version_info[0] > 2:
+    xrange = range
+
 def preserve_doc(f):
     f.__doc__ = eval('numpy.ndarray.%s.__doc__' % f.__name__)
     return f
 
 class TaggedArray(numpy.ndarray):
     '''
-TaggedArray extends numpy.ndarray with an attribute 'axistags'. Any 
-axistags object must support the standard sequence interface, and its 
-length must match the number of dimensions of the array. Each item in 
-the axistags sequence is supposed to provide a description of the 
-corresponding array axis. All array functions that change the number or 
-ordering of an array's axes (such as transpose() and __getitem__()) are 
-overloaded so that they apply the same transformation to the axistags 
-object. 
+TaggedArray extends numpy.ndarray with an attribute 'axistags'. Any
+axistags object must support the standard sequence interface, and its
+length must match the number of dimensions of the array. Each item in
+the axistags sequence is supposed to provide a description of the
+corresponding array axis. All array functions that change the number or
+ordering of an array's axes (such as transpose() and __getitem__()) are
+overloaded so that they apply the same transformation to the axistags
+object.
 
 Example:
   >>> axistags = ['x', 'y']
@@ -62,9 +65,9 @@ Example:
   ['y']
   >>> a.transpose().axistags
   ['y', 'x']
-  
-Except for the new 'axistags' keyword, the 'TaggedArray' constructor is identical to the constructor 
-of 'numpy.ndarray'. 
+
+Except for the new 'axistags' keyword, the 'TaggedArray' constructor is identical to the constructor
+of 'numpy.ndarray'.
     '''
     def __new__(subtype, shape, dtype=float, buffer=None, offset=0, strides=None, order=None, axistags=None):
         res = numpy.ndarray.__new__(subtype, shape, dtype, buffer, offset, strides, order)
@@ -75,20 +78,20 @@ of 'numpy.ndarray'.
                 raise RuntimeError('TaggedArray(): len(axistags) must match ndim')
             res.axistags = copy.copy(axistags)
         return res
-        
+
     def default_axistags(self):
         '''Create an axistags object with non-informative entries.
         '''
         return [None]*self.ndim
-    
+
     def copy_axistags(self):
-        '''Create a copy of 'self.axistags'. If the array doesn't have axistags, default_axistags() 
+        '''Create a copy of 'self.axistags'. If the array doesn't have axistags, default_axistags()
            will be returned.
         '''
         return copy.copy(getattr(self, 'axistags', self.default_axistags()))
-        
+
     def transpose_axistags(self, axes=None):
-        '''Create a copy of 'self.axistags' according to the given axes permutation 
+        '''Create a copy of 'self.axistags' according to the given axes permutation
            (internally called in transpose()).
         '''
         axistags = self.default_axistags()
@@ -98,16 +101,16 @@ of 'numpy.ndarray'.
             for k in xrange(self.ndim):
                 axistags[k] = self.axistags[int(axes[k])]
         return axistags
-        
+
     def transform_axistags(self, index):
-        '''Create a copy of 'self.axistags' according to the given index or slice object 
+        '''Create a copy of 'self.axistags' according to the given index or slice object
            (internally called in __getitem__()).
         '''
         # we assume that self.ndim is already set to its new value, whereas
         # self.axistags has just been copied by __array_finalize__
-        
+
         new_axistags = self.default_axistags()
-        
+
         if hasattr(self, 'axistags'):
             old_axistags = self.axistags
             old_ndim = len(old_axistags)
@@ -122,15 +125,15 @@ of 'numpy.ndarray'.
             if len_index < old_ndim and index.count(Ellipsis) == 0:
                 index += (Ellipsis,)
                 len_index += 1
-            
+
             # how many missing axes are represented by an Ellipsis ?
             len_ellipsis = old_ndim - len_index
-            
+
             knew, kold, kindex = 0, 0, 0
             while knew < new_ndim:
                 try:
                     # if index[kindex] is int, the dimension is bound => drop this axis
-                    int(index[kindex]) 
+                    int(index[kindex])
                     kold += 1
                     kindex += 1
                 except:
@@ -145,9 +148,9 @@ of 'numpy.ndarray'.
                     else:
                         kindex += 1
         return new_axistags
-    
+
     __array_priority__ = 10.0
-    
+
     def __array_finalize__(self, obj):
         if hasattr(obj, 'axistags'):
             self.axistags = obj.axistags
@@ -157,18 +160,18 @@ of 'numpy.ndarray'.
         result = numpy.ndarray.__copy__(self, order)
         result.axistags = result.copy_axistags()
         return result
-    
+
     @preserve_doc
     def __deepcopy__(self, memo):
         result = numpy.ndarray.__deepcopy__(self, memo)
         memo[id(self)] = result
         result.__dict__ = copy.deepcopy(self.__dict__, memo)
         return result
-    
+
     def __repr__(self):
         return "%s(shape=%s, axistags=%s, dtype=%s, data=\n%s)" % \
           (self.__class__.__name__, str(self.shape), repr(self.axistags), str(self.dtype), str(self))
-          
+
     @preserve_doc
     def all(self, axis=None, out=None):
         res = numpy.ndarray.all(self, axis, out)
@@ -192,7 +195,7 @@ of 'numpy.ndarray'.
             res.axistags = res.copy_axistags()
             del res.axistags[axis]
         return res
-        
+
     @preserve_doc
     def argmin(self, axis=None, out=None):
         res = numpy.ndarray.argmin(self, axis, out)
@@ -200,20 +203,20 @@ of 'numpy.ndarray'.
             res.axistags = res.copy_axistags()
             del res.axistags[axis]
         return res
-    
+
     @preserve_doc
     def cumsum(self, axis=None, dtype=None, out=None):
         res = numpy.ndarray.cumsum(self, axis, dtype, out)
         if res.ndim != self.ndim:
             res.axistags = res.default_axistags()
-        return res        
+        return res
 
     @preserve_doc
     def cumprod(self, axis=None, dtype=None, out=None):
         res = numpy.ndarray.cumprod(self, axis, dtype, out)
         if res.ndim != self.ndim:
             res.axistags = res.default_axistags()
-        return res        
+        return res
 
     # FIXME: we should also provide a possibility to determine flattening order by axistags
     #        (the same applies to flat and ravel)
@@ -221,7 +224,7 @@ of 'numpy.ndarray'.
     def flatten(self, order='C'):
         res = numpy.ndarray.flatten(self, order)
         res.axistags = res.default_axistags()
-        return res        
+        return res
 
     @preserve_doc
     def max(self, axis=None, out=None):
@@ -238,7 +241,7 @@ of 'numpy.ndarray'.
             res.axistags = res.copy_axistags()
             del res.axistags[axis]
         return res
-    
+
     @preserve_doc
     def min(self, axis=None, out=None):
         res = numpy.ndarray.min(self, axis, out)
@@ -246,7 +249,7 @@ of 'numpy.ndarray'.
             res.axistags = res.copy_axistags()
             del res.axistags[axis]
         return res
-    
+
     @preserve_doc
     def nonzero(self):
         res = numpy.ndarray.nonzero(self)
@@ -274,27 +277,27 @@ of 'numpy.ndarray'.
     def ravel(self, order='C'):
         res = numpy.ndarray.ravel(self, order)
         res.axistags = res.default_axistags()
-        return res        
+        return res
 
     @preserve_doc
     def repeat(self, repeats, axis=None):
         res = numpy.ndarray.repeat(self, repeats, axis)
         if axis is None:
             res.axistags = res.default_axistags()
-        return res        
+        return res
 
     @preserve_doc
     def reshape(self, shape, order='C'):
         res = numpy.ndarray.reshape(self, shape, order)
         res.axistags = res.default_axistags()
-        return res        
+        return res
 
     @preserve_doc
     def resize(self, new_shape, refcheck=True, order=False):
         res = numpy.ndarray.reshape(self, new_shape, refcheck, order)
         res.axistags = res.default_axistags()
-        return res        
-            
+        return res
+
     @preserve_doc
     def squeeze(self):
         res = numpy.ndarray.squeeze(self)
@@ -303,7 +306,7 @@ of 'numpy.ndarray'.
             for k in xrange(self.ndim-1, -1, -1):
                 if self.shape[k] == 1:
                     del res.axistags[k]
-        return res        
+        return res
 
     @preserve_doc
     def std(self, axis=None, dtype=None, out=None, ddof=0):
@@ -322,21 +325,21 @@ of 'numpy.ndarray'.
             res.axistags = res.copy_axistags()
             del res.axistags[axis]
         return res
-            
+
     @preserve_doc
     def swapaxes(self, i, j):
         res = numpy.ndarray.swapaxes(self, i, j)
         res.axistags = res.copy_axistags()
         res.axistags[i], res.axistags[j] = res.axistags[j], res.axistags[i]
-        return res        
- 
+        return res
+
     @preserve_doc
     def take(self, indices, axis=None, out=None, mode='raise'):
         res = numpy.ndarray.take(self, indices, axis, out, mode)
         if axis is None:
             res.axistags = res.default_axistags()
-        return res        
-           
+        return res
+
     @preserve_doc
     def transpose(self, *axes):
         res = numpy.ndarray.transpose(self, *axes)
@@ -359,7 +362,7 @@ of 'numpy.ndarray'.
 
     def __getitem__(self, index):
         '''x.__getitem__(y) <==> x[y]
-         
+
            In addition to the usual indexing functionality, this function
            also updates the axistags of the result array. There are three cases:
              * getitem creates a scalar value => no axistags are required
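The tagged_array.py code relies on numpy's subclassing protocol: `__array_finalize__` is what lets the `axistags` attribute survive views and slices, while the overloaded methods adjust it whenever an axis is added or removed. A reduced sketch of the same mechanism, independent of the patch, with a made-up `label` attribute in place of axistags:

    import numpy as np

    class LabeledArray(np.ndarray):
        def __new__(cls, shape, label=None, **kwargs):
            obj = np.ndarray.__new__(cls, shape, **kwargs)
            obj.label = label
            return obj

        def __array_finalize__(self, obj):
            # called for every view/slice; copy the attribute from the source array
            if obj is not None:
                self.label = getattr(obj, 'label', None)

    a = LabeledArray((3, 4), label='xy')
    b = a[0]                      # a view: __array_finalize__ propagates 'label'
    assert b.label == 'xy'
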
diff --git a/vigranumpy/lib/ufunc.py b/vigranumpy/lib/ufunc.py
index 9b3bbb0..5f0df00 100644
--- a/vigranumpy/lib/ufunc.py
+++ b/vigranumpy/lib/ufunc.py
@@ -1,36 +1,36 @@
-#######################################################################
-#                                                                      
-#         Copyright 2009-2010 by Ullrich Koethe                        
-#                                                                      
-#    This file is part of the VIGRA computer vision library.           
-#    The VIGRA Website is                                              
-#        http://hci.iwr.uni-heidelberg.de/vigra/                       
-#    Please direct questions, bug reports, and contributions to        
-#        ullrich.koethe at iwr.uni-heidelberg.de    or                    
-#        vigra at informatik.uni-hamburg.de                               
-#                                                                      
-#    Permission is hereby granted, free of charge, to any person       
-#    obtaining a copy of this software and associated documentation    
-#    files (the "Software"), to deal in the Software without           
-#    restriction, including without limitation the rights to use,      
-#    copy, modify, merge, publish, distribute, sublicense, and/or      
-#    sell copies of the Software, and to permit persons to whom the    
-#    Software is furnished to do so, subject to the following          
-#    conditions:                                                       
-#                                                                      
-#    The above copyright notice and this permission notice shall be    
-#    included in all copies or substantial portions of the             
-#    Software.                                                         
-#                                                                      
-#    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND    
-#    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES   
-#    OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND          
-#    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT       
-#    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,      
-#    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING      
-#    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR     
-#    OTHER DEALINGS IN THE SOFTWARE.                                   
-#                                                                      
+#######################################################################
+#
+#         Copyright 2009-2010 by Ullrich Koethe
+#
+#    This file is part of the VIGRA computer vision library.
+#    The VIGRA Website is
+#        http://hci.iwr.uni-heidelberg.de/vigra/
+#    Please direct questions, bug reports, and contributions to
+#        ullrich.koethe at iwr.uni-heidelberg.de    or
+#        vigra at informatik.uni-hamburg.de
+#
+#    Permission is hereby granted, free of charge, to any person
+#    obtaining a copy of this software and associated documentation
+#    files (the "Software"), to deal in the Software without
+#    restriction, including without limitation the rights to use,
+#    copy, modify, merge, publish, distribute, sublicense, and/or
+#    sell copies of the Software, and to permit persons to whom the
+#    Software is furnished to do so, subject to the following
+#    conditions:
+#
+#    The above copyright notice and this permission notice shall be
+#    included in all copies or substantial portions of the
+#    Software.
+#
+#    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+#    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+#    OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+#    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+#    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+#    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+#    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+#    OTHER DEALINGS IN THE SOFTWARE.
+#
 #######################################################################
 
 import numpy
@@ -38,65 +38,65 @@ import copy
 
 vigraTypecastingRules = '''
 Default output types are thus determined according to the following rules:
-   
+
    1. The output type does not depend on the order of the arguments::
-   
+
          a + b results in the same type as b + a
-   
-   2.a With exception of logical functions and abs(), the output type 
+
+   2.a With the exception of logical functions and abs(), the output type
        does not depend on the function to be executed.
-        
-   2.b The output type of logical functions is bool. 
-   
-   2.c The output type of abs() follows general rules unless the 
-       input contains complex numbers, in which case the output type 
+
+   2.b The output type of logical functions is bool.
+
+   2.c The output type of abs() follows general rules unless the
+       input contains complex numbers, in which case the output type
        is the corresponding float number type::
-      
+
          a + b results in the same type as a / b
          a == b => bool
          abs(complex128) => float64
-         
+
    3. If the inputs have the same type, the type is preserved::
-   
+
          uint8 + uint8 => uint8
-   
-   4. If (and only if) one of the inputs has at least 64 bits, the output 
+
+   4. If (and only if) one of the inputs has at least 64 bits, the output
       will also have at least 64 bits::
-      
+
          int64 + uint32 => int64
          int64 + 1.0    => float64
-         
+
    5. If an array is combined with a scalar of the same kind (integer,
-      float, or complex), the array type is preserved. If an integer 
-      array with at most 32 bits is combined with a float scalar, the 
+      float, or complex), the array type is preserved. If an integer
+      array with at most 32 bits is combined with a float scalar, the
       result is float32 (and rule 4 kicks in if the array has 64 bits)::
-      
+
          uint8   + 1   => uint8
          uint8   + 1.0 => float32
          float32 + 1.0 => float32
          float64 + 1.0 => float64
-         
+
    6. Integer expressions with mixed types always produce signed results.
-      If the arguments have at most 32 bits, the result will be int32, 
+      If the arguments have at most 32 bits, the result will be int32,
       otherwise it will be int64 (cf. rule 4)::
-      
+
          int8  + uint8  => int32
          int32 + uint8  => int32
          int32 + uint32 => int32
          int32 + int64  => int64
          int64 + uint64 => int64
-         
-   7. In all other cases, the output type is equal to the highest input 
+
+   7. In all other cases, the output type is equal to the highest input
       type::
-      
+
          int32   + float32    => float32
          float32 + complex128 => complex128
-         
+
    8. All defaults can be overridden by providing an explicit output array::
-   
+
          ufunc.add(uint8, uint8, uint16) => uint16
-         
-In order to prevent overflow, necessary upcasting is performed before 
+
+In order to prevent overflow, necessary upcasting is performed before
 the function is executed.
 '''
 
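The rules are easier to see with concrete dtypes. The sketch below roughly mirrors rules 3 to 5 for an 'array op scalar' expression; it is an illustration only (rough_result_dtype is a made-up helper), the actual logic lives in Function.common_type further down:

    import numpy as np

    def rough_result_dtype(array_dtype, scalar):
        # approximate rules 3-5 for 'array op scalar'
        a = np.dtype(array_dtype)
        if isinstance(scalar, int) and a.kind in 'iu':
            return a                                                 # rule 5: kind preserved
        if isinstance(scalar, float) and a.kind in 'iu':
            return np.dtype(np.float64 if a.itemsize >= 8 else np.float32)   # rules 4+5
        return a

    assert rough_result_dtype(np.uint8, 1) == np.uint8        # uint8   + 1   => uint8
    assert rough_result_dtype(np.uint8, 1.0) == np.float32    # uint8   + 1.0 => float32
    assert rough_result_dtype(np.int64, 1.0) == np.float64    # int64   + 1.0 => float64
    assert rough_result_dtype(np.float32, 1.0) == np.float32  # float32 + 1.0 => float32
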
@@ -106,7 +106,7 @@ class Function(object):
     kindToNumber = {'b': 1, 'u': 2, 'i': 2, 'f': 3, 'c': 4}
     boolFunctions = ['equal', 'greater', 'greater_equal', 'less', 'less_equal', 'not_equal',
                      'logical_and', 'logical_not', 'logical_or', 'logical_xor']
-    
+
     def __init__(self, function):
         self.function = function
         self.is_bool = function.__name__ in self.boolFunctions
@@ -114,27 +114,27 @@ class Function(object):
         self.__doc__ = function.__doc__
         self.nin = function.nin
         self.nout = function.nout
-        
+
     def __getattr__(self, name):
         return getattr(self.function, name)
-        
+
     def __repr__(self):
         return "<vigra.ufunc '%s'>" % self.__name__
-    
+
     def priorities(self, *args):
-        '''Among the inputs with largest size, find the one with highest 
-           __array_priority__. Return this input, or None if there is no 
+        '''Among the inputs with largest size, find the one with highest
+           __array_priority__. Return this input, or None if there are no
            inputs with 'size' and '__array_priority__' defined.'''
         maxSize = max([getattr(x, 'size', 0) for x in args])
         if maxSize == 0:
             return None
         priorities = [(getattr(x, '__array_priority__', -1.0), x) for x in args if getattr(x, 'size', 0) == maxSize]
-        priorities.sort(key = lambda (p, x): p)
+        priorities = sorted(priorities, key=lambda tuplepx: tuplepx[0])
         if priorities[-1][0] == -1.0:
             return None
         else:
             return priorities[-1][1]
-    
+
     def common_type_numpy(self, *args):
         '''Find a common type for the given inputs.
            This function will become obsolete when numpy.find_common_type() will be fixed.
@@ -163,14 +163,14 @@ class Function(object):
                (h == 'c' and s == 'c'):
                 return (highestArrayType, highestArrayType)
             return (highestArrayType, scalarTypes[0])
-        
+
     def common_type(self, *args):
-        '''Find the appropriate pair (in_dtype, out_dtype) according to 
-           vigranumpy typecasting rules. in_dtype is the type into which 
+        '''Find the appropriate pair (in_dtype, out_dtype) according to
+           vigranumpy typecasting rules. in_dtype is the type into which
            the arguments will be casted before performing the operation
            (to prevent possible overflow), out_type is the type the output
            array will have (unless an explicit out-argument is provided).
-           
+
            See ufunc.vigraTypecastingRules for detailed information on coercion rules.
         '''
         if self.is_abs and args[0].dtype.kind == "c" and args[1] is None:
@@ -184,13 +184,13 @@ class Function(object):
         arrayTypes = [(self.kindToNumber[x.dtype.kind], x.dtype.itemsize, x.dtype) for x in args if hasattr(x, 'dtype')]
         arrayTypes.sort()
         if arrayTypes[0] != arrayTypes[-1] and arrayTypes[-1][0] == 2:
-            if arrayTypes[-1][1] <= 4: 
+            if arrayTypes[-1][1] <= 4:
                 highestArrayType = (2, 4, numpy.int32)
-            else: 
+            else:
                 highestArrayType = (2, 8, numpy.int64)
         else:
             highestArrayType = arrayTypes[-1]
-            
+
         if self.is_bool:
             return (highestArrayType[-1], numpy.bool8)
 
@@ -201,15 +201,15 @@ class Function(object):
         if highestArrayType[0] >= scalarType[0]:
             return (highestArrayType[-1], highestArrayType[-1])
         elif scalarType[0] == 3 and highestArrayType[1] <= 4:
-            return (highestArrayType[-1], numpy.float32)        
+            return (highestArrayType[-1], numpy.float32)
         else:
-            return (highestArrayType[-1], scalarType[-1])        
-        
+            return (highestArrayType[-1], scalarType[-1])
+
 class UnaryFunction(Function):
     def __call__(self, arg, out=None):
         a = arg.squeeze().transposeToNumpyOrder()
         dtype, out_dtype = self.common_type(a, out)
-        
+
         if out is None:
             out = arg.__class__(arg, dtype=out_dtype, order='A', init=False)
             o = out.squeeze().transposeToNumpyOrder()
@@ -217,10 +217,10 @@ class UnaryFunction(Function):
             o = out.squeeze().transposeToNumpyOrder()
             if not a.axistags.compatible(o.axistags):
                 raise RuntimeError("%s(): axistag mismatch" % self.function.__name__)
-        
+
         a = numpy.require(a, dtype).view(numpy.ndarray) # view(ndarray) prevents infinite recursion
         self.function(a, o)
-        return out            
+        return out
 
 class UnaryFunctionOut2(Function):
     def __call__(self, arg, out1=None, out2=None):
@@ -236,40 +236,43 @@ class UnaryFunctionOut2(Function):
                 raise RuntimeError("%s(): axistag mismatch" % self.function.__name__)
 
         if out2 is None:
-            out2 = arg.__class__(arg, dtype=out_dtype, order='A', init=False)            
+            out2 = arg.__class__(arg, dtype=out_dtype, order='A', init=False)
             o2 = out2.squeeze().transposeToNumpyOrder()
         else:
             o2 = out2.squeeze().transposeToNumpyOrder()
             if not a.axistags.compatible(o2.axistags):
                 raise RuntimeError("%s(): axistag mismatch" % self.function.__name__)
-            
+
         a = numpy.require(a, dtype).view(numpy.ndarray) # view(ndarray) prevents infinite recursion
         self.function(a, o1, o2)
         return out1, out2
-                
+
 class BinaryFunction(Function):
     def __call__(self, arg1, arg2, out=None):
+        if arg1.__class__ is numpy.ndarray or arg2.__class__ is numpy.ndarray:
+            return self.function(arg1, arg2, out)
+
         dtype, out_dtype = self.common_type(arg1, arg2, out)
-        
+
         if isinstance(arg1, numpy.ndarray):
             a1 = arg1.transposeToNumpyOrder()
             if isinstance(arg2, numpy.ndarray):
                 a2 = arg2.transposeToNumpyOrder()
-                
+
                 if arg1.__array_priority__ == arg2.__array_priority__:
                     priorityArg = arg2 if arg1.ndim < arg2.ndim else arg1
                 else:
                     priorityArg = arg2 if arg1.__array_priority__ < arg2.__array_priority__ else arg1
-                
+
                 if a1.ndim < a2.ndim:
                     a1 = a1.insertChannelAxis(order='C')
                 elif a1.ndim > a2.ndim:
                     a2 = a2.insertChannelAxis(order='C')
-                    
+
                 axistags = a1.axistags
-                
+
                 if not axistags.compatible(a2.axistags):
-                    raise RuntimeError("%s(): input axistag mismatch %r vs. %r" % 
+                    raise RuntimeError("%s(): input axistag mismatch %r vs. %r" %
                                          (self.function.__name__, axistags, a2.axistags))
                 shape = tuple(max(k) for k in zip(a1.shape, a2.shape))
                 a2 = numpy.require(a2, dtype).view(numpy.ndarray)
@@ -286,7 +289,7 @@ class BinaryFunction(Function):
             shape = a2.shape
             priorityArg = arg2
             a2 = numpy.require(a2, dtype).view(numpy.ndarray)
-            
+
         if out is None:
             outClass = priorityArg.__class__
             inversePermutation = priorityArg.permutationFromNumpyOrder()
@@ -300,33 +303,33 @@ class BinaryFunction(Function):
             if o.ndim < len(shape):
                 o = o.insertChannelAxis(order='C')
             if not axistags.compatible(o.axistags):
-                raise RuntimeError("%s(): output axistag mismatch %r vs. %r" % 
+                raise RuntimeError("%s(): output axistag mismatch %r vs. %r" %
                                          (self.function.__name__, axistags, o.axistags))
         self.function(a1, a2, o)
         return out
-        
+
 __all__ = []
 
-for _k in numpy.__dict__.itervalues():
+for _k in numpy.__dict__.values():
      if type(_k) == numpy.ufunc:
         if _k.nin == 1 and _k.nout == 1:
-            exec _k.__name__ + " = UnaryFunction(_k)"
+            exec(_k.__name__ + " = UnaryFunction(_k)")
         if _k.nin == 1 and _k.nout == 2:
-            exec _k.__name__ + " = UnaryFunctionOut2(_k)"
+            exec(_k.__name__ + " = UnaryFunctionOut2(_k)")
         if _k.nin == 2:
-            exec _k.__name__ + " = BinaryFunction(_k)"
+            exec(_k.__name__ + " = BinaryFunction(_k)")
         __all__.append(_k.__name__)
 
-__all__.sort()
+__all__ = sorted(__all__)
 
 def _prepareDoc():
     doc = '''
 The following mathematical functions are available in this module
 (refer to numpy for detailed documentation)::
-    
+
 '''
 
-    k = 0    
+    k = 0
     while k < len(__all__):
         t = 8
         while True:
@@ -338,7 +341,7 @@ The following mathematical functions are available in this module
         k += t
 
     return doc + '''
-Some of these functions are also provided as member functions of 
+Some of these functions are also provided as member functions of
 VigraArray::
 
     __abs__   __add__   __and__   __div__   __divmod__   __eq__
@@ -354,14 +357,14 @@ As usual, these functions are applied independently at each pixel.
 Vigranumpy overloads the numpy-versions of these functions in order to make their
 behavior more suitable for image analysis. In particular, we changed two aspects:
 
-* Axistag consistency is checked, and the order of axes and strides is 
-  preserved in the result array. (In contrast, plain numpy functions 
-  always create C-order arrays, disregarding the stride order of the 
+* Axistag consistency is checked, and the order of axes and strides is
+  preserved in the result array. (In contrast, plain numpy functions
+  always create C-order arrays, disregarding the stride order of the
   inputs.)
-* Typecasting rules are changed such that (i) data are represented with 
-  at most 32 bits, when possible, (ii) the number of types that occur as 
-  results of mixed expressions is reduced, and (iii) the chance of bad 
-  surprises is minimized. 
+* Typecasting rules are changed such that (i) data are represented with
+  at most 32 bits, when possible, (ii) the number of types that occur as
+  results of mixed expressions is reduced, and (iii) the chance of bad
+  surprises is minimized.
 
 ''' + vigraTypecastingRules
 
diff --git a/vigranumpy/src/CMakeLists.txt b/vigranumpy/src/CMakeLists.txt
index c8b5232..0be2f1e 100644
--- a/vigranumpy/src/CMakeLists.txt
+++ b/vigranumpy/src/CMakeLists.txt
@@ -1,3 +1,6 @@
+VIGRA_CONFIGURE_THREADING()
+SET(VIGRANUMPY_THREAD_LIBRARIES ${THREADING_LIBRARIES})
+
 ADD_SUBDIRECTORY(core)
 
 IF(FFTW3F_FOUND)
diff --git a/vigranumpy/src/core/CMakeLists.txt b/vigranumpy/src/core/CMakeLists.txt
index 9def1c3..e003402 100644
--- a/vigranumpy/src/core/CMakeLists.txt
+++ b/vigranumpy/src/core/CMakeLists.txt
@@ -2,42 +2,26 @@ IF(MSVC)
     SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /bigobj")
 ENDIF()
 
-include(VigraConfigureThreading)
-VIGRA_CONFIGURE_THREADING(REQUIRED) # We require a working threading implementation.
-
-if(WITH_BOOST_THREAD)
-    INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIR})
-    SET(VIGRANUMPY_THREAD_LIBRARIES ${Boost_THREAD_LIBRARY} ${Boost_SYSTEM_LIBRARY} ${Boost_DATE_TIME_LIBRARY} ${Boost_CHRONO_LIBRARY})
-elseif(NOT MSVC)
-    IF (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.7.0")
-        SET(CMAKE_CXX_FLAGS "-pthread -std=c++0x ${CMAKE_CXX_FLAGS}")
-    elseif(CMAKE_COMPILER_IS_GNUCXX)
-        SET(CMAKE_CXX_FLAGS "-pthread -std=c++11 ${CMAKE_CXX_FLAGS}")
-    else()
-        SET(CMAKE_CXX_FLAGS "-std=c++11 ${CMAKE_CXX_FLAGS}")
-    endif()
-endif()
-
-# note special treatment of target vigranumpy_core: 
+# note special treatment of target vigranumpy_core:
 # module name is automatically changed into vigranumpycore
-VIGRA_ADD_NUMPY_MODULE(core 
-  SOURCES 
+VIGRA_ADD_NUMPY_MODULE(core
+  SOURCES
     vigranumpycore.cxx
     converters.cxx
     axistags.cxx
     multi_array_chunked.cxx
-  LIBRARIES   
+  LIBRARIES
     ${VIGRANUMPY_IMPEX_LIBRARIES} ${VIGRANUMPY_THREAD_LIBRARIES}
   VIGRANUMPY)
 
-VIGRA_ADD_NUMPY_MODULE(impex 
+VIGRA_ADD_NUMPY_MODULE(impex
   SOURCES
     impex.cxx
-  LIBRARIES   
+  LIBRARIES
     ${VIGRANUMPY_IMPEX_LIBRARIES}
-  VIGRANUMPY)   
-     
-VIGRA_ADD_NUMPY_MODULE(sampling 
+  VIGRANUMPY)
+
+VIGRA_ADD_NUMPY_MODULE(sampling
   SOURCES
     sampling.cxx
   VIGRANUMPY)
@@ -49,10 +33,10 @@ VIGRA_ADD_NUMPY_MODULE(filters SOURCES
     tensors.cxx
     morphology.cxx
     non_local_mean.cxx
-  LIBRARIES 
+  LIBRARIES
     ${VIGRANUMPY_THREAD_LIBRARIES}
   VIGRANUMPY)
-  
+
 VIGRA_ADD_NUMPY_MODULE(analysis SOURCES
     segmentation.cxx
     edgedetection.cxx
@@ -60,28 +44,30 @@ VIGRA_ADD_NUMPY_MODULE(analysis SOURCES
     accumulator.cxx
     accumulator-region-singleband.cxx
     accumulator-region-multiband.cxx
+  LIBRARIES
+    ${VIGRANUMPY_THREAD_LIBRARIES}
   VIGRANUMPY)
-   
+
 VIGRA_ADD_NUMPY_MODULE(learning SOURCES
     random_forest_old.cxx
     random_forest.cxx
     learning.cxx
-  LIBRARIES   
+  LIBRARIES
     ${VIGRANUMPY_IMPEX_LIBRARIES}
   VIGRANUMPY)
-   
+
 VIGRA_ADD_NUMPY_MODULE(colors SOURCES
     colors.cxx
   VIGRANUMPY)
-   
+
 VIGRA_ADD_NUMPY_MODULE(noise SOURCES
     noise.cxx
   VIGRANUMPY)
-   
+
 VIGRA_ADD_NUMPY_MODULE(geometry SOURCES
     geometry.cxx
   VIGRANUMPY)
-   
+
 VIGRA_ADD_NUMPY_MODULE(optimization SOURCES
     optimization.cxx
   VIGRANUMPY)
@@ -97,7 +83,7 @@ VIGRA_ADD_NUMPY_MODULE(graphs SOURCES
     grid_graph_implicit_edge_maps.cxx
     #eccentricity.cxx
   LIBRARIES
-    ${VIGRANUMPY_IMPEX_LIBRARIES}  ${VIGRANUMPY_THREAD_LIBRARIES}
+    ${VIGRANUMPY_IMPEX_LIBRARIES} ${VIGRANUMPY_THREAD_LIBRARIES}
   VIGRANUMPY)
 
 VIGRA_ADD_NUMPY_MODULE(histogram SOURCES
@@ -111,5 +97,7 @@ VIGRA_ADD_NUMPY_MODULE(utilities SOURCES
 
 VIGRA_ADD_NUMPY_MODULE(blockwise SOURCES
     blockwise.cxx
+  LIBRARIES
+    ${VIGRANUMPY_THREAD_LIBRARIES}
   VIGRANUMPY)
 
diff --git a/vigranumpy/src/core/axistags.cxx b/vigranumpy/src/core/axistags.cxx
index a07f789..9327e58 100644
--- a/vigranumpy/src/core/axistags.cxx
+++ b/vigranumpy/src/core/axistags.cxx
@@ -36,6 +36,7 @@
 #define PY_ARRAY_UNIQUE_SYMBOL vigranumpycore_PyArray_API
 #define NO_IMPORT_ARRAY
 
+#include <typeinfo>
 #include <vigra/numpy_array.hxx>
 #include <vigra/axistags.hxx>
 #include <boost/python.hpp>
@@ -81,7 +82,11 @@ generic__deepcopy__(python::object copyable, python::dict memo)
 {
     python::object copyMod = python::import("copy");
     python::object deepcopy = copyMod.attr("deepcopy");
+#if PY_MAJOR_VERSION < 3
     python::object builtin = python::import("__builtin__");
+#else
+    python::object builtin = python::import("builtins");
+#endif
     python::object globals = builtin.attr("__dict__");
     
     Copyable* newCopyable(new Copyable(python::extract<const Copyable &>(copyable)()));
@@ -198,7 +203,11 @@ AxisTags_create(python::object i1, python::object i2,
     {
         res = VIGRA_UNIQUE_PTR<AxisTags>(new AxisTags(tags()));
     }
+#if PY_MAJOR_VERSION < 3
     else if(PyString_Check(i1.ptr()))
+#else
+    else if (PyUnicode_Check(i1.ptr()))
+#endif
     {
         res = VIGRA_UNIQUE_PTR<AxisTags>(new AxisTags(python::extract<std::string>(i1)()));
     }
@@ -216,7 +225,11 @@ AxisTags_create(python::object i1, python::object i2,
             res->push_back(info());
         }
     }
+#if PY_MAJOR_VERSION < 3
     else if(PyInt_Check(i1.ptr()))
+#else
+    else if (PyLong_Check(i1.ptr()))
+#endif
     {
         int size = python::extract<int>(i1)();
         for(int k=0; k<size; ++k)
@@ -438,7 +451,11 @@ AxisTags_transform(AxisTags const & oldTags, python::object index, int lnew)
     while(knew < lnew)
     {
         python::object item = index[kindex];
+#if PY_MAJOR_VERSION < 3
         if(PyInt_Check(item.ptr()))
+#else
+        if(PyLong_Check(item.ptr()))
+#endif
         {
             ++kold;
             ++kindex;
@@ -593,12 +610,12 @@ void defineAxisTags()
              "    >>> a = vigra.RGBImage((200,100))\n"
              "    >>> a.axistags['x'].resolution = 1.0\n"
              "    >>> a.axistags['y'].resolution = 1.2\n"
-             "    >>> print a.axistags\n"
+             "    >>> print(a.axistags)\n"
              "    AxisInfo: 'x' (type: Space, resolution=1)\n"
              "    AxisInfo: 'y' (type: Space, resolution=1.2)\n"
              "    AxisInfo: 'c' (type: Channels) RGB\n"
              "    >>> b = a[::2, ::4, :]\n"
-             "    >>> print b.axistags\n"
+             "    >>> print(b.axistags)\n"
              "    AxisInfo: 'x' (type: Space, resolution=2)\n"
              "    AxisInfo: 'y' (type: Space, resolution=4.8)\n"
              "    AxisInfo: 'c' (type: Channels) RGB\n\n")
@@ -669,12 +686,12 @@ void defineAxisTags()
             "The entries of an axistags object (i.e. the individual axisinfo objects)\n"
             "can be accessed via the index operator, where the argument can either be\n"
             "the axis index or the axis key::\n\n"
-            "    >>> print array.axistags[0]\n"
+            "    >>> print(array.axistags[0])\n"
             "    AxisInfo: 'x' (type: Space, resolution=1.2)\n"
-            "    >>> print array.axistags['x']\n"
+            "    >>> print(array.axistags['x'])\n"
             "    AxisInfo: 'x' (type: Space, resolution=1.2)\n"
             "    >>> array.axistags['x'].resolution = 2.0\n"
-            "    >>> print array.axistags['x']\n"
+            "    >>> print(array.axistags['x'])\n"
             "    AxisInfo: 'x' (type: Space, resolution=2)\n\n",
             no_init)
         .def("__init__", make_constructor(&AxisTags_create,
diff --git a/vigranumpy/src/core/colors.cxx b/vigranumpy/src/core/colors.cxx
index 9bfc3d1..1bea4be 100644
--- a/vigranumpy/src/core/colors.cxx
+++ b/vigranumpy/src/core/colors.cxx
@@ -153,7 +153,7 @@ pythonBrightnessTransform(NumpyArray<N, Multiband<PixelType> > image,
 
     double min = 0.0, max = 0.0;
     bool computeRange = !parseRange(range, &min, &max, "brightness(): Invalid range argument.");
-    
+
     {
         PyAllowThreads _pythread;
         if(computeRange)
@@ -215,7 +215,7 @@ pythonGammaTransform(NumpyArray<N, Multiband<PixelType> > image,
             "gamma_correction(): Output images has wrong dimensions");
 
     double min = 0.0, max = 0.0;
-    bool computeRange = !parseRange(range, &min, &max, "gamma_correction(): Invalid range argument.");    
+    bool computeRange = !parseRange(range, &min, &max, "gamma_correction(): Invalid range argument.");
     {
         PyAllowThreads _pythread;
         if(computeRange)
@@ -247,14 +247,14 @@ pythonLinearRangeMapping(NumpyArray<N, Multiband<SrcPixelType> > image,
 
     double oldMin = 0.0, oldMax = 0.0,
            newMin = 0.0, newMax = 0.0;
-    bool computeRange = !parseRange(oldRange, &oldMin, &oldMax, 
+    bool computeRange = !parseRange(oldRange, &oldMin, &oldMax,
                                     "linearRangeMapping(): Argument 'oldRange' is invalid.");
     if(!parseRange(newRange, &newMin, &newMax, "linearRangeMapping(): Argument 'newRange' is invalid."))
     {
         newMin = 0.0;
         newMax = 255.0;
     }
-    
+
     {
         PyAllowThreads _pythread;
         if(computeRange)
@@ -264,14 +264,14 @@ pythonLinearRangeMapping(NumpyArray<N, Multiband<SrcPixelType> > image,
             oldMin = minmax.min;
             oldMax = minmax.max;
         }
-        
+
         vigra_precondition(oldMin < oldMax && newMin < newMax,
               "linearRangeMapping(): Range upper bound must be greater than lower bound.");
 
         transformMultiArray(srcMultiArrayRange(image), destMultiArray(res),
                             linearRangeMapping(oldMin, oldMax, newMin, newMax));
     }
-    
+
     return res;
 }
 
@@ -310,22 +310,22 @@ def("transform_" #name, registerConverters(&pythonColorTransform<float, 2, name#
     "For details see " #name "Functor_ in the C++ documentation.\n")
 
 template<class T>
-NumpyAnyArray pythonApplyColortable(const NumpyArray<2, Singleband<T> >& valueImage, 
+NumpyAnyArray pythonApplyColortable(const NumpyArray<2, Singleband<T> >& valueImage,
                                     const NumpyArray<2, UInt8>& colortable,
                                     NumpyArray<3, Multiband<npy_uint8> > res =  NumpyArray<3, Multiband<npy_uint8> >())
 {
     vigra_precondition(!colortable.axistags(),
                        "applyColortable(): colortable must not have axistags\n"
                        "(use 'array.view(numpy.ndarray)' to remove them).");
-    
+
     // Singleband: there is only a singleton channel axis (which is removed when converted from python numpy array to C++
     // Multiband: channel axis is allowed to be singleband, but does not have to be,
     //            will be last when converted Python -> C++ and channel axis is counted in the dimension ('3')
     typedef NumpyArray<2, Singleband<T> > InputType;
-    
+
     res.reshapeIfEmpty(valueImage.taggedShape().setChannelCount(colortable.shape(1)),
                        "pythonApplyColortable: shape of res is wrong");
-    
+
     const unsigned int N = colortable.shape(0);
 
     bool startsWithTransparent = (colortable(0,3) == 0);
@@ -333,10 +333,10 @@ NumpyAnyArray pythonApplyColortable(const NumpyArray<2, Singleband<T> >& valueIm
     for(MultiArrayIndex c=0; c<colortable.shape(1); ++c)
     {
         MultiArrayView<2, UInt8>::iterator channelIter = res.bind<2>(c).begin();
-        
+
         //make an unstrided copy of the current column of the colortable
         ArrayVector<UInt8> ctable(colortable.bind<1>(c).begin(), colortable.bind<1>(c).end());
-        
+
         for(typename InputType::const_iterator v = valueImage.begin(); v != valueImage.end(); ++v, ++channelIter)
         {
             if (*v == 0)
@@ -359,52 +359,52 @@ NumpyAnyArray pythonApplyColortable(const NumpyArray<2, Singleband<T> >& valueIm
             }
         }
     }
-    
+
     return res;
 }
 VIGRA_PYTHON_MULTITYPE_FUNCTOR(pyApplyColortable, pythonApplyColortable)
 
 template<class T>
 void pythonGray2QImage_ARGB32Premultiplied(
-    const NumpyArray<2, Singleband<T> >& image, 
+    const NumpyArray<2, Singleband<T> >& image,
     NumpyArray<3, Multiband<npy_uint8> > qimageView,
     NumpyArray<1, T> normalize = boost::python::object()
-) 
+)
 {
     vigra_precondition(image.isUnstrided() || image.transpose().isUnstrided(),
         "gray2qimage_ARGB32Premultiplied(): Can only handle arrays with contiguous memory.");
     typedef typename NumericTraits<T>::RealPromote TmpType;
-    
-    T* data = image.data(); 
+
+    T* data = image.data();
     const T* dataEnd = data+image.size();
     UInt8* imgData = qimageView.data();
     UInt8 pixel = 0;
     TmpType pixelF;
-    
-    TmpType normalizeLow, normalizeHigh; 
+
+    TmpType normalizeLow, normalizeHigh;
     if(normalize.pyObject() != Py_None)
     {
         vigra_precondition(normalize.shape(0) == 2,
             "gray2qimage_ARGB32Premultiplied(): normalize.shape[0] == 2 required.");
-            
+
         //normalize = None
         normalizeLow = normalize[0];
         normalizeHigh = normalize[1];
-        
+
         vigra_precondition(normalizeHigh > normalizeLow,
             "gray2qimage_ARGB32Premultiplied(): normalize[0] < normalize[1] is required.");
-            
+
         const TmpType f = TmpType(255) / static_cast<TmpType>(normalizeHigh-normalizeLow);
-        
-        while(data < dataEnd) 
+
+        while(data < dataEnd)
         {
             pixelF = detail::RequiresExplicitCast<TmpType>::cast(*data);
-            
-            if(pixelF < normalizeLow) 
+
+            if(pixelF < normalizeLow)
             {
                 pixel = 0;
             }
-            else if(pixelF > normalizeHigh) 
+            else if(pixelF > normalizeHigh)
             {
                 pixel = 255;
             }
@@ -419,9 +419,9 @@ void pythonGray2QImage_ARGB32Premultiplied(
             ++data;
         }
     }
-    else 
+    else
     {
-        while(data < dataEnd) 
+        while(data < dataEnd)
         {
             pixel = detail::RequiresExplicitCast<UInt8>::cast(*data);
             *imgData = pixel; ++imgData; //B
@@ -436,37 +436,37 @@ VIGRA_PYTHON_MULTITYPE_FUNCTOR(pyGray2QImage_ARGB32Premultiplied, pythonGray2QIm
 
 template<class T>
 void pythonAlphaModulated2QImage_ARGB32Premultiplied(
-    const NumpyArray<2, Singleband<T> >& image, 
+    const NumpyArray<2, Singleband<T> >& image,
     NumpyArray<3, Multiband<npy_uint8> > qimageView,
     NumpyArray<1, float> tintColor,
     NumpyArray<1, T> normalize
-) 
+)
 {
     vigra_precondition(image.isUnstrided() || image.transpose().isUnstrided(),
         "alphamodulated2qimage_ARGB32Premultiplied(): Can only handle arrays with contiguous memory.");
     typedef typename NumericTraits<T>::RealPromote TmpType;
-    
+
     vigra_precondition(normalize.shape(0) == 2,
         "alphamodulated2qimage_ARGB32Premultiplied(): normalize.shape[0] == 2 required.");
     vigra_precondition(tintColor.shape(0) == 3,
         "alphamodulated2qimage_ARGB32Premultiplied(): tintColor.shape[0] == 3 required.");
-            
+
     const TmpType l = normalize[0];
     const TmpType h = normalize[1];
-    
+
     vigra_precondition(h > l,
         "alphamodulated2qimage_ARGB32Premultiplied(): normalize[0] < normalize[1] is required.");
-    
+
     const TmpType r = tintColor[0];
     const TmpType g = tintColor[1];
     const TmpType b = tintColor[2];
-    
+
     T* data = image.data();
     const T* dataEnd = image.data()+image.size();
     unsigned char* imgData = qimageView.data();
     TmpType pixelF;
     const TmpType f = TmpType(255) / static_cast<TmpType>(h-l);
-    while(data < dataEnd) 
+    while(data < dataEnd)
     {
         pixelF = detail::RequiresExplicitCast<TmpType>::cast(*data);
         if(pixelF < l)
@@ -496,33 +496,36 @@ void defineColors()
 
     docstring_options doc_options(true, true, false);
 
-    multidef("applyColortable", pyApplyColortable<vigra::Int8, vigra::UInt8, vigra::Int16, vigra::UInt16, vigra::Int32, vigra::UInt32>(),
-        (arg("valueImage"), 
-        arg("colortable"),
-        arg("out")=python::object()), 
+    multidef("applyColortable",
+        pyApplyColortable<vigra::Int8, vigra::UInt8, vigra::Int16, vigra::UInt16, vigra::Int32, vigra::UInt32>().installFallback(),
+        (arg("valueImage"),
+         arg("colortable"),
+         arg("out")=python::object()),
         "Applies a colortable to the given 2D valueImage.\n\n"
         "Colortable must have 4 columns, each row represents a color (for example, RGBA). \n"
         "Values in valueImage are first taken modulo the length of the colortable. \n"
         "In the special case where the first color in the table is transparent, that value "
         "is NOT repeated for values outside the colortable length.\n\n"
         "Returns: uint8 image with 4 channels\n");
-    
-    multidef("gray2qimage_ARGB32Premultiplied", pyGray2QImage_ARGB32Premultiplied<vigra::Int8, vigra::UInt8, vigra::Int16, vigra::UInt16, vigra::Int32, vigra::UInt32, float, double>(),
-        (arg("image"), 
-        arg("qimage"),
-        arg("normalize")=python::object()), 
+
+    multidef("gray2qimage_ARGB32Premultiplied",
+        pyGray2QImage_ARGB32Premultiplied<vigra::Int8, vigra::UInt8, vigra::Int16, vigra::UInt16, vigra::Int32, vigra::UInt32, float, double>().installFallback(),
+        (arg("image"),
+         arg("qimage"),
+         arg("normalize")=python::object()),
         "Convert the image (single-band) into a QImage of format Format_ARGB32_Premultiplied.\n"
         "\n"
         "import qimage2ndarray\n"
         "qimg = QImage(a.shape[0], a.shape[1], QImage.Format_ARGB32_Premultiplied)\n"
         "normalize = numpy.asarray([10, 217], dtype=image.dtype)\n"
         "vigra.colors.gray2qimage_ARGB32Premultiplied(a, qimage2ndarray.byte_view(qimg), normalize)\n");
-    
-    multidef("alphamodulated2qimage_ARGB32Premultiplied", pyAlphaModulated2QImage_ARGB32Premultiplied<vigra::Int8, vigra::UInt8, vigra::Int16, vigra::UInt16, vigra::Int32, vigra::UInt32, float, double>(),
-        (arg("image"), 
-        arg("qimage"),
-        arg("tintColor"),
-        arg("normalize")), 
+
+    multidef("alphamodulated2qimage_ARGB32Premultiplied",
+        pyAlphaModulated2QImage_ARGB32Premultiplied<vigra::Int8, vigra::UInt8, vigra::Int16, vigra::UInt16, vigra::Int32, vigra::UInt32, float, double>().installFallback(),
+        (arg("image"),
+         arg("qimage"),
+         arg("tintColor"),
+         arg("normalize")),
         "Convert the image (single-band) into a QImage of format Format_ARGB32_Premultiplied.\n"
         "\n"
         "import qimage2ndarray\n"
@@ -530,7 +533,7 @@ void defineColors()
         "normalize = numpy.asarray([10, 217], dtype=image.dtype)\n"
         "tintColor = numpy.asarray([1.0, 0.0, 0.0], dtype=numpy.float32) #RGB\n"
         "vigra.colors.alphamodulated2qimage_ARGB32Premultiplied(a, qimage2ndarray.byte_view(qimg), tintColor, normalize)\n");
-    
+
     def("brightness",
          registerConverters(&pythonBrightnessTransform<float, 3>),
          (arg("image"), arg("factor"), arg("range")=make_tuple(0.0, 255.0), arg("out")=object()),
@@ -589,8 +592,12 @@ void defineColors()
          (arg("volume"), arg("gamma"), arg("range")=make_tuple(0.0, 255.0), arg("out")=object()),
          "Likewise for a 3D scalar or multiband volume.\n");
 
-    multidef("linearRangeMapping", pyLinearRangeMapping2D<vigra::Int8, vigra::UInt8, vigra::Int16, vigra::UInt16, vigra::Int32, vigra::UInt32, float, double>(),
-         (arg("image"), arg("oldRange")="auto", arg("newRange")=make_tuple(0.0, 255.0), arg("out")=object()),
+    multidef("linearRangeMapping",
+        pyLinearRangeMapping2D<vigra::Int8, vigra::UInt8, vigra::Int16, vigra::UInt16, vigra::Int32, vigra::UInt32, float, double>().installFallback(),
+        (arg("image"),
+         arg("oldRange")="auto",
+         arg("newRange")=make_tuple(0.0, 255.0),
+         arg("out")=object()),
         "Convert the intensity range of a 2D scalar or multiband image. The function applies a linear transformation "
         "to the intensities such that the value oldRange[0] is mapped onto newRange[0], "
         "and oldRange[1] is mapped onto newRange[1]. That is, the algorithm applies the formula::\n"
diff --git a/vigranumpy/src/core/converters.cxx b/vigranumpy/src/core/converters.cxx
index 2ae2444..a2f9988 100644
--- a/vigranumpy/src/core/converters.cxx
+++ b/vigranumpy/src/core/converters.cxx
@@ -378,8 +378,9 @@ constructArrayFromAxistags(python::object type, ArrayVector<npy_intp> const & sh
     }
     
     TaggedShape tagged_shape(norm_shape, pyaxistags);
-    // FIXME: check that type is an array class?
-    return constructArray(tagged_shape, typeCode, init, python_ptr(type.ptr()));
+
+    // FIXME: check that type is an array class?
+    return constructArray(tagged_shape, typeCode, init, python_ptr(type.ptr()));
 }
 
 template <class T>
diff --git a/vigranumpy/src/core/convolution.cxx b/vigranumpy/src/core/convolution.cxx
index 1ebb185..2819bc8 100644
--- a/vigranumpy/src/core/convolution.cxx
+++ b/vigranumpy/src/core/convolution.cxx
@@ -56,10 +56,10 @@ namespace vigra
 
 template < class VoxelType, unsigned int ndim >
 NumpyAnyArray
-pythonConvolveOneDimensionND(NumpyArray<ndim, Multiband<VoxelType> > array,
-                             unsigned int dim,
-                             Kernel const & kernel,
-                             NumpyArray<ndim, Multiband<VoxelType> > res=python::object())
+pythonConvolveOneDimension(NumpyArray<ndim, Multiband<VoxelType> > array,
+                           unsigned int dim,
+                           Kernel const & kernel,
+                           NumpyArray<ndim, Multiband<VoxelType> > res=python::object())
 {
     vigra_precondition(dim < ndim-1,
            "convolveOneDimension(): dim out of range.");
@@ -80,11 +80,13 @@ pythonConvolveOneDimensionND(NumpyArray<ndim, Multiband<VoxelType> > array,
     return res;
 }
 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyConvolveOneDimension, pythonConvolveOneDimension)
+
 template < class VoxelType, unsigned int ndim >
 NumpyAnyArray
-pythonSeparableConvolveND_1Kernel(NumpyArray<ndim, Multiband<VoxelType> > array,
-                                  Kernel const & kernel,
-                                  NumpyArray<ndim, Multiband<VoxelType> > res=python::object())
+pythonSeparableConvolve_1Kernel(NumpyArray<ndim, Multiband<VoxelType> > array,
+                                Kernel const & kernel,
+                                NumpyArray<ndim, Multiband<VoxelType> > res=python::object())
 {
     res.reshapeIfEmpty(array.taggedShape(),
             "convolve(): Output array has wrong shape.");
@@ -101,15 +103,17 @@ pythonSeparableConvolveND_1Kernel(NumpyArray<ndim, Multiband<VoxelType> > array,
     return res;
 }
 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pySeparableConvolve_1Kernel, pythonSeparableConvolve_1Kernel)
+
 template < class VoxelType, unsigned int ndim >
 NumpyAnyArray
-pythonSeparableConvolveND_NKernels(NumpyArray<ndim, Multiband<VoxelType> > array,
-                                   python::tuple pykernels,
-                                   NumpyArray<ndim, Multiband<VoxelType> > res=python::object())
+pythonSeparableConvolve_NKernels(NumpyArray<ndim, Multiband<VoxelType> > array,
+                                 python::tuple pykernels,
+                                 NumpyArray<ndim, Multiband<VoxelType> > res=python::object())
 {
     if(python::len(pykernels) == 1)
     {
-        return pythonSeparableConvolveND_1Kernel(array,
+        return pythonSeparableConvolve_1Kernel(array,
                     python::extract<Kernel1D<KernelValueType> const &>(pykernels[0]), res);
     }
 
@@ -138,6 +142,8 @@ pythonSeparableConvolveND_NKernels(NumpyArray<ndim, Multiband<VoxelType> > array
     return res;
 }
 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pySeparableConvolve_NKernels, pythonSeparableConvolve_NKernels)
+
 template <class PixelType>
 NumpyAnyArray
 pythonConvolveImage(NumpyArray<3, Multiband<PixelType> > image,
@@ -160,6 +166,8 @@ pythonConvolveImage(NumpyArray<3, Multiband<PixelType> > image,
     return res;
 }
 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR(pyConvolveImage, pythonConvolveImage)
+
 template <class PixelType>
 NumpyAnyArray
 pythonNormalizedConvolveImage(NumpyArray<3, Multiband<PixelType> > image,
@@ -235,6 +243,8 @@ pythonGaussianSmoothing(NumpyArray<ndim, Multiband<VoxelType> > array,
     return res;
 }
 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyGaussianSmoothing, pythonGaussianSmoothing)
+
 template < class VoxelType>
 NumpyAnyArray
 pythonRecursiveGaussian(NumpyArray<3, Multiband<VoxelType> > image,
@@ -385,6 +395,8 @@ pythonLaplacianOfGaussian(NumpyArray<N, Multiband<PixelType> > array,
     return res;
 }
 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyLaplacianOfGaussian, pythonLaplacianOfGaussian)
+
 template <class PixelType, unsigned int N>
 NumpyAnyArray
 pythonGaussianDivergence(NumpyArray<N, TinyVector<PixelType, N> > array,
@@ -426,6 +438,8 @@ pythonGaussianDivergence(NumpyArray<N, TinyVector<PixelType, N> > array,
     return res;
 }
 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyGaussianDivergence, pythonGaussianDivergence)
+
 template <class PixelType>
 NumpyAnyArray
 pythonRecursiveFilter1(NumpyArray<3, Multiband<PixelType> > image,
@@ -547,65 +561,55 @@ void defineConvolutionFunctions()
 
     docstring_options doc_options(true, true, false);
 
-    def("convolveOneDimension",
-        registerConverters(&pythonConvolveOneDimensionND<float,3>),
-        (arg("image"), arg("dim"), arg("kernel"), arg("out")=python::object()),
-        "Convolution along a single dimension of a 2D scalar or multiband image. "
+    multidef("convolveOneDimension",
+        pyConvolveOneDimension<2, 5, float, double>().installFallback(),
+        (arg("array"),
+         arg("dim"),
+         arg("kernel"),
+         arg("out")=python::object()),
+        "\n"
+        "Convolve a single dimension of a scalar or multiband array with up to five dimensions.\n"
+        "'dim' denotes the dimension to be convolved.\n"
         "'kernel' must be an instance of Kernel1D.\n"
         "\n"
         "For details see convolveMultiArrayOneDimension_ in the vigra C++ documentation.\n");
 
-    def("convolveOneDimension",
-        registerConverters(&pythonConvolveOneDimensionND<float,4>),
-        (arg("volume"), arg("dim"), arg("kernel"), arg("out")=python::object()),
-        "Likewise for a 3D scalar or multiband volume.\n");
-
-    def("convolveOneDimension",
-        registerConverters(&pythonConvolveOneDimensionND<float,5>),
-        (arg("volume"), arg("dim"), arg("kernel"), arg("out")=python::object()),
-        "Likewise for a 4D scalar or multiband volume.\n");
-
-    def("convolve", registerConverters(&pythonSeparableConvolveND_1Kernel<float,3>),
-        (arg("image"), arg("kernel"), arg("out")=python::object()),
-        "Convolve an image with the given 'kernel' (or kernels).\n"
+    multidef("convolve",
+        pySeparableConvolve_1Kernel<2, 5, float, double>().installFallback().noPythonSignature(),
+        (arg("array"),
+         arg("kernel"),
+         arg("out")=python::object()),
+         "");
+
+    multidef("convolve",
+        pySeparableConvolve_NKernels<2, 5, float, double>().noPythonSignature(),
+        (arg("array"),
+         arg("kernels"),
+         arg("out")=python::object()),
+         "");
+
+    multidef("convolve",
+        pyConvolveImage<float, double>().noPythonSignature(),
+        (arg("image"),
+         arg("kernel"),
+         arg("out")=python::object()),
+        "convolve( (ndarray)array, kernel [, (ndarray)out=None]) -> ndarray\n"
+        "\n"
+        "Convolve an array (up to 5D) with the given 'kernel' (or kernels).\n"
         "If the input has multiple channels, the filter is applied to each channel\n"
         "independently. The function can be used in 3 different ways:\n"
         "\n"
         "* When 'kernel' is a single object of type :class:`Kernel1D`, this kernel\n"
         "  is applied along all spatial dimensions of the data (separable filtering).\n"
-        "* When 'kernel' is a tuple of :class:`Kernel1D` objects, one different kernel\n"
+        "* When 'kernel' is a tuple of :class:`Kernel1D` objects, a different kernel\n"
         "  is used for each spatial dimension (separable filtering). The number of\n"
-        "  kernels must equal the number of dimensions).\n"
+        "  kernels must equal the number of dimensions.\n"
         "* When 'kernel' is an instance of :class:`Kernel2D`, a 2-dimensional convolution\n"
-        "  is performed (non-separable filtering). This is only applicable to 2D images.\n"
+        "  is performed (non-separable filtering). This is only applicable to 2D arrays.\n"
         "\n"
         "For details see separableConvolveMultiArray_ and "
         "convolveImage_ in the vigra C++ documentation.\n");
 
-    def("convolve", registerConverters(&pythonSeparableConvolveND_1Kernel<float,4>),
-        (arg("volume"), arg("kernel"), arg("out")=python::object()),
-        "Convolve a volume with the same 1D kernel along all dimensions.\n");
-
-    def("convolve", registerConverters(&pythonSeparableConvolveND_1Kernel<float,5>),
-        (arg("volume"), arg("kernel"), arg("out")=python::object()),
-        "Convolve a volume with the same 1D kernel along all dimensions.\n");
-
-    def("convolve", registerConverters(&pythonSeparableConvolveND_NKernels<float,3>),
-        (arg("image"), arg("kernels"), arg("out")=python::object()),
-        "Convolve an image with a different 1D kernel along each dimensions.\n");
-
-    def("convolve", registerConverters(&pythonSeparableConvolveND_NKernels<float,4>),
-        (arg("volume"), arg("kernels"), arg("out")=python::object()),
-        "Convolve a volume with a different 1D kernel along each dimensions.\n");
-
-    def("convolve", registerConverters(&pythonSeparableConvolveND_NKernels<float,5>),
-        (arg("volume"), arg("kernels"), arg("out")=python::object()),
-        "Convolve a volume with a different 1D kernel along each dimensions.\n");
-
-    def("convolve", registerConverters(&pythonConvolveImage<float>),
-        (arg("image"), arg("kernel"), arg("out") = python::object()),
-        "Convolve an image with a 2D kernel.\n");
-
     def("normalizedConvolveImage", registerConverters(&pythonNormalizedConvolveImage<float>),
         (arg("image"), arg("mask"), arg("kernel"), arg("out") = python::object()),
         "Perform normalized convolution of an image. If the image has multiple channels, "
@@ -615,54 +619,44 @@ void defineConvolutionFunctions()
         "used for all channels input channels) or as many channels as the input image.\n\n"
         "For details, see normalizedConvolveImage_ in the C++ documentation.\n");
 
-    def("gaussianSmoothing",
-        registerConverters(&pythonGaussianSmoothing<float,2>),
-        (arg("array"), arg("sigma"), arg("out")=python::object(),
-         arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
-        "Smooth 1D sequence with Gaussian.\n");
-
-    def("gaussianSmoothing",
-        registerConverters(&pythonGaussianSmoothing<float,3>),
-        (arg("array"), arg("sigma"), arg("out")=python::object(),
-         arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
-        "Perform Gaussian smoothing of a 2D or 3D scalar or multiband array.\n\n"
-        "Each channel of the array is smoothed independently. "
-        "If 'sigma' is a single value, an isotropic Gaussian filter at this scale is "
-        "applied (i.e. each dimension is smoothed in the same way). "
-        "If 'sigma' is a tuple or list of values, the amount of smoothing will be different "
-        "for each spatial dimension.\n"
-        "The optional 'sigma_d' (single, tuple, or list) (single, tuple, or list) denotes the resolution standard deviation "
-        "per axis, the optional 'step_size' (single, tuple, or list) the distance between two adjacent "
-        "pixels for each dimension. "
-        "The length of the tuples or lists must be equal to the "
-        "number of spatial dimensions.\n\n"
-        "'window_size' specifies the ratio between the effective filter scale and "
-        "the size of the filter window. Use a value around 2.0 to speed-up "
-        "the computation by increasing the error resulting from cutting off the Gaussian. "
+    multidef("gaussianSmoothing",
+        pyGaussianSmoothing<2, 5, npy_uint8, float, double>().installFallback(),
+        (arg("array"),
+         arg("sigma"),
+         arg("out")=python::object(),
+         arg("sigma_d")=0.0,
+         arg("step_size")=1.0,
+         arg("window_size")=0.0,
+         arg("roi")=python::object()),
+        "\n"
+        "Perform Gaussian smoothing of an array with up to five dimensions.\n\n"
+        "If the array has multiple channels, each channel is smoothed independently.\n"
+        "\n"
+        "If 'sigma' is a single value, an isotropic filter at this scale is\n"
+        "applied (i.e., each dimension is filtered in the same way).\n"
+        "If 'sigma' is a tuple or list of values, the amount of smoothing\n"
+        "will be different for each spatial dimension.\n"
+        "The optional 'sigma_d' (single, tuple, or list) denotes the PSF\n"
+        "standard deviation per axis, the optional 'step_size' (single, tuple,\n"
+        "or list) the distance between two adjacent pixels for each dimension.\n"
+        "The length of the tuples or lists must be equal to the number of spatial\n"
+        "dimensions.\n"
+        "\n"
+        "'window_size' specifies the ratio between the effective filter scale and\n"
+        "the size of the filter window. Use a value around 2.0 to speed-up\n"
+        "the computation by increasing the error resulting from cutting off the Gaussian.\n"
         "For the default 0.0, the window size is automatically determined.\n"
         "\n"
-        "If 'roi' is not None, it must specify the desired region-of-interest as "
-        "a pair '(first_point, beyond_last_point)' (e.g. 'roi=((10,20), (200,250))'). "
-        "As usual, the second point is the first point outside the ROI, and the ROI "
-        "must not be outside the input array dimensions. "
-        "The coordinates refer only to non-channel axes - if your array has an explicit "
-        "channel axis, the ROI dimension must be one less than the array dimension. "
-        "If you pass in an explicit 'out' array and specify an ROI, the 'out' array "
+        "If 'roi' is not None, it must specify the desired region-of-interest as\n"
+        "a pair '(first_point, beyond_last_point)' (e.g. 'roi=((10,20), (200,250))').\n"
+        "As usual, the second point is the first point outside the ROI, and the ROI\n"
+        "must not be outside the input array dimensions.\n"
+        "The coordinates refer only to non-channel axes - if your array has an explicit\n"
+        "channel axis, the ROI dimension must be one less than the array dimension.\n"
+        "If you pass in an explicit 'out' array and specify an ROI, the 'out' array\n"
         "must have the shape of the ROI.\n\n"
         "For details see gaussianSmoothing_ and ConvolutionOptions_ in the vigra C++ documentation.\n");
 
-    def("gaussianSmoothing",
-        registerConverters(&pythonGaussianSmoothing<float,4>),
-        (arg("array"), arg("sigma"), arg("out")=python::object(),
-         arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
-        "Smooth volume with Gaussian.\n");
-
-    def("gaussianSmoothing",
-        registerConverters(&pythonGaussianSmoothing<float,5>),
-        (arg("array"), arg("sigma"), arg("out")=python::object(),
-         arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
-        "Smooth 5D array with Gaussian.\n");
-
     def("recursiveGaussianSmoothing2D",
         registerConverters(&pythonRecursiveGaussian<float>),
         (arg("image"), arg("sigma"), arg("out")=python::object()),
@@ -696,51 +690,58 @@ void defineConvolutionFunctions()
           "\n\n"
           "For details see gaussianSharpening_ in the vigra C++ documentation.\n");
 
-    def("laplacianOfGaussian",
-         registerConverters(&pythonLaplacianOfGaussian<float,3>),
-         (arg("array"), arg("scale") = 1.0, arg("out") = python::object(),
-          arg("sigma_d") = 0.0, arg("step_size") = 1.0, arg("window_size")=0.0, arg("roi")=python::object()),
-          "Filter 2D or 3D scalar array with the Laplacian of Gaussian operator at the given scale.\n\n"
-          "If 'sigma' is a single value, an isotropic filter at this scale is "
-          "applied (i.e., each dimension is filtered in the same way). "
-          "If 'sigma' is a tuple or list of values, the amount of smoothing "
-          "will be different for each spatial dimension.\n"
-          "The optional 'sigma_d' (single, tuple, or list) denotes the resolution standard deviation "
-          "per axis, the optional 'step_size' (single, tuple, or list) the distance between two adjacent "
-          "pixels for each dimension. "
-          "The length of the tuples or lists must be equal to the "
-          "number of spatial dimensions.\n\n"
-          "'window_size' and 'roi' have the same meaning as in :func:`gaussianSmoothing`.\n\n"
-          "For details see laplacianOfGaussianMultiArray_ and ConvolutionOptions_ in the vigra C++ documentation.\n");
-
-    def("laplacianOfGaussian",
-         registerConverters(&pythonLaplacianOfGaussian<float,4>),
-         (arg("array"), arg("scale") = 1.0, arg("out") = python::object(),
-         arg("sigma_d") = 0.0, arg("step_size") = 1.0, arg("window_size")=0.0, arg("roi")=python::object()),
-         "Likewise for a scalar volume.\n");
-
-    def("gaussianDivergence",
-         registerConverters(&pythonGaussianDivergence<float,2>),
-         (arg("array"), arg("scale") = 1.0, arg("out") = python::object(),
-          arg("sigma_d") = 0.0, arg("step_size") = 1.0, arg("window_size")=0.0, arg("roi")=python::object()),
-          "Compute the divergence of a 2D vector field with a first derivative of Gaussian at the given scale.\n\n"
-          "If 'sigma' is a single value, an isotropic filter at this scale is "
-          "applied (i.e., each dimension is filtered in the same way). "
-          "If 'sigma' is a tuple or list of values, the amount of smoothing "
-          "will be different for each spatial dimension.\n"
-          "The optional 'sigma_d' (single, tuple, or list) denotes the resolution standard deviation "
-          "per axis, the optional 'step_size' (single, tuple, or list) the distance between two adjacent "
-          "pixels for each dimension. "
-          "The length of the tuples or lists must be equal to the "
-          "number of spatial dimensions.\n\n"
-          "'window_size' and 'roi' have the same meaning as in :func:`gaussianSmoothing`.\n\n"
-          "For details see gaussianDivergenceMultiArray_ and ConvolutionOptions_ in the vigra C++ documentation.\n");
-
-    def("gaussianDivergence",
-         registerConverters(&pythonGaussianDivergence<float,3>),
-         (arg("array"), arg("scale") = 1.0, arg("out") = python::object(),
-         arg("sigma_d") = 0.0, arg("step_size") = 1.0, arg("window_size")=0.0, arg("roi")=python::object()),
-         "Likewise for a 3D vector field.\n");
+    multidef("laplacianOfGaussian",
+        pyLaplacianOfGaussian<3, 4, float, double>().installFallback(),
+        (arg("array"),
+         arg("scale") = 1.0,
+         arg("out") = python::object(),
+         arg("sigma_d") = 0.0,
+         arg("step_size") = 1.0,
+         arg("window_size")=0.0,
+         arg("roi")=python::object()),
+        "\n"
+        "Filter a 2D or 3D scalar array with the Laplacian of Gaussian operator\n"
+        "at the given scale. Multiple channels are filtered independently.\n"
+        "\n"
+        "If 'sigma' is a single value, an isotropic filter at this scale is\n"
+        "applied (i.e., each dimension is filtered in the same way).\n"
+        "If 'sigma' is a tuple or list of values, the amount of smoothing\n"
+        "will be different for each spatial dimension.\n"
+        "The optional 'sigma_d' (single, tuple, or list) denotes the PSF\n"
+        "standard deviation per axis, the optional 'step_size' (single, tuple,\n"
+        "or list) the distance between two adjacent pixels for each dimension.\n"
+        "The length of the tuples or lists must be equal to the number of spatial\n"
+        "dimensions.\n"
+        "\n"
+        "'window_size' and 'roi' have the same meaning as in :func:`gaussianSmoothing`.\n\n"
+        "For details see laplacianOfGaussianMultiArray_ and ConvolutionOptions_ in the vigra C++ documentation.\n");
+
+    multidef("gaussianDivergence",
+        pyGaussianDivergence<2, 3, float, double>().installFallback(),
+        (arg("array"),
+         arg("scale") = 1.0,
+         arg("out") = python::object(),
+         arg("sigma_d") = 0.0,
+         arg("step_size") = 1.0,
+         arg("window_size")=0.0,
+         arg("roi")=python::object()),
+        "\n"
+        "Compute the divergence of a 2D or 3D vector field with a first\n"
+        "derivative of Gaussian at the given scale. The array must have\n"
+        "as many channels as spatial dimensions.\n"
+        "\n"
+        "If 'sigma' is a single value, an isotropic filter at this scale is\n"
+        "applied (i.e., each dimension is filtered in the same way).\n"
+        "If 'sigma' is a tuple or list of values, the amount of smoothing\n"
+        "will be different for each spatial dimension.\n"
+        "The optional 'sigma_d' (single, tuple, or list) denotes the PSF\n"
+        "standard deviation per axis, the optional 'step_size' (single, tuple,\n"
+        "or list) the distance between two adjacent pixels for each dimension.\n"
+        "The length of the tuples or lists must be equal to the number of spatial\n"
+        "dimensions.\n"
+        "\n"
+        "'window_size' and 'roi' have the same meaning as in :func:`gaussianSmoothing`.\n\n"
+        "For details see gaussianDivergenceMultiArray_ and ConvolutionOptions_ in the vigra C++ documentation.\n");
 
     def("recursiveFilter2D", registerConverters(&pythonRecursiveFilter1<float>),
               (arg("image"), arg("b"), arg("borderTreament") = BORDER_TREATMENT_REFLECT, arg("out") = python::object()),
diff --git a/vigranumpy/src/core/edgedetection.cxx b/vigranumpy/src/core/edgedetection.cxx
index 0dd0b15..7783376 100644
--- a/vigranumpy/src/core/edgedetection.cxx
+++ b/vigranumpy/src/core/edgedetection.cxx
@@ -80,20 +80,20 @@ PyObject * Edgel__repr__(Edgel const & e)
         std::stringstream s;
         s << std::setprecision(14)
           << "Edgel(x=" << e.x << ", y=" << e.y << ", strength=" << e.strength << ", angle=" << e.orientation << ")";
-        return PyString_FromString(s.str().c_str());
+        return pythonFromData(s.str().c_str());
 }
 
 template < class PixelType>
 python::list
 pythonFindEdgelsFromGrad(NumpyArray<2, TinyVector<PixelType, 2> > grad,
-                         double threshold) 
+                         double threshold)
 {
     std::vector<Edgel> edgels;
     {
         PyAllowThreads _pythread;
         cannyEdgelList(srcImageRange(grad), edgels);
     }
-    
+
     python::list pyEdgels;
     for(unsigned int i = 0; i < edgels.size(); ++i)
     {
@@ -113,7 +113,7 @@ pythonFindEdgels(NumpyArray<2, Singleband<PixelType> > image,
         PyAllowThreads _pythread;
         cannyEdgelList(srcImageRange(image), edgels, scale);
     }
-    
+
     python::list pyEdgels;
     for(unsigned int i = 0; i < edgels.size(); ++i)
     {
@@ -126,14 +126,14 @@ pythonFindEdgels(NumpyArray<2, Singleband<PixelType> > image,
 template < class PixelType>
 python::list
 pythonFindEdgels3x3FromGrad(NumpyArray<2, TinyVector<PixelType, 2> > grad,
-                            double threshold) 
+                            double threshold)
 {
     std::vector<Edgel> edgels;
     {
         PyAllowThreads _pythread;
         cannyEdgelList3x3(srcImageRange(grad), edgels);
     }
-    
+
     python::list pyEdgels;
     for(unsigned int i = 0; i < edgels.size(); ++i)
     {
@@ -163,154 +163,154 @@ pythonFindEdgels3x3(NumpyArray<2, Singleband<PixelType> > image,
 }
 
 template < class SrcPixelType, typename DestPixelType >
-NumpyAnyArray 
+NumpyAnyArray
 pythonCannyEdgeImage(NumpyArray<2, Singleband<SrcPixelType> > image,
-                     double scale, double threshold, DestPixelType edgeMarker, 
+                     double scale, double threshold, DestPixelType edgeMarker,
                      NumpyArray<2, Singleband<DestPixelType> > res = python::object())
 {
     std::string description("Canny edges, scale=");
     description += asString(scale) + ", threshold=" + asString(threshold);
 
-    res.reshapeIfEmpty(image.taggedShape().setChannelDescription(description), 
-            "cannyEdgeImage(): Output array has wrong shape.");    
-    
+    res.reshapeIfEmpty(image.taggedShape().setChannelDescription(description),
+            "cannyEdgeImage(): Output array has wrong shape.");
+
     {
         PyAllowThreads _pythread;
-        cannyEdgeImage(srcImageRange(image), destImage(res), 
+        cannyEdgeImage(srcImageRange(image), destImage(res),
                        scale, threshold, edgeMarker);
     }
-    
+
     return res;
 }
 
 template < class SrcPixelType, typename DestPixelType >
-NumpyAnyArray 
+NumpyAnyArray
 pythonCannyEdgeImageWithThinning(NumpyArray<2, Singleband<SrcPixelType> > image,
-                                 double scale, double threshold, 
+                                 double scale, double threshold,
                                  DestPixelType edgeMarker, bool addBorder = true,
                                  NumpyArray<2, Singleband<DestPixelType> > res = python::object())
 {
     std::string description("Canny edges with thinning, scale=");
     description += asString(scale) + ", threshold=" + asString(threshold);
 
-    res.reshapeIfEmpty(image.taggedShape().setChannelDescription(description), 
-            "cannyEdgeImageWithThinning(): Output array has wrong shape.");    
-    
+    res.reshapeIfEmpty(image.taggedShape().setChannelDescription(description),
+            "cannyEdgeImageWithThinning(): Output array has wrong shape.");
+
     {
         PyAllowThreads _pythread;
         cannyEdgeImageWithThinning(srcImageRange(image), destImage(res),
                                    scale, threshold, edgeMarker, addBorder);
     }
-    
+
     return res;
 }
 
 template < class SrcPixelType, typename DestPixelType >
-NumpyAnyArray 
+NumpyAnyArray
 pythonShenCastanEdgeImage(NumpyArray<2, Singleband<SrcPixelType> > image,
-                          double scale, double threshold, DestPixelType edgeMarker, 
+                          double scale, double threshold, DestPixelType edgeMarker,
                           NumpyArray<2, Singleband<DestPixelType> > res = python::object())
 {
     std::string description("Shen/Castan edges, scale=");
     description += asString(scale) + ", threshold=" + asString(threshold);
 
-    res.reshapeIfEmpty(image.taggedShape().setChannelDescription(description), 
-            "shenCastanEdgeImage(): Output array has wrong shape.");    
-    
+    res.reshapeIfEmpty(image.taggedShape().setChannelDescription(description),
+            "shenCastanEdgeImage(): Output array has wrong shape.");
+
     {
         PyAllowThreads _pythread;
-        differenceOfExponentialEdgeImage(srcImageRange(image), destImage(res), 
+        differenceOfExponentialEdgeImage(srcImageRange(image), destImage(res),
                                          scale, threshold, edgeMarker);
     }
-    
+
     return res;
 }
 
 template < class SrcPixelType, typename DestPixelType >
-NumpyAnyArray 
+NumpyAnyArray
 pythonShenCastanCrackEdgeImage(NumpyArray<2, Singleband<SrcPixelType> > image,
-                               double scale, double threshold, DestPixelType edgeMarker, 
+                               double scale, double threshold, DestPixelType edgeMarker,
                                NumpyArray<2, Singleband<DestPixelType> > res = python::object())
 {
     std::string description("Shen/Castan crack edges, scale=");
     description += asString(scale) + ", threshold=" + asString(threshold);
 
     MultiArrayShape<2>::type newShape = 2*image.shape() - MultiArrayShape<2>::type(1,1);
-    res.reshapeIfEmpty(image.taggedShape().resize(newShape).setChannelDescription(description), 
-                       "shenCastanCrackEdgeImage(): Output array has wrong shape. Needs to be (w,h)*2 - 1.");    
-    
+    res.reshapeIfEmpty(image.taggedShape().resize(newShape).setChannelDescription(description),
+                       "shenCastanCrackEdgeImage(): Output array has wrong shape. Needs to be (w,h)*2 - 1.");
+
     {
         PyAllowThreads _pythread;
-        differenceOfExponentialCrackEdgeImage(srcImageRange(image), destImage(res), 
+        differenceOfExponentialCrackEdgeImage(srcImageRange(image), destImage(res),
                                               scale, threshold, edgeMarker);
     }
-    
+
     return res;
 }
 
 template < class PixelType>
-NumpyAnyArray 
+NumpyAnyArray
 pythonRemoveShortEdges(NumpyArray<2, Singleband<PixelType> > image,
-                       int minEdgeLength, PixelType nonEdgeMarker, 
+                       int minEdgeLength, PixelType nonEdgeMarker,
                        NumpyArray<2, Singleband<PixelType> > res = python::object())
 {
-    res.reshapeIfEmpty(image.taggedShape(), 
-            "removeShortEdges(): Output array has wrong shape.");    
-    
+    res.reshapeIfEmpty(image.taggedShape(),
+            "removeShortEdges(): Output array has wrong shape.");
+
     {
         PyAllowThreads _pythread;
         copyImage(srcImageRange(image), destImage(res));
         removeShortEdges(destImageRange(res), minEdgeLength, nonEdgeMarker);
     }
-    
+
     return res;
 }
 
 template < class PixelType >
-NumpyAnyArray 
+NumpyAnyArray
 pythonBeautifyCrackEdgeImage(NumpyArray<2, Singleband<PixelType> > image,
-                             PixelType edgeMarker, 
+                             PixelType edgeMarker,
                              PixelType backgroundMarker,
                              NumpyArray<2, Singleband<PixelType> > res = python::object())
 {
-    res.reshapeIfEmpty(image.taggedShape(), 
-            "beautifyCrackEdgeImage(): Output array has wrong shape.");    
-    
+    res.reshapeIfEmpty(image.taggedShape(),
+            "beautifyCrackEdgeImage(): Output array has wrong shape.");
+
     {
         PyAllowThreads _pythread;
         copyImage(srcImageRange(image), destImage(res));
         beautifyCrackEdgeImage(destImageRange(res), edgeMarker, backgroundMarker);
     }
-    
+
     return res;
 }
 
 template < class PixelType >
-NumpyAnyArray 
+NumpyAnyArray
 pythonCloseGapsInCrackEdgeImage(NumpyArray<2, Singleband<PixelType> > image,
                                 PixelType edgeMarker,
                                 NumpyArray<2, Singleband<PixelType> > res = python::object())
 {
-    res.reshapeIfEmpty(image.taggedShape(), 
+    res.reshapeIfEmpty(image.taggedShape(),
             "closeGapsInCrackEdgeImage(): Output array has wrong shape.");
-    
+
     {
         PyAllowThreads _pythread;
         copyImage(srcImageRange(image), destImage(res));
         closeGapsInCrackEdgeImage(destImageRange(res), edgeMarker);
     }
-    
+
     return res;
 }
 
 template < class PixelType >
-NumpyAnyArray 
+NumpyAnyArray
 pythonRegionImageToCrackEdgeImage(NumpyArray<2, Singleband<PixelType> > image,
                                   PixelType edgeLabel = 0,
                                   NumpyArray<2, Singleband<PixelType> > res = python::object())
 {
     MultiArrayShape<2>::type newShape = 2*image.shape() - MultiArrayShape<2>::type(1,1);
-    res.reshapeIfEmpty(image.taggedShape().resize(newShape), 
+    res.reshapeIfEmpty(image.taggedShape().resize(newShape),
             "regionImageToCrackEdgeImage(): Output array has wrong shape. Needs to be (w,h)*2 - 1.");
 
     {
@@ -321,12 +321,12 @@ pythonRegionImageToCrackEdgeImage(NumpyArray<2, Singleband<PixelType> > image,
 }
 
 template < class PixelType >
-NumpyAnyArray 
+NumpyAnyArray
 pythonRegionImageToEdgeImage(NumpyArray<2, Singleband<PixelType> > image,
                              PixelType edgeLabel = 1,
                              NumpyArray<2, Singleband<PixelType> > res = python::object())
 {
-    res.reshapeIfEmpty(image.taggedShape(), 
+    res.reshapeIfEmpty(image.taggedShape(),
             "regionImageToEdgeImage2D(): Output array has wrong shape.");
 
     {
@@ -348,7 +348,7 @@ void defineEdgedetection()
                                   "For details, see Edgel_ in the vigra C++ documentation.\n",
                          init<>("Standard constructor::\n\n   Edgel()\n\n"));
     edgel
-       .def(init<float, float, float, float>(args("x", "y", "strength", "orientation"), 
+       .def(init<float, float, float, float>(args("x", "y", "strength", "orientation"),
             "Constructor::\n\n    Edgel(x, y, strength, orientation)\n\n"))
        .def_readwrite("x", &Edgel::x, "The edgel's x position.")
        .def_readwrite("y", &Edgel::y, "The edgel's y position.")
@@ -360,7 +360,7 @@ void defineEdgedetection()
        .def("__len__", &Edgel__len__)
        ;
 
-    def("cannyEdgelList", 
+    def("cannyEdgelList",
         registerConverters(&pythonFindEdgelsFromGrad<float>),
         args("gradient", "threshold"),
         "Return a list of :class:`Edgel` objects whose strength is at least 'threshold'.\n\n"
@@ -372,12 +372,12 @@ void defineEdgedetection()
         "gradient internally at 'scale'.\n\n"
         "For details see cannyEdgelList_ in the vigra C++ documentation.\n");
 
-    def("cannyEdgelList",  
+    def("cannyEdgelList",
         registerConverters(&pythonFindEdgels<float>),
         args("image", "scale", "threshold"),
          "Compute edgels of a 2D scalar image, given the filter scale.\n");
 
-    def("cannyEdgelList3x3", 
+    def("cannyEdgelList3x3",
         registerConverters(&pythonFindEdgels3x3FromGrad<float>),
         args("gradient", "threshold"),
         "Return a list of :class:`Edgel` objects whose strength is at least 'threshold'.\n\n"
@@ -390,7 +390,7 @@ void defineEdgedetection()
         "those of :func:`cannyEdgelList`.\n\n"
         "For details see cannyEdgelList3x3_ in the vigra C++ documentation.\n");
 
-    def("cannyEdgelList3x3",  
+    def("cannyEdgelList3x3",
         registerConverters(&pythonFindEdgels3x3<float>),
         args("image", "scale", "threshold"),
          "Compute edgels of a 2D scalar image, given the filter scale.\n");
@@ -440,7 +440,7 @@ void defineEdgedetection()
 
     def("regionImageToEdgeImage",
         registerConverters(&pythonRegionImageToEdgeImage<npy_uint32>),
-        (arg("image"), 
+        (arg("image"),
          arg("edgeLabel") = 1,
          arg("out")=python::object()),
         "Transform a labeled uint32 image into an edge image.\n\n"
@@ -448,23 +448,23 @@ void defineEdgedetection()
 
     def("regionImageToEdgeImage",
         registerConverters(&pythonRegionImageToEdgeImage<npy_uint64>),
-        (arg("image"), 
+        (arg("image"),
          arg("edgeLabel") = 1,
          arg("out")=python::object()),
          "Likewise for a uint64 image.\n");
 
     def("regionImageToCrackEdgeImage",
          registerConverters(&pythonRegionImageToCrackEdgeImage<npy_uint32>),
-         (arg("image"), 
-          arg("edgeLabel") = 0, 
+         (arg("image"),
+          arg("edgeLabel") = 0,
           arg("out")=python::object()),
          "Transform a labeled uint32 image into a crack edge image. \n\n"
          "For details see regionImageToCrackEdgeImage_ in the vigra C++ documentation.\n");
 
     def("regionImageToCrackEdgeImage",
          registerConverters(&pythonRegionImageToCrackEdgeImage<npy_uint64>),
-         (arg("image"), 
-          arg("edgeLabel") = 0, 
+         (arg("image"),
+          arg("edgeLabel") = 0,
           arg("out")=python::object()),
          "Likewise for a uint64 image.\n");
 
diff --git a/vigranumpy/src/core/geometry.cxx b/vigranumpy/src/core/geometry.cxx
index 7063249..69ab102 100644
--- a/vigranumpy/src/core/geometry.cxx
+++ b/vigranumpy/src/core/geometry.cxx
@@ -55,7 +55,7 @@ pyconvexHull(NumpyArray<1, TinyVector<Coordinate, 2>, UnstridedArrayTag > points
     ArrayVector<TinyVector<Coordinate, 2> > hull;
     {
         PyAllowThreads _pythread;
-    
+
         convexHull(ArrayVectorView<TinyVector<Coordinate, 2> >(points.shape(0), points.data()), hull);
     }
 
@@ -75,8 +75,8 @@ void defineGeometry()
     docstring_options doc_options(true, true, false);
 
     multidef("convexHull",
-         pythonConvexHull<double, float, Int32>(),
-         args("points"),
+        pythonConvexHull<double, float, Int32>().installFallback(),
+        args("points"),
         "Compute the convex hull of a point set.\n"
         "\n"
         "For details see convexHull_ in the vigra C++ documentation.\n\n");
diff --git a/vigranumpy/src/core/impex.cxx b/vigranumpy/src/core/impex.cxx
index 46b48a2..db88a67 100644
--- a/vigranumpy/src/core/impex.cxx
+++ b/vigranumpy/src/core/impex.cxx
@@ -56,7 +56,7 @@ template <class T>
 NumpyAnyArray readImageImpl(ImageImportInfo const & info, std::string order = "")
 {
     typedef UnstridedArrayTag Stride;
-    
+
     if(order == "")
         order = detail::defaultOrder();
 
@@ -116,7 +116,7 @@ std::string numpyTypeIdToImpexString(NPY_TYPES typeID)
 
 } // namespace detail
 
-NumpyAnyArray 
+NumpyAnyArray
 readImage(const char * filename, python::object import_type, unsigned int index, std::string order = "")
 {
     ImageImportInfo info(filename, index);
@@ -204,7 +204,7 @@ VIGRA_PYTHON_MULTITYPE_FUNCTOR(pywriteImage, writeImage)
 namespace detail {
 
 template <class T>
-NumpyAnyArray 
+NumpyAnyArray
 readVolumeImpl(VolumeImportInfo const & info, std::string order = "")
 {
     if(order == "")
@@ -254,7 +254,7 @@ readVolumeImpl(VolumeImportInfo const & info, std::string order = "")
 
 } // namespace detail
 
-NumpyAnyArray 
+NumpyAnyArray
 readVolume(const char * filename, python::object import_type, std::string order = "")
 {
     VolumeImportInfo info(filename);
@@ -293,16 +293,16 @@ readVolume(const char * filename, python::object import_type, std::string order
 
 template <class T>
 void writeVolume(NumpyArray<3, T > const & volume,
-                    const char * filename_base, 
-                    const char * filename_ext, 
-                    python::object export_type,  
+                    const char * filename_base,
+                    const char * filename_ext,
+                    python::object export_type,
                     const char * compression = "")
 {
     VolumeExportInfo info(filename_base, filename_ext);
-    
+
     if(python::extract<std::string>(export_type).check())
     {
-        std::string type = python::extract<std::string>(export_type)();        
+        std::string type = python::extract<std::string>(export_type)();
         if(type == "NBYTE")
         {
             info.setForcedRangeMapping(0.0, 0.0, 0.0, 255.0);
@@ -319,7 +319,7 @@ void writeVolume(NumpyArray<3, T > const & volume,
     }
     else if(export_type)
         vigra_precondition(false, "writeVolume(filename, export_type): export_type must be a string or a numpy dtype.");
-        
+
     if(std::string(compression) == "RunLength")
         info.setCompression("RLE");
     else if(std::string(compression) != "")
@@ -375,7 +375,7 @@ void defineImpexFunctions()
         .def("getShape", &pythonGetShape, "Get shape of image in the file.")
         .def("getAxisTags", &pythonGetAxisTags, "Get axistags of image in the file.")
     ;
-    
+
     // FIXME: add an order parameter to the import functions
     def("readVolume", &readVolume, (arg("filename"), arg("dtype") = "FLOAT", arg("order") = ""),
         "Read a 3D volume from a directory::\n"
@@ -416,15 +416,21 @@ void defineImpexFunctions()
         "etc.), the returned volume will have the requested pixel type. \n"
         "\n"
         "The order parameter determines the axis ordering of the resulting array\n"
-        "(allowed values: 'C', 'F', 'V'). When order == '' (the default), " 
+        "(allowed values: 'C', 'F', 'V'). When order == '' (the default), "
         "vigra.VigraArray.defaultOrder is used.\n"
         "\n"
         "For details see the help for :func:`readImage`.\n");
-        
-    multidef("writeVolume", pywriteVolume<Singleband<Int8>, Singleband<UInt64>, Singleband<Int64>, Singleband<UInt16>, 
-                                          Singleband<Int16>, Singleband<UInt32>, Singleband<Int32>, Singleband<double>, 
-                                          Singleband<float>, Singleband<UInt8>, TinyVector<float, 3>, TinyVector<UInt8, 3> >(), 
-       (arg("volume"), arg("filename_base"), arg("filename_ext"), arg("dtype") = "", arg("compression") = ""),
+
+    multidef("writeVolume",
+        pywriteVolume<Singleband<Int8>, Singleband<UInt64>, Singleband<Int64>,
+                      Singleband<UInt16>, Singleband<Int16>, Singleband<UInt32>,
+                      Singleband<Int32>, Singleband<double>, Singleband<float>,
+                      Singleband<UInt8>, TinyVector<float, 3>, TinyVector<UInt8, 3> >().installFallback(),
+       (arg("volume"),
+        arg("filename_base"),
+        arg("filename_ext"),
+        arg("dtype") = "",
+        arg("compression") = ""),
        "Write a volume as a sequence of images::\n\n"
        "   writeVolume(volume, filename_base, filename_ext, dtype='', compression='')\n\n"
        "The resulting image sequence will be enumerated in the form::\n\n"
@@ -432,8 +438,8 @@ void defineImpexFunctions()
        "Write a volume as a multi-page tiff (filename_ext must be an empty string)::\n\n"
        "   writeVolume(volume, filename, '', dtype='', compression='')\n\n"
        "Parameters 'dtype' and 'compression' will be handled as in :func:`writeImage`.\n\n");
-    
-    def("readImage", &readImage, 
+
+    def("readImage", &readImage,
         (arg("filename"), arg("dtype") = "FLOAT", arg("index") = 0, arg("order") = ""),
         "Read an image from a file::\n"
         "\n"
@@ -455,16 +461,22 @@ void defineImpexFunctions()
         "an entire multi-page TIFF in one go.\n"
         "\n"
         "The 'order' parameter determines the axis ordering of the resulting array\n"
-        "(allowed values: 'C', 'F', 'V'). When order == '' (the default), \n" 
+        "(allowed values: 'C', 'F', 'V'). When order == '' (the default), \n"
         "'vigra.VigraArray.defaultOrder' is used.\n"
         "\n"
         "Supported file formats are listed by the function :func:`listFormats`.\n"
         "When 'filename' does not refer to a recognized image file format, an\n"
         "exception is raised. The file can be checked beforehand with the function\n"
         ":func:`isImage`.\n");
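
A minimal readImage sketch (placeholder filename; 'dtype', 'index', and 'order' behave as documented above):

    import vigra

    img = vigra.impex.readImage('image.png', dtype='UINT8', order='C')
    print(img.shape, img.dtype)
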
-        
-    multidef("writeImage", pywriteImage<Int8, UInt64, Int64, UInt16, Int16, UInt32, Int32, double, float, UInt8>(),
-       (arg("image"), arg("filename"), arg("dtype") = "", arg("compression") = "", arg("mode") = "w"),
+
+    multidef("writeImage",
+        pywriteImage<Int8, UInt64, Int64, UInt16, Int16, UInt32,
+                     Int32, double, float, UInt8>().installFallback(),
+        (arg("image"),
+         arg("filename"),
+         arg("dtype") = "",
+         arg("compression") = "",
+         arg("mode") = "w"),
         "Save an image to a file::\n"
         "\n"
         "   writeImage(image, filename, dtype='', compression='', mode='w')\n"
@@ -542,21 +554,21 @@ void defineImpexFunctions()
         "   VIFF:\n"
         "       Khoros Visualization image file (pixel types: UINT8, INT16\n"
         "       INT32, FLOAT, DOUBLE with arbitrary many channels).\n\n");
-        
+
     def("listFormats", &impexListFormats,
         "Ask for the image file formats that vigra.impex understands::\n\n"
         "    listFormats() -> string\n\n"
         "This function returns a string containing the supported image file "
         "formats for reading and writing with the functions :func:`readImage` and "
         ":func:`writeImage`.\n");
-        
+
     def("listExtensions", &impexListExtensions,
         "Ask for the image file extensions that vigra.impex understands::\n\n"
         "    listExtensions() -> string\n\n"
         "This function returns a string containing the supported image file "
         "extensions for reading and writing with the functions :func:`readImage` and "
         ":func:`writeImage`.\n");
-        
+
     def("isImage", &isImage, args("filename"),
         "Check whether the given file name contains image data::\n\n"
         "   isImage(filename) -> bool\n\n"
diff --git a/vigranumpy/src/core/morphology.cxx b/vigranumpy/src/core/morphology.cxx
index a197a4e..76e6a0f 100644
--- a/vigranumpy/src/core/morphology.cxx
+++ b/vigranumpy/src/core/morphology.cxx
@@ -52,9 +52,9 @@ namespace vigra
 {
 
 template < class PixelType >
-NumpyAnyArray 
+NumpyAnyArray
 pythonDiscRankOrderFilter(NumpyArray<3, Multiband<PixelType> > image,
-                          int radius, float rank, 
+                          int radius, float rank,
                           NumpyArray<3, Multiband<PixelType> > res)
 {
     vigra_precondition((rank >= 0.0) && (rank <= 1.0),
@@ -67,10 +67,10 @@ pythonDiscRankOrderFilter(NumpyArray<3, Multiband<PixelType> > image,
     {
         PyAllowThreads _pythread;
         for(int k=0; k<image.shape(2); ++k)
-        { 
+        {
             MultiArrayView<2, PixelType, StridedArrayTag> bimage = image.bindOuter(k);
             MultiArrayView<2, PixelType, StridedArrayTag> bres = res.bindOuter(k);
-            discRankOrderFilter(srcImageRange(bimage,StandardValueAccessor<UInt8>()), 
+            discRankOrderFilter(srcImageRange(bimage,StandardValueAccessor<UInt8>()),
                                 destImage(bres), radius, rank);
         }
     }
@@ -78,7 +78,7 @@ pythonDiscRankOrderFilter(NumpyArray<3, Multiband<PixelType> > image,
 }
 
 template < class PixelType >
-NumpyAnyArray 
+NumpyAnyArray
 pythonDiscRankOrderFilterWithMask(NumpyArray<3, Multiband<PixelType> > image,
                                   NumpyArray<3, Multiband<PixelType> > mask,
                                   int radius, float rank,
@@ -98,50 +98,50 @@ pythonDiscRankOrderFilterWithMask(NumpyArray<3, Multiband<PixelType> > image,
     {
         PyAllowThreads _pythread;
         for(int k=0; k<image.shape(2); ++k)
-        { 
+        {
             MultiArrayView<2, PixelType, StridedArrayTag> bimage = image.bindOuter(k);
             MultiArrayView<2, PixelType, StridedArrayTag> bres = res.bindOuter(k);
             MultiArrayView<2, PixelType, StridedArrayTag> bmask = mask.bindOuter(mask.shape(2)==1?0:k);
-            discRankOrderFilterWithMask(srcImageRange(bimage,StandardValueAccessor<UInt8>()), 
+            discRankOrderFilterWithMask(srcImageRange(bimage,StandardValueAccessor<UInt8>()),
                                         srcImage(bmask),
                                         destImage(bres), radius, rank);
         }
     }
-    
+
     return res;
 }
 
 template < class PixelType >
-NumpyAnyArray 
+NumpyAnyArray
 pythonDiscErosion(NumpyArray<3, Multiband<PixelType> > image,
-                  int radius, 
+                  int radius,
                   NumpyArray<3, Multiband<PixelType> > res)
 {
     return pythonDiscRankOrderFilter(image, radius, 0.0f, res);
 }
 
 template < class PixelType >
-NumpyAnyArray 
+NumpyAnyArray
 pythonDiscDilation(NumpyArray<3, Multiband<PixelType> > image,
-                   int radius, 
+                   int radius,
                    NumpyArray<3, Multiband<PixelType> > res)
 {
     return pythonDiscRankOrderFilter(image, radius, 1.0f, res);
 }
 
 template < class PixelType >
-NumpyAnyArray 
+NumpyAnyArray
 pythonDiscMedian(NumpyArray<3, Multiband<PixelType> > image,
-                 int radius, 
+                 int radius,
                  NumpyArray<3, Multiband<PixelType> > res)
 {
     return pythonDiscRankOrderFilter(image, radius, 0.5f, res);
 }
 
 template < class PixelType >
-NumpyAnyArray 
-pythonDiscOpening(NumpyArray<3, Multiband<PixelType> > image, 
-                  int radius, 
+NumpyAnyArray
+pythonDiscOpening(NumpyArray<3, Multiband<PixelType> > image,
+                  int radius,
                   NumpyArray<3, Multiband<PixelType> > res)
 {
     vigra_precondition(radius >= 0, "Radius must be >=0.");
@@ -165,9 +165,9 @@ pythonDiscOpening(NumpyArray<3, Multiband<PixelType> > image,
 }
 
 template < class PixelType >
-NumpyAnyArray 
-pythonDiscClosing(NumpyArray<3, Multiband<PixelType> > image, 
-                  int radius, 
+NumpyAnyArray
+pythonDiscClosing(NumpyArray<3, Multiband<PixelType> > image,
+                  int radius,
                   NumpyArray<3, Multiband<PixelType> > res)
 {
     vigra_precondition(radius >= 0, "Radius must be >=0.");
@@ -190,10 +190,10 @@ pythonDiscClosing(NumpyArray<3, Multiband<PixelType> > image,
     return res;
 }
 
-template < int dim, class PixelType >
-NumpyAnyArray 
+template < class PixelType, int dim >
+NumpyAnyArray
 pythonMultiBinaryErosion(NumpyArray<dim, Multiband<PixelType> > array,
-                         double radius, 
+                         double radius,
                          NumpyArray<dim, Multiband<PixelType> > res)
 {
     res.reshapeIfEmpty(array.taggedShape(),
@@ -211,10 +211,12 @@ pythonMultiBinaryErosion(NumpyArray<dim, Multiband<PixelType> > array,
     return res;
 }
 
-template < int dim, class PixelType >
-NumpyAnyArray 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyMultiBinaryErosion, pythonMultiBinaryErosion)
+
+template < class PixelType, int dim >
+NumpyAnyArray
 pythonMultiBinaryDilation(NumpyArray<dim, Multiband<PixelType> > array,
-                          double radius, 
+                          double radius,
                           NumpyArray<dim, Multiband<PixelType> > res)
 {
     res.reshapeIfEmpty(array.taggedShape(),
@@ -232,10 +234,12 @@ pythonMultiBinaryDilation(NumpyArray<dim, Multiband<PixelType> > array,
     return res;
 }
 
-template <int dim, class PixelType >
-NumpyAnyArray 
-pythonMultiBinaryOpening(NumpyArray<dim, Multiband<PixelType> > array, 
-                         double radius, 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyMultiBinaryDilation, pythonMultiBinaryDilation)
+
+template <class PixelType, int dim >
+NumpyAnyArray
+pythonMultiBinaryOpening(NumpyArray<dim, Multiband<PixelType> > array,
+                         double radius,
                          NumpyArray<dim, Multiband<PixelType> > res)
 {
     res.reshapeIfEmpty(array.taggedShape(),
@@ -256,10 +260,12 @@ pythonMultiBinaryOpening(NumpyArray<dim, Multiband<PixelType> > array,
     return res;
 }
 
-template <int dim, class PixelType >
-NumpyAnyArray 
-pythonMultiBinaryClosing(NumpyArray<dim, Multiband<PixelType> > array, 
-                         double radius, 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyMultiBinaryOpening, pythonMultiBinaryOpening)
+
+template <class PixelType, int dim >
+NumpyAnyArray
+pythonMultiBinaryClosing(NumpyArray<dim, Multiband<PixelType> > array,
+                         double radius,
                          NumpyArray<dim, Multiband<PixelType> > res)
 {
     res.reshapeIfEmpty(array.taggedShape(),
@@ -280,10 +286,12 @@ pythonMultiBinaryClosing(NumpyArray<dim, Multiband<PixelType> > array,
     return res;
 }
 
-template < int dim , class PixelType>
-NumpyAnyArray 
-pythonMultiGrayscaleErosion(NumpyArray<dim, Multiband<PixelType> > array, 
-                            double sigma, 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyMultiBinaryClosing, pythonMultiBinaryClosing)
+
+template <class PixelType, int dim>
+NumpyAnyArray
+pythonMultiGrayscaleErosion(NumpyArray<dim, Multiband<PixelType> > array,
+                            double sigma,
                             NumpyArray<dim, Multiband<PixelType> > res)
 {
     res.reshapeIfEmpty(array.taggedShape(),
@@ -300,10 +308,13 @@ pythonMultiGrayscaleErosion(NumpyArray<dim, Multiband<PixelType> > array,
     }
     return res;
 }
-template < int dim, class PixelType >
-NumpyAnyArray 
-pythonMultiGrayscaleDilation(NumpyArray<dim, Multiband<PixelType> > array, 
-                             double sigma, 
+
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyMultiGrayscaleErosion, pythonMultiGrayscaleErosion)
+
+template <class PixelType, int dim >
+NumpyAnyArray
+pythonMultiGrayscaleDilation(NumpyArray<dim, Multiband<PixelType> > array,
+                             double sigma,
                              NumpyArray<dim, Multiband<PixelType> > res)
 {
     res.reshapeIfEmpty(array.taggedShape(),
@@ -321,10 +332,12 @@ pythonMultiGrayscaleDilation(NumpyArray<dim, Multiband<PixelType> > array,
     return res;
 }
 
-template <int dim, class PixelType>
-NumpyAnyArray 
-pythonMultiGrayscaleOpening(NumpyArray<dim, Multiband<PixelType> > array, 
-                            double sigma, 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyMultiGrayscaleDilation, pythonMultiGrayscaleDilation)
+
+template <class PixelType, int dim>
+NumpyAnyArray
+pythonMultiGrayscaleOpening(NumpyArray<dim, Multiband<PixelType> > array,
+                            double sigma,
                             NumpyArray<dim, Multiband<PixelType> > res)
 {
     res.reshapeIfEmpty(array.taggedShape(),
@@ -345,10 +358,12 @@ pythonMultiGrayscaleOpening(NumpyArray<dim, Multiband<PixelType> > array,
     return res;
 }
 
-template <int dim, class PixelType>
-NumpyAnyArray 
-pythonMultiGrayscaleClosing(NumpyArray<dim, Multiband<PixelType> > array, 
-                            double sigma, 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyMultiGrayscaleOpening, pythonMultiGrayscaleOpening)
+
+template <class PixelType, int dim>
+NumpyAnyArray
+pythonMultiGrayscaleClosing(NumpyArray<dim, Multiband<PixelType> > array,
+                            double sigma,
                             NumpyArray<dim, Multiband<PixelType> > res)
 {
     res.reshapeIfEmpty(array.taggedShape(),
@@ -369,13 +384,15 @@ pythonMultiGrayscaleClosing(NumpyArray<dim, Multiband<PixelType> > array,
     return res;
 }
 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyMultiGrayscaleClosing, pythonMultiGrayscaleClosing)
+
 namespace detail {
 
 template <class PixelType>
 struct IsBackgroundAccessor
 {
     typedef bool value_type;
-    
+
     template <class Iterator>
     value_type operator()(Iterator const & i) const
     {
@@ -386,27 +403,27 @@ struct IsBackgroundAccessor
 } // namespace detail
 
 template < class PixelType, typename DestPixelType >
-NumpyAnyArray 
+NumpyAnyArray
 pythonDistanceTransform2D(NumpyArray<2, Singleband<PixelType> > image,
-                          bool background, 
+                          bool background,
                           int norm,
                           ArrayVector<double> pixelPitch = ArrayVector<double>(),
                           NumpyArray<2, Singleband<DestPixelType> > res = python::object())
 {
-    res.reshapeIfEmpty(image.taggedShape(), 
+    res.reshapeIfEmpty(image.taggedShape(),
             "distanceTransform2D(): Output array has wrong shape.");
-    
+
     if(pixelPitch.size() == 0)
     {
         PyAllowThreads _pythread;
         if(background)
         {
-            distanceTransform(srcImageRange(image), destImage(res), 
+            distanceTransform(srcImageRange(image), destImage(res),
                               NumericTraits<PixelType>::zero(), norm);
         }
         else
         {
-            distanceTransform(srcImageRange(image, detail::IsBackgroundAccessor<PixelType>()), 
+            distanceTransform(srcImageRange(image, detail::IsBackgroundAccessor<PixelType>()),
                               destImage(res), false, norm);
         }
     }
@@ -415,7 +432,7 @@ pythonDistanceTransform2D(NumpyArray<2, Singleband<PixelType> > image,
         vigra_precondition(norm == 2,
              "distanceTransform2D(): Anisotropic transform is only supported for norm=2.");
         pixelPitch = image.permuteLikewise(pixelPitch);
-        
+
         PyAllowThreads _pythread;
         separableMultiDistance(srcMultiArrayRange(image), destMultiArray(res), background, pixelPitch);
     }
@@ -423,16 +440,16 @@ pythonDistanceTransform2D(NumpyArray<2, Singleband<PixelType> > image,
     return res;
 }
 
-template < unsigned int N, class VoxelType >
-NumpyAnyArray 
-pythonDistanceTransformND(NumpyArray<N, Singleband<VoxelType> > volume, 
-                          bool background, 
-                          ArrayVector<double> pixelPitch = ArrayVector<double>(),
-                          NumpyArray<N, Singleband<VoxelType> > res=python::object())
+template <class VoxelType, int N>
+NumpyAnyArray
+pythonDistanceTransform(NumpyArray<N, Singleband<VoxelType> > volume,
+                        bool background,
+                        ArrayVector<double> pixelPitch = ArrayVector<double>(),
+                        NumpyArray<N, Singleband<float> > res=python::object())
 {
-    res.reshapeIfEmpty(volume.taggedShape(), 
-            "distanceTransform3D(): Output array has wrong shape.");
-    
+    res.reshapeIfEmpty(volume.taggedShape(),
+            "distanceTransform(): Output array has wrong shape.");
+
     if (pixelPitch.size() == 0)
     {
         pixelPitch = ArrayVector<double>(N, 1.0);
@@ -441,7 +458,7 @@ pythonDistanceTransformND(NumpyArray<N, Singleband<VoxelType> > volume,
     {
         pixelPitch = volume.permuteLikewise(pixelPitch);
     }
-    
+
     {
         PyAllowThreads _pythread;
         separableMultiDistance(srcMultiArrayRange(volume), destMultiArray(res), background, pixelPitch);
@@ -449,26 +466,28 @@ pythonDistanceTransformND(NumpyArray<N, Singleband<VoxelType> > volume,
     return res;
 }
 
-template < unsigned int N, class VoxelType >
-NumpyAnyArray 
-pythonVectorDistanceTransformND(NumpyArray<N, Singleband<VoxelType> > volume, 
-                                bool background, 
-                                ArrayVector<double> pyPixelPitch = ArrayVector<double>(),
-                                NumpyArray<N, TinyVector<float, N> > res=python::object())
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyDistanceTransform, pythonDistanceTransform)
+
+template <class VoxelType, int N>
+NumpyAnyArray
+pythonVectorDistanceTransform(NumpyArray<N, Singleband<VoxelType> > volume,
+                              bool background,
+                              ArrayVector<double> pyPixelPitch = ArrayVector<double>(),
+                              NumpyArray<N, TinyVector<float, N> > res=python::object())
 {
     vigra_precondition(pyPixelPitch.size() == 0 || pyPixelPitch.size() == N,
         "vectorDistanceTransform(): pixel_pitch has wrong shape.");
-    
-    res.reshapeIfEmpty(volume.taggedShape(), 
+
+    res.reshapeIfEmpty(volume.taggedShape(),
             "vectorDistanceTransform(): Output array has wrong shape.");
-            
+
     TinyVector<double, N> pixelPitch(1.0);
     if (pyPixelPitch.size() > 0)
     {
         pixelPitch.init(pyPixelPitch.begin(), pyPixelPitch.end());
         pixelPitch = volume.permuteLikewise(pixelPitch);
     }
-    
+
     {
         PyAllowThreads _pythread;
         separableVectorDistance(volume, res, background, pixelPitch);
@@ -476,7 +495,9 @@ pythonVectorDistanceTransformND(NumpyArray<N, Singleband<VoxelType> > volume,
     return res;
 }
 
-template < unsigned int N, class VoxelType >
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyVectorDistanceTransform, pythonVectorDistanceTransform)
+
+template <class VoxelType, int N>
 NumpyAnyArray
 pythonboundaryDistanceTransform(NumpyArray<N, Singleband<VoxelType> > volume,
                                 bool array_border_is_active,
@@ -485,7 +506,7 @@ pythonboundaryDistanceTransform(NumpyArray<N, Singleband<VoxelType> > volume,
 {
     res.reshapeIfEmpty(volume.taggedShape(),
             "boundaryDistanceTransform(): Output array has wrong shape.");
-            
+
     boundary = tolower(boundary);
     BoundaryDistanceTag boundary_tag = InterpixelBoundary;
     if(boundary == "outerboundary")
@@ -495,7 +516,7 @@ pythonboundaryDistanceTransform(NumpyArray<N, Singleband<VoxelType> > volume,
     else if(boundary == "innerboundary")
         boundary_tag = InnerBoundary;
     else
-        vigra_precondition(false, 
+        vigra_precondition(false,
                            "boundaryDistanceTransform(): invalid 'boundary' specification.");
     {
         PyAllowThreads _pythread;
@@ -504,7 +525,9 @@ pythonboundaryDistanceTransform(NumpyArray<N, Singleband<VoxelType> > volume,
     return res;
 }
 
-template < unsigned int N, class VoxelType >
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyboundaryDistanceTransform, pythonboundaryDistanceTransform)
+
+template <class VoxelType, int N>
 NumpyAnyArray
 pythonboundaryVectorDistanceTransform(NumpyArray<N, Singleband<VoxelType> > volume,
                                 bool array_border_is_active,
@@ -513,7 +536,7 @@ pythonboundaryVectorDistanceTransform(NumpyArray<N, Singleband<VoxelType> > volu
 {
     res.reshapeIfEmpty(volume.taggedShape(),
             "boundaryVectorDistanceTransform(): Output array has wrong shape.");
-            
+
     boundary = tolower(boundary);
     BoundaryDistanceTag boundary_tag = InterpixelBoundary;
     if(boundary == "outerboundary")
@@ -523,7 +546,7 @@ pythonboundaryVectorDistanceTransform(NumpyArray<N, Singleband<VoxelType> > volu
     else if(boundary == "innerboundary")
         boundary_tag = InnerBoundary;
     else
-        vigra_precondition(false, 
+        vigra_precondition(false,
                            "boundaryVectorDistanceTransform(): invalid 'boundary' specification.");
     {
         PyAllowThreads _pythread;
@@ -532,10 +555,12 @@ pythonboundaryVectorDistanceTransform(NumpyArray<N, Singleband<VoxelType> > volu
     return res;
 }
 
-template < unsigned int N, class T, class S >
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyboundaryVectorDistanceTransform, pythonboundaryVectorDistanceTransform)
+
+template <class T, int N>
 NumpyAnyArray
 pythonEccentricityTransform(const NumpyArray<N, T> & image,
-                            NumpyArray<N, S> res)
+                            NumpyArray<N, float> res)
 {
     res.reshapeIfEmpty(image.taggedShape(),
                        "eccentricityTransform(): Output array has wrong shape.");
@@ -543,7 +568,9 @@ pythonEccentricityTransform(const NumpyArray<N, T> & image,
     return res;
 }
 
-template < unsigned int N, class T >
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyEccentricityTransform, pythonEccentricityTransform)
+
+template <class T, int N>
 python::list
 pythonEccentricityCenters(const NumpyArray<N, T> & image)
 {
@@ -558,10 +585,12 @@ pythonEccentricityCenters(const NumpyArray<N, T> & image)
     return centerlist;
 }
 
-template < unsigned int N, class T, class S >
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyEccentricityCenters, pythonEccentricityCenters)
+
+template <class T, int N>
 python::tuple
 pythonEccentricityTransformWithCenters(const NumpyArray<N, T> & image,
-                                       NumpyArray<N, S> res)
+                                       NumpyArray<N, float> res)
 {
     typedef typename MultiArrayShape<N>::type Point;
     res.reshapeIfEmpty(image.taggedShape(),
@@ -576,16 +605,18 @@ pythonEccentricityTransformWithCenters(const NumpyArray<N, T> & image,
     return python::make_tuple(res, centerlist);
 }
 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyEccentricityTransformWithCenters, pythonEccentricityTransformWithCenters)
+
 template <unsigned int N, class T>
 NumpyAnyArray
 pySkeletonizeImage(NumpyArray<N, Singleband<T> > const & labels,
-              std::string mode,
-              double pruning_threshold)
+                   std::string mode,
+                   double pruning_threshold)
 {
     mode = tolower(mode);
     SkeletonOptions options;
     bool returnFloat = false;
-    
+
     if(mode == "dontprune")
     {
         options.dontPrune();
@@ -628,29 +659,29 @@ pySkeletonizeImage(NumpyArray<N, Singleband<T> > const & labels,
     {
         vigra_precondition(false, "skeletonizeImage(): invalid mode.");
     }
-    
+
     if(returnFloat)
     {
         NumpyArray<N, Singleband<float> > res(labels.taggedShape());
-        
+
         {
             PyAllowThreads _pythread;
-            
+
             skeletonizeImage(labels, res, options);
         }
-        
+
         return res;
     }
     else
     {
         NumpyArray<N, Singleband<T> > res(labels.taggedShape());
-        
+
         {
             PyAllowThreads _pythread;
-            
+
             skeletonizeImage(labels, res, options);
         }
-        
+
         return res;
     }
 }
@@ -658,7 +689,7 @@ pySkeletonizeImage(NumpyArray<N, Singleband<T> > const & labels,
 void defineMorphology()
 {
     using namespace python;
-    
+
     docstring_options doc_options(true, true, false);
 
     def("discRankOrderFilter",
@@ -669,7 +700,7 @@ void defineMorphology()
         "Rank must be in the range 0.0 <= rank <= 1.0. The filter acts as a minimum filter if rank = 0.0, as a median "
         "if rank = 0.5, and as a maximum filter if rank = 1.0. "
         "This function also works for multiband images, it is then executed on every band.\n"
-        "\n" 
+        "\n"
         "For details see discRankOrderFilter_ in the C++ documentation.\n"
        );
 
@@ -696,7 +727,7 @@ void defineMorphology()
         "the same number of bands, as the image the bands are used for the corresponding image bands.\n\n"
         "For details see discRankOrderFilterWithMask_ in the C++ documentation.\n"
         );
-    
+
     def("discRankOrderFilterWithMask",
         registerConverters(&pythonDiscRankOrderFilterWithMask<UInt8>),
         (arg("image"), arg("mask"), arg("radius"), arg("rank"), arg("out")=object()),
@@ -747,193 +778,142 @@ void defineMorphology()
         "See discRankOrderFilter_ in the C++ documentation for more information.\n"
        );
 
-    def("multiBinaryErosion",
-        registerConverters(&pythonMultiBinaryErosion<4, UInt8>),
-        (arg("volume"), arg("radius"), arg("out")=object()),
-       "Binary erosion on a 3D scalar or multiband uint8 array.\n"
-       "\n"
-       "This function applies a flat circular erosion operator with a given radius. "
-       "The operation is isotropic. The input is a uint8 or boolean multi-dimensional array "
-       "where non-zero pixels represent foreground and zero pixels represent background. "
-       "This function also works for multiband arrays, it is then executed on every band.\n"
-       "\n"
-       "For details see multiBinaryErosion_ in the C++ documentation.\n"
+    multidef("multiBinaryErosion",
+        pyMultiBinaryErosion<3, 4, UInt8, bool>().installFallback(),
+        (arg("array"),
+         arg("radius"),
+         arg("out")=object()),
+        "\n"
+        "Binary erosion on a scalar or multiband array (up to 3D, uint8 or bool).\n"
+        "Multiple channels are treated independently.\n"
+        "\n"
+        "This function applies a flat circular erosion operator with a given radius.\n"
+        "The operation is isotropic. The input is a uint8 or boolean multi-dimensional\n"
+        "array where non-zero elements represent foreground and zero elements represent\n"
+        "background.\n"
+        "\n"
+        "For details see multiBinaryErosion_ in the C++ documentation.\n"
         );
-        
-    def("multiBinaryErosion",
-        registerConverters(&pythonMultiBinaryErosion<4, bool>),
-        (arg("volume"), arg("radius"), arg("out")=object()),
-        "Likewise for a bool array.\n");
-
-    def("multiBinaryDilation",
-        registerConverters(&pythonMultiBinaryDilation<4, UInt8>),
-        (arg("volume"), arg("radius"), arg("out")=object()),
-       "Binary dilation on a 3D scalar or multiband uint8 array.\n"
-       "\n"
-       "This function applies a flat circular dilation operator with a given radius. "
-       "The operation is isotropic. The input is a uint8 or boolean multi-dimensional array "
-       "where non-zero pixels represent foreground and zero pixels represent background. "
-       "This function also works for multiband arrays, it is then executed on every band.\n"
-       "\n"
-       "For details see multiBinaryDilation_ in the C++ documentation.\n"
+
+    multidef("multiBinaryDilation",
+        pyMultiBinaryDilation<3, 4, UInt8, bool>().installFallback(),
+        (arg("array"),
+         arg("radius"),
+         arg("out")=object()),
+        "\n"
+        "Binary dilation on a scalar or multiband array (up to 3D, uint8 or bool).\n"
+        "Multiple channels are treated independently.\n"
+        "\n"
+        "This function applies a flat circular dilation operator with a given radius.\n"
+        "The operation is isotropic. The input is a uint8 or boolean multi-dimensional\n"
+        "array where non-zero elements represent foreground and zero elements represent\n"
+        "background.\n"
+        "\n"
+        "For details see multiBinaryDilation_ in the C++ documentation.\n"
        );
-       
-    def("multiBinaryDilation",
-        registerConverters(&pythonMultiBinaryDilation<4, bool>),
-        (arg("volume"), arg("radius"), arg("out")=object()),
-        "Likewise for bool arrays.\n");
-    
-    def("multiBinaryOpening",
-        registerConverters(&pythonMultiBinaryOpening<4, UInt8>),
-        (arg("volume"), arg("radius"), arg("out")=object()),
-        "Binary opening on a 3D scalar or multiband uint8 array.\n"
+
+    multidef("multiBinaryOpening",
+        pyMultiBinaryOpening<3, 4, UInt8, bool>().installFallback(),
+        (arg("array"),
+         arg("radius"),
+         arg("out")=object()),
         "\n"
-        "This function applies a flat circular opening operator (sequential erosion "
-        "and dilation) with a given radius. The operation is isotropic. "
-        "The input is a uint8 or boolean multi-dimensional array where non-zero pixels represent "
-        "foreground and zero pixels represent background. "
-        "This function also works for multiband arrays, it is then executed on every band.\n"
+        "Binary opening on a scalar or multiband array (up to 3D, uint8 or bool).\n"
+        "Multiple channels are treated independently.\n"
+        "\n"
+        "This function applies a flat circular opening operator (sequential erosion\n"
+        "and dilation) with a given radius. The operation is isotropic. The input is a\n"
+        "uint8 or boolean multi-dimensional array where non-zero elements represent\n"
+        "foreground and zero elements represent background.\n"
         "\n"
         "For details see vigra C++ documentation (multiBinaryDilation_ and multiBinaryErosion_).\n"
         );
-        
-    def("multiBinaryOpening",
-        registerConverters(&pythonMultiBinaryOpening<4, bool>),
-        (arg("volume"), arg("radius"), arg("out")=object()),
-        "Likewise for a bool array.\n");
-        
-    def("multiBinaryClosing",
-        registerConverters(&pythonMultiBinaryClosing<4, UInt8>),
-        (arg("volume"), arg("radius"), arg("out")=object()),
-        "Binary closing on a 3D scalar or multiband uint8 array.\n"
+
+    multidef("multiBinaryClosing",
+        pyMultiBinaryClosing<3, 4, UInt8, bool>().installFallback(),
+        (arg("array"),
+         arg("radius"),
+         arg("out")=object()),
         "\n"
-        "This function applies a flat circular opening operator (sequential dilation "
-        "and erosion) with a given radius. The operation is isotropic. "
-        "The input is a uint8 or boolean multi-dimensional array where non-zero pixels represent "
-        "foreground and zero pixels represent background. "
-        "This function also works for multiband arrays, it is then executed on every band.\n"
+        "Binary closing on a scalar or multiband array (up to 3D, uint8 or bool).\n"
+        "Multiple channels are treated independently.\n"
+        "\n"
+        "This function applies a flat circular closing operator (sequential dilation\n"
+        "and erosion) with a given radius. The operation is isotropic. The input is a\n"
+        "uint8 or boolean multi-dimensional array where non-zero elements represent\n"
+        "foreground and zero elements represent background.\n"
         "\n"
         "For details see vigra C++ documentation (multiBinaryDilation_ and multiBinaryErosion_).\n"
         );
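
A minimal Python usage sketch of the binary morphology bindings registered above, assuming the usual vigranumpy layout where these functions live in vigra.filters (data and shapes are illustrative only):

    import numpy as np
    import vigra

    # Hypothetical 3D binary mask: non-zero = foreground (uint8 or bool).
    mask = (np.random.rand(64, 64, 32) > 0.7).astype(np.uint8)

    # Flat circular structuring element of radius 2, applied isotropically.
    eroded  = vigra.filters.multiBinaryErosion(mask, radius=2)
    dilated = vigra.filters.multiBinaryDilation(mask, radius=2)
    opened  = vigra.filters.multiBinaryOpening(mask, radius=2)   # erosion, then dilation
    closed  = vigra.filters.multiBinaryClosing(mask, radius=2)   # dilation, then erosion
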
-        
-    def("multiBinaryClosing",
-        registerConverters(&pythonMultiBinaryClosing<4, bool>),
-        (arg("volume"), arg("radius"), arg("out")=object()),
-        "Likewise for a bool array.\n");
-    
-    def("multiGrayscaleErosion",
-        registerConverters(&pythonMultiGrayscaleErosion<4,UInt8>),
-        (arg("volume"), arg("sigma"), arg("out")=object()),
-        "Parabolic grayscale erosion on a 3D scalar or multiband uint8 array.\n"
+
+    multidef("multiGrayscaleErosion",
+        pyMultiGrayscaleErosion<3, 4, UInt8, float, double>().installFallback(),
+        (arg("array"),
+         arg("sigma"),
+         arg("out")=object()),
         "\n"
-        "This function applies a parabolic erosion operator with a given spread 'sigma' on a grayscale array. "
-        "The operation is isotropic. The input is a grayscale multi-dimensional array. "
-        "This function also works for multiband arrays, it is then executed on every band.\n"
+        "Parabolic grayscale erosion on a scalar or multiband array (up to 3D).\n"
+        "Multiple channels are treated independently.\n"
+        "\n"
+        "This function applies a parabolic erosion operator with a given spread 'sigma'\n"
+        "on a grayscale array. The operation is isotropic.\n"
         "\n"
         "For details see multiGrayscaleErosion_ in the C++ documentation.\n"
         );
-                
-    def("multiGrayscaleErosion",
-        registerConverters(&pythonMultiGrayscaleErosion<4,float>),
-        (arg("volume"), arg("sigma"), arg("out")=object()),
-        "Likewise for a 3D float array.\n");
-
-    def("multiGrayscaleErosion",
-        registerConverters(&pythonMultiGrayscaleErosion<3,UInt8>),
-        (arg("image"), arg("sigma"), arg("out")=object()),
-        "Likewise for a 2D uint8 array.\n");
-    
-    def("multiGrayscaleErosion",
-        registerConverters(&pythonMultiGrayscaleErosion<3,float>),
-        (arg("image"), arg("sigma"), arg("out")=object()),
-        "Likewise for a 2D float array.\n");
-
-    def("multiGrayscaleDilation",
-        registerConverters(&pythonMultiGrayscaleDilation<4,UInt8>),
-        (arg("volume"), arg("sigma"), arg("out")=object()),
-        "Parabolic grayscale dilation on multi-dimensional arrays.\n"
+
+    multidef("multiGrayscaleDilation",
+        pyMultiGrayscaleDilation<3, 4, UInt8, float, double>().installFallback(),
+        (arg("array"),
+         arg("sigma"),
+         arg("out")=object()),
         "\n"
-        "This function applies a parabolic dilation operator with a given spread 'sigma' on a grayscale array. "
-        "The operation is isotropic. The input is a grayscale multi-dimensional array. "
-        "This function also works for multiband arrays, it is then executed on every band.\n"
+        "Parabolic grayscale dilation on a scalar or multiband array (up to 3D).\n"
+        "Multiple channels are treated independently.\n"
+        "\n"
+        "This function applies a parabolic dilation operator with a given spread 'sigma'\n"
+        "on a grayscale array. The operation is isotropic.\n"
         "\n"
         "For details see multiGrayscaleDilation_ in the C++ documentation.\n"
         );
-        
-    def("multiGrayscaleDilation",
-        registerConverters(&pythonMultiGrayscaleDilation<4,float>),
-        (arg("volume"), arg("sigma"), arg("out")=object()),
-        "Likewise for a 3D float array.\n");
-
-    def("multiGrayscaleDilation",
-        registerConverters(&pythonMultiGrayscaleDilation<3,UInt8>),
-        (arg("image"), arg("sigma"), arg("out")=object()),
-        "Likewise for a 2D uint8 array.\n");
-
-    def("multiGrayscaleDilation",
-        registerConverters(&pythonMultiGrayscaleDilation<3,float>),
-        (arg("image"), arg("sigma"), arg("out")=object()));
-
-    def("multiGrayscaleOpening",
-        registerConverters(&pythonMultiGrayscaleOpening<4,UInt8>),
-        (arg("volume"), arg("sigma"), arg("out")=object()),
-        "Parabolic grayscale opening on multi-dimensional arrays.\n"
+
+    multidef("multiGrayscaleOpening",
+        pyMultiGrayscaleOpening<3, 4, UInt8, float, double>().installFallback(),
+        (arg("array"),
+         arg("sigma"),
+         arg("out")=object()),
+        "\n"
+        "Parabolic grayscale opening on a scalar or multiband array (up to 3D).\n"
+        "Multiple channels are treated independently.\n"
         "\n"
-        "This function applies a parabolic opening (sequential erosion and dilation) "
-        "operator with a given spread 'sigma' on a grayscale array. "
-        "The operation is isotropic. The input is a grayscale multi-dimensional array. "
-        "This function also works for multiband arrays, it is then executed on every band.\n"
+        "This function applies a parabolic opening (sequential erosion and dilation)\n"
+        "operator with a given spread 'sigma' on a grayscale array. The operation is\n"
+        "isotropic.\n"
         "\n"
         "For details see multiGrayscaleDilation_ and multiGrayscaleErosion_ in the C++ documentation.\n"
         );
 
-    def("multiGrayscaleOpening",
-        registerConverters(&pythonMultiGrayscaleOpening<4,float>),
-        (arg("volume"), arg("sigma"), arg("out")=object()),
-        "Likewise for a 3D float array.\n");
-
-    def("multiGrayscaleOpening",
-        registerConverters(&pythonMultiGrayscaleOpening<3,UInt8>),
-        (arg("image"), arg("sigma"), arg("out")=object()),
-        "Likewise for a 2D uint8 array.\n");
-
-    def("multiGrayscaleOpening",
-        registerConverters(&pythonMultiGrayscaleOpening<3,float>),
-        (arg("image"), arg("sigma"), arg("out")=object()));
-
-    def("multiGrayscaleClosing",
-        registerConverters(&pythonMultiGrayscaleClosing<4,UInt8>),
-        (arg("volume"), arg("sigma"), arg("out")=object()),
-        "Parabolic grayscale closing on multi-dimensional arrays.\n"
+    multidef("multiGrayscaleClosing",
+        pyMultiGrayscaleClosing<3, 4, UInt8, float, double>().installFallback(),
+        (arg("array"),
+         arg("sigma"),
+         arg("out")=object()),
+        "\n"
+        "Parabolic grayscale closing on a scalar or multiband array (up to 3D).\n"
+        "Multiple channels are treated independently.\n"
         "\n"
-        "This function applies a parabolic closing (sequential dilation and erosion) "
-        "operator with a given spread 'sigma' on a grayscale array. "
-        "The operation is isotropic. The input is a grayscale multi-dimensional array. "
-        "This function also works for multiband arrays, it is then executed on every band.\n"
+        "This function applies a parabolic closing (sequential dilation and erosion)\n"
+        "operator with a given spread 'sigma' on a grayscale array. The operation is\n"
+        "isotropic.\n"
         "\n"
         "For details see multiGrayscaleDilation_ and multiGrayscaleErosion_ in the C++ documentation.\n"
         );
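
A corresponding sketch for the parabolic grayscale operators registered above, under the same assumptions (vigra.filters module path, illustrative data):

    import numpy as np
    import vigra

    # Hypothetical 2D float32 image.
    img = np.random.rand(128, 128).astype(np.float32)

    # 'sigma' is the spread of the parabolic structuring function.
    ero = vigra.filters.multiGrayscaleErosion(img, sigma=2.0)
    dil = vigra.filters.multiGrayscaleDilation(img, sigma=2.0)
    opn = vigra.filters.multiGrayscaleOpening(img, sigma=2.0)
    cls = vigra.filters.multiGrayscaleClosing(img, sigma=2.0)
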
-    
-    def("multiGrayscaleClosing",
-        registerConverters(&pythonMultiGrayscaleClosing<4,float>),
-        (arg("volume"), arg("sigma"), arg("out")=object()),
-        "Likewise for a 3D float array.\n");
-
-    def("multiGrayscaleClosing",
-        registerConverters(&pythonMultiGrayscaleClosing<3,UInt8>),
-        (arg("image"), arg("sigma"), arg("out")=object()),
-        "Likewise for a 2D uint8 array.\n");
-
-    def("multiGrayscaleClosing",
-        registerConverters(&pythonMultiGrayscaleClosing<3,float>),
-        (arg("image"), arg("sigma"), arg("out")=object()));
 
     def("distanceTransform2D",
         registerConverters(&pythonDistanceTransform2D<float, float>),
-        (arg("image"), 
-         arg("background")=true, 
+        (arg("image"),
+         arg("background")=true,
          arg("norm")=2,
-         arg("pixel_pitch") = ArrayVector<double>(), 
+         arg("pixel_pitch") = ArrayVector<double>(),
          arg("out")=python::object()),
         "Compute the distance transform of a 2D scalar float image.\n"
         "All pixels with a value of 0.0 are considered to be background pixels,\n"
@@ -955,78 +935,62 @@ void defineMorphology()
 
     def("distanceTransform2D",
         registerConverters(&pythonDistanceTransform2D<UInt8,float>),
-        (arg("image"), 
-         arg("background")=true, 
+        (arg("image"),
+         arg("background")=true,
          arg("norm")=2,
-         arg("pixel_pitch") = ArrayVector<double>(), 
+         arg("pixel_pitch") = ArrayVector<double>(),
          arg("out")=python::object()),
         "Likewise for a 2D uint8 input array.\n");
 
-    def("distanceTransform3D",
-        registerConverters(&pythonDistanceTransformND<3, float>),
-        (arg("array"), 
-         arg("background") = true, 
-         arg("pixel_pitch") = ArrayVector<double>(), 
+    multidef("distanceTransform",
+        pyDistanceTransform<2, 3, npy_uint32, float>().installFallback(),
+        (arg("array"),
+         arg("background") = true,
+         arg("pixel_pitch") = ArrayVector<double>(),
          arg("out")=python::object()),
-        "Compute the Euclidean distance transform of a 3D scalar float volume.\n"
-        "All voxels with a value of 0.0 are considered to be background voxels,\n"
-        "while all voxels with a nonzero value are considered to be foreground voxels.\n"
+        "\n"
+        "Compute the Euclidean distance transform of a scalar array (up to 3D).\n"
+        "\n"
+        "All pixels with a value of 0.0 are considered background,\n"
+        "while all pixels with a nonzero value are considered foreground.\n"
         "The parameter 'background' is a Boolean scalar that specifies whether to\n"
-        "compute the distance of all background voxels to the nearest foreground voxel\n"
+        "compute the distance of all background pixels to the nearest foreground pixel\n"
         "(if it is 'True', default) or vice versa (if it is 'False').\n"
-        "Hence in the destination volume, for background==True all background voxels\n"
-        "will be assigned their distance value, while all foreground voxels will be assigned 0.\n"
-        "For background==False, it is exactly the other way around.\n\n"
+        "Hence in the destination array, for background==True all background elements\n"
+        "will be assigned their distance value, while all foreground elements will be assigned 0.\n"
+        "For background==False, it is exactly the other way around.\n"
+        "\n"
         "If 'pixel_pitch' is given, it must contain the pixel distance along each axis.\n"
         "They are then used to compute the distance anisotropically. If no 'pixel_pitch' is\n"
         "given, the data is treated isotropically with unit distance between pixels.\n"
         "\n"
         "For more details see separableMultiDistance_ in the vigra C++ documentation.\n");
-        
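
A short sketch of the dimension-generic distanceTransform binding documented above (module path vigra.filters assumed; data illustrative):

    import numpy as np
    import vigra

    # 0.0 = background, non-zero = foreground.
    vol = np.zeros((50, 50, 20), dtype=np.float32)
    vol[20:30, 20:30, 5:15] = 1.0

    # Distance of every background voxel to the nearest foreground voxel.
    dist = vigra.filters.distanceTransform(vol, background=True)

    # Anisotropic data: pass the pitch along each axis.
    dist_aniso = vigra.filters.distanceTransform(vol, pixel_pitch=[1.0, 1.0, 2.5])
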
-    def("vectorDistanceTransform",
-        registerConverters(&pythonVectorDistanceTransformND<2, float>),
-        (arg("array"), 
-         arg("background") = true, 
-         arg("pixel_pitch") = ArrayVector<double>(), 
+
+    multidef("vectorDistanceTransform",
+        pyVectorDistanceTransform<2, 3, npy_uint32, float>().installFallback(),
+        (arg("array"),
+         arg("background") = true,
+         arg("pixel_pitch") = ArrayVector<double>(),
          arg("out")=python::object()),
-        "Perform a Euclidean distance transform and return, for each background pixel, the\n"
-        "difference vector to the nearest foreground pixel (when 'background=True', the\n"
-        "default), or the other way around (when 'background=False').\n"
-        "Otherwise, this function behaves like :func:`distanceTransform2D` (which just\n"
+        "\n"
+        "Compute the Euclidean vector distance transform of a scalar array (up to 3D).\n"
+        "The function returns an array with as many channels as the input has dimensions.\n"
+        "\n"
+        "In contrast to the plain distance transform, this function returns the difference\n"
+        "vector of each background pixel to the nearest foreground pixel (when\n"
+        "'background=True', the default), or the other way around (when 'background=False').\n"
+        "Otherwise, this function behaves like :func:`distanceTransform` (which just\n"
         "returns the magnitude of the difference vectors).\n"
         "\n"
         "For more detailed documentation, see :func:`distanceTransform2D` and\n" "separableVectorDistance_ in the vigra C++ documentation.\n");
-        
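
The vector variant registered above adds a channel axis holding the difference vectors; a sketch under the same assumptions:

    import numpy as np
    import vigra

    vol = np.zeros((40, 40, 10), dtype=np.float32)
    vol[10:20, 10:20, 3:7] = 1.0

    # 'vec' has one extra channel axis holding the 3 vector components
    # (the difference vector from each background voxel to its nearest
    # foreground voxel).
    vec = vigra.filters.vectorDistanceTransform(vol)
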
-    def("vectorDistanceTransform",
-        registerConverters(&pythonVectorDistanceTransformND<2, npy_uint32>),
-        (arg("array"), 
-         arg("background") = true, 
-         arg("pixel_pitch") = ArrayVector<double>(), 
-         arg("out")=python::object()),
-        "Likewise for uint32 images.\n");
-        
-    def("vectorDistanceTransform",
-        registerConverters(&pythonVectorDistanceTransformND<3, float>),
-        (arg("array"), 
-         arg("background") = true, 
-         arg("pixel_pitch") = ArrayVector<double>(), 
-         arg("out")=python::object()),
-        "Likewise for 3D arrays.\n");
-        
-    def("vectorDistanceTransform",
-        registerConverters(&pythonVectorDistanceTransformND<3, npy_uint32>),
-        (arg("array"), 
-         arg("background") = true, 
-         arg("pixel_pitch") = ArrayVector<double>(), 
+
+    multidef("boundaryDistanceTransform",
+        pyboundaryDistanceTransform<2, 3, npy_uint32, float>().installFallback(),
+        (arg("array"),
+         arg("array_border_is_active") = false,
+         arg("boundary") = "InterpixelBoundary",
          arg("out")=python::object()),
-        "Likewise for 3D uint32 arrays.\n");
-        
-    def("boundaryDistanceTransform",
-       registerConverters(&pythonboundaryDistanceTransform<2, npy_uint32>),
-       (arg("image"),
-        arg("array_border_is_active") = false,
-        arg("boundary") = "InterpixelBoundary",
-        arg("out")=python::object()),
+        "\n"
         "Compute the Euclidean distance transform of all regions in a 2D or 3D label\n"
         "array with respect to the region boundaries. The 'boundary' parameter must be\n"
         "one of the following strings:\n\n"
@@ -1041,36 +1005,13 @@ void defineMorphology()
         "\n"
         "For more details see boundaryMultiDistance_ in the vigra C++ documentation.\n");
 
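
A sketch of the boundaryDistanceTransform binding on a label image (module path vigra.filters assumed; data illustrative):

    import numpy as np
    import vigra

    # Two regions separated by a vertical boundary.
    labels = np.ones((100, 100), dtype=np.uint32)
    labels[:, 50:] = 2

    # Distance of every pixel to the nearest region boundary.
    bdist = vigra.filters.boundaryDistanceTransform(labels,
                                                    boundary="InterpixelBoundary")
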
-    def("boundaryDistanceTransform",
-       registerConverters(&pythonboundaryDistanceTransform<3, npy_uint32>),
-       (arg("volume"),
-        arg("array_border_is_active") = false,
-        arg("boundary") = "InterpixelBoundary",
-        arg("out")=python::object()),
-         "Likewise for a 3D uint32 input array.\n");
-
-    def("boundaryDistanceTransform",
-       registerConverters(&pythonboundaryDistanceTransform<2, float>),
-       (arg("image"),
-        arg("array_border_is_active") = false,
-        arg("boundary") = "InterpixelBoundary",
-        arg("out")=python::object()),
-         "Likewise for a 2D float32 input array.\n");
-
-    def("boundaryDistanceTransform",
-       registerConverters(&pythonboundaryDistanceTransform<3, float>),
-       (arg("volume"),
-        arg("array_border_is_active") = false,
-        arg("boundary") = "InterpixelBoundary",
-        arg("out")=python::object()),
-         "Likewise for a 3D float32 input array.\n");
-
-    def("boundaryVectorDistanceTransform",
-       registerConverters(&pythonboundaryVectorDistanceTransform<2, npy_uint32>),
-       (arg("image"),
+    multidef("boundaryVectorDistanceTransform",
+       pyboundaryVectorDistanceTransform<2, 3, npy_uint32, float>().installFallback(),
+       (arg("array"),
         arg("array_border_is_active") = false,
         arg("boundary") = "InterpixelBoundary",
         arg("out")=python::object()),
+        "\n"
         "Compute the Euclidean distance transform of all regions in a 2D or 3D label\n"
         "array with respect to the region boundaries and return, in each pixel,\n"
         "the difference vector to the nearest boundary point.\n"
@@ -1087,102 +1028,31 @@ void defineMorphology()
         "For more details see :func:`boundaryDistanceTransform` and boundaryVectorDistance_ in\n"
         "the vigra C++ documentation.\n");
 
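
The vector-valued counterpart registered above can be sketched the same way (same assumptions):

    import numpy as np
    import vigra

    labels = np.ones((100, 100), dtype=np.uint32)
    labels[:, 50:] = 2

    # Per-pixel difference vector to the nearest boundary point,
    # one channel per spatial dimension.
    bvec = vigra.filters.boundaryVectorDistanceTransform(labels)
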
-    def("boundaryVectorDistanceTransform",
-       registerConverters(&pythonboundaryVectorDistanceTransform<3, npy_uint32>),
-       (arg("volume"),
-        arg("array_border_is_active") = false,
-        arg("boundary") = "InterpixelBoundary",
-        arg("out")=python::object()),
-         "Likewise for a 3D uint32 input array.\n");
-
-    def("boundaryVectorDistanceTransform",
-       registerConverters(&pythonboundaryVectorDistanceTransform<2, float>),
-       (arg("image"),
-        arg("array_border_is_active") = false,
-        arg("boundary") = "InterpixelBoundary",
-        arg("out")=python::object()),
-         "Likewise for a 2D float32 input array.\n");
-
-    def("boundaryVectorDistanceTransform",
-       registerConverters(&pythonboundaryVectorDistanceTransform<3, float>),
-       (arg("volume"),
-        arg("array_border_is_active") = false,
-        arg("boundary") = "InterpixelBoundary",
-        arg("out")=python::object()),
-         "Likewise for a 3D float32 input array.\n");
-
-    def("eccentricityTransform",
-        registerConverters(&pythonEccentricityTransform<2, UInt32, float>),
-        (arg("image"),
+    multidef("eccentricityTransform",
+        pyEccentricityTransform<2, 3, npy_uint8, npy_uint32, float>().installFallback(),
+        (arg("array"),
          arg("out")=python::object()),
-        "Compute the eccentricity transform of a 2D uint32 label array.\n\n"
+        "\n"
+        "Compute the eccentricity transform of a label array (2D or 3D).\n\n"
         "For more details see eccentricityTransformOnLabels_ in the vigra C++ documentation.\n");
 
-    def("eccentricityTransform",
-        registerConverters(&pythonEccentricityTransform<2, UInt8, float>),
-        (arg("image"),
-         arg("out")=python::object()),
-         "Likewise for a 2D uint8 input array.\n");
-
-    def("eccentricityTransform",
-        registerConverters(&pythonEccentricityTransform<3, UInt32, float>),
-        (arg("image"),
-         arg("out")=python::object()),
-         "Likewise for a 3D uint32 label array.\n");
-
-    def("eccentricityTransform",
-        registerConverters(&pythonEccentricityTransform<3, UInt8, float>),
-        (arg("image"),
-         arg("out")=python::object()),
-         "Likewise for a 3D uint8 input array.\n");
-
-    def("eccentricityCenters",
-        registerConverters(&pythonEccentricityCenters<2, UInt32>),
-        (arg("image")),
+    multidef("eccentricityCenters",
+        pyEccentricityCenters<2, 3, npy_uint8, npy_uint32, float>().installFallback(),
+        (arg("array")),
+         "\n"
          "Compute a list holding the eccentricity center of each region in\n"
-         "a 2D uint32 label array.\n\n"
+         "a label array (2D or 3D).\n\n"
          "For more details see eccentricityCenters_ in the vigra C++ documentation.\n");
 
-    def("eccentricityCenters",
-        registerConverters(&pythonEccentricityCenters<2, UInt8>),
-        (arg("image")),
-         "Likewise for a 2D uint8 input array.\n");
-
-    def("eccentricityCenters",
-        registerConverters(&pythonEccentricityCenters<3, UInt32>),
-        (arg("image")),
-         "Likewise for a 3D uint32 label array.\n");
-
-    def("eccentricityCenters",
-        registerConverters(&pythonEccentricityCenters<3, UInt8>),
-        (arg("image")),
-         "Likewise for a 3D uint8 array.\n");
-
-    def("eccentricityTransformWithCenters",
-        registerConverters(&pythonEccentricityTransformWithCenters<2, UInt32, float>),
-        (arg("image"),
+    multidef("eccentricityTransformWithCenters",
+        pyEccentricityTransformWithCenters<2, 3, npy_uint8, npy_uint32, float>().installFallback(),
+        (arg("array"),
          arg("out")=python::object()),
-         "Compute the eccentricity transform and eccentricity centers of a 2D uint32 label array.\n"
          "\n"
-         "Returns the tuple (ecc_image, centers).\n");
-
-    def("eccentricityTransformWithCenters",
-        registerConverters(&pythonEccentricityTransformWithCenters<2, UInt8, float>),
-        (arg("image"),
-         arg("out")=python::object()),
-         "Likewise for a 2D uint8 input array.\n");
-
-    def("eccentricityTransformWithCenters",
-        registerConverters(&pythonEccentricityTransformWithCenters<3, UInt32, float>),
-        (arg("image"),
-         arg("out")=python::object()),
-         "Likewise for a 3D uint32 label array.\n");
-
-    def("eccentricityTransformWithCenters",
-        registerConverters(&pythonEccentricityTransformWithCenters<3, UInt8, float>),
-        (arg("image"),
-         arg("out")=python::object()),
-         "Likewise for a 2D uint8 input array.\n");
+         "Compute the eccentricity transform and eccentricity centers of a label array (2D or 3D).\n"
+         "\n"
+         "Returns the tuple (ecc_image, centers). See :func:`eccentricityTransform` and\n"
+         ":func:`eccentricityCenters`.\n");
 
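
A sketch covering the three eccentricity bindings registered above (module path vigra.filters assumed; data illustrative):

    import numpy as np
    import vigra

    labels = np.ones((100, 100), dtype=np.uint32)
    labels[:, 50:] = 2

    ecc     = vigra.filters.eccentricityTransform(labels)    # float32 distances
    centers = vigra.filters.eccentricityCenters(labels)      # one center per region
    ecc2, centers2 = vigra.filters.eccentricityTransformWithCenters(labels)
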
     def("skeletonizeImage",
         registerConverters(&pySkeletonizeImage<2, UInt32>),
diff --git a/vigranumpy/src/core/multi_array_chunked.cxx b/vigranumpy/src/core/multi_array_chunked.cxx
index 00ab168..2c03e64 100644
--- a/vigranumpy/src/core/multi_array_chunked.cxx
+++ b/vigranumpy/src/core/multi_array_chunked.cxx
@@ -44,6 +44,7 @@
 #include <vigra/multi_array_chunked_hdf5.hxx>
 #endif
 #include <vigra/compression.hxx>
+#include <vigra/python_utility.hxx>
 #include <boost/python.hpp>
 #include <boost/python/slice.hpp>
 
@@ -144,14 +145,14 @@ bindNumpyArray(NumpyAnyArray self, Shape const & stop)
     if(stop == Shape())
         return python::object(self);
 
-    python_ptr func(PyString_FromString("__getitem__"), python_ptr::keep_count);
+    python_ptr func(pythonFromData("__getitem__"));
     pythonToCppException(func);
     python_ptr index(PyTuple_New(stop.size()), python_ptr::keep_count);
     pythonToCppException(index);
     for(unsigned int k=0; k<stop.size(); ++k)
     {
         PyObject * item = stop[k] == 0
-                            ? PyInt_FromLong(0)
+                            ? pythonFromData(0)
                             : PySlice_New(0,0,0);
         pythonToCppException(item);
         PyTuple_SET_ITEM((PyTupleObject *)index.ptr(), k, item);
@@ -251,8 +252,12 @@ ptr_to_python(Array * array, python::object axistags)
     if(axistags != python::object())
     {
         AxisTags at;
+#if PY_MAJOR_VERSION < 3
         if(PyString_Check(axistags.ptr()))
-            at = AxisTags(python::extract<std::string>(axistags)());
+#else
+        if(PyUnicode_Check(axistags.ptr()))
+#endif
+            at = AxisTags(python::extract<std::string>(axistags)());
         else
             at = AxisTags(python::extract<AxisTags const &>(axistags)());
         int N = Array::shape_type::static_size;
@@ -642,9 +647,10 @@ void defineChunkedArrayImpl()
     docstring_options doc_options(true, false, false);
 
     typedef ChunkedArray<N, T> Array;
-    class_<Array, boost::noncopyable>("ChunkedArray",
+    class_<Array, boost::noncopyable>("ChunkedArrayBase",
          "\n"
-         "Base class for chunked arrays.\n\n",
+         "Base class for chunked arrays; instances can only be created via factory functions\n"
+         "like :func:`~vigra.ChunkedArrayCompressed` or :func:`~vigra.ChunkedArrayHDF5`.\n\n",
          no_init)
         .add_property("shape", &ChunkedArray_shape<N, T>,
              "\nshape of the array.\n")
@@ -678,29 +684,61 @@ void defineChunkedArrayImpl()
         .def("checkoutSubarray",
              registerConverters(&ChunkedArray_checkoutSubarray<N, T>),
              (arg("start"), arg("stop"), arg("out")=python::object()),
-             "\nobtain a copy of the specified subarray.\n")
+             "\n    checkoutSubarray(start, stop, out=None) => array\n\n"
+             "Obtain a copy of the subarray in the ROI '[start, stop)'.\n"
+             "If 'out' is given, it must have matching shape and will be used\n"
+             "to store the data instead of allocating new storage for 'array'.\n\n"
+             "The index operator provides a shorthand for this function, e.g.\n"
+             "for a 2-dimensional array you can equivalently write::\n\n"
+             "    roi = chunked_array.checkoutSubarray((5,10), (12,19))\n"
+             "    roi = chunked_array[5:12, 10:19]\n\n"
+             "to read the ROI from 'start=(5,10)' to 'stop=(12,19)' (exclusive).\n"
+             "Note that 'roi' is a copy, so overwriting it has no effect on the\n"
+             "chunked array. Use 'commitSubarray()' to overwrite data.\n")
         .def("commitSubarray",
              registerConverters(&ChunkedArray_commitSubarray<N, T>),
              (arg("start"), arg("array")),
-             "\nwrite the given array at offset 'start'.\n")
+             "\n    commitSubarray(start, array)\n\n"
+             "Write the given 'array' at offset 'start'.\n"
+             "The index operator provides a shorthand for this function, e.g.\n"
+             "for a 2-dimensional array you can equivalently write::\n\n"
+             "    chunked_array.commitSubarray((5,10), roi)\n"
+             "    chunked_array[5:12, 10:19] = roi\n\n"
+             "to write an ROI with shape (7,9) starting at 'start=(5,10)'.\n")
         .def("releaseChunks",
              &Array::releaseChunks,
              (arg("start"), arg("stop"),arg("destroy")=false),
+             "\n    releaseChunks(start, stop, destroy=False)\n\n"
              "\nrelease or destroy all chunks that are completely contained in [start, stop).\n")
-        .def("__getitem__", &ChunkedArray_getitem<N, T>)
+        .def("__getitem__", &ChunkedArray_getitem<N, T>,
+             "\nRead data from a chunked array with the usual index or slicing syntax::\n\n"
+             "    value = chunked_array[5, 20]\n"
+             "    roi   = chunked_array[5:12, 10:19]\n\n"
+             "Note that the roi is not a slice view of the original array\n"
+             "(as in numpy.ndarray), but a copy of the data.\n")
         .def("__setitem__", &ChunkedArray_setitem<N, T>)
-        .def("__setitem__", &ChunkedArray_setitem2<N, T>)
+        .def("__setitem__", &ChunkedArray_setitem2<N, T>,
+             "\nWrite data to a chunked array with the usual index or slicing syntax::\n\n"
+             "    chunked_array[5, 20] = value\n"
+             "    chunked_array[5:12, 10:19] = roi\n")
         ;
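
The docstrings added above describe the ROI read/write interface of chunked arrays; a minimal sketch of that interface (the factory spelling follows the docstrings, everything else is illustrative):

    import numpy as np
    import vigra

    ca = vigra.ChunkedArrayCompressed((200, 300))   # dtype defaults to float32

    # Write a block at offset (5, 10), then read it back as a copy.
    block = np.random.rand(7, 9).astype(np.float32)
    ca.commitSubarray((5, 10), block)
    roi = ca.checkoutSubarray((5, 10), (12, 19))

    # Equivalent slicing shorthand; reads return copies, not views.
    ca[5:12, 10:19] = block
    roi = ca[5:12, 10:19]
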
 
 #ifdef HasHDF5
     typedef ChunkedArrayHDF5<N, T> ArrayHDF5;
-    class_<ChunkedArrayHDF5<N, T>, bases<Array>, boost::noncopyable>("ChunkedArrayHDF5", no_init)
-        .def("close", &ArrayHDF5::close)
-        .def("flush", &ArrayHDF5::flushToDisk)
+    class_<ChunkedArrayHDF5<N, T>, bases<Array>, boost::noncopyable>(
+         "ChunkedArrayHDF5Base",
+         "\n"
+         "Base class for HDF5-based chunked arrays; instances can only be created via\n"
+         "the factory function :func:`~vigra.ChunkedArrayHDF5`.\n\n",
+         no_init)
+        .def("close", &ArrayHDF5::close,
+             "\nFlush data to disk and close the underlying HDF5 file.\n")
+        .def("flush", &ArrayHDF5::flushToDisk,
+             "\nFlush data to disk.\n")
         .add_property("filename", &ArrayHDF5::fileName,
-             "\nname of the file backend of this array.\n")
+             "\nName of the file backend of this array.\n")
         .add_property("dataset_name", &ArrayHDF5::datasetName,
-             "\nname of the dataset backend of this array.\n")
+             "\nName of the dataset backend of this array.\n")
         .add_property("readonly", &ArrayHDF5::isReadOnly,
              "\nTrue if this array is read-only.\n")
     ;
@@ -708,7 +746,7 @@ void defineChunkedArrayImpl()
 }
 
 template <unsigned int N>
-void defineChunkedArrayFactories()
+void defineChunkedArrayFactories(bool export_docu=false)
 {
     using namespace boost::python;
     typedef typename MultiArrayShape<N>::type shape_type;
@@ -716,16 +754,84 @@ void defineChunkedArrayFactories()
     docstring_options doc_options(true, false, false);
 
     def("ChunkedArrayFull", &construct_ChunkedArrayFull<N>,
-        (arg("shape"), arg("dtype")=defaultDtype(), arg("fill_value")=0.0, arg("axistags")=python::object()));
-    def("ChunkedArrayLazy", &construct_ChunkedArrayLazy<N>,
         (arg("shape"), arg("dtype")=defaultDtype(),
-         arg("chunk_shape")=shape_type(), arg("fill_value")=0.0, arg("axistags")=python::object()));
+         arg("fill_value")=0.0, arg("axistags")=python::object()),
+        !export_docu ? "" :
+        "Create a chunked array (type :class:`~vigra.vigranumpycore.ChunkedArrayBase`)\n"
+        "backed by a plain (consecutive) array::\n\n"
+        "  ChunkedArrayFull(shape, dtype=float32, fill_value=0, axistags=None)\n\n"
+        "'shape' can be up to 5-dimensional.\n\n"
+        "'dtype' can currently be ``uint8``, ``uint32``, and ``float32``.\n\n"
+        "'fill_value' is returned for all array elements that have never been written.\n\n"
+        "For more details see ChunkedArray_ in the vigra C++ documentation.\n");
+    def("ChunkedArrayLazy", &construct_ChunkedArrayLazy<N>,
+        (arg("shape"), arg("dtype")=defaultDtype(), arg("chunk_shape")=shape_type(),
+         arg("fill_value")=0.0, arg("axistags")=python::object()),
+        !export_docu ? "" :
+        "Create a chunked array (type :class:`~vigra.vigranumpycore.ChunkedArrayBase`)\n"
+        "backed by one plain array for each chunk (rectangular data block)::\n\n"
+        "  ChunkedArrayLazy(shape, dtype=float32, chunk_shape=None, fill_value=0, axistags=None)\n\n"
+        "The individual chunks are allocated lazily upon first write. Reads before the\n"
+        "first write will simply return the 'fill_value' without allocating memory.\n"
+        "All allocated chunks reside in memory.\n\n"
+        "'shape' can be up to 5-dimensional.\n\n"
+        "'chunk_shape' must have the same dimension as 'shape', and its elements must\n"
+        "be powers of 2.\n\n"
+        "'dtype' can currently be ``uint8``, ``uint32``, and ``float32``.\n\n"
+        "'fill_value' is returned for all array elements that have never been written.\n\n"
+        "For more details see ChunkedArray_ in the vigra C++ documentation.\n");
     def("ChunkedArrayCompressed", &construct_ChunkedArrayCompressed<N>,
-        (arg("shape"), arg("compression")=LZ4, arg("dtype")=defaultDtype(), arg("chunk_shape")=shape_type(),
-         arg("cache_max")=-1, arg("fill_value")=0.0, arg("axistags")=python::object()));
+        (arg("shape"), arg("compression")=LZ4, arg("dtype")=defaultDtype(),
+         arg("chunk_shape")=shape_type(), arg("cache_max")=-1, arg("fill_value")=0.0,
+         arg("axistags")=python::object()),
+        !export_docu ? "" :
+        "Create a chunked array (type :class:`~vigra.vigranumpycore.ChunkedArrayBase`)\n"
+        "backed by one plain array for each chunk (rectangular data block)::\n\n"
+        "  ChunkedArrayCompressed(shape, compression=LZ4, dtype=float32, chunk_shape=None, \n"
+        "                         cache_max=-1, fill_value=0, axistags=None)\n\n"
+        "The individual chunks are allocated lazily upon first write. Reads before the\n"
+        "first write will simply return the 'fill_value' without allocating memory.\n"
+        "All allocated chunks reside in memory, but may be compressed when not in use.\n"
+        "This is especially beneficial for highly compressible data like label images.\n\n"
+        "'shape' can be up to 5-dimensional.\n\n"
+        "'chunk_shape' must have the same dimension as 'shape', and its elements must\n"
+        "be powers of 2.\n\n"
+        "'dtype' can currently be ``uint8``, ``uint32``, and ``float32``.\n\n"
+        "'fill_value' is returned for all array elements that have never been written.\n\n"
+        "'compression' can be any of the flags defined in the :class:`~vigra.Compression` enum.\n\n"
+        "'cache_max' specifies how many uncompressed chunks may reside in memory at the\n"
+        "same time. If it is '-1', vigra will choose a sensible default, but other values\n"
+        "may better fit your data access patterns. This is a soft limit, i.e. may be\n"
+        "exceeded temporarily if more chunks are needed simultaneously in a single\n"
+        "operation.\n\n"
+        "For more details see ChunkedArray_ in the vigra C++ documentation.\n");
     def("ChunkedArrayTmpFile", &construct_ChunkedArrayTmpFile<N>,
         (arg("shape"), arg("dtype")=defaultDtype(), arg("chunk_shape")=shape_type(),
-         arg("cache_max")=-1, arg("path")="", arg("fill_value")=0.0, arg("axistags")=python::object()));
+         arg("cache_max")=-1, arg("path")="", arg("fill_value")=0.0,
+         arg("axistags")=python::object()),
+        !export_docu ? "" :
+        "Create a chunked array (type :class:`~vigra.vigranumpycore.ChunkedArrayBase`)\n"
+        "backed by a temporary file::\n\n"
+        "  ChunkedArrayTmpFile(shape, dtype=float32, chunk_shape=None, cache_max=-1,\n"
+        "                      path='', fill_value=0, axistags=None)\n\n"
+        "The individual chunks are allocated lazily upon first write. Reads before the\n"
+        "first write will simply return the 'fill_value' without allocating memory.\n"
+        "Unused chunks will be moved to the file to free their memory. The file is\n"
+        "automatically deleted when the object is deleted. Use :func:`~vigra.ChunkedArrayHDF5` if\n"
+        "you need persistent storage.\n\n"
+        "'shape' can be up to 5-dimensional.\n\n"
+        "'chunk_shape' must have the same dimension as 'shape', and its elements must\n"
+        "be powers of 2.\n\n"
+        "'dtype' can currently be ``uint8``, ``uint32``, and ``float32``.\n\n"
+        "'cache_max' specifies how many uncompressed chunks may reside in memory at the\n"
+        "same time. If it is '-1', vigra will choose a sensible default, but other values\n"
+        "may better fit your data access patterns. This is a soft limit, i.e. may be\n"
+        "exceeded temporarily if more chunks are needed simultaneously in a single\n"
+        "operation.\n\n"
+        "'fill_value' is returned for all array elements that have never been written.\n\n"
+        "'path' is the directory where the file is located (default: the system's TMP\n"
+        "directory).\n\n"
+        "For more details see ChunkedArray_ in the vigra C++ documentation.\n");
 }
 
 void defineChunkedArray()
@@ -736,7 +842,7 @@ void defineChunkedArray()
 
     enum_<CompressionMethod>("Compression",
          "\nEnum to encode the type of compression for\n"
-         "ChunkedArrayCompressed and ChunkedArrayHDF5:\n\n"
+         ":func:`~vigra.ChunkedArrayCompressed` and :func:`~vigra.ChunkedArrayHDF5`:\n\n"
          "   ``Compression.ZLIB:``\n      ZLIB default compression\n"
          "   ``Compression.ZLIB_NONE:``\n      ZLIB no compression (level = 0)\n"
          "   ``Compression.ZLIB_FAST:``\n      ZLIB fast compression (level = 1)\n"
@@ -751,8 +857,8 @@ void defineChunkedArray()
 
 #ifdef HasHDF5
     enum_<HDF5File::OpenMode>("HDF5Mode",
-         "\nEnum to encode open mode for ChunkedArrayHDF5:\n\n"
-         "   ``HDF5Mode.Default:``\n  Use the default strategy (ReadOnly when file and dataset exist, New otherwise)\n"
+         "\nEnum to encode open mode for :func:`~vigra.ChunkedArrayHDF5`:\n\n"
+         "   ``HDF5Mode.Default:``\n      Use the default strategy (ReadOnly when file and dataset exist, New otherwise)\n"
          "   ``HDF5Mode.New:``\n      Create new file (existing file will be deleted)\n"
          "   ``HDF5Mode.ReadWrite:``\n      Open file (create when not existing) and allow creation of new datasets.\n"
          "                                  Contents of existing datasets may be changed, but not their shape.\n"
@@ -784,19 +890,43 @@ void defineChunkedArray()
     defineChunkedArrayFactories<2>();
     defineChunkedArrayFactories<3>();
     defineChunkedArrayFactories<4>();
-    defineChunkedArrayFactories<5>();
+    defineChunkedArrayFactories<5>(true);
 
 #ifdef HasHDF5
     def("ChunkedArrayHDF5", &construct_ChunkedArrayHDF5id,
         (arg("file_id"), arg("dataset_name"), arg("shape")=python::object(),
          arg("dtype")=python::object(), arg("mode")=HDF5File::ReadOnly, arg("compression")=ZLIB_FAST,
          arg("chunk_shape")=python::object(), arg("cache_max")=-1, arg("fill_value")=0.0,
-         arg("axistags")=python::object()));
+         arg("axistags")=python::object()),
+        "");
     def("ChunkedArrayHDF5", &construct_ChunkedArrayHDF5,
         (arg("file_name"), arg("dataset_name"), arg("shape")=python::object(),
          arg("dtype")=python::object(), arg("mode")=HDF5File::Default, arg("compression")=ZLIB_FAST,
          arg("chunk_shape")=python::object(), arg("cache_max")=-1, arg("fill_value")=0.0,
-         arg("axistags")=python::object()));
+         arg("axistags")=python::object()),
+        "Create a chunked array (type :class:`~vigra.vigranumpycore.ChunkedArrayHDF5Base`)\n"
+        "backed by an HDF5 file::\n\n"
+        "  ChunkedArrayHDF5(file, dataset_name, shape=None, dtype=None,\n"
+        "                   mode=HDF5Mode.Default, compression=Compression.ZLIB_FAST, \n"
+        "                   chunk_shape=None, cache_max=-1, fill_value=0, axistags=None)\n\n"
+        "Parameters 'shape', 'dtype', 'compression', 'chunk_shape', 'fill_value', and\n"
+        "'axistags' may only be provided when a new dataset is created.\n\n"
+        "'file' can be either a file name or a file ID as returned by ``h5py.File.id.id``.\n\n"
+        "'shape' can be up to 5-dimensional.\n\n"
+        "'chunk_shape' must have the same dimension as 'shape', and its elements must\n"
+        "be powers of 2.\n\n"
+        "'dtype' can currently be ``uint8``, ``uint32``, and ``float32``.\n\n"
+        "'fill_value' is returned for all array elements that have never been written.\n\n"
+        "'compression' can be any of the flags defined in the :class:`~vigra.Compression` enum\n"
+        "except for ``LZ4``.\n\n"
+        "'cache_max' specifies how many chunks may reside in memory at the same time.\n"
+        "If it is '-1', vigra will choose a sensible default, but other values may\n"
+        "better fit your data access patterns. This is a soft limit, i.e. may be exceeded\n"
+        "temporarily if more chunks are needed simultaneously in a single operation.\n\n"
+        "'mode' defines the access rights to the file and may be any of the flags defined\n"
+        "in the :class:`~vigra.HDF5Mode` enum. By default, you get read permission for an existing\n"
+        "dataset and read/write permission for a new dataset.\n\n"
+        "For more details see ChunkedArray_ in the vigra C++ documentation.\n");
 #endif
 }
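
A sketch of the HDF5-backed factory documented above (file and dataset names are made up; the HDF5Mode enum access follows the docstring):

    import numpy as np
    import vigra

    # Create a new dataset; shape/dtype may only be given for new datasets.
    ca = vigra.ChunkedArrayHDF5("data.h5", "volume",
                                shape=(200, 200, 100), dtype=np.float32,
                                mode=vigra.HDF5Mode.New)
    ca[0:10, 0:10, 0:10] = np.ones((10, 10, 10), dtype=np.float32)
    ca.flush()    # write pending chunks to disk
    ca.close()    # flush and close the underlying file

    # Re-open read-only (the default when file and dataset already exist).
    ro = vigra.ChunkedArrayHDF5("data.h5", "volume")
    print(ro.shape, ro.readonly)
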
 
diff --git a/vigranumpy/src/core/pythonaccumulator.hxx b/vigranumpy/src/core/pythonaccumulator.hxx
index 175f66b..5f957db 100644
--- a/vigranumpy/src/core/pythonaccumulator.hxx
+++ b/vigranumpy/src/core/pythonaccumulator.hxx
@@ -527,8 +527,12 @@ bool pythonActivateTags(Accu & a, python::object tags)
     if(tags == python::object() || python::len(tags) == 0)
         return false;
 
+#if PY_MAJOR_VERSION < 3
     if(PyString_Check(tags.ptr()))
-    {
+#else
+    if(PyUnicode_Check(tags.ptr()))
+#endif
+    {
         std::string tag = python::extract<std::string>(tags)();
         if(normalizeString(tag) == "all")
             a.activateAll();
@@ -550,9 +554,12 @@ void pythonHistogramOptions(Accu & a, python::object minmax, int binCount)
 {
     HistogramOptions options;
     options.setBinCount(binCount);
-    
+#if PY_MAJOR_VERSION < 3
     if(PyString_Check(minmax.ptr()))
-    {
+#else
+    if(PyUnicode_Check(minmax.ptr()))
+#endif
+    {
         std::string spec = normalizeString(python::extract<std::string>(minmax)());
         if(spec == "globalminmax")
             options.globalAutoInit();
diff --git a/vigranumpy/src/core/segmentation.cxx b/vigranumpy/src/core/segmentation.cxx
index 685778a..62a06d4 100644
--- a/vigranumpy/src/core/segmentation.cxx
+++ b/vigranumpy/src/core/segmentation.cxx
@@ -617,89 +617,6 @@ pythonExtendedLocalMaxima3D(NumpyArray<3, Singleband<PixelType> > volume,
 
 /*************************************************************************/
 
-#if 0
-template < class PixelType >
-python::tuple
-pythonWatersheds2DOld(NumpyArray<2, Singleband<PixelType> > image,
-                   int neighborhood = 4,
-                   NumpyArray<2, Singleband<npy_uint32> > seeds = python::object(),
-                   std::string method = "RegionGrowing",
-                   SRGType srgType = CompleteGrow,
-                   PixelType max_cost = 0.0,
-                   NumpyArray<2, Singleband<npy_uint32> > res = NumpyArray<2, Singleband<npy_uint32> >())
-{
-    vigra_precondition(neighborhood == 4 || neighborhood == 8,
-           "watersheds2D(): neighborhood must be 4 or 8.");
-
-    method = tolower(method);
-
-    bool haveSeeds = seeds.hasData();
-    unsigned int maxRegionLabel = 0;
-
-    if(method == "")
-        method = "regiongrowing";
-
-    if(method == "regiongrowing")
-    {
-        seeds.reshapeIfEmpty(image.shape(),
-                "watersheds(): Seed array has wrong shape.");
-
-        if(!haveSeeds)
-        {
-            MultiArray<2, UInt8> minima(image.shape());
-            localMinima(srcImageRange(image), destImage(minima), 1, EightNeighborCode());
-            maxRegionLabel = labelImageWithBackground(srcImageRange(minima), destImage(seeds), true, 0);
-        }
-        else
-        {
-            FindMinMax< npy_uint32 > minmax;
-            inspectImage(srcImageRange(seeds), minmax);
-            maxRegionLabel = minmax.max;
-        }
-
-        res.reshapeIfEmpty(image.shape(), "watersheds(): Output array has wrong shape.");
-
-        ArrayOfRegionStatistics< SeedRgDirectValueFunctor< PixelType > > stats(maxRegionLabel);
-        if(neighborhood == 4)
-        {
-            seededRegionGrowing(srcImageRange(image), srcImage(seeds), destImage(res),
-                                stats, srgType, FourNeighborCode(), max_cost);
-        }
-        else
-        {
-            seededRegionGrowing(srcImageRange(image), srcImage(seeds), destImage(res),
-                                stats, srgType, EightNeighborCode(), max_cost);
-        }
-    }
-    else if(method == "unionfind")
-    {
-        vigra_precondition(!haveSeeds,
-           "watersheds(): UnionFind does not support seed images.");
-        vigra_precondition(srgType == CompleteGrow,
-           "watersheds(): UnionFind only supports 'CompleteGrow' mode.");
-
-        res.reshapeIfEmpty(image.shape(), "watersheds(): Output array has wrong shape.");
-
-        if(neighborhood == 4)
-        {
-            maxRegionLabel = watershedsUnionFind(srcImageRange(image), destImage(res),
-                                        FourNeighborCode());
-        }
-        else
-        {
-            maxRegionLabel = watershedsUnionFind(srcImageRange(image), destImage(res),
-                                        EightNeighborCode());
-        }
-    }
-    else
-    {
-        vigra_precondition(false, "watersheds(): Unknown watershed method requested.");
-    }
-
-    return python::make_tuple(res, maxRegionLabel);
-}
-#endif
-
 template < class PixelType >
 python::tuple
 pythonWatersheds2D(NumpyArray<2, Singleband<PixelType> > image,
@@ -1050,7 +967,7 @@ pythonWatersheds3D(NumpyArray<3, Singleband<PixelType> > image,
 
 VIGRA_PYTHON_MULTITYPE_FUNCTOR(pywatersheds3D, pythonWatersheds3D)
 
-template <unsigned int N, class PixelType >
+template <class PixelType, int N>
 python::tuple
 pythonSlic(NumpyArray<N, PixelType > array,
            double intensityScaling,
@@ -1086,34 +1003,7 @@ pythonSlic(NumpyArray<N, PixelType > array,
     return python::make_tuple(res, maxRegionLabel);
 }
 
-template <class PixelType >
-python::tuple
-pythonSlic2D(NumpyArray<2, PixelType > image,
-             double intensityScaling,
-             unsigned int seedDistance,
-             unsigned int minSize = 0,            // choose minSize automatically
-             unsigned int iterations = 10,
-             NumpyArray<2, Singleband<npy_uint32> > res = NumpyArray<2, Singleband<npy_uint32> >())
-{
-    return pythonSlic(image, intensityScaling, seedDistance, minSize, iterations, res);
-}
-
-VIGRA_PYTHON_MULTITYPE_FUNCTOR(pySlic2D, pythonSlic2D)
-
-template <class PixelType >
-python::tuple
-pythonSlic3D(NumpyArray<3, PixelType > image,
-             double intensityScaling,
-             unsigned int seedDistance,
-             unsigned int minSize = 0,            // choose minSize automatically
-             unsigned int iterations = 10,
-             NumpyArray<3, Singleband<npy_uint32> > res = NumpyArray<3, Singleband<npy_uint32> >())
-{
-    return pythonSlic(image, intensityScaling, seedDistance, minSize, iterations, res);
-}
-
-VIGRA_PYTHON_MULTITYPE_FUNCTOR(pySlic3D, pythonSlic3D)
-
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pySlic, pythonSlic)
 
 template<unsigned int DIM>
 NumpyAnyArray  pythonShrinkLabels(
@@ -1222,37 +1112,39 @@ void defineSegmentation()
         "shrink / ungrow a labeling / segmentation"
     );
 
-    multidef("labelImage", pyLabelMultiArray<2, npy_uint8, npy_uint32, float>(),
+    multidef("labelImage",
+        pyLabelMultiArray<2, 2, npy_uint8, npy_uint32, float>().installFallback(),
         (arg("image"),
-        arg("neighborhood") = 4,
-        arg("out")=python::object()),
+         arg("neighborhood") = 4,
+         arg("out")=python::object()),
         "Find the connected components of a segmented image. Parameter 'neighborhood' specifies "
         "the pixel neighborhood to be used and can be 4 (default) or 8.\n\n"
         "For details see labelMultiArray_ in the vigra C++ documentation.\n");
 
     multidef("labelImageWithBackground",
-         pyLabelMultiArrayWithBackground<2, npy_uint8, npy_uint32, float>(),
+        pyLabelMultiArrayWithBackground<2, 2, npy_uint8, npy_uint32, float>().installFallback(),
         (arg("image"),
-        arg("neighborhood") = 4,
-        arg("background_value") = 0,
-        arg("out")=python::object()),
+         arg("neighborhood") = 4,
+         arg("background_value") = 0,
+         arg("out")=python::object()),
         "Find the connected components of a segmented image, excluding the "
         "background from labeling, where the background is the set of all pixels with "
         "the given 'background_value'. Parameter 'neighborhood' specifies "
         "the pixel neighborhood to be used and can be 4 (default) or 8.\n\n"
         "For details see labelMultiArrayWithBackground_ in the vigra C++ documentation.\n");
 
-    multidef("labelVolume", pyLabelMultiArray<3, npy_uint8, npy_uint32, float>(),
+    multidef("labelVolume",
+        pyLabelMultiArray<3, 3, npy_uint8, npy_uint32, float>().installFallback(),
         (arg("volume"),
-        arg("neighborhood")=6,
-        arg("out")=python::object()),
+         arg("neighborhood")=6,
+         arg("out")=python::object()),
         "Find the connected components of a segmented volume. Parameter 'neighborhood' specifies "
         "the pixel neighborhood to be used and can be 6 (default) or 26.\n"
         "\n"
         "For details see labelMultiArray_ in the vigra C++ documentation.\n");
 
     multidef("labelVolumeWithBackground",
-        pyLabelMultiArrayWithBackground<3, npy_uint8, npy_uint32, float>(),
+        pyLabelMultiArrayWithBackground<3, 3, npy_uint8, npy_uint32, float>().installFallback(),
         (arg("volume"),
          arg("neighborhood")=6,
          arg("background_value")=0,
@@ -1264,77 +1156,39 @@ void defineSegmentation()
         "\n"
         "For details see labelMultiArrayWithBackground_ in the vigra C++ documentation.\n");
 
-    multidef("labelMultiArray", pyLabelMultiArray<2, npy_uint8, npy_uint32, float>(),
-        (arg("array"), arg("neighborhood")="", arg("out")=python::object()),
-        "Find the connected components of a segmented image."
-        "Parameter 'neighborhood' specifies the pixel neighborhood "
-        "to be used and can be 'direct' (default) or 'indirect' or 4 or 8.\n"
+    multidef("labelMultiArray",
+        pyLabelMultiArray<2, 5, npy_uint8, npy_uint32, float>().installFallback(),
+        (arg("array"),
+         arg("neighborhood")="",
+         arg("out")=python::object()),
+        "Find the connected components of a segmented multi-dimensional array\n"
+        "(supported dimensions: 2 to 5).\n"
+        "Parameter 'neighborhood' specifies the pixel neighborhood to be used\n"
+        "and can be 'direct' (default) or 'indirect' or the exact number of\n"
+        "neighbors (2D: 4 or 8, 3D: 6 or 26, 4D: 8 or 80, 5D: 10 or 242).\n"
         "\n"
         "For details see labelMultiArray_ in the vigra C++ documentation.\n");
 
-    multidef("labelMultiArray", pyLabelMultiArray<3, npy_uint8, npy_uint32, float>(),
-        (arg("array"), arg("neighborhood")="", arg("out")=python::object()),
-        "Find the connected components of a segmented volume."
-        "Parameter 'neighborhood' specifies the voxel neighborhood "
-        "to be used and can be 'direct' (default) or 'indirect' or 6 or 26.\n");
-
-    multidef("labelMultiArray", pyLabelMultiArray<4, npy_uint8, npy_uint32, float>(),
-        (arg("array"), arg("neighborhood")="", arg("out")=python::object()),
-        "Find the connected components of a segmented 4D array."
-        "Parameter 'neighborhood' specifies the pixel neighborhood "
-        "to be used and can be 'direct' (default) or 'indirect' or 8 or 80.\n");
-
-    multidef("labelMultiArray", pyLabelMultiArray<5, npy_uint8, npy_uint32, float>(),
-        (arg("array"), arg("neighborhood")="", arg("out")=python::object()),
-        "Find the connected components of a segmented 5D array."
-        "Parameter 'neighborhood' specifies the pixel neighborhood "
-        "to be used and can be 'direct' (default) or 'indirect' or 10 or 242.\n");
-
     multidef("labelMultiArrayWithBackground",
-        pyLabelMultiArrayWithBackground<2, npy_uint8, npy_uint32, float>(),
-        (arg("array"), arg("neighborhood")="", arg("background_value")=0, arg("out")=python::object()),
-        "Find the connected components of a segmented image, excluding the "
-        "background from labeling, where the background is the set of all pixels with "
-        "the given 'background_value'. Parameter 'neighborhood' specifies "
-        "the pixel neighborhood to be used and can be 'direct' (default) or 'indirect'\n"
-        " or 4 or 8.\n"
+        pyLabelMultiArrayWithBackground<2, 5, npy_uint8, npy_uint32, float>().installFallback(),
+        (arg("array"),
+         arg("neighborhood")="",
+         arg("background_value")=0,
+         arg("out")=python::object()),
+        "Find the connected components of a segmented multi-dimensional array\n"
+        "(supported dimensions: 2 to 5), excluding the background from labeling,\n"
+        "where background is the set of all pixels with the given 'background_value'.\n"
+        "Parameter 'neighborhood' specifies the pixel neighborhood to be used\n"
+        "and can be 'direct' (default) or 'indirect' or the exact number of\n"
+        "neighbors (2D: 4 or 8, 3D: 6 or 26, 4D: 8 or 80, 5D: 10 or 242).\n"
         "\n"
         "For details see labelMultiArrayWithBackground_ in the vigra C++ documentation.\n");
 
-    multidef("labelMultiArrayWithBackground",
-        pyLabelMultiArrayWithBackground<3, npy_uint8, npy_uint32, float>(),
-        (arg("array"), arg("neighborhood")="", arg("background_value")=0, arg("out")=python::object()),
-        "Find the connected components of a segmented volume, excluding the "
-        "background from labeling, where the background is the set of all pixels with "
-        "the given 'background_value'. Parameter 'neighborhood' specifies "
-        "the pixel neighborhood to be used and can be 'direct' (default) or 'indirect'\n"
-        " or 6 or 26.\n");
-
-    multidef("labelMultiArrayWithBackground",
-        pyLabelMultiArrayWithBackground<4, npy_uint8, npy_uint32, float>(),
-        (arg("array"), arg("neighborhood")="", arg("background_value")=0, arg("out")=python::object()),
-        "Find the connected components of a segmented 4D array, excluding the "
-        "background from labeling, where the background is the set of all pixels with "
-        "the given 'background_value'. Parameter 'neighborhood' specifies "
-        "the pixel neighborhood to be used and can be 'direct' (default) or 'indirect'\n"
-        " or 8 or 80.\n");
-
-    multidef("labelMultiArrayWithBackground",
-        pyLabelMultiArrayWithBackground<5, npy_uint8, npy_uint32, float>(),
-        (arg("array"), arg("neighborhood")="", arg("background_value")=0, arg("out")=python::object()),
-        "Find the connected components of a segmented 5D array, excluding the "
-        "background from labeling, where the background is the set of all pixels with "
-        "the given 'background_value'. Parameter 'neighborhood' specifies "
-        "the pixel neighborhood to be used and can be 'direct' (default) or 'indirect'\n"
-        " or 10 or 242.\n");
-
     def("sizeFilterSegInplace",registerConverters(&pySizeFilterSegInplace<UInt32>),
-        (
-            arg("seg"),
-            arg("maxLabel"),
-            arg("sizeLimit"),
-            arg("checkAtBorder") = false
-        ),
+        (arg("seg"),
+         arg("maxLabel"),
+         arg("sizeLimit"),
+         arg("checkAtBorder") = false),
         "replace every occurance of each number in the array 'seg' with zeros if this number"
         " occures less than 'sizeLimit' times in the array. If 'checkAtBorder' is false (default) "
         "segments that touch the border of the array will not be changed.\n"
@@ -1387,7 +1241,7 @@ void defineSegmentation()
         // );
 
     multidef("extendedLocalMinima",
-        pyExtendedLocalMinima2D<npy_uint8, float>(),
+        pyExtendedLocalMinima2D<npy_uint8, float>().installFallback(),
         (arg("image"),
          arg("marker")=1.0,
          arg("neighborhood") = 8,
@@ -1399,8 +1253,11 @@ void defineSegmentation()
         );
 
     multidef("extendedLocalMinima3D",
-        pyExtendedLocalMinima3D<float, npy_uint8>(),
-        (arg("volume"), arg("marker") = 1, arg("neighborhood") = 6, arg("out") = python::object()),
+        pyExtendedLocalMinima3D<float, npy_uint8>().installFallback(),
+        (arg("volume"),
+         arg("marker") = 1,
+         arg("neighborhood") = 6,
+         arg("out") = python::object()),
         "Find local minima and minimal plateaus in a volume and mark them with "
         "the given 'marker'. Parameter 'neighborhood' specifies the pixel "
         "neighborhood to be used and can be 6(default) or 26 .\n\n"
@@ -1465,14 +1322,16 @@ void defineSegmentation()
         ;
 
     /*  FIXME: int64 is unsupported by the C++ code (hard-coded int) */
-    multidef("watersheds", pywatersheds2D< npy_uint8, float >(),
-      (arg("image"),
-       arg("neighborhood") = 4,
-       arg("seeds")=python::object(),
-       arg("method")="",
-       arg("terminate")=CompleteGrow,
-       arg("max_cost")=0,
-       arg("out")=python::object()),
+    multidef("watersheds",
+        pywatersheds2D< npy_uint8, float >().installFallback().noPythonSignature(),
+        (arg("image"),
+         arg("neighborhood") = 4,
+         arg("seeds")=python::object(),
+         arg("method")="",
+         arg("terminate")=CompleteGrow,
+         arg("max_cost")=0,
+         arg("out")=python::object()),
+        "\n"
         "Compute the watersheds of a 2D image.\n"
         "\n"
         "   watersheds(image, neighborhood=4, seeds = None, methods = 'RegionGrowing', \n"
@@ -1519,74 +1378,80 @@ void defineSegmentation()
         " out:\n"
         "    the label image (with dtype=numpy.uint32) to be filled by the algorithm. "
         "    It will be allocated by the watershed function if not provided)\n\n"
-         "The function returns a Python tuple (labelImage, maxRegionLabel)\n\n"
-         );
-
-    multidef("watersheds", pywatersheds3D< npy_uint8, float >(),
-      (arg("volume"),
-       arg("neighborhood") = 6,
-       arg("seeds")=python::object(),
-       arg("method")="",
-       arg("terminate")=CompleteGrow,
-       arg("max_cost")=0,
-       arg("out")=python::object()),
-       "Likewise, compute watersheds of a volume.\n");
-
-    multidef("watershedsNew", pywatersheds2DNew< npy_uint8, float >(),
-      (arg("image"),
-       arg("neighborhood") = 4,
-       arg("seeds")=python::object(),
-       arg("method")="",
-       arg("terminate")=CompleteGrow,
-       arg("max_cost")=0,
-       arg("out")=python::object()),
-       "graph-based watershed");
+        "The function returns a Python tuple (labelImage, maxRegionLabel)\n\n"
+    );
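A minimal sketch of the unseeded region-growing call (assuming the
vigra.analysis and vigra.filters module paths; parameter values are illustrative):

    # Illustrative sketch; assumes vigra.analysis.watersheds and
    # vigra.filters.gaussianGradientMagnitude are available.
    import numpy as np
    import vigra

    img = np.random.rand(128, 128).astype(np.float32)
    grad = vigra.filters.gaussianGradientMagnitude(img, sigma=2.0)

    # No seeds given: they are generated from the local minima of 'grad'.
    labels, maxRegionLabel = vigra.analysis.watersheds(grad, neighborhood=4)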
 
-    multidef("watershedsNew", pywatersheds3DNew< npy_uint8, float >(),
-      (arg("image"),
-       arg("neighborhood") = 6,
-       arg("seeds")=python::object(),
-       arg("method")="",
-       arg("terminate")=CompleteGrow,
-       arg("max_cost")=0,
-       arg("out")=python::object()),
-       "graph-based watershed");
+    multidef("watersheds",
+        pywatersheds3D< npy_uint8, float >().noPythonSignature(),
+        (arg("volume"),
+         arg("neighborhood") = 6,
+         arg("seeds")=python::object(),
+         arg("method")="",
+         arg("terminate")=CompleteGrow,
+         arg("max_cost")=0,
+         arg("out")=python::object()),
+        "Likewise, compute watersheds of a volume.\n");
+
+    multidef("watershedsNew",
+        pywatersheds2DNew< npy_uint8, float >().installFallback(),
+        (arg("image"),
+         arg("neighborhood") = 4,
+         arg("seeds")=python::object(),
+         arg("method")="",
+         arg("terminate")=CompleteGrow,
+         arg("max_cost")=0,
+         arg("out")=python::object()),
+        "graph-based watershed");
 
-    multidef("slicSuperpixels", pySlic2D< TinyVector<float, 3>, Singleband<float> >(),
-      (arg("image"),
-       arg("intensityScaling"),
-       arg("seedDistance"),
-       arg("minSize")=0,
-       arg("iterations")=10,
-       arg("out")=python::object()),
-        "Compute Slic superpixels for a 2D image.\n\n"
+    multidef("watershedsNew",
+        pywatersheds3DNew< npy_uint8, float >(),
+        (arg("image"),
+         arg("neighborhood") = 6,
+         arg("seeds")=python::object(),
+         arg("method")="",
+         arg("terminate")=CompleteGrow,
+         arg("max_cost")=0,
+         arg("out")=python::object()),
+       "graph-based watershed");
 
+    multidef("slicSuperpixels",
+        pySlic<2, 3, TinyVector<float, 3>, Singleband<float> >().installFallback(),
+        (arg("array"),
+         arg("intensityScaling"),
+         arg("seedDistance"),
+         arg("minSize")=0,
+         arg("iterations")=10,
+         arg("out")=python::object()),
+        "\n"
+        "Compute Slic superpixels for a 2D or 3D image.\n"
+        "\n"
         "Parameters:\n\n"
-        " image:\n"
-        "    The 2D-image on which the superpixels will be calculated. Accepts single- and threeband images. \n\n"
+        " array:\n"
+        "    The array on which the superpixels will be calculated. Accepts single- and\n"
+        "    threeband images/volumes. \n"
+        "\n"
         " intensityScaling:\n"
-        "    Scale (divide) color/intensity difference by this parameter before comparing to spatial distance. \n\n"
+        "    Scale (divide) color/intensity difference by this parameter before comparing\n"
+        "    to spatial distance. \n"
+        "\n"
         " seedDistance:\n"
-        "    specify the radius of the window around each seed in which the algorithm looks for potential members of the corresponding superpixel"
-        " thus limiting the superpixel size. The grid spacing for seed placement is determined by this parameter.\n\n"
+        "    specify the radius of the window around each seed in which the algorithm looks\n"
+        "    for potential members of the corresponding superpixel thus limiting the\n"
+        "    superpixel size. The grid spacing for seed placement is determined by this parameter.\n"
+        "\n"
         " minSize:\n"
-        "    Minimum size for superpixels. By default the algorithm merges all regions smaller than a quarter of the average superpixel size.\n\n"
+        "    Minimum size for superpixels. By default the algorithm merges all regions smaller\n"
+        "    than a quarter of the average superpixel size.\n"
+        "\n"
         " iterations:\n"
-        "    Specify number of iterations. The default is 10."
+        "    Specify number of iterations. The default is 10.\n"
+        "\n"
         " out:\n"
         "    The label image (with dtype=numpy.uint32) to be filled by the algorithm. "
-        "    It will be allocated by the slicSuperpixels function if not provided)\n\n"
-        "The function returns a Python tuple (labelImage, maxRegionLabel)\n\n");
-
-    multidef("slicSuperpixels", pySlic3D< TinyVector<float, 3>, Singleband<float> >(),
-      (arg("image"),
-       arg("intensityScaling"),
-       arg("seedDistance"),
-       arg("minSize")=0,
-       arg("iterations")=10,
-       arg("out")=python::object()),
-       "Likewise compute Slic superpixels for a 3D volume, either single- or threeband.\n");
-
+        "    It will be allocated by the slicSuperpixels function if not provided)\n"
+        "\n"
+        "The function returns a Python tuple (labelImage, maxRegionLabel)\n"
+        "\n");
 }
 
 void defineEdgedetection();
diff --git a/vigranumpy/src/core/tensors.cxx b/vigranumpy/src/core/tensors.cxx
index 74fdd56..6b1b89f 100644
--- a/vigranumpy/src/core/tensors.cxx
+++ b/vigranumpy/src/core/tensors.cxx
@@ -50,35 +50,36 @@ namespace python = boost::python;
 namespace vigra
 {
 
-template < class VoxelType, unsigned int ndim >
-NumpyAnyArray 
-pythonGaussianGradientND(NumpyArray<ndim, Singleband<VoxelType> > array,
-                         python::object sigma,
-                         NumpyArray<ndim, TinyVector<VoxelType, (int)ndim> > res = NumpyArray<ndim, TinyVector<VoxelType, (int)ndim> >(),
-                         python::object sigma_d = python::object(0.0), 
-                         python::object step_size = python::object(1.0),
-                         double window_size = 0.0, 
-                         python::object roi = python::object())
+template < class VoxelType, int ndim >
+NumpyAnyArray
+pythonGaussianGradient(NumpyArray<ndim, Singleband<VoxelType> > array,
+                       python::object sigma,
+                       NumpyArray<ndim, TinyVector<VoxelType, (int)ndim> > res =
+                                 NumpyArray<ndim, TinyVector<VoxelType, (int)ndim> >(),
+                       python::object sigma_d = python::object(0.0),
+                       python::object step_size = python::object(1.0),
+                       double window_size = 0.0,
+                       python::object roi = python::object())
 {
     pythonScaleParam<ndim> params(sigma, sigma_d, step_size, "gaussianGradient");
     params.permuteLikewise(array);
     std::string description("Gaussian gradient, scale=");
     description += asString(sigma);
-    
+
     ConvolutionOptions<ndim> opt(params().filterWindowSize(window_size));
-    
+
     if(roi != python::object())
     {
         typedef typename MultiArrayShape<ndim>::type Shape;
         Shape start = array.permuteLikewise(python::extract<Shape>(roi[0])());
         Shape stop  = array.permuteLikewise(python::extract<Shape>(roi[1])());
         opt.subarray(start, stop);
-        res.reshapeIfEmpty(array.taggedShape().resize(stop-start).setChannelDescription(description), 
+        res.reshapeIfEmpty(array.taggedShape().resize(stop-start).setChannelDescription(description),
                        "gaussianGradient(): Output array has wrong shape.");
     }
     else
     {
-        res.reshapeIfEmpty(array.taggedShape().setChannelDescription(description), 
+        res.reshapeIfEmpty(array.taggedShape().setChannelDescription(description),
                        "gaussianGradient(): Output array has wrong shape.");
     }
 
@@ -89,156 +90,144 @@ pythonGaussianGradientND(NumpyArray<ndim, Singleband<VoxelType> > array,
     return res;
 }
 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyGaussianGradient, pythonGaussianGradient)
 
 template < class VoxelType, unsigned int ndim >
-NumpyAnyArray 
-pythonGaussianGradientMagnitudeND(NumpyArray<ndim, Multiband<VoxelType> > array,
-                                  const ConvolutionOptions<ndim-1> & opt,
-                                  NumpyArray<ndim-1, Singleband<VoxelType> > res = NumpyArray<ndim-1, Singleband<VoxelType> >())
+NumpyAnyArray
+pythonGaussianGradientMagnitudeImpl(NumpyArray<ndim, Multiband<VoxelType> > array,
+                                    const ConvolutionOptions<ndim-1> & opt,
+                                    NumpyArray<ndim-1, Singleband<VoxelType> > res =
+                                              NumpyArray<ndim-1, Singleband<VoxelType> >())
 {
     using namespace vigra::functor;
     static const int sdim = ndim - 1;
-    
+
     std::string description("Gaussian gradient magnitude");
     typedef typename MultiArrayShape<sdim>::type Shape;
     Shape tmpShape(array.shape().begin());
     if(opt.to_point != Shape())
         tmpShape = opt.to_point-opt.from_point;
-    
-    res.reshapeIfEmpty(array.taggedShape().resize(tmpShape).setChannelDescription(description), 
+
+    res.reshapeIfEmpty(array.taggedShape().resize(tmpShape).setChannelDescription(description),
           "gaussianGradientMagnitude(): Output array has wrong shape.");
     res.init(VoxelType());
-    
+
     {
         PyAllowThreads _pythread;
         MultiArray<sdim, TinyVector<VoxelType, sdim> > grad(tmpShape);
-        
+
         for(int k=0; k<array.shape(sdim); ++k)
         {
             MultiArrayView<sdim, VoxelType, StridedArrayTag> barray = array.bindOuter(k);
-        
+
             gaussianGradientMultiArray(srcMultiArrayRange(barray), destMultiArray(grad), opt);
-            combineTwoMultiArrays(srcMultiArrayRange(grad), srcMultiArray(res), destMultiArray(res), 
+            combineTwoMultiArrays(srcMultiArrayRange(grad), srcMultiArray(res), destMultiArray(res),
                                   squaredNorm(Arg1())+Arg2());
         }
         transformMultiArray(srcMultiArrayRange(res), destMultiArray(res), sqrt(Arg1()));
     }
-    
-    return res;
-}
 
-
-template < class PixelType>
-NumpyAnyArray 
-pythonRieszTransformOfLOG2D(NumpyArray<2, Singleband<PixelType> > image,
-                            double scale, 
-                            unsigned int xorder, unsigned int yorder,
-                            NumpyArray<2, Singleband<PixelType> > res = NumpyArray<2, Singleband<PixelType> >())
-{
-    res.reshapeIfEmpty(image.taggedShape().setChannelDescription("Riesz transform"), 
-              "rieszTransformOfLOG2D(): Output array has wrong shape.");    
-    
-    {
-        PyAllowThreads _pythread;
-        rieszTransformOfLOG(srcImageRange(image), destImage(res), scale, xorder, yorder);
-    }
-    
     return res;
 }
 
 template < class VoxelType, unsigned int ndim >
-NumpyAnyArray 
-pythonGaussianGradientMagnitudeND(NumpyArray<ndim, Multiband<VoxelType> > volume,
-                                  const ConvolutionOptions<ndim-1> & opt,
-                                  NumpyArray<ndim, Multiband<VoxelType> > res = NumpyArray<ndim, Multiband<VoxelType> >())
+NumpyAnyArray
+pythonGaussianGradientMagnitudeImpl(NumpyArray<ndim, Multiband<VoxelType> > volume,
+                                    const ConvolutionOptions<ndim-1> & opt,
+                                    NumpyArray<ndim, Multiband<VoxelType> > res =
+                                                 NumpyArray<ndim, Multiband<VoxelType> >())
 {
     using namespace vigra::functor;
     static const int sdim = ndim - 1;
-    
+
     std::string description("channel-wise Gaussian gradient magnitude");
-    
+
     typedef typename MultiArrayShape<sdim>::type Shape;
     Shape tmpShape(volume.shape().begin());
     if(opt.to_point != Shape())
         tmpShape = opt.to_point-opt.from_point;
-    
-    res.reshapeIfEmpty(volume.taggedShape().resize(tmpShape).setChannelDescription(description), 
+
+    res.reshapeIfEmpty(volume.taggedShape().resize(tmpShape).setChannelDescription(description),
              "gaussianGradientMagnitude(): Output array has wrong shape.");
-    
+
     {
         PyAllowThreads _pythread;
         MultiArray<sdim, TinyVector<VoxelType, sdim> > grad(tmpShape);
-        
+
         for(int k=0; k<volume.shape(sdim); ++k)
         {
             MultiArrayView<sdim, VoxelType, StridedArrayTag> bvolume = volume.bindOuter(k);
             MultiArrayView<sdim, VoxelType, StridedArrayTag> bres = res.bindOuter(k);
-        
+
             gaussianGradientMultiArray(srcMultiArrayRange(bvolume), destMultiArray(grad), opt);
             transformMultiArray(srcMultiArrayRange(grad), destMultiArray(bres), norm(Arg1()));
         }
     }
-    
+
     return res;
 }
 
 template < class VoxelType, unsigned int ndim >
-NumpyAnyArray 
+NumpyAnyArray
 pythonGaussianGradientMagnitude(NumpyArray<ndim, Multiband<VoxelType> > volume,
                                 python::object sigma, bool accumulate,
                                 NumpyAnyArray res,
-                                python::object sigma_d, 
+                                python::object sigma_d,
                                 python::object step_size,
-                                double window_size = 0.0, 
+                                double window_size = 0.0,
                                 python::object roi = python::object())
 {
     pythonScaleParam<ndim - 1> params(sigma, sigma_d, step_size, "gaussianGradientMagnitude");
     params.permuteLikewise(volume);
     ConvolutionOptions<ndim-1> opt(params().filterWindowSize(window_size));
-    
+
     typedef typename MultiArrayShape<ndim - 1>::type Shape;
     if(roi != python::object())
     {
-        opt.subarray(volume.permuteLikewise(python::extract<Shape>(roi[0])()), 
+        opt.subarray(volume.permuteLikewise(python::extract<Shape>(roi[0])()),
                      volume.permuteLikewise(python::extract<Shape>(roi[1])()));
     }
     else
     {
         opt.subarray(Shape(), Shape(volume.shape().begin()));
     }
-    
+
     return accumulate
-              ? pythonGaussianGradientMagnitudeND(volume, opt, NumpyArray<ndim-1, Singleband<VoxelType> >(res))
-              : pythonGaussianGradientMagnitudeND(volume, opt, NumpyArray<ndim, Multiband<VoxelType> >(res));
+              ? pythonGaussianGradientMagnitudeImpl(volume, opt,
+                                   NumpyArray<ndim-1, Singleband<VoxelType> >(res))
+              : pythonGaussianGradientMagnitudeImpl(volume, opt,
+                                   NumpyArray<ndim, Multiband<VoxelType> >(res));
 }
 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyGaussianGradientMagnitude, pythonGaussianGradientMagnitude)
+
 template < class VoxelType, unsigned int ndim >
-NumpyAnyArray 
+NumpyAnyArray
 pythonSymmetricGradientND(NumpyArray<ndim, Singleband<VoxelType> > volume,
                           NumpyArray<ndim, TinyVector<VoxelType, (int)ndim> > res=python::object(),
-                          python::object step_size = python::object(1.0), 
+                          python::object step_size = python::object(1.0),
                           python::object roi = python::object())
 {
     pythonScaleParam<ndim> params(python::object(0.0), python::object(0.0),
                                  step_size, "symmetricGradient");
     params.permuteLikewise(volume);
     ConvolutionOptions<ndim> opt(params());
-    
+
     if(roi != python::object())
     {
         typedef typename MultiArrayShape<ndim>::type Shape;
         Shape start = volume.permuteLikewise(python::extract<Shape>(roi[0])());
         Shape stop  = volume.permuteLikewise(python::extract<Shape>(roi[1])());
         opt.subarray(start, stop);
-        res.reshapeIfEmpty(volume.taggedShape().resize(stop-start).setChannelDescription("symmetric gradient"), 
+        res.reshapeIfEmpty(volume.taggedShape().resize(stop-start).setChannelDescription("symmetric gradient"),
                  "symmetricGradient(): Output array has wrong shape.");
     }
     else
     {
-        res.reshapeIfEmpty(volume.taggedShape().setChannelDescription("symmetric gradient"), 
+        res.reshapeIfEmpty(volume.taggedShape().setChannelDescription("symmetric gradient"),
                  "symmetricGradient(): Output array has wrong shape.");
     }
-    
+
     {
         PyAllowThreads _pythread;
         symmetricGradientMultiArray(srcMultiArrayRange(volume), destMultiArray(res), opt);
@@ -247,348 +236,387 @@ pythonSymmetricGradientND(NumpyArray<ndim, Singleband<VoxelType> > volume,
 }
 
 template < class VoxelType, unsigned int N >
-NumpyAnyArray 
-pythonHessianOfGaussianND(NumpyArray<N, Singleband<VoxelType> > array,
-                          python::object sigma,
-                          NumpyArray<N, TinyVector<VoxelType, int(N*(N+1)/2)> > res= NumpyArray<N, TinyVector<VoxelType, int(N*(N+1)/2)> >(),
-                          python::object sigma_d = python::object(0.0), 
-                          python::object step_size = python::object(1.0),
-                          double window_size = 0.0, 
-                          python::object roi = python::object())
+NumpyAnyArray
+pythonHessianOfGaussian(NumpyArray<N, Singleband<VoxelType> > array,
+                        python::object sigma,
+                        NumpyArray<N, TinyVector<VoxelType, int(N*(N+1)/2)> > res =
+                                   NumpyArray<N, TinyVector<VoxelType, int(N*(N+1)/2)> >(),
+                        python::object sigma_d = python::object(0.0),
+                        python::object step_size = python::object(1.0),
+                        double window_size = 0.0,
+                        python::object roi = python::object())
 {
     std::string description("Hessian of Gaussian (flattened upper triangular matrix), scale=");
     description += asString(sigma);
-    
+
     pythonScaleParam<N> params(sigma, sigma_d, step_size, "hessianOfGaussian");
     params.permuteLikewise(array);
     ConvolutionOptions<N> opt(params().filterWindowSize(window_size));
-    
+
     if(roi != python::object())
     {
         typedef typename MultiArrayShape<N>::type Shape;
         Shape start = array.permuteLikewise(python::extract<Shape>(roi[0])());
         Shape stop  = array.permuteLikewise(python::extract<Shape>(roi[1])());
         opt.subarray(start, stop);
-        res.reshapeIfEmpty(array.taggedShape().resize(stop-start).setChannelDescription(description), 
+        res.reshapeIfEmpty(array.taggedShape().resize(stop-start).setChannelDescription(description),
                "hessianOfGaussian(): Output array has wrong shape.");
     }
     else
     {
-        res.reshapeIfEmpty(array.taggedShape().setChannelDescription(description), 
+        res.reshapeIfEmpty(array.taggedShape().setChannelDescription(description),
                "hessianOfGaussian(): Output array has wrong shape.");
     }
-    
-    {
-        PyAllowThreads _pythread;
-        hessianOfGaussianMultiArray(srcMultiArrayRange(array), destMultiArray(res), opt);
-    }
-    return res;
-}
 
-#if 0 // FIXME: this is probably no longer needed thanks to axistags
-template < class VoxelType>
-NumpyAnyArray 
-pythonHessianOfGaussian3D(NumpyArray<3, Singleband<VoxelType> > volume,
-                          python::object sigma,
-                          NumpyArray<3, TinyVector<VoxelType, 6> > res=NumpyArray<3, TinyVector<VoxelType, 6> >(),
-                          python::object sigma_d = python::object(0.0), python::object step_size = python::object(1.0))
-{
-    pythonScaleParam<3> params(sigma, sigma_d, step_size, "hessianOfGaussian");
-    params.permuteLikewise(volume);
-    std::string description("Hessian of Gaussian (flattened upper triangular matrix), scale=");
-    description += asString(sigma);
-    
-    res.reshapeIfEmpty(volume.taggedShape().setChannelDescription(description), 
-          "hessianOfGaussian(): Output array has wrong shape.");
-    
     {
         PyAllowThreads _pythread;
-        hessianOfGaussianMultiArray(srcMultiArrayRange(volume), destMultiArray(res), params());
+        hessianOfGaussianMultiArray(srcMultiArrayRange(array), destMultiArray(res), opt);
     }
-    
     return res;
 }
 
-template < class PixelType>
-NumpyAnyArray 
-pythonHessianOfGaussian2D(NumpyArray<2, Singleband<PixelType> > image,
-                          python::object sigma,
-                          NumpyArray<2, TinyVector<PixelType, 3> > res=NumpyArray<2, TinyVector<PixelType, 3> >(),
-                          python::object sigma_d = python::object(0.0), python::object step_size = python::object(1.0))
-{
-    pythonScaleParam<2> params(sigma, sigma_d, step_size, "hessianOfGaussian");
-    params.permuteLikewise(image);
-    std::string description("Hessian of Gaussian (flattened upper triangular matrix), scale=");
-    description += asString(sigma);
-    
-    res.reshapeIfEmpty(image.taggedShape().setChannelDescription(description), 
-             "hessianOfGaussian(): Output array has wrong shape.");
-    
-    {
-        PyAllowThreads _pythread;
-        hessianOfGaussianMultiArray(srcMultiArrayRange(image), destMultiArray(res), params());
-    }
-    
-    return res;
-}
-#endif
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyHessianOfGaussian, pythonHessianOfGaussian)
 
 template <class PixelType, unsigned int N>
-NumpyAnyArray 
-pythonStructureTensor(NumpyArray<N, Multiband<PixelType> > array, 
+NumpyAnyArray
+pythonStructureTensor(NumpyArray<N, Multiband<PixelType> > array,
                       python::object innerScale, python::object outerScale,
-                      NumpyArray<N-1, TinyVector<PixelType, int(N*(N-1)/2)> > res=NumpyArray<N-1, TinyVector<PixelType, int(N*(N-1)/2)> >(),
-                      python::object sigma_d = python::object(0.0), 
+                      NumpyArray<N-1, TinyVector<PixelType, int(N*(N-1)/2)> > res=
+                              NumpyArray<N-1, TinyVector<PixelType, int(N*(N-1)/2)> >(),
+                      python::object sigma_d = python::object(0.0),
                       python::object step_size = python::object(1.0),
-                      double window_size = 0.0, 
+                      double window_size = 0.0,
                       python::object roi = python::object())
 {
     using namespace vigra::functor;
     static const int sdim = N - 1;
-    
+
     std::string description("structure tensor (flattened upper triangular matrix), inner scale=");
     description += asString(innerScale) + ", outer scale=" + asString(outerScale);
-    
+
     pythonScaleParam<N-1> params(innerScale, sigma_d, step_size, outerScale, "structureTensor");
     params.permuteLikewise(array);
     ConvolutionOptions<N-1> opt(params().filterWindowSize(window_size));
-    
+
     if(roi != python::object())
     {
         typedef typename MultiArrayShape<N-1>::type Shape;
         Shape start = array.permuteLikewise(python::extract<Shape>(roi[0])());
         Shape stop  = array.permuteLikewise(python::extract<Shape>(roi[1])());
         opt.subarray(start, stop);
-        res.reshapeIfEmpty(array.taggedShape().resize(stop-start).setChannelDescription(description), 
+        res.reshapeIfEmpty(array.taggedShape().resize(stop-start).setChannelDescription(description),
                      "structureTensor(): Output array has wrong shape.");
     }
     else
     {
-        res.reshapeIfEmpty(array.taggedShape().setChannelDescription(description), 
+        res.reshapeIfEmpty(array.taggedShape().setChannelDescription(description),
                      "structureTensor(): Output array has wrong shape.");
     }
-    
+
     {
         PyAllowThreads _pythread;
 
-        MultiArrayView<sdim, PixelType, StridedArrayTag> band = array.bindOuter(0); 
+        MultiArrayView<sdim, PixelType, StridedArrayTag> band = array.bindOuter(0);
         structureTensorMultiArray(srcMultiArrayRange(band), destMultiArray(res), opt);
-        
+
         if(array.shape(sdim) > 1)
         {
             MultiArray<sdim, TinyVector<PixelType, int(N*(N-1)/2)> > st(res.shape());
-            
+
             for(int b=1; b<array.shape(sdim); ++b)
             {
                 MultiArrayView<sdim, PixelType, StridedArrayTag> band = array.bindOuter(b);
                 structureTensorMultiArray(srcMultiArrayRange(band), destMultiArray(st), opt);
-                combineTwoMultiArrays(srcMultiArrayRange(res), srcMultiArray(st), 
+                combineTwoMultiArrays(srcMultiArrayRange(res), srcMultiArray(st),
                                       destMultiArray(res), Arg1() + Arg2());
             }
         }
     }
-    
+
     return res;
 }
 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyStructureTensor, pythonStructureTensor)
+
 template < class SrcPixelType, typename DestPixelType >
-NumpyAnyArray 
+NumpyAnyArray
 pythonBoundaryTensor2D(NumpyArray<2, Singleband<SrcPixelType> > image,
                        double scale,
                        NumpyArray<2, TinyVector<DestPixelType, 3> > res = NumpyArray<2, TinyVector<DestPixelType, 3> >())
 {
     std::string description("boundary tensor (flattened upper triangular matrix), scale=");
     description += asString(scale);
-    
-    res.reshapeIfEmpty(image.taggedShape().setChannelDescription(description), 
-           "boundaryTensor2D(): Output array has wrong shape.");    
+
+    res.reshapeIfEmpty(image.taggedShape().setChannelDescription(description),
+           "boundaryTensor2D(): Output array has wrong shape.");
 
     {
         PyAllowThreads _pythread;
         boundaryTensor(srcImageRange(image), destImage(res), scale);
     }
-    
+
     return res;
 }
 
+template < class PixelType>
+NumpyAnyArray
+pythonRieszTransformOfLOG2D(NumpyArray<2, Singleband<PixelType> > image,
+                            double scale,
+                            unsigned int xorder, unsigned int yorder,
+                            NumpyArray<2, Singleband<PixelType> > res = NumpyArray<2, Singleband<PixelType> >())
+{
+    res.reshapeIfEmpty(image.taggedShape().setChannelDescription("Riesz transform"),
+              "rieszTransformOfLOG2D(): Output array has wrong shape.");
+
+    {
+        PyAllowThreads _pythread;
+        rieszTransformOfLOG(srcImageRange(image), destImage(res), scale, xorder, yorder);
+    }
+
+    return res;
+}
 
 template < class SrcPixelType, typename DestPixelType  >
-NumpyAnyArray 
+NumpyAnyArray
 pythonTensorEigenRepresentation2D(NumpyArray<2, TinyVector<SrcPixelType, 3> > image,
                                   NumpyArray<2, TinyVector<DestPixelType, 3> > res = python::object())
 {
     std::string description("tensor eigen representation (ev1, ev2, angle)");
-    
-    res.reshapeIfEmpty(image.taggedShape().setChannelDescription(description), 
-                    "tensorEigenRepresentation2D(): Output array has wrong shape.");    
-    
+
+    res.reshapeIfEmpty(image.taggedShape().setChannelDescription(description),
+                    "tensorEigenRepresentation2D(): Output array has wrong shape.");
+
     {
         PyAllowThreads _pythread;
         tensorEigenRepresentation(srcImageRange(image), destImage(res));
     }
-    
+
     return res;
 }
 
 // FIXME: generalize to handle non-interleaved representations
 template < class PixelType, unsigned int N >
-NumpyAnyArray 
+NumpyAnyArray
 pythonVectorToTensor(NumpyArray<N, TinyVector<PixelType, int(N)> > array,
                      NumpyArray<N, TinyVector<PixelType, int(N*(N+1)/2)> > res = python::object())
 {
     std::string description("outer product tensor (flattened upper triangular matrix)");
 
-    res.reshapeIfEmpty(array.taggedShape().setChannelDescription(description), 
-            "vectorToTensor(): Output array has wrong shape.");    
-    
+    res.reshapeIfEmpty(array.taggedShape().setChannelDescription(description),
+            "vectorToTensor(): Output array has wrong shape.");
+
     {
         PyAllowThreads _pythread;
         vectorToTensorMultiArray(srcMultiArrayRange(array), destMultiArray(res));
     }
-    
+
     return res;
 }
 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyVectorToTensor, pythonVectorToTensor)
+
 // FIXME: generalize to handle non-interleaved representations
 template < class PixelType, unsigned int N >
-NumpyAnyArray 
+NumpyAnyArray
 pythonTensorTrace(NumpyArray<N, TinyVector<PixelType, int(N*(N+1)/2)> > array,
                   NumpyArray<N, Singleband<PixelType> > res = python::object())
 {
     std::string description("tensor trace");
 
-    res.reshapeIfEmpty(array.taggedShape().setChannelDescription(description), 
-           "tensorTrace(): Output array has wrong shape.");    
-    
+    res.reshapeIfEmpty(array.taggedShape().setChannelDescription(description),
+           "tensorTrace(): Output array has wrong shape.");
+
     {
         PyAllowThreads _pythread;
         tensorTraceMultiArray(srcMultiArrayRange(array), destMultiArray(res));
     }
-    
+
     return res;
 }
 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyTensorTrace, pythonTensorTrace)
+
 // FIXME: generalize to handle non-interleaved representations
 template < class PixelType, unsigned int N >
-NumpyAnyArray 
+NumpyAnyArray
 pythonTensorDeterminant(NumpyArray<N, TinyVector<PixelType, int(N*(N+1)/2)> > array,
                         NumpyArray<N, Singleband<PixelType> > res = python::object())
 {
     std::string description("tensor determinant");
 
-    res.reshapeIfEmpty(array.taggedShape().setChannelDescription(description), 
-                "tensorDeterminant(): Output array has wrong shape.");    
-    
+    res.reshapeIfEmpty(array.taggedShape().setChannelDescription(description),
+                "tensorDeterminant(): Output array has wrong shape.");
+
     {
         PyAllowThreads _pythread;
         tensorDeterminantMultiArray(srcMultiArrayRange(array), destMultiArray(res));
     }
-    
+
     return res;
 }
 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyTensorDeterminant, pythonTensorDeterminant)
+
 // FIXME: generalize to handle non-interleaved representations
 template < class PixelType, unsigned int N >
-NumpyAnyArray 
+NumpyAnyArray
 pythonTensorEigenvalues(NumpyArray<N, TinyVector<PixelType, int(N*(N+1)/2)> > array,
                         NumpyArray<N, TinyVector<PixelType, int(N)> > res = python::object())
 {
     std::string description("tensor eigenvalues");
 
-    res.reshapeIfEmpty(array.taggedShape().setChannelDescription(description), 
-                 "tensorEigenvalues(): Output array has wrong shape.");    
-    
+    res.reshapeIfEmpty(array.taggedShape().setChannelDescription(description),
+                 "tensorEigenvalues(): Output array has wrong shape.");
+
     {
         PyAllowThreads _pythread;
         tensorEigenvaluesMultiArray(srcMultiArrayRange(array), destMultiArray(res));
     }
-    
+
     return res;
 }
 
+VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyTensorEigenvalues, pythonTensorEigenvalues)
+
 template < class SrcPixelType, typename DestPixelType >
-NumpyAnyArray 
+NumpyAnyArray
 pythonHourGlassFilter2D(NumpyArray<2, TinyVector<SrcPixelType, 3> > image,
-                        double sigma, 
+                        double sigma,
                         double rho,
                         NumpyArray<2, TinyVector<DestPixelType, 3> > res = python::object())
 {
     std::string description("hourglass tensor (flattened upper triangular matrix), scale=");
     description += asString(sigma) + ", rho=" + asString(rho);
-    
-    res.reshapeIfEmpty(image.taggedShape().setChannelDescription(description), 
-            "hourGlassFilter2D(): Output array has wrong shape.");    
-    
+
+    res.reshapeIfEmpty(image.taggedShape().setChannelDescription(description),
+            "hourGlassFilter2D(): Output array has wrong shape.");
+
     {
         PyAllowThreads _pythread;
         hourGlassFilter(srcImageRange(image), destImage(res), sigma, rho);
     }
-    
+
     return res;
 }
 
 void defineTensor()
 {
     using namespace python;
-    
+
     docstring_options doc_options(true, true, false);
-    
-    def("gaussianGradient",
-        registerConverters(&pythonGaussianGradientND<float,2>),
-        (arg("image"), arg("sigma"), arg("out")=python::object(), 
-         arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
-        "Calculate the gradient vector by means of a 1st derivative of "
-        "Gaussian filter at the given scale for a 2D scalar image.\n\n"
-        "If 'sigma' is a single value, an isotropic filter at this scale is "
-        "applied (i.e., each dimension is filtered in the same way). "
-        "If 'sigma' is a tuple or list of values, the amount of smoothing "
+
+    multidef("gaussianGradient",
+        pyGaussianGradient<1, 4, float, double>().installFallback(),
+        (arg("array"),
+         arg("sigma"),
+         arg("out")=python::object(),
+         arg("sigma_d")=0.0,
+         arg("step_size")=1.0,
+         arg("window_size")=0.0,
+         arg("roi")=python::object()),
+        "\n"
+        "Calculate the gradient vector by means of a 1st derivative of\n"
+        "Gaussian filter at the given scale for a scalar array (up to 4D).\n"
+        "\n"
+        "If 'sigma' is a single value, an isotropic filter at this scale is\n"
+        "applied (i.e., each dimension is filtered in the same way).\n"
+        "If 'sigma' is a tuple or list of values, the amount of smoothing\n"
         "will be different for each spatial dimension.\n"
-        "The optional 'sigma_d' (single, tuple, or list) denotes the resolution standard deviation "
-        "per axis, the optional 'step_size' (single, tuple, or list) the distance between two adjacent "
-        "pixels for each dimension. "
-        "The length of the tuples or lists must be equal to the "
-        "number of spatial dimensions.\n\n"        
+        "The optional 'sigma_d' (single, tuple, or list) denotes the PSF\n"
+        "standard deviation per axis, the optional 'step_size' (single, tuple,\n"
+        "or list) the distance between two adjacent pixels for each dimension.\n"
+        "The length of the tuples or lists must be equal to the number of spatial\n"
+        "dimensions.\n"
+        "\n"
         "'window_size' and 'roi' have the same meaning as in :func:`gaussianSmoothing`.\n\n"
         "For details see gaussianGradientMultiArray_ and ConvolutionOptions_ in the vigra C++ documentation.\n");
 
-    def("gaussianGradient",
-        registerConverters(&pythonGaussianGradientND<float,3>),
-        (arg("volume"), arg("sigma"), arg("out")=python::object(), 
-         arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
-        "Likewise for a 3D scalar volume.\n");
-
-    def("rieszTransformOfLOG2D",
-        registerConverters(&pythonRieszTransformOfLOG2D<float>),
-        (arg("image"), arg("scale"), arg("xorder"), arg("yorder"),arg("out")=python::object()),
-        "Calculate Riesz transforms of the Laplacian of Gaussian.\n\n"
-        "For details see rieszTransformOfLOG_ in the vigra C++ documentation.\n");
-
-    def("gaussianGradientMagnitude",
-        registerConverters(&pythonGaussianGradientMagnitude<float,3>),
-        (arg("image"), arg("sigma"), arg("accumulate")=true, arg("out")=python::object(), 
-         arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
-        "Calculate the gradient magnitude by means of a 1st derivative of "
-        "Gaussian filter at the given scale for a 2D scalar or multiband image.\n"
-        "If 'accumulate' is True (the default), the gradients are accumulated (in the "
-        "L2-norm sense) over all  channels of a multi-channel array. Otherwise, "
-        "a separate gradient magnitude is computed for each channel.\n\n"
-        "If 'sigma' is a single value, an isotropic filter at this scale is "
-        "applied (i.e., each dimension is filtered in the same way). "
-        "If 'sigma' is a tuple or list of values, the amount of smoothing "
+    // def("gaussianGradient",
+        // registerConverters(&pythonGaussianGradientND<float,2>),
+        // (arg("image"), arg("sigma"), arg("out")=python::object(),
+         // arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
+        // "Calculate the gradient vector by means of a 1st derivative of "
+        // "Gaussian filter at the given scale for a 2D scalar image.\n"
+        // "\n"
+        // "If 'sigma' is a single value, an isotropic filter at this scale is\n"
+        // "applied (i.e., each dimension is filtered in the same way).\n"
+        // "If 'sigma' is a tuple or list of values, the amount of smoothing\n"
+        // "will be different for each spatial dimension.\n"
+        // "The optional 'sigma_d' (single, tuple, or list) denotes the PSF\n"
+        // "standard deviation per axis, the optional 'step_size' (single, tuple,\n"
+        // "or list) the distance between two adjacent pixels for each dimension.\n"
+        // "The length of the tuples or lists must be equal to the number of spatial\n"
+        // "dimensions.\n"
+        // "\n"
+        // // "'window_size' and 'roi' have the same meaning as in :func:`gaussianSmoothing`.\n\n"
+        // "For details see gaussianGradientMultiArray_ and ConvolutionOptions_ in the vigra C++ documentation.\n");
+
+    // def("gaussianGradient",
+        // registerConverters(&pythonGaussianGradientND<float,3>),
+        // (arg("volume"), arg("sigma"), arg("out")=python::object(),
+         // arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
+        // "Likewise for a 3D scalar volume.\n");
+
+    multidef("gaussianGradientMagnitude",
+        pyGaussianGradientMagnitude<2, 5, float, double>().installFallback(),
+        (arg("array"),
+         arg("sigma"),
+         arg("accumulate")=true,
+         arg("out")=python::object(),
+         arg("sigma_d")=0.0,
+         arg("step_size")=1.0,
+         arg("window_size")=0.0,
+         arg("roi")=python::object()),
+        "\n"
+        "Calculate the gradient magnitude by means of a 1st derivative of\n"
+        "Gaussian filter at the given scale for a scalar or multiband array\n"
+        "with up to 5 dimensions.\n"
+        "\n"
+        "If 'accumulate' is True (the default), the gradients are accumulated\n"
+        "(in the L2-norm sense) over all channels of a multi-channel array.\n"
+        "Otherwise, a separate gradient magnitude is computed for each channel.\n"
+        "\n"
+        "If 'sigma' is a single value, an isotropic filter at this scale is\n"
+        "applied (i.e., each dimension is filtered in the same way).\n"
+        "If 'sigma' is a tuple or list of values, the amount of smoothing\n"
         "will be different for each spatial dimension.\n"
-        "The optional 'sigma_d' (single, tuple, or list) denotes the resolution standard deviation "
-        "per axis, the optional 'step_size' (single, tuple, or list) the distance between two adjacent "
-        "pixels for each dimension. "
-        "The length of the tuples or lists must be equal to the "
-        "number of spatial dimensions.\n\n"        
+        "The optional 'sigma_d' (single, tuple, or list) denotes the PSF\n"
+        "standard deviation per axis, the optional 'step_size' (single, tuple,\n"
+        "or list) the distance between two adjacent pixels for each dimension.\n"
+        "The length of the tuples or lists must be equal to the number of spatial\n"
+        "dimensions.\n"
+        "\n"
         "'window_size' and 'roi' have the same meaning as in :func:`gaussianSmoothing`.\n\n"
         "For details see gaussianGradientMultiArray_ and ConvolutionOptions_ in the vigra C++ documentation.\n");
 
-    def("gaussianGradientMagnitude",
-        registerConverters(&pythonGaussianGradientMagnitude<float,4>),
-        (arg("volume"), arg("sigma"), arg("accumulate")=true, arg("out")=python::object(), 
-         arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
-        "Likewise for a 3D scalar or multiband volume.\n");
+    // def("gaussianGradientMagnitude",
+        // registerConverters(&pythonGaussianGradientMagnitude<float,3>),
+        // (arg("image"), arg("sigma"), arg("accumulate")=true, arg("out")=python::object(),
+         // arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
+        // "Calculate the gradient magnitude by means of a 1st derivative of "
+        // "Gaussian filter at the given scale for a 2D scalar or multiband image.\n"
+        // "If 'accumulate' is True (the default), the gradients are accumulated (in the "
+        // "L2-norm sense) over all  channels of a multi-channel array. Otherwise, "
+        // "a separate gradient magnitude is computed for each channel.\n"
+        // "\n"
+        // "If 'sigma' is a single value, an isotropic filter at this scale is\n"
+        // "applied (i.e., each dimension is filtered in the same way).\n"
+        // "If 'sigma' is a tuple or list of values, the amount of smoothing\n"
+        // "will be different for each spatial dimension.\n"
+        // "The optional 'sigma_d' (single, tuple, or list) denotes the PSF\n"
+        // "standard deviation per axis, the optional 'step_size' (single, tuple,\n"
+        // "or list) the distance between two adjacent pixels for each dimension.\n"
+        // "The length of the tuples or lists must be equal to the number of spatial\n"
+        // "dimensions.\n"
+        // "\n"
+        // "'window_size' and 'roi' have the same meaning as in :func:`gaussianSmoothing`.\n\n"
+        // "For details see gaussianGradientMultiArray_ and ConvolutionOptions_ in the vigra C++ documentation.\n");
+
+    // def("gaussianGradientMagnitude",
+        // registerConverters(&pythonGaussianGradientMagnitude<float,4>),
+        // (arg("volume"), arg("sigma"), arg("accumulate")=true, arg("out")=python::object(),
+         // arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
+        // "Likewise for a 3D scalar or multiband volume.\n");
 
     def("symmetricGradient",
         registerConverters(&pythonSymmetricGradientND<float,2>),
@@ -597,108 +625,172 @@ void defineTensor()
         "\n"
         "The optional tuple or list 'step_size' denotes the distance between two "
         "adjacent pixels for each dimension; its length must be equal to the "
-        "number of spatial dimensions.\n\n"        
+        "number of spatial dimensions.\n\n"
         "'roi' has the same meaning as in :func:`gaussianSmoothing`.\n\n"
         "For details see symmetricGradientMultiArray_ and ConvolutionOptions_ in the vigra C++ documentation.\n");
 
     def("symmetricGradient",
-        registerConverters(&pythonSymmetricGradientND<float,3>), 
+        registerConverters(&pythonSymmetricGradientND<float,3>),
         (arg("volume"), arg("out")=python::object(), arg("step_size")=1.0, arg("roi")=python::object()),
         "Likewise for a 3D scalar volume.\n");
-    
-    // FIXME: is this function still needed?
-    def("hessianOfGaussian2D",
-        registerConverters(&pythonHessianOfGaussianND<float, 2>),
-        (arg("image"), arg("sigma"), arg("out")=python::object(), 
-         arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
-        "Calculate the Hessian matrix by means of a derivative of "
-        "Gaussian filters at the given scale for a 2D scalar image.\n"
-        "\n"
-        "If 'sigma' is a single value, an isotropic filter at this scale is "
-        "applied (i.e., each dimension is filtered in the same way). "
-        "If 'sigma' is a tuple or list of values, the amount of smoothing "
-        "will be different for each spatial dimension.\n"
-        "The optional 'sigma_d' (single, tuple, or list) denotes the resolution standard deviation "
-        "per axis, the optional 'step_size' (single, tuple, or list) the distance between two adjacent "
-        "pixels for each dimension. "
-        "The length of the tuples or lists must be equal to the "
-        "number of spatial dimensions.\n\n"        
-        "'window_size' and 'roi' have the same meaning as in :func:`gaussianSmoothing`.\n\n"
-        "For details see hessianOfGaussianMultiArray_ and ConvolutionOptions_ in the vigra C++ documentation.\n");
-
-    // FIXME: is this function still needed?
-    def("hessianOfGaussian3D",
-        registerConverters(&pythonHessianOfGaussianND<float, 3>),
-        (arg("volume"), arg("sigma"), arg("out")=python::object(), 
-         arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
-        "Calculate the Hessian matrix by means of a derivative of "
-        "Gaussian filters at the given scale for a 3D scalar image.\n"
-        "\n"
-        "For details see hessianOfGaussianMultiArray_ in the vigra C++ documentation.\n");
 
-    def("hessianOfGaussian",
-        registerConverters(&pythonHessianOfGaussianND<float,2>),
-        (arg("image"), arg("sigma"), arg("out")=python::object(), 
-         arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
-        "Calculate the Hessian matrix by means of a derivative of "
-        "Gaussian filters at the given scale for a 2D scalar image.\n"
+    multidef("hessianOfGaussian",
+        pyHessianOfGaussian<2, 4, float, double>().installFallback(),
+        (arg("array"),
+         arg("sigma"),
+         arg("out")=python::object(),
+         arg("sigma_d")=0.0,
+         arg("step_size")=1.0,
+         arg("window_size")=0.0,
+         arg("roi")=python::object()),
+        "\n"
+        "Calculate the Hessian matrix by means of 2nd derivative of Gaussian\n"
+        "filters at the given scale for scalar arrays up to 4D.\n"
+        "The result has N*(N+1)/2 channels representing the flattened upper\n"
+        "triangular part of the Hessian (N is the dimension of the input).\n"
         "\n"
-        "If 'sigma' is a single value, an isotropic filter at this scale is "
-        "applied (i.e., each dimension is filtered in the same way). "
-        "If 'sigma' is a tuple or list of values, the amount of smoothing "
+        "If 'sigma' is a single value, an isotropic filter at this scale is\n"
+        "applied (i.e., each dimension is filtered in the same way).\n"
+        "If 'sigma' is a tuple or list of values, the amount of smoothing\n"
         "will be different for each spatial dimension.\n"
-        "The optional 'sigma_d' (single, tuple, or list) denotes the resolution standard deviation "
-        "per axis, the optional 'step_size' (single, tuple, or list) the distance between two adjacent "
-        "pixels for each dimension. "
-        "The length of the tuples or lists must be equal to the "
-        "number of spatial dimensions.\n\n"        
+        "The optional 'sigma_d' (single, tuple, or list) denotes the PSF\n"
+        "standard deviation per axis, the optional 'step_size' (single, tuple,\n"
+        "or list) the distance between two adjacent pixels for each dimension.\n"
+        "The length of the tuples or lists must be equal to the number of spatial\n"
+        "dimensions.\n"
+        "\n"
         "'window_size' and 'roi' have the same meaning as in :func:`gaussianSmoothing`.\n\n"
         "For details see hessianOfGaussianMultiArray_ in the vigra C++ documentation.\n");
 
-    def("hessianOfGaussian",
-        registerConverters(&pythonHessianOfGaussianND<float,3>),
-        (arg("volume"), arg("sigma"), arg("out")=python::object(), 
-         arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
-        "Likewise for a 3D scalar or multiband volume.\n");
-
-    def("structureTensor",
-        registerConverters(&pythonStructureTensor<float,3>),
-        (arg("image"), arg("innerScale"), arg("outerScale"), arg("out")=python::object(), 
-         arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
-        "Calculate the structure tensor of an image by means of Gaussian "
-        "(derivative) filters at the given scales. If the input has multiple channels, "
-        "the structure tensors of each channel are added to get the result.\n\n"
-        "If 'innerScale' and 'outerScale' are single values, "
-        "isotropic filters at these scales are "
-        "applied (i.e., each dimension is filtered in the same way). "
-        "If 'innerScale' and / or 'outerScale' are are tuples or lists of "
-        "values, the amount of smoothing "
-        "will be different for each spatial dimension.\n"
-        "The optional 'sigma_d' (single, tuple, or list) denotes the resolution standard deviation "
-        "per axis, the optional 'step_size' (single, tuple, or list) the distance between two adjacent "
-        "pixels for each dimension. "
-        "The length of the tuples or lists must be equal to the "
-        "number of spatial dimensions.\n\n"        
+    // // FIXME: is this function still needed?
+    // def("hessianOfGaussian2D",
+        // registerConverters(&pythonHessianOfGaussianND<float, 2>),
+        // (arg("image"), arg("sigma"), arg("out")=python::object(),
+         // arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
+        // "Calculate the Hessian matrix by means of a derivative of "
+        // "Gaussian filters at the given scale for a 2D scalar image.\n"
+        // "\n"
+        // "If 'sigma' is a single value, an isotropic filter at this scale is "
+        // "applied (i.e., each dimension is filtered in the same way). "
+        // "If 'sigma' is a tuple or list of values, the amount of smoothing "
+        // "will be different for each spatial dimension.\n"
+        // "The optional 'sigma_d' (single, tuple, or list) denotes the resolution standard deviation "
+        // "per axis, the optional 'step_size' (single, tuple, or list) the distance between two adjacent "
+        // "pixels for each dimension. "
+        // "The length of the tuples or lists must be equal to the "
+        // "number of spatial dimensions.\n\n"
+        // "'window_size' and 'roi' have the same meaning as in :func:`gaussianSmoothing`.\n\n"
+        // "For details see hessianOfGaussianMultiArray_ and ConvolutionOptions_ in the vigra C++ documentation.\n");
+
+    // // FIXME: is this function still needed?
+    // def("hessianOfGaussian3D",
+        // registerConverters(&pythonHessianOfGaussianND<float, 3>),
+        // (arg("volume"), arg("sigma"), arg("out")=python::object(),
+         // arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
+        // "Calculate the Hessian matrix by means of a derivative of "
+        // "Gaussian filters at the given scale for a 3D scalar image.\n"
+        // "\n"
+        // "For details see hessianOfGaussianMultiArray_ in the vigra C++ documentation.\n");
+
+    // def("hessianOfGaussian",
+        // registerConverters(&pythonHessianOfGaussianND<float,2>),
+        // (arg("image"), arg("sigma"), arg("out")=python::object(),
+         // arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
+        // "Calculate the Hessian matrix by means of a derivative of "
+        // "Gaussian filters at the given scale for a 2D scalar image.\n"
+        // "\n"
+        // "If 'sigma' is a single value, an isotropic filter at this scale is\n"
+        // "applied (i.e., each dimension is filtered in the same way).\n"
+        // "If 'sigma' is a tuple or list of values, the amount of smoothing\n"
+        // "will be different for each spatial dimension.\n"
+        // "The optional 'sigma_d' (single, tuple, or list) denotes the PSF\n"
+        // "standard deviation per axis, the optional 'step_size' (single, tuple,\n"
+        // "or list) the distance between two adjacent pixels for each dimension.\n"
+        // "The length of the tuples or lists must be equal to the number of spatial\n"
+        // "dimensions.\n"
+        // "\n"
+        // "'window_size' and 'roi' have the same meaning as in :func:`gaussianSmoothing`.\n\n"
+        // "For details see hessianOfGaussianMultiArray_ in the vigra C++ documentation.\n");
+
+    // def("hessianOfGaussian",
+        // registerConverters(&pythonHessianOfGaussianND<float,3>),
+        // (arg("volume"), arg("sigma"), arg("out")=python::object(),
+         // arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
+        // "Likewise for a 3D scalar or multiband volume.\n");
+
+    multidef("structureTensor",
+        pyStructureTensor<3, 5, float, double>().installFallback(),
+        (arg("array"),
+         arg("innerScale"),
+         arg("outerScale"),
+         arg("out")=python::object(),
+         arg("sigma_d")=0.0,
+         arg("step_size")=1.0,
+         arg("window_size")=0.0,
+         arg("roi")=python::object()),
+        "\n"
+        "Calculate the structure tensor of an array (up to 5D) by means of Gaussian\n"
+        "(derivative) filters at the given scales. If the input has multiple channels,\n"
+        "the structure tensors of each channel are added to get the result.\n"
+        "The result has N*(N+1)/2 channels representing the flattened upper\n"
+        "triangular part of the structure tensor (N is the dimension of the input).\n"
+        "\n"
+        "If 'innerScale' and 'outerScale' are single values, isotropic filters at\n"
+        "these scales are applied (i.e., each dimension is filtered in the same way).\nS"
+        "If 'innerScale' and/or 'outerScale' are are tuples or lists of values,\n"
+        "the amount of smoothing will be different for each spatial dimension.\n"
+        "The optional 'sigma_d' (single, tuple, or list) denotes the PSF standard\n"
+        "deviation per axis, the optional 'step_size' (single, tuple, or list) the\n"
+        "distance between two adjacent pixels for each dimension. The length of the\n"
+        "tuples or lists must be equal to the number of spatial dimensions.\n"
+        "\n"
         "'window_size' and 'roi' have the same meaning as in :func:`gaussianSmoothing`.\n\n"
         "For details see structureTensorMultiArray_ and ConvolutionOptions_ in the vigra C++ documentation.\n");
 
-    def("structureTensor",
-        registerConverters(&pythonStructureTensor<float,4>),
-        (arg("volume"), arg("innerScale"), arg("outerScale"), arg("out")=python::object(), 
-         arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
-        "Likewise for a 3D scalar or multiband volume.\n");
+    // def("structureTensor",
+        // registerConverters(&pythonStructureTensor<float,3>),
+        // (arg("image"), arg("innerScale"), arg("outerScale"), arg("out")=python::object(),
+         // arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
+        // "Calculate the structure tensor of an image by means of Gaussian "
+        // "(derivative) filters at the given scales. If the input has multiple channels, "
+        // "the structure tensors of each channel are added to get the result.\n\n"
+        // "If 'innerScale' and 'outerScale' are single values, "
+        // "isotropic filters at these scales are "
+        // "applied (i.e., each dimension is filtered in the same way). "
+        // "If 'innerScale' and / or 'outerScale' are are tuples or lists of "
+        // "values, the amount of smoothing "
+        // "will be different for each spatial dimension.\n"
+        // "The optional 'sigma_d' (single, tuple, or list) denotes the resolution standard deviation "
+        // "per axis, the optional 'step_size' (single, tuple, or list) the distance between two adjacent "
+        // "pixels for each dimension. "
+        // "The length of the tuples or lists must be equal to the "
+        // "number of spatial dimensions.\n\n"
+        // "'window_size' and 'roi' have the same meaning as in :func:`gaussianSmoothing`.\n\n"
+        // "For details see structureTensorMultiArray_ and ConvolutionOptions_ in the vigra C++ documentation.\n");
+
+    // def("structureTensor",
+        // registerConverters(&pythonStructureTensor<float,4>),
+        // (arg("volume"), arg("innerScale"), arg("outerScale"), arg("out")=python::object(),
+         // arg("sigma_d")=0.0, arg("step_size")=1.0, arg("window_size")=0.0, arg("roi")=python::object()),
+        // "Likewise for a 3D scalar or multiband volume.\n");
 
     def("boundaryTensor2D",
         registerConverters(&pythonBoundaryTensor2D<float, float>),
         (arg("image"), arg("scale"),arg("out")=python::object()),
         "Calculate the boundary tensor for a scalar valued 2D image."
         "For details see boundaryTensor_ in the vigra C++ documentation.\n");
-        
+
+    def("rieszTransformOfLOG2D",
+        registerConverters(&pythonRieszTransformOfLOG2D<float>),
+        (arg("image"), arg("scale"), arg("xorder"), arg("yorder"),arg("out")=python::object()),
+        "Calculate Riesz transforms of the Laplacian of Gaussian.\n\n"
+        "For details see rieszTransformOfLOG_ in the vigra C++ documentation.\n");
+
     /** FIXME: Export of Kernel2D before
     def("gradientEnergyTensor2D",
         registerConverters(&gradientEnergyTensor2D<float,float>),
         (arg("image"), arg("derivKernel"), arg("smoothKernel"),arg("out")=python::object()));
-        
+
     */
 
     def("tensorEigenRepresentation2D",
@@ -708,57 +800,102 @@ void defineTensor()
         "For details see tensorEigenRepresentation_ in the vigra C++ documentation.\n"
         );
 
-    def("vectorToTensor",
-        registerConverters(&pythonVectorToTensor<float,2>),
-        (arg("image"),arg("out")=python::object()),
-        "Turn a 2D vector valued image (e.g. the gradient image) into "
-        "a tensor image by computing the outer product in every pixel.\n\n"
+    multidef("vectorToTensor",
+        pyVectorToTensor<2, 3, float, double>().installFallback(),
+        (arg("array"),
+         arg("out")=python::object()),
+        "\n"
+        "Turn a vector valued 2D or 3D array (e.g. the gradient array) into "
+        "a tensor array by computing the outer product in every pixel.\n"
+        "\n"
         "For details see vectorToTensorMultiArray_ in the vigra C++ documentation.\n");
 
-    def("vectorToTensor",
-        registerConverters(&pythonVectorToTensor<float,3>),
-        (arg("volume"),arg("out")=python::object()),
-        "Likewise for a 3D vector-valued volume.\n");
-
-    def("tensorTrace",
-        registerConverters(&pythonTensorTrace<float,2>),
-        (arg("image"),arg("out")=python::object()),
-        "Calculate the trace of a 2x2 tensor image.\n\n"
+    // def("vectorToTensor",
+        // registerConverters(&pythonVectorToTensor<float,2>),
+        // (arg("image"),arg("out")=python::object()),
+        // "Turn a 2D vector valued image (e.g. the gradient image) into "
+        // "a tensor image by computing the outer product in every pixel.\n\n"
+        // "For details see vectorToTensorMultiArray_ in the vigra C++ documentation.\n");
+
+    // def("vectorToTensor",
+        // registerConverters(&pythonVectorToTensor<float,3>),
+        // (arg("volume"),arg("out")=python::object()),
+        // "Likewise for a 3D vector-valued volume.\n");
+
+    multidef("tensorTrace",
+        pyTensorTrace<2, 3, float, double>().installFallback(),
+        (arg("array"),
+         arg("out")=python::object()),
+        "\n"
+        "Calculate the elementwise trace of an array which stores the flattened\n"
+        "upper triangular part of a symmetric tensor in each element (e.g. the\n"
+        "output of :func:`structureTensor`).\n"
+        "\n"
         "For details see tensorTraceMultiArray_ in the vigra C++ documentation.\n");
 
-    def("tensorTrace",
-        registerConverters(&pythonTensorTrace<float,3>),
-        (arg("volume"),arg("out")=python::object()),
-        "Likewise for a 3x3 tensor volume.\n");
-
-    def("tensorDeterminant",
-        registerConverters(&pythonTensorDeterminant<float,2>),
-        (arg("image"),arg("out")=python::object()),
-        "Calculate the determinant of a 2x2 tensor image.\n\n"
+    // def("tensorTrace",
+        // registerConverters(&pythonTensorTrace<float,2>),
+        // (arg("image"),arg("out")=python::object()),
+        // "Calculate the trace of a 2x2 tensor image.\n\n"
+        // "For details see tensorTraceMultiArray_ in the vigra C++ documentation.\n");
+
+    // def("tensorTrace",
+        // registerConverters(&pythonTensorTrace<float,3>),
+        // (arg("volume"),arg("out")=python::object()),
+        // "Likewise for a 3x3 tensor volume.\n");
+
+    multidef("tensorDeterminant",
+        pyTensorDeterminant<2, 3, float, double>().installFallback(),
+        (arg("image"),
+         arg("out")=python::object()),
+        "\n"
+        "Calculate the elementwise determinant of an array which stores the flattened\n"
+        "upper triangular part of a symmetric tensor in each element (e.g. the\n"
+        "output of :func:`structureTensor`).\n"
+        "\n"
         "For details see tensorDeterminantMultiArray_ in the vigra C++ documentation.\n");
 
-    def("tensorDeterminant",
-        registerConverters(&pythonTensorDeterminant<float,3>),
-        (arg("volume"),arg("out")=python::object()),
-        "Likewise for a 3x3 tensor volume.\n");
-
-    def("tensorEigenvalues",
-        registerConverters(&pythonTensorEigenvalues<float,2>),
-        (arg("image"),arg("out")=python::object()),
-        "Calculate the eigenvalues in each pixel/voxel of a 2x2 tensor image.\n\n"
+    // def("tensorDeterminant",
+        // registerConverters(&pythonTensorDeterminant<float,2>),
+        // (arg("image"),arg("out")=python::object()),
+        // "Calculate the determinant of a 2x2 tensor image.\n\n"
+        // "For details see tensorDeterminantMultiArray_ in the vigra C++ documentation.\n");
+
+    // def("tensorDeterminant",
+        // registerConverters(&pythonTensorDeterminant<float,3>),
+        // (arg("volume"),arg("out")=python::object()),
+        // "Likewise for a 3x3 tensor volume.\n");
+
+    multidef("tensorEigenvalues",
+        pyTensorEigenvalues<2, 3, float, double>().installFallback(),
+        (arg("image"),
+         arg("out")=python::object()),
+        "\n"
+        "Calculate the eigenvalues in every element of an array which stores the\n"
+        "flattened upper triangular part of a symmetric tensor in each element\n"
+        "(e.g. the output of :func:`structureTensor`).\n"
+        "The result has as many channels (= eigenvalues) as the spatial dimension\n"
+        "of the input.\n"
+        "\n"
         "For details see tensorEigenvaluesMultiArray_ in the vigra C++ documentation.\n");
 
-    def("tensorEigenvalues",
-        registerConverters(&pythonTensorEigenvalues<float,3>),
-        (arg("volume"),arg("out")=python::object()),
-        "Likewise for a 3x3 tensor volume.\n");
+    // def("tensorEigenvalues",
+        // registerConverters(&pythonTensorEigenvalues<float,2>),
+        // (arg("image"),arg("out")=python::object()),
+        // "Calculate the eigenvalues in each pixel/voxel of a 2x2 tensor image.\n\n"
+        // "For details see tensorEigenvaluesMultiArray_ in the vigra C++ documentation.\n");
+
+    // def("tensorEigenvalues",
+        // registerConverters(&pythonTensorEigenvalues<float,3>),
+        // (arg("volume"),arg("out")=python::object()),
+        // "Likewise for a 3x3 tensor volume.\n");
 
     def("hourGlassFilter2D",
         registerConverters(&pythonHourGlassFilter2D<float,float>),
         (arg("image"), arg("sigma"), arg("rho"),arg("out")=python::object()),
         "Anisotropic tensor smoothing with the hourglass filter. \n\n"
         "For details see hourGlassFilter_ in the vigra C++ documentation.\n");
- 
+
  /* Wee, tons of errors here
     def("ellipticGaussian2D",
         registerConverters(&ellipticGaussian2D<float,float>),
diff --git a/vigranumpy/src/core/vigranumpycore.cxx b/vigranumpy/src/core/vigranumpycore.cxx
index ed7effb..ec38d36 100644
--- a/vigranumpy/src/core/vigranumpycore.cxx
+++ b/vigranumpy/src/core/vigranumpycore.cxx
@@ -52,8 +52,18 @@ namespace vigra {
 
 UInt32 pychecksum(python::str const & s)
 {
-    unsigned int size = len(s);
-    return checksum(PyString_AsString(s.ptr()), size);
+#if PY_MAJOR_VERSION < 3
+	unsigned int size = len(s);
+	return checksum(PyString_AsString(s.ptr()), size);
+#elif (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION < 3)
+	Py_ssize_t size = PyUnicode_GET_DATA_SIZE(s.ptr());
+	const char * data = PyUnicode_AS_DATA(s.ptr());
+	return checksum(data, size);
+#else
+	Py_ssize_t size = 0;
+	char * data = PyUnicode_AsUTF8AndSize(s.ptr(), &size);
+	return checksum(data, size);
+#endif
 }
 
 void registerNumpyArrayConverters();
@@ -67,8 +77,9 @@ using namespace vigra;
 
 BOOST_PYTHON_MODULE_INIT(vigranumpycore)
 {
-    import_array();
-    registerNumpyArrayConverters();
+	if (_import_array() < 0)
+		pythonToCppException(0);
+	registerNumpyArrayConverters();
     defineAxisTags();
     defineChunkedArray();
     
diff --git a/vigranumpy/src/fourier/CMakeLists.txt b/vigranumpy/src/fourier/CMakeLists.txt
index 7f9f3dc..ed2c1b2 100644
--- a/vigranumpy/src/fourier/CMakeLists.txt
+++ b/vigranumpy/src/fourier/CMakeLists.txt
@@ -1,11 +1,10 @@
 INCLUDE_DIRECTORIES(${VIGRANUMPY_INCLUDE_DIRS} ${FFTW3_INCLUDE_DIR})
 
-VIGRA_CONFIGURE_THREADING()
-
-VIGRA_ADD_NUMPY_MODULE(fourier 
+VIGRA_ADD_NUMPY_MODULE(fourier
   SOURCES
     fourier.cxx
-  LIBRARIES   
+  LIBRARIES
     ${FFTW3_LIBRARIES}
     ${FFTW3F_LIBRARIES}
-  VIGRANUMPY)   
+    ${VIGRANUMPY_THREAD_LIBRARIES}
+  VIGRANUMPY)
diff --git a/vigranumpy/test/CMakeLists.txt b/vigranumpy/test/CMakeLists.txt
index 655f6d5..58307a6 100644
--- a/vigranumpy/test/CMakeLists.txt
+++ b/vigranumpy/test/CMakeLists.txt
@@ -10,6 +10,7 @@ SET(TEST_SCRIPTS
     test4.py
     test_color.py
     test_segmentation.py
+    test_multidef.py
     )
 
 # setup the file 'testsuccess.cxx' which will become out-of-date when the
diff --git a/vigranumpy/test/test1.py b/vigranumpy/test/test1.py
index 023ed4f..bd63937 100644
--- a/vigranumpy/test/test1.py
+++ b/vigranumpy/test/test1.py
@@ -1,4 +1,4 @@
-#######################################################################
+#######################################################################
 #                                                                      
 #         Copyright 2009-2010 by Ullrich Koethe                        
 #                                                                      
@@ -33,9 +33,10 @@
 #                                                                      
 #######################################################################
 
+from __future__ import division, print_function
 import sys
-print >> sys.stderr, "\nexecuting test file", __file__
-execfile('set_paths.py')
+print("\nexecuting test file", __file__, file=sys.stderr)
+exec(compile(open('set_paths.py', "rb").read(), 'set_paths.py', 'exec'))
 
 from nose.tools import assert_equal, raises, assert_raises
 import vigra
@@ -190,7 +191,6 @@ def test_Region2Crack():
 def test_transforms():
     res = distanceTransform2D(img_scalar_f)
     checkShape(img_scalar_i, res)
-    #print >> sys.stderr, res.dtype,
     checkType(res, np.float32)
     
     res = distanceTransform2D(img_scalar_ui8)
@@ -275,5 +275,5 @@ def test_edges():
     
     
 def ok_():
-    print >> sys.stderr, ".",
+    print(".", file=sys.stderr)
         
diff --git a/vigranumpy/test/test2.py b/vigranumpy/test/test2.py
index 0717bc9..dd23543 100644
--- a/vigranumpy/test/test2.py
+++ b/vigranumpy/test/test2.py
@@ -1,4 +1,4 @@
-#######################################################################
+#######################################################################
 #
 #         Copyright 2009-2010 by Ullrich Koethe
 #
@@ -33,9 +33,10 @@
 #
 #######################################################################
 
+from __future__ import division, print_function
 import sys
-print >> sys.stderr, "\nexecuting test file", __file__
-execfile('set_paths.py')
+print("\nexecuting test file", __file__, file=sys.stderr)
+exec(compile(open('set_paths.py', "rb").read(), 'set_paths.py', 'exec'))
 
 from nose.tools import assert_equal, raises
 import numpy as np
diff --git a/vigranumpy/test/test3.py b/vigranumpy/test/test3.py
index 2e48369..e13dd81 100644
--- a/vigranumpy/test/test3.py
+++ b/vigranumpy/test/test3.py
@@ -1,4 +1,4 @@
-#######################################################################
+#######################################################################
 #                                                                      
 #         Copyright 2009-2010 by Ullrich Koethe                        
 #                                                                      
@@ -33,12 +33,13 @@
 #                                                                      
 #######################################################################
 
+from __future__ import division, print_function
 import sys
-print >> sys.stderr, "\nexecuting test file", __file__
-execfile('set_paths.py')
+print("\nexecuting test file", __file__, file=sys.stderr)
+exec(compile(open('set_paths.py', "rb").read(), 'set_paths.py', 'exec'))
 
 from nose.tools import assert_equal, raises
-from vigra import numpy as np
+import numpy as np
 from vigra import *
 from vigra import arraytypes as at
 from vigra.filters import *
diff --git a/vigranumpy/test/test4.py b/vigranumpy/test/test4.py
index c0bb2f0..bf89970 100644
--- a/vigranumpy/test/test4.py
+++ b/vigranumpy/test/test4.py
@@ -1,4 +1,4 @@
-#######################################################################
+#######################################################################
 #                                                                      
 #         Copyright 2009-2010 by Ullrich Koethe                        
 #                                                                      
@@ -33,16 +33,18 @@
 #                                                                      
 #######################################################################
 
+from __future__ import division, print_function
 import sys
-print >> sys.stderr, "\nexecuting test file", __file__
-execfile('set_paths.py')
+print("\nexecuting test file", __file__, file=sys.stderr)
+exec(compile(open('set_paths.py', "rb").read(), 'set_paths.py', 'exec'))
 
 
 from nose.tools import assert_equal, raises
-from vigra import numpy as np
-from vigra import numpy as numpy
+import numpy as np
+import numpy as numpy
 from vigra import graphs as vigraph
 from vigra import graphs,taggedView
+#from vigra.arraytypes import taggedView
 import vigra
 
 def testGridGraphSegmentationFelzenszwalbSegmentation():
diff --git a/vigranumpy/test/test_arraytypes.py b/vigranumpy/test/test_arraytypes.py
index 5974b9d..c5764cd 100644
--- a/vigranumpy/test/test_arraytypes.py
+++ b/vigranumpy/test/test_arraytypes.py
@@ -1,4 +1,4 @@
-#######################################################################
+#######################################################################
 #
 #         Copyright 2009-2010 by Ullrich Koethe
 #
@@ -36,10 +36,12 @@
 # run with a simple 'nosetests' in this directory
 # (and nose installed, i.e. 'easy_install nose')
 
-import sys
-print >> sys.stderr, "\nexecuting test file", __file__
-execfile('set_paths.py')
+from __future__ import division, print_function
+from functools import reduce
 
+import sys
+print("\nexecuting test file", __file__, file=sys.stderr)
+exec(compile(open('set_paths.py', "rb").read(), 'set_paths.py', 'exec'))
 # import vigra  # FIXME: without this line, C++ constructors don't find VigraArray
 import vigra.arraytypes as arraytypes
 import vigra.ufunc as ufunc
@@ -49,11 +51,21 @@ from nose.tools import assert_equal, raises, assert_true
 
 from vigra.arraytypes import AxisTags, AxisInfo
 
+if sys.version_info[0] > 2:
+    def xrange(*args):
+        return range(*args)
+
+    def iteritems(dictionary, **kwargs):
+        return dictionary.items(**kwargs)
+else:
+    def iteritems(dictionary, **kwargs):
+        return dictionary.iteritems(**kwargs)
+
 numpyHasComplexNegateBug = numpy.version.version.startswith('1.0')
 
 try:
     vt.testAny()
-except Exception, e:
+except Exception as e:
     ArgumentError = type(e)
 
 allTests = set()
@@ -85,8 +97,6 @@ def checkArray(cls, channels, dim, hasChannelAxis=True):
             assert_equal(sys.getrefcount(b), 2)
             assert b.__class__ is img.__class__
             assert_equal(b.shape, img.shape)
-            # print b.shape, img.shape, b.strides, img.strides
-            # assert False
             assert_equal(b.strides, img.strides)
             assert_equal(b.order, img.order)
             assert_equal(b.flags.c_contiguous, img.flags.c_contiguous)
@@ -171,7 +181,7 @@ def checkArray(cls, channels, dim, hasChannelAxis=True):
         else:
             try:
                 img.withAxes('y', 'z', 'x')
-                raise AssertionError, "img.withAxes() failed to throw on non-singleton channel."
+                raise AssertionError("img.withAxes() failed to throw on non-singleton channel.")
             except RuntimeError:
                 pass
         # FIXME: add more tests
@@ -183,7 +193,7 @@ def checkArray(cls, channels, dim, hasChannelAxis=True):
         assert_equal(img.min(), 99.0)
         assert_equal(img.max(), 99.0)
         img.flat[:] = range(img.size)
-        assert_equal(img.flatten().tolist(), range(img.size))
+        assert_equal(img.flatten().tolist(), list(range(img.size)))
         img[1,2] = value
         assert_equal((img[1,2]==value).all(), True)
 
@@ -356,7 +366,7 @@ def checkFailure(obj, n):
         f(obj)
     except ArgumentError:
         return
-    raise AssertionError, "%r did not throw ArgumentError as expected when passed a %r with shape %s, stride %s, axistags '%s'" % (n, type(obj), str(obj.shape), str(obj.strides), repr(getattr(obj, "axistags", "none")))
+    raise AssertionError("%r did not throw ArgumentError as expected when passed a %r with shape %s, stride %s, axistags '%s'" % (n, type(obj), str(obj.shape), str(obj.strides), repr(getattr(obj, "axistags", "none"))))
 
 def checkCompatibility(obj, compatible):
     for n in compatible:
@@ -364,7 +374,6 @@ def checkCompatibility(obj, compatible):
             f = getattr(vt, n)
             shape, acopy, default_ordering, same_ordering = f(obj)
 
-
             assert_equal(obj.shape, shape)
 
             assert_equal(obj.__class__, acopy.__class__)
@@ -421,10 +430,10 @@ def checkCompatibility(obj, compatible):
                             assert(dobj.view(numpy.ndarray) == default_ordering.view(numpy.ndarray)).all()
                         else:
                             assert_equal(dshape + (1,), default_ordering.shape)
-                            assert(fobj.view(numpy.ndarray) == default_ordering[...,0].view(numpy.ndarray)).all()
+                            assert(dobj.view(numpy.ndarray) == default_ordering[...,0].view(numpy.ndarray)).all()
         except Exception:
-            print "exception in %s with shape %s strides %s tags (%s)" % (n, obj.shape, obj.strides,
-                                            repr(getattr(obj, "axistags", "none")))
+            print("exception in %s with shape %s strides %s tags (%s)" % (n, obj.shape, obj.strides,
+                                            repr(getattr(obj, "axistags", "none"))))
             raise
 
     incompatible = allTests.difference(compatible)
@@ -433,9 +442,9 @@ def checkCompatibility(obj, compatible):
         try:
             checkFailure(obj, n)
         except Exception:
-            print "exception in %s with shape %s strides %s tags (%s)" % (n, obj.shape, obj.strides,
-                                            repr(getattr(obj, "axistags", "none")))
-            raise
+            print("exception in %s with shape %s strides %s tags (%s)" % (n, obj.shape, obj.strides,
+                                            repr(getattr(obj, "axistags", "none"))))
+            raise     
 
 def testAxisTags():
     axistags = AxisTags(AxisInfo.c(description="RGB"),
@@ -1186,7 +1195,7 @@ def testTaggedShape():
 
     try:
         r = arraytypes.taggedView(a, 'cxy', order='C')
-        raise AssertionError, "arraytypes.taggedView() failed to throw."
+        raise AssertionError("arraytypes.taggedView() failed to throw.")
     except RuntimeError:
         pass
 
@@ -1211,13 +1220,13 @@ def testTaggedShape():
 
     try:
         r = arraytypes.taggedView(a, 'xcz')
-        raise AssertionError, "arraytypes.taggedView() failed to throw."
+        raise AssertionError("arraytypes.taggedView() failed to throw.")
     except RuntimeError:
         pass
 
     try:
         r = arraytypes.taggedView(a, 'xcz', force=True)
-        raise AssertionError, "arraytypes.taggedView() failed to throw."
+        raise AssertionError("arraytypes.taggedView() failed to throw.")
     except RuntimeError:
         pass
 
@@ -1438,7 +1447,7 @@ def testMethods():
 
     assert_equal(a.mean(dtype=numpy.longdouble), (a.size - 1.0) / 2.0)
     assert (a.mean(axis='y', dtype=numpy.longdouble) ==
-            range((a.size-a.shape[0])/2, (a.size+a.shape[0])/2)).all()
+            range((a.size-a.shape[0])//2, (a.size+a.shape[0])//2)).all()
 
     assert_equal(a.min(), 0)
     assert (a.min(axis='y') == range(a.shape[0])).all()
@@ -1533,7 +1542,7 @@ def testUfuncs():
     for t in types:
         arrays[t] = arraytypes.ScalarImage((2,2), t, value=2)
         ones[t] = arraytypes.ScalarImage((1,1), t, value=1)
-    for t, a in arrays.iteritems():
+    for t, a in iteritems(arrays):
         b = -a
         assert_equal(t, b.dtype)
         assert_equal(a.axistags, b.axistags)
diff --git a/vigranumpy/test/test_color.py b/vigranumpy/test/test_color.py
index e2b4947..5551a97 100644
--- a/vigranumpy/test/test_color.py
+++ b/vigranumpy/test/test_color.py
@@ -1,4 +1,4 @@
-#######################################################################
+#######################################################################
 #                                                                      
 #         Copyright 2009-2013 by Ullrich Koethe and Thorben Kroeger
 #                                                                      
@@ -33,9 +33,10 @@
 #                                                                      
 #######################################################################
 
+from __future__ import division, print_function
 import sys
-print >> sys.stderr, "\nexecuting test file", __file__
-execfile('set_paths.py')
+print("\nexecuting test file", __file__, file=sys.stderr)
+exec(compile(open('set_paths.py', "rb").read(), 'set_paths.py', 'exec'))
 
 from nose.tools import assert_equal, raises, assert_raises, assert_true
 import vigra
diff --git a/vigranumpy/test/test_impex.py b/vigranumpy/test/test_impex.py
index ef4e4b2..14c31c0 100644
--- a/vigranumpy/test/test_impex.py
+++ b/vigranumpy/test/test_impex.py
@@ -1,4 +1,4 @@
-#######################################################################
+#######################################################################
 #                                                                      
 #         Copyright 2009-2010 by Ullrich Koethe                        
 #                                                                      
@@ -33,9 +33,10 @@
 #                                                                      
 #######################################################################
 
+from __future__ import division, print_function
 import sys
-print >> sys.stderr, "\nexecuting test file", __file__
-execfile('set_paths.py')
+print("\nexecuting test file", __file__, file=sys.stderr)
+exec(compile(open('set_paths.py', "rb").read(), 'set_paths.py', 'exec'))
 
 from nose.tools import assert_equal, raises
 import numpy as np
@@ -97,7 +98,7 @@ def test_writeAndReadImageHDF5():
     try:
         import h5py
     except:
-        print "Warning: 'import h5py' failed, not executing HDF5 import/export tests"
+        print("Warning: 'import h5py' failed, not executing HDF5 import/export tests")
         return
     
     # positive tests
diff --git a/vigranumpy/test/test_multidef.py b/vigranumpy/test/test_multidef.py
new file mode 100644
index 0000000..a993a22
--- /dev/null
+++ b/vigranumpy/test/test_multidef.py
@@ -0,0 +1,582 @@
+#######################################################################
+#                                                                      
+#         Copyright 2015-2016 by Ullrich Koethe and Philip Schill      
+#                                                                      
+#    This file is part of the VIGRA computer vision library.           
+#    The VIGRA Website is                                              
+#        http://hci.iwr.uni-heidelberg.de/vigra/                       
+#    Please direct questions, bug reports, and contributions to        
+#        ullrich.koethe at iwr.uni-heidelberg.de    or                    
+#        vigra at informatik.uni-hamburg.de                               
+#                                                                      
+#    Permission is hereby granted, free of charge, to any person       
+#    obtaining a copy of this software and associated documentation    
+#    files (the "Software"), to deal in the Software without           
+#    restriction, including without limitation the rights to use,      
+#    copy, modify, merge, publish, distribute, sublicense, and/or      
+#    sell copies of the Software, and to permit persons to whom the    
+#    Software is furnished to do so, subject to the following          
+#    conditions:                                                       
+#                                                                      
+#    The above copyright notice and this permission notice shall be    
+#    included in all copies or substantial portions of the             
+#    Software.                                                         
+#                                                                      
+#    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND    
+#    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES   
+#    OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND          
+#    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT       
+#    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,      
+#    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING      
+#    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR     
+#    OTHER DEALINGS IN THE SOFTWARE.                                   
+#                                                                      
+#######################################################################
+
+from __future__ import division, print_function
+import sys
+print("\nexecuting test file", __file__, file=sys.stderr)
+exec(compile(open('set_paths.py', "rb").read(), 'set_paths.py', 'exec'))
+
+from nose.tools import assert_equal, raises, assert_raises
+import vigra
+import numpy as np
+
+def checkAboutSame(i1,i2):
+    assert(i1.shape==i2.shape)
+    difference=np.sum(np.abs(i1-i2))/float(np.size(i1))
+    assert(difference<5)
+
+def checkEqual(i1, i2):
+    assert(i1.shape==i2.shape)
+    assert((i1==i2).all())
+
+def test_convexHull():
+    points = np.array([[0, 0], [2, 0], [2, 1], [0, 1], [1, 0.5]], dtype=np.float32)
+    res = np.array([[0, 0], [0, 1], [2, 1], [2, 0], [0, 0]], dtype=np.float32)
+    res = vigra.taggedView(res)
+    hull = vigra.geometry.convexHull(points)
+    checkAboutSame(hull, res)
+    hull = vigra.geometry.convexHull(vigra.taggedView(points))
+    checkAboutSame(res, hull)
+    assert_raises(ValueError, vigra.geometry.convexHull, points.transpose())
+    assert_raises(ValueError, vigra.geometry.convexHull, points.astype(np.uint8))
+    assert_raises(ValueError, vigra.geometry.convexHull, 0, "a")
+
+def test_convolveOneDimension():
+    im = np.array([[0, 0, 0, 0, 0],
+                   [0, 0, 1, 0, 0],
+                   [0, 0, 0, 0, 0]], dtype=np.float64)
+    imc = np.array([[0, 0, 0, 0, 0],
+                    [0, 1, 2, 3, 0],
+                    [0, 0, 0, 0, 0]], dtype=np.float64)
+    k = vigra.filters.explictKernel(-1, 1, np.array([1, 2, 3], dtype=np.float64))
+    res = vigra.filters.convolveOneDimension(im, 0, k)
+    checkAboutSame(res, imc)
+    assert_raises(ValueError, vigra.filters.convolveOneDimension, im)
+    assert_raises(ValueError, vigra.filters.convolveOneDimension, im, 0)
+    assert_raises(ValueError, vigra.filters.convolveOneDimension, im.astype(np.uint8), 0, k)
+    assert_raises(ValueError, vigra.filters.convolveOneDimension, [0, 1], 0, k)
+
+def test_convolve():
+    # Test convolve with a 2D kernel.
+    im = np.array([[0, 0, 0, 0, 0],
+                   [0, 0, 0, 0, 0],
+                   [0, 0, 1, 0, 0],
+                   [0, 0, 0, 0, 0],
+                   [0, 0, 0, 0, 0]], dtype=np.float64)
+    imc0 = np.array([[0, 0, 0, 0, 0],
+                     [0, 1, 2, 3, 0],
+                     [0, 4, 5, 6, 0],
+                     [0, 7, 8, 9, 0],
+                     [0, 0, 0, 0, 0]], dtype=np.float64)
+    k = vigra.filters.Kernel2D()
+    k.initExplicitly((-1, -1), (1, 1), np.array([[1, 2, 3],
+                                                 [4, 5, 6],
+                                                 [7, 8, 9]], dtype=np.float64))
+    res = vigra.filters.convolve(im, k)
+    checkAboutSame(res, imc0)
+    assert_raises(ValueError, vigra.filters.convolve, im)
+    assert_raises(ValueError, vigra.filters.convolve, im.astype(np.uint8), k)
+    assert_raises(ValueError, vigra.filters.convolve, [0, 1], k)
+
+    # Test convolve with a 1D kernel.
+    imc1 = np.array([[0, 0, 0, 0, 0],
+                     [0, 1, 2, 3, 0],
+                     [0, 2, 4, 6, 0],
+                     [0, 3, 6, 9, 0],
+                     [0, 0, 0, 0, 0]], dtype=np.float64)
+    k = vigra.filters.Kernel1D()
+    k.initExplicitly(-1, 1, np.array([1, 2, 3], dtype=np.float64))
+    res = vigra.filters.convolve(im, k)
+    checkAboutSame(res, imc1)
+
+    # Test convolve with two 1D kernels.
+    imc2 = np.array([[0, 0, 0, 0, 0],
+                     [0, 0.5, 1, 2, 0],
+                     [0, 1, 2, 4, 0],
+                     [0, 1.5, 3, 6, 0],
+                     [0, 0, 0, 0, 0]], dtype=np.float64)
+    k0 = vigra.filters.explictKernel(-1, 1, np.array([1, 2, 3], dtype=np.float64))
+    k1 = vigra.filters.explictKernel(-1, 1, np.array([0.5, 1, 2], dtype=np.float64))
+    res = vigra.filters.convolve(im, (k0, k1))
+    checkAboutSame(res, imc2)
+
+def test_gaussianSmoothing():
+    im = np.array([[0, 0, 0],
+                   [0, 1, 0],
+                   [0, 0, 0]], dtype=np.float64)
+    imc = np.array([[ 0.04532707,  0.16757448,  0.04532707],
+                    [ 0.16757448,  0.61952398,  0.16757448],
+                    [ 0.04532707,  0.16757448,  0.04532707]])
+    res = vigra.filters.gaussianSmoothing(im, 0.5)
+    checkAboutSame(res, imc)
+    assert_raises(ValueError, vigra.filters.gaussianSmoothing, im)
+    assert_raises(ValueError, vigra.filters.gaussianSmoothing, im.astype(np.int8), 0.5)
+
+def test_laplacianOfGaussian():
+    im = np.zeros((6, 5), dtype=np.float64)
+    im[2, 2] = 1
+    imc = np.array([[ 0.07002893,  0.08373281,  0.08667994,  0.08373281,  0.07002893],
+                    [ 0.08373281,  0.0174964 , -0.08323153,  0.0174964 ,  0.08373281],
+                    [ 0.08646211, -0.0837286 , -0.31618431, -0.0837286 ,  0.08646211],
+                    [ 0.07846283,  0.00859282, -0.09564048,  0.00859282,  0.07846283],
+                    [ 0.0352323 ,  0.04236348,  0.04414477,  0.04236348,  0.0352323 ],
+                    [ 0.01053995,  0.01780715,  0.0248179 ,  0.01780715,  0.01053995]])
+    res = vigra.filters.laplacianOfGaussian(im)
+    checkAboutSame(res, imc)
+    assert_raises(ValueError, vigra.filters.laplacianOfGaussian, im.astype(np.uint8))
+
+def test_gaussianDivergence():
+    im = np.zeros((5, 5, 2), dtype=np.float64)
+    im[2, 2] = [0, 1]
+    imc = np.array([[  0.,   2.47013247e-02,   5.96311195e-19, -2.47013247e-02,   0.],
+                    [  0.,   5.63656325e-02,   1.35525272e-18, -5.63656325e-02,   0.],
+                    [  0.,   9.12597369e-02,   2.16840434e-18, -9.12597369e-02,   0.],
+                    [  0.,   5.63656325e-02,   1.35525272e-18, -5.63656325e-02,   0.],
+                    [  0.,   2.47013247e-02,   5.96311195e-19, -2.47013247e-02,   0.]])
+    res = vigra.filters.gaussianDivergence(im)
+    checkAboutSame(res, imc)
+    assert_raises(ValueError, vigra.filters.gaussianDivergence, np.zeros((5, 6, 3), dtype=np.float64))
+    assert_raises(ValueError, vigra.filters.gaussianDivergence, np.zeros((10, 11, 12, 2), dtype=np.float64))
+
+def test_multiBinaryErosionDilation():
+    im = np.array([[0, 0, 0, 0, 0],
+                   [0, 1, 1, 0, 0],
+                   [0, 0, 1, 0, 0],
+                   [0, 0, 0, 0, 0]], dtype=np.uint8)
+    imc = np.array([[0, 1, 1, 0, 0],
+                    [1, 1, 1, 1, 0],
+                    [0, 1, 1, 1, 0],
+                    [0, 0, 1, 0, 0]], dtype=np.uint8)
+    imc2 = np.array([[1, 1, 1, 0, 0],
+                    [1, 1, 1, 1, 0],
+                    [0, 1, 1, 1, 0],
+                    [0, 0, 1, 0, 0]], dtype=np.uint8)
+    res = vigra.filters.multiBinaryErosion(imc, 1)
+    checkEqual(res, im)
+    assert_raises(ValueError, vigra.filters.multiBinaryErosion, imc)
+    assert_raises(ValueError, vigra.filters.multiBinaryErosion, imc.astype(np.float64))
+    res = vigra.filters.multiBinaryDilation(im, 1)
+    checkEqual(res, imc)
+    assert_raises(ValueError, vigra.filters.multiBinaryDilation, im)
+    assert_raises(ValueError, vigra.filters.multiBinaryDilation, im.astype(np.float64))
+
+    res = vigra.filters.multiBinaryOpening(im, 1)
+    checkEqual(res, np.zeros((4, 5), dtype=np.uint8))
+    res = vigra.filters.multiBinaryOpening(imc, 1)
+    checkEqual(res, imc)
+    assert_raises(ValueError, vigra.filters.multiBinaryOpening, im)
+    assert_raises(ValueError, vigra.filters.multiBinaryOpening, im.astype(np.float64))
+    res = vigra.filters.multiBinaryClosing(im, 1)
+    checkEqual(res, im)
+    res = vigra.filters.multiBinaryClosing(imc, 1)
+    checkEqual(res, imc2)
+    assert_raises(ValueError, vigra.filters.multiBinaryClosing, im)
+    assert_raises(ValueError, vigra.filters.multiBinaryClosing, im.astype(np.float64))
+
+def test_multiGrayscaleErosionDilation():
+    im = np.array([[0, 0, 0, 0, 0],
+                   [0, 128, 100, 0, 0],
+                   [0, 0, 128, 0, 0],
+                   [0, 0, 0, 0, 0]], dtype=np.uint8)
+    imc0 = np.array([[0, 0, 0, 0, 0],
+                     [0, 81, 81, 0, 0],
+                     [0, 0, 81, 0, 0],
+                     [0, 0, 0, 0, 0]], dtype=np.uint8)
+    imc1 = np.array([[ 56,  92,  64,  28,   0],
+                     [ 92, 128, 100,  64,   0],
+                     [ 56,  92, 128,  92,   0],
+                     [  0,  56,  92,  56,   0]], dtype=np.uint8)
+    imc2 = np.array([[0, 47, 19, 0, 0],
+                     [47, 128, 100, 19, 0],
+                     [0, 47, 128, 47, 0],
+                     [0, 0, 47, 0, 0]], dtype=np.uint8)
+    res = vigra.filters.multiGrayscaleErosion(im, 9)
+    checkEqual(res, imc0)
+    assert_raises(ValueError, vigra.filters.multiGrayscaleErosion, im)
+    assert_raises(ValueError, vigra.filters.multiGrayscaleErosion, im.astype(np.int16), 9)
+    res = vigra.filters.multiGrayscaleDilation(im, 6)
+    checkEqual(res, imc1)
+    assert_raises(ValueError, vigra.filters.multiGrayscaleDilation, im)
+    assert_raises(ValueError, vigra.filters.multiGrayscaleDilation, im.astype(np.int16), 6)
+
+    res = vigra.filters.multiGrayscaleOpening(im, 9)
+    checkEqual(res, imc0)
+    assert_raises(ValueError, vigra.filters.multiGrayscaleOpening, im)
+    assert_raises(ValueError, vigra.filters.multiGrayscaleOpening, im.astype(np.int16), 9)
+    res = vigra.filters.multiGrayscaleClosing(im, 9)
+    checkEqual(res, imc2)
+    assert_raises(ValueError, vigra.filters.multiGrayscaleClosing, im)
+    assert_raises(ValueError, vigra.filters.multiGrayscaleClosing, im.astype(np.int16), 9)
+
+def test_distanceTransform():
+    # Test distanceTransform and distanceTransform2D.
+    im = np.array([[0, 0, 0, 0, 0],
+                   [0, 1, 1, 0, 0],
+                   [0, 0, 1, 0, 0],
+                   [0, 0, 0, 0, 0]], dtype=np.uint32)
+    s2 = 1.41421354
+    s5 = 2.23606801
+    imd = np.array([[s2, 1, 1, s2, s5],
+                    [1, 0, 0, 1, 2],
+                    [s2, 1, 0, 1, 2],
+                    [s5, s2, 1, s2, s5]], dtype=np.float32)
+    res = vigra.filters.distanceTransform(im)
+    checkAboutSame(res, imd)
+    assert_raises(ValueError, vigra.filters.distanceTransform, np.zeros((5, 6, 7, 8), dtype=np.float32))
+
+    # Test vectorDistanceTransform.
+    im = im.astype(np.float32)
+    imd = np.array([[[1, 1], [1, 0], [1, 0], [1, -1], [1, -2]],
+                    [[0, 1], [0, 0], [0, 0], [0, -1], [0, -2]],
+                    [[-1, 1], [0, 1], [0, 0], [0, -1], [0, -2]],
+                    [[-1, 2], [-1, 1], [-1, 0], [-1, -1], [-1, -2]]], dtype=np.float32)
+    res = vigra.filters.vectorDistanceTransform(im)
+    checkAboutSame(res, imd)
+    assert_raises(ValueError, vigra.filters.vectorDistanceTransform, np.zeros((5, 6, 7, 8), dtype=np.float32))
+    
+def test_boundaryDistanceTransform():
+    # Test boundaryDistanceTransform.
+    im = np.array([[2, 2, 2, 2, 2, 2],
+                   [2, 2, 2, 2, 2, 2],
+                   [1, 1, 1, 1, 2, 2],
+                   [1, 1, 1, 1, 1, 2],
+                   [1, 1, 1, 1, 1, 1],
+                   [1, 1, 1, 1, 1, 1]], dtype=np.float32)
+    s2 = 0.91421354
+    s3 = 1.73606801
+    s5 = 2.32842708
+    s6 = 3.10555124
+    imd = np.array([[1.5, 1.5, 1.5, 1.5, s3, s5],
+                    [0.5, 0.5, 0.5, 0.5, s2, s3],
+                    [0.5, 0.5, 0.5, 0.5, 0.5, s2],
+                    [1.5, 1.5, 1.5, s2, 0.5, 0.5],
+                    [2.5, 2.5, s5, s3, s2, 0.5],
+                    [3.5, 3.5, s6, s5, s3, 1.5]], dtype=np.float32)
+    res = vigra.filters.boundaryDistanceTransform(im)
+    checkAboutSame(res, imd)
+    assert_raises(ValueError, vigra.filters.boundaryDistanceTransform, im.astype(np.uint8))
+    assert_raises(ValueError, vigra.filters.boundaryDistanceTransform, np.zeros((5, 6, 7, 8), dtype=np.float32))
+
+    # Test boundaryVectorDistanceTransform.
+    imd2 = np.array([[[ 1.5,  0. ], [ 1.5,  0. ], [ 1.5,  0. ], [ 1.5,  0. ], [ 1.5, -1. ], [ 1.5, -2. ]],
+                     [[ 0.5,  0. ], [ 0.5,  0. ], [ 0.5,  0. ], [ 0.5,  0. ], [ 0.5, -1. ], [ 1. , -1.5]],
+                     [[-0.5,  0. ], [-0.5,  0. ], [-0.5,  0. ], [-0.5,  0. ], [ 0. , -0.5], [ 0.5, -1. ]],
+                     [[-1.5,  0. ], [-1.5,  0. ], [-1.5,  0. ], [-1. ,  0.5], [-0.5,  0. ], [ 0. , -0.5]],
+                     [[-2.5,  0. ], [-2.5,  0. ], [-2. ,  1.5], [-1.5,  1. ], [-1. ,  0.5], [-0.5,  0. ]],
+                     [[-3.5,  0. ], [-3.5,  0. ], [-2.5,  2. ], [-2. ,  1.5], [-1.5,  1. ], [-1.5,  0. ]]],
+                    dtype=np.float32)
+    res = vigra.filters.boundaryVectorDistanceTransform(im)
+    checkAboutSame(res, imd2)
+    assert_raises(ValueError, vigra.filters.boundaryVectorDistanceTransform, im.astype(np.uint8))
+    assert_raises(ValueError, vigra.filters.boundaryVectorDistanceTransform, np.zeros((5, 6, 7, 8), dtype=np.float32))
+
+def test_eccentricityTransform():
+    # Test eccentricityTransform.
+    im = np.array([[2, 2, 2, 3, 3, 3, 3],
+                   [2, 1, 1, 1, 1, 1, 3],
+                   [2, 1, 1, 1, 1, 1, 3],
+                   [2, 1, 1, 1, 1, 1, 3],
+                   [2, 2, 2, 3, 3, 3, 3]], dtype=np.float32)
+    s2 = 1.41421354
+    imd = np.array([[2, s2+1, s2+2, s2+3, s2+2, s2+1, 2],
+                    [1, s2+1, s2, 1, s2, s2+1, 1],
+                    [0, 2, 1, 0, 1, 2, 0],
+                    [1, s2+1, s2, 1, s2, s2+1, 1],
+                    [2, s2+1, s2+2, s2+3, s2+2, s2+1, 2]])
+    res = vigra.filters.eccentricityTransform(im)
+    checkAboutSame(res, imd)
+    assert_raises(ValueError, vigra.filters.eccentricityTransform, im.astype(np.int32))
+    assert_raises(ValueError, vigra.filters.eccentricityTransform, np.zeros((5, 6, 7, 8), dtype=np.float32))
+
+    # Test eccentricityCenters.
+    c = vigra.filters.eccentricityCenters(im)
+    assert(c[1] == (2, 3) and c[2] == (2, 0) and c[3] == (2, 6))
+    assert_raises(ValueError, vigra.filters.eccentricityCenters, im.astype(np.int32))
+    assert_raises(ValueError, vigra.filters.eccentricityCenters, np.zeros((5, 6, 7, 8), dtype=np.float32))
+    
+    # Test eccentricityTransformWithCenters.
+    res, c = vigra.filters.eccentricityTransformWithCenters(im)
+    checkAboutSame(res, imd)
+    assert(c[1] == (2, 3) and c[2] == (2, 0) and c[3] == (2, 6))
+    assert_raises(ValueError, vigra.filters.eccentricityTransformWithCenters, im.astype(np.int32))
+    assert_raises(ValueError, vigra.filters.eccentricityTransformWithCenters, np.zeros((5, 6, 7, 8), dtype=np.float32))
+
+def test_gaussianGradient():
+    im = np.array([[0, 0, 0, 0, 0],
+                   [0, 1, 1, 0, 0],
+                   [0, 1, 1, 1, 0],
+                   [0, 0, 1, 1, 0],
+                   [0, 0, 0, 0, 0]], dtype=np.float64)
+    imd0 = np.array([[[ -8.01406117e-18 ,  8.02309608e-18],
+                     [ -9.18662066e-18 ,  8.61150404e-02],
+                     [ -7.83858551e-18 , -1.21313252e-01],
+                     [ -2.95644367e-18 , -1.88750139e-01],
+                     [ -5.94896474e-19 ,  0.00000000e+00]],
+                    [[  8.61150404e-02 , -1.30104261e-17],
+                     [  1.29233242e-01 ,  1.29233242e-01],
+                     [  2.03991002e-01 , -1.01871715e-01],
+                     [  2.15420146e-01 , -2.15420146e-01],
+                     [  1.88750139e-01 ,  1.73472348e-18]],
+                    [[ -1.21313252e-01 , -1.73472348e-18],
+                     [ -1.01871715e-01 ,  2.03991002e-01],
+                     [  2.77555756e-17 , -2.58040117e-17],
+                     [  1.01871715e-01 , -2.03991002e-01],
+                     [  1.21313252e-01 , -2.94902991e-17]],
+                    [[ -1.88750139e-01 , -1.38777878e-17],
+                     [ -2.15420146e-01 ,  2.15420146e-01],
+                     [ -2.03991002e-01 ,  1.01871715e-01],
+                     [ -1.29233242e-01 , -1.29233242e-01],
+                     [ -8.61150404e-02 ,  8.67361738e-19]],
+                    [[ -5.94896474e-19 ,  0.00000000e+00],
+                     [ -2.95644367e-18 ,  1.88750139e-01],
+                     [ -7.83858551e-18 ,  1.21313252e-01],
+                     [ -9.18662066e-18 , -8.61150404e-02],
+                     [ -8.01406117e-18 ,  8.02309608e-18]]], dtype=np.float64)
+    imd1 = np.array([[  1.13399844e-17 , 8.61150404e-02 , 1.21313252e-01 , 1.88750139e-01 , 5.94896474e-19],
+                     [  8.61150404e-02 , 1.82763404e-01 , 2.28013542e-01 , 3.04650092e-01 , 1.88750139e-01],
+                     [  1.21313252e-01 , 2.28013542e-01 , 3.78974801e-17 , 2.28013542e-01 , 1.21313252e-01],
+                     [  1.88750139e-01 , 3.04650092e-01 , 2.28013542e-01 , 1.82763404e-01 , 8.61150404e-02],
+                     [  5.94896474e-19 , 1.88750139e-01 , 1.21313252e-01 , 8.61150404e-02 , 1.13399844e-17]], dtype=np.float64)
+    res = vigra.filters.gaussianGradient(im, 1)
+    checkAboutSame(res, imd0)
+    assert_raises(ValueError, vigra.filters.gaussianGradient, im)
+    assert_raises(ValueError, vigra.filters.gaussianGradient, im.astype(np.uint8), 1)
+    assert_raises(ValueError, vigra.filters.gaussianGradient, np.zeros((5, 6, 7, 8, 9), dtype=np.float64), 1)
+
+    res = vigra.filters.gaussianGradientMagnitude(im, 1)
+    checkAboutSame(res, imd1)
+    assert_raises(ValueError, vigra.filters.gaussianGradientMagnitude, im)
+    assert_raises(ValueError, vigra.filters.gaussianGradientMagnitude, im.astype(np.uint8), 1)
+    assert_raises(ValueError, vigra.filters.gaussianGradientMagnitude, np.zeros((5, 6, 7, 8, 9, 10), dtype=np.float64), 1)
+
+def test_hessianOfGaussian():
+    im = np.array([[0, 0, 0, 0, 0],
+                   [0, 1, 1, 0, 0],
+                   [0, 1, 1, 1, 0],
+                   [0, 0, 1, 1, 0],
+                   [0, 0, 0, 0, 0]], dtype=np.float64)
+    imd = np.array([[[  2.03117070e-01,  4.84491752e-34,  2.03117070e-01],
+                     [  2.65611971e-01, -1.09103007e-18, -9.96424231e-02],
+                     [  3.34984252e-01,  3.98564636e-18, -2.12308959e-01],
+                     [  2.93927033e-01,  4.46302120e-18,  9.19749106e-02],
+                     [  2.36835873e-01, -2.40741243e-35,  2.36835873e-01]],
+                    [[ -9.96424231e-02,  3.46944695e-18,  2.65611971e-01],
+                     [ -8.92026671e-02,  7.56238735e-02, -8.92026671e-02],
+                     [ -1.41639200e-02,  5.51365585e-02, -2.62273105e-01],
+                     [  7.17062698e-02, -2.89764871e-02,  7.17062698e-02],
+                     [  9.19749106e-02,  7.80625564e-18,  2.93927033e-01]],
+                    [[ -2.12308959e-01,  0.00000000e+00,  3.34984252e-01],
+                     [ -2.62273105e-01,  5.51365585e-02, -1.41639200e-02],
+                     [ -3.06656412e-01,  1.30341282e-01, -3.06656412e-01],
+                     [ -2.62273105e-01,  5.51365585e-02, -1.41639200e-02],
+                     [ -2.12308959e-01, -2.60208521e-18,  3.34984252e-01]],
+                    [[  9.19749106e-02, -7.58941521e-18,  2.93927033e-01],
+                     [  7.17062698e-02, -2.89764871e-02,  7.17062698e-02],
+                     [ -1.41639200e-02,  5.51365585e-02, -2.62273105e-01],
+                     [ -8.92026671e-02,  7.56238735e-02, -8.92026671e-02],
+                     [ -9.96424231e-02,  0.00000000e+00,  2.65611971e-01]],
+                    [[  2.36835873e-01, -2.40741243e-35,  2.36835873e-01],
+                     [  2.93927033e-01, -4.46302120e-18,  9.19749106e-02],
+                     [  3.34984252e-01, -3.98564636e-18, -2.12308959e-01],
+                     [  2.65611971e-01,  1.09103007e-18, -9.96424231e-02],
+                     [  2.03117070e-01,  4.84491752e-34,  2.03117070e-01]]], dtype=np.float64)
+    res = vigra.filters.hessianOfGaussian(im, 1)
+    checkAboutSame(res, imd)
+    assert_raises(ValueError, vigra.filters.hessianOfGaussian, im)
+    assert_raises(ValueError, vigra.filters.hessianOfGaussian, im.astype(np.uint8), 1)
+    assert_raises(ValueError, vigra.filters.hessianOfGaussian, np.zeros((5, 6, 7, 8, 9), dtype=np.float32), 1)
+
+def test_structureTensor():
+    im = np.array([[0, 0, 0],
+                   [0, 1, 1]], dtype=np.float64)
+    imd = np.array([[[0, 0, 0.00163116],
+                     [0, 0, 0.01856249],
+                     [0, 0, 0.00163116]],
+                    [[0, 0, 0.01856249],
+                     [0, 0, 0.21124014],
+                     [0, 0, 0.01856249]]], dtype=np.float64)
+    res = vigra.filters.structureTensor(im, 0.2, 0.4)
+    checkAboutSame(imd, res)
+    assert_raises(ValueError, vigra.filters.structureTensor, im)
+    assert_raises(ValueError, vigra.filters.structureTensor, im, 0.2)
+    assert_raises(ValueError, vigra.filters.structureTensor, im.astype(np.uint8), 0.2, 0.4)
+    assert_raises(ValueError, vigra.filters.structureTensor, np.zeros((5, 6, 7, 8, 9, 10), dtype=np.float64), 0.5, 0.75)
+
+def test_vectorToTensor():
+    im = np.zeros((2, 3, 2), dtype=np.float32)
+    im[0, 1] = [4, 5]
+    im[1, 1] = [3, 7]
+    imd = np.array([[[0, 0, 0],
+                     [16, 20, 25],
+                     [0, 0, 0]],
+                    [[0, 0, 0],
+                     [9, 21, 49],
+                     [0, 0, 0]]], dtype=np.float32)
+    res = vigra.filters.vectorToTensor(im)
+    checkAboutSame(res, imd)
+    vigra.filters.vectorToTensor(np.zeros((3, 4, 5, 3), dtype=np.float32))
+    assert_raises(ValueError, vigra.filters.vectorToTensor, im.astype(np.uint32))
+    assert_raises(ValueError, vigra.filters.vectorToTensor, np.zeros((2, 3, 4), dtype=np.float32))
+    assert_raises(ValueError, vigra.filters.vectorToTensor, np.zeros((3, 4, 5, 4), dtype=np.float32))
+
+def test_tensorTraceDetEigen():
+    # Test tensorTrace.
+    im = np.array([[0, 0, 0],
+                   [0, 1, 1]], dtype=np.float64)
+    im = vigra.filters.structureTensor(im, 0.2, 0.4)
+    imd = np.array([[ 0.00163116,  0.01856249,  0.00163116],
+                    [ 0.01856249,  0.21124014,  0.01856249]], dtype=np.float64)
+    res = vigra.filters.tensorTrace(im)
+    checkAboutSame(res, imd)
+    assert_raises(ValueError, vigra.filters.tensorTrace, im.astype(np.uint32))
+    assert_raises(ValueError, vigra.filters.tensorTrace, np.zeros((3, 4, 5), dtype=np.float64))
+
+    # Test tensorDeterminant.
+    vigra.filters.tensorDeterminant(im)
+    assert_raises(ValueError, vigra.filters.tensorDeterminant, im.astype(np.uint32))
+    assert_raises(ValueError, vigra.filters.tensorDeterminant, np.zeros((3, 4, 5), dtype=np.float64))
+
+    # Test tensorEigenvalues.
+    imd = np.array([[[ 0.00163116, 0 ],
+                     [ 0.01856249, 0 ],
+                     [ 0.00163116, 0 ]],
+                    [[ 0.01856249, 0 ],
+                     [ 0.21124014, 0 ],
+                     [ 0.01856249, 0 ]]], dtype=np.float64)
+    res = vigra.filters.tensorEigenvalues(im)
+    checkAboutSame(res, imd)
+    assert_raises(ValueError, vigra.filters.tensorEigenvalues, im.astype(np.uint32))
+    assert_raises(ValueError, vigra.filters.tensorEigenvalues, np.zeros((3, 4, 5), dtype=np.float64))
+
+def test_applyColortable():
+    vigra.colors.applyColortable(np.zeros((10, 11), dtype=np.uint8), np.zeros((10, 4), dtype=np.uint8))
+    assert_raises(ValueError, vigra.colors.applyColortable, np.zeros((10, 11), dtype=np.uint8))
+    assert_raises(ValueError, vigra.colors.applyColortable, np.zeros((10, 11), dtype=np.uint8), np.zeros((10, 4), dtype=np.uint32))
+    assert_raises(ValueError, vigra.colors.applyColortable, np.zeros((10, 11, 12), dtype=np.uint8), np.zeros((10, 4), dtype=np.uint8))
+
+def test_linearRangeMapping():
+    im = np.zeros((10, 11), dtype=np.uint8)
+    im[5, 5] = 128
+    vigra.colors.linearRangeMapping(im)
+    im = np.zeros((10, 11, 12, 13), dtype=np.uint8)
+    im[5, 5, 5, 6] = 128
+    assert_raises(ValueError, vigra.colors.linearRangeMapping, im)
+
+def test_labelImage():
+    im = np.array([[1, 1, 2, 2],
+                   [1, 0, 2, 2],
+                   [0, 0, 1, 1]], dtype=np.uint8)
+    imd0 = np.array([[1, 1, 3, 3],
+                     [1, 2, 3, 3],
+                     [2, 2, 4, 4]], dtype=np.uint8)
+    imd1 = np.array([[1, 1, 2, 2],
+                     [1, 0, 2, 2],
+                     [0, 0, 3, 3]], dtype=np.uint8)
+    res = vigra.analysis.labelImage(im)
+    checkEqual(res, imd0)
+    vigra.analysis.labelImage(im, "direct")
+    assert_raises(ValueError, vigra.analysis.labelImage, im.astype(np.float64))
+    assert_raises(ValueError, vigra.analysis.labelImage, np.zeros((3, 4, 5), dtype=np.uint8))
+    res = vigra.analysis.labelImageWithBackground(im)
+    checkEqual(res, imd1)
+    vigra.analysis.labelImageWithBackground(im, "direct")
+    assert_raises(ValueError, vigra.analysis.labelImageWithBackground, im.astype(np.float64))
+    assert_raises(ValueError, vigra.analysis.labelImageWithBackground, np.zeros((3, 4, 5), dtype=np.uint8))
+
+def test_labelVolume():
+    im = np.zeros((4, 5, 6), dtype=np.uint8)
+    vigra.analysis.labelVolume(im)
+    vigra.analysis.labelVolume(im, "direct")
+    assert_raises(ValueError, vigra.analysis.labelVolume, im.astype(np.float64))
+    assert_raises(ValueError, vigra.analysis.labelVolume, np.zeros((3, 4, 5, 6), dtype=np.uint8))
+    vigra.analysis.labelVolumeWithBackground(im)
+    vigra.analysis.labelVolumeWithBackground(im, "direct")
+    assert_raises(ValueError, vigra.analysis.labelVolumeWithBackground, im.astype(np.float64))
+    assert_raises(ValueError, vigra.analysis.labelVolumeWithBackground, np.zeros((3, 4, 5, 6), dtype=np.uint8))
+
+def test_labelMultiArray():
+    im = np.zeros((4, 5, 6, 7), dtype=np.uint8)
+    vigra.analysis.labelMultiArray(im)
+    vigra.analysis.labelMultiArray(im, "direct")
+    assert_raises(ValueError, vigra.analysis.labelMultiArray, im, "direct", 0)
+    assert_raises(ValueError, vigra.analysis.labelMultiArray, im.astype(np.float64))
+    vigra.analysis.labelMultiArrayWithBackground(im)
+    vigra.analysis.labelMultiArrayWithBackground(im, "direct")
+    vigra.analysis.labelMultiArrayWithBackground(im, "direct", 0)
+    assert_raises(ValueError, vigra.analysis.labelMultiArrayWithBackground, im, "direct", 0, 1)
+    assert_raises(ValueError, vigra.analysis.labelMultiArrayWithBackground, im.astype(np.float64))
+
+def test_extendedLocalMinima():
+    im = np.array([[5, 4, 5, 6],
+                   [6, 3, 4, 5],
+                   [7, 4, 5, 6]], dtype=np.uint8)
+    imd = np.array([[0, 0, 0, 0],
+                    [0, 1, 0, 0],
+                    [0, 0, 0, 0]], dtype=np.uint8)
+    res = vigra.analysis.extendedLocalMinima(im, 1)
+    checkEqual(res, imd)
+    vigra.analysis.extendedLocalMinima(im, 1, 8)
+    vigra.analysis.extendedLocalMinima(im.astype(np.float32))
+    vigra.analysis.extendedLocalMinima(im.astype(np.float32), 1.5)
+    assert_raises(ValueError, vigra.analysis.extendedLocalMinima, im)
+    assert_raises(ValueError, vigra.analysis.extendedLocalMinima, im, 1, 8, 1)
+    assert_raises(ValueError, vigra.analysis.extendedLocalMinima, im, 1.0)
+
+def test_extendedLocalMinima3D():
+    im = np.zeros((3, 4, 5), dtype=np.uint8)
+    vigra.analysis.extendedLocalMinima3D(im)
+    vigra.analysis.extendedLocalMinima3D(im, 5)
+    vigra.analysis.extendedLocalMinima3D(im, 5, 6)
+    assert_raises(ValueError, vigra.analysis.extendedLocalMinima3D, im, 1.5)
+    assert_raises(ValueError, vigra.analysis.extendedLocalMinima3D, im, 5, 6, 1)
+
+def test_watersheds():
+    im_f = vigra.arraytypes.ScalarImage(np.random.rand(100, 200)*255, dtype=np.float32)
+    im_i = vigra.arraytypes.ScalarImage(np.random.rand(100, 200)*255, dtype=np.uint32)
+
+    vigra.analysis.watersheds(im_f)
+    vigra.analysis.watersheds(im_f, seeds=im_i)
+    vigra.analysis.watersheds(im_f, method="RegionGrowing")
+    vigra.analysis.watersheds(im_f, seeds=im_i, method="RegionGrowing")
+    assert_raises(ValueError, vigra.analysis.watersheds, im_f, seeds=im_i.astype(np.float32), method="RegionGrowing")
+    assert_raises(ValueError, vigra.analysis.watersheds, im_f, seeds=np.zeros((100, 200, 3), dtype=np.uint32), method="RegionGrowing")
+    assert_raises(ValueError, vigra.analysis.watersheds, np.zeros((100, 200, 3), dtype=np.float32), seeds=np.zeros((100, 200), dtype=np.uint32), method="RegionGrowing")
+
+    vigra.analysis.watershedsNew(im_f)
+    vigra.analysis.watershedsNew(im_f, seeds=im_i)
+    vigra.analysis.watershedsNew(im_f, method="RegionGrowing")
+    vigra.analysis.watershedsNew(im_f, seeds=im_i, method="RegionGrowing")
+    assert_raises(ValueError, vigra.analysis.watershedsNew, im_f, seeds=im_i.astype(np.float32), method="RegionGrowing")
+    assert_raises(ValueError, vigra.analysis.watershedsNew, im_f, seeds=np.zeros((100, 200, 3), dtype=np.uint32), method="RegionGrowing")
+    assert_raises(ValueError, vigra.analysis.watershedsNew, np.zeros((100, 200, 3), dtype=np.float32), seeds=np.zeros((100, 200), dtype=np.uint32), method="RegionGrowing")
+
+def test_superpixels():
+    im_f = vigra.arraytypes.ScalarImage(np.random.rand(100, 200)*255, dtype=np.float32)
+    im_i = vigra.arraytypes.ScalarImage(np.random.rand(100, 200)*255, dtype=np.uint32)
+
+    vigra.analysis.slicSuperpixels(im_f, 0.5, 1)
+    assert_raises(ValueError, vigra.analysis.slicSuperpixels, im_f)
+    assert_raises(ValueError, vigra.analysis.slicSuperpixels, im_f, 0.5)
+    assert_raises(ValueError, vigra.analysis.slicSuperpixels, np.zeros((100, 200, 10, 20), dtype=np.float32), 0.5, 1)
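
The new tests above all follow the same pattern: each wrapped VIGRA function is called once with valid arguments, and malformed calls (missing scales, unsupported dtypes, wrong dimensionality) are expected to raise ValueError. A minimal standalone sketch of that pattern, assuming assert_raises is nose.tools.assert_raises (its import lies outside this hunk) and that checkAboutSame() behaves roughly like numpy.testing.assert_allclose():

    import numpy as np
    import vigra
    from nose.tools import assert_raises

    im = np.array([[0, 0, 0],
                   [0, 1, 1]], dtype=np.float64)

    # Valid call: inner scale 0.2 and outer scale 0.4 are both supplied.
    res = vigra.filters.structureTensor(im, 0.2, 0.4)
    assert res.shape == (2, 3, 3)   # one (xx, xy, yy) triple per pixel

    # Invalid calls: missing scales or an unsupported dtype raise ValueError.
    assert_raises(ValueError, vigra.filters.structureTensor, im)
    assert_raises(ValueError, vigra.filters.structureTensor, im.astype(np.uint8), 0.2, 0.4)
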
diff --git a/vigranumpy/test/test_rf.py b/vigranumpy/test/test_rf.py
index 5b78227..ce4c222 100644
--- a/vigranumpy/test/test_rf.py
+++ b/vigranumpy/test/test_rf.py
@@ -1,5 +1,7 @@
+from __future__ import division, print_function
 import sys
-print >> sys.stderr, "\nexecuting test file", __file__
+print("\nexecuting test file", __file__, file=sys.stderr)
+exec(compile(open('set_paths.py', "rb").read(), 'set_paths.py', 'exec'))
 
 import vigra
 import numpy as np
@@ -19,4 +21,4 @@ def test_rf_learn():
     RF.learnRF(fmat,lmat,0,1,100)
 
 def ok_():
-    print >> sys.stderr, ".",
+    print(".", file=sys.stderr)
\ No newline at end of file
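
This hunk and the test_segmentation.py hunk below apply the same Python-2-to-3 conversion: the chevron print-to-stderr syntax becomes print(..., file=sys.stderr), and set_paths.py is loaded via exec(compile(...)) instead of execfile(). A small sketch of that replacement wrapped in a helper; the run_script() name is illustrative only and does not appear in the patch:

    from __future__ import division, print_function
    import sys

    def run_script(path):
        # Python 2/3 compatible stand-in for the removed execfile(path);
        # runs the file's code in this module's global namespace.
        with open(path, "rb") as f:
            code = compile(f.read(), path, 'exec')
        exec(code, globals())

    print("\nexecuting test file", __file__, file=sys.stderr)
    run_script('set_paths.py')
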
diff --git a/vigranumpy/test/test_segmentation.py b/vigranumpy/test/test_segmentation.py
index 3dc307f..825672d 100644
--- a/vigranumpy/test/test_segmentation.py
+++ b/vigranumpy/test/test_segmentation.py
@@ -1,6 +1,7 @@
+from __future__ import division, print_function
 import sys
-print >> sys.stderr, "\nexecuting test file", __file__
-execfile('set_paths.py')
+print("\nexecuting test file", __file__, file=sys.stderr)
+exec(compile(open('set_paths.py', "rb").read(), 'set_paths.py', 'exec'))
 
 import numpy
 import vigra
@@ -34,4 +35,4 @@ def test_labelMultiArray():
     _impl_test_labelMultiArray(numpy.float32)
     
 def ok_():
-    print >> sys.stderr, ".",
+    print(".", file=sys.stderr)

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/libvigraimpex.git