[python-escript] 01/02: Escript release 4.1

Joel Fenwick jfenwick-guest at moszumanska.debian.org
Tue Dec 1 04:06:53 UTC 2015


This is an automated email from the git hooks/post-receive script.

jfenwick-guest pushed a commit to branch debian
in repository python-escript.

commit 7977853be8637d3ce55b792099b685fe2643ebba
Author: Joel Fenwick <joelfenwick at uq.edu.au>
Date:   Tue Dec 1 13:49:00 2015 +1000

    Escript release 4.1
---
 SConstruct                                         |   14 +-
 cusplibrary/build/build-env.py                     |   35 +-
 cusplibrary/build/nvcc.py                          |   17 +
 cusplibrary/performance/spmv/scripts/benchmark.py  |   18 +
 cusplibrary/testing/data/random_10x10/generator.py |   18 +
 doc/SConscript                                     |   18 +-
 doc/cookbook/example01.tex                         |    2 +-
 doc/cookbook/example07.tex                         |    2 +-
 doc/cookbook/example10.tex                         |   12 +-
 doc/cookbook/verinfo.tex                           |    8 +-
 doc/doxygen/doxygen_esys                           | 2396 ++++++++++++--------
 doc/examples/cookbook/cblib.py                     |    8 +-
 doc/examples/cookbook/cblib1.py                    |    3 +-
 doc/examples/cookbook/cerjen.py                    |   18 +
 doc/examples/cookbook/example01c.py                |    6 +-
 doc/examples/cookbook/example03a.py                |   13 +-
 doc/examples/cookbook/example03b.py                |    2 +-
 doc/examples/cookbook/example04a.py                |    6 +-
 doc/examples/cookbook/example04b.py                |   13 +-
 doc/examples/cookbook/example05a.py                |   11 +-
 doc/examples/cookbook/example05b.py                |    9 +-
 doc/examples/cookbook/example05c.py                |    7 +-
 doc/examples/cookbook/example06.py                 |    9 +-
 doc/examples/cookbook/example08b.py                |   14 +-
 doc/examples/cookbook/example08c.py                |   16 +-
 doc/examples/cookbook/example09a.py                |   16 +-
 doc/examples/cookbook/example09b.py                |   16 +-
 doc/examples/cookbook/example09c.py                |   16 +-
 doc/examples/cookbook/example09m.py                |    7 +-
 doc/examples/cookbook/example09n.py                |    3 +-
 doc/examples/cookbook/example10a.py                |   15 +-
 doc/examples/cookbook/example10b.py                |    3 +-
 doc/examples/cookbook/example10c_0.py              |    3 +-
 doc/examples/cookbook/example10c_1.py              |    3 +-
 doc/examples/cookbook/example10d.py                |    3 +-
 doc/examples/cookbook/example10e.py                |    3 +-
 doc/examples/cookbook/example10m.py                |    3 +-
 doc/examples/cookbook/example10p.py                |    3 +-
 doc/examples/cookbook/example11a.py                |    3 +-
 doc/examples/cookbook/example11b.py                |    3 +-
 doc/examples/cookbook/example11c.py                |    3 +-
 doc/examples/cookbook/example11m.py                |    6 +-
 doc/examples/cookbook/wave_stab.py                 |    8 +-
 doc/examples/cookbook/wavesolver2d001.py           |    3 +-
 doc/examples/cookbook/wavesolver2d002.py           |    3 +-
 doc/examples/cookbook/wavesolver2d003.py           |    3 +-
 doc/examples/cookbook/wavesolver2d004.py           |    3 +-
 doc/examples/geotutorial/backward_euler.py         |    4 +-
 doc/examples/geotutorial/forward_euler.py          |    3 +-
 doc/examples/inversion/create_netcdf.py            |    5 +-
 doc/examples/inversion/dc_forward.py               |   19 +-
 doc/examples/inversion/grav_ermapper.py            |    3 +-
 doc/examples/inversion/grav_netcdf.py              |    3 +-
 doc/examples/inversion/gravmag_netcdf.py           |    3 +-
 doc/examples/inversion/gravmag_nodriver.py         |    3 +-
 doc/examples/inversion/gravmag_wgs84_nodriver.py   |    3 +-
 doc/examples/inversion/mag_netcdf.py               |    3 +-
 doc/examples/inversion/mag_wgs84_netcdf.py         |    3 +-
 doc/examples/inversion/plot_ermapper.py            |    5 +-
 doc/examples/inversion/plot_netcdf.py              |    5 +-
 doc/examples/inversion/strong_gravmag_netcdf.py    |    3 +-
 doc/examples/inversion/synthetic_HTI.py            |  227 +-
 doc/examples/inversion/synthetic_TTI.py            |   51 +-
 doc/examples/inversion/synthetic_VTI.py            |   50 +-
 doc/examples/inversion/synthetic_sonic.py          |   25 +-
 doc/examples/inversion/synthetic_sonicHTI.py       |   16 +-
 doc/examples/inversion/test_commemi1.py            |  450 ++++
 doc/examples/inversion/test_commemi4.py            |  553 +++++
 doc/examples/usersguide/{helmholtz.py => dirac.py} |   54 +-
 doc/examples/usersguide/fluid.py                   |    2 +-
 doc/examples/usersguide/helmholtz.py               |    3 +-
 doc/examples/usersguide/int_save.py                |    6 +-
 doc/examples/usersguide/lame.py                    |   17 +
 doc/examples/usersguide/poisson_matplotlib.py      |    3 +-
 doc/examples/usersguide/split.py                   |   26 +
 doc/examples/usersguide/voxet_reader.py            |  220 ++
 doc/examples/usersguide/wave.py                    |    3 +-
 doc/install/source.tex                             |   17 +-
 doc/install/srcadditional.tex                      |    2 +-
 doc/install/verinfo.tex                            |    8 +-
 doc/inversion/CookGravity.tex                      |    2 +-
 doc/inversion/defs.tex                             |    2 +-
 doc/inversion/verinfo.tex                          |    8 +-
 doc/sphinx_api/conf.py                             |    4 +-
 doc/sphinx_api/genrst.py                           |   17 +-
 doc/user/TutorialPDE.tex                           |    3 +-
 doc/user/changes.tex                               |   17 +
 doc/user/diffusion.tex                             |    6 +-
 doc/user/dirac.tex                                 |   84 +
 doc/user/escript.tex                               |    1 +
 doc/user/esys.bib                                  |  572 ++---
 doc/user/figures/EscriptDiagram1.pdf               |  Bin 9654 -> 8867 bytes
 doc/user/figures/EscriptDiagram1.svg               |  464 ++--
 doc/user/figures/diracplot.png                     |  Bin 0 -> 103515 bytes
 doc/user/finley.tex                                |   25 +-
 doc/user/firststep.tex                             |    2 +-
 doc/user/linearPDE.tex                             |   22 +-
 doc/user/ripley.tex                                |   49 +-
 doc/user/speckley.tex                              |    2 +-
 doc/user/subworlds.tex                             |  293 +++
 doc/user/user.tex                                  |    1 +
 doc/user/user_defs.tex                             |    3 +-
 doc/user/verinfo.tex                               |    8 +-
 doc/verinfo.tex                                    |    8 +-
 downunder/py_src/__init__.py                       |    4 +-
 downunder/py_src/coordinates.py                    |    2 +
 downunder/py_src/costfunctions.py                  |    2 +
 downunder/py_src/datasources.py                    |   10 +-
 downunder/py_src/dcresistivityforwardmodeling.py   |    3 +-
 downunder/py_src/domainbuilder.py                  |    2 +
 downunder/py_src/forwardmodels/__init__.py         |    2 +
 downunder/py_src/forwardmodels/acoustic.py         |    3 +-
 downunder/py_src/forwardmodels/base.py             |    4 +-
 downunder/py_src/forwardmodels/dcresistivity.py    |    3 +-
 downunder/py_src/forwardmodels/gravity.py          |    3 +-
 downunder/py_src/forwardmodels/magnetic.py         |    3 +-
 .../py_src/forwardmodels/magnetotelluric2d.py      |   21 +-
 downunder/py_src/forwardmodels/pressure.py         |    5 +-
 downunder/py_src/forwardmodels/subsidence.py       |    3 +-
 downunder/py_src/inversioncostfunctions.py         |    2 +
 downunder/py_src/inversions.py                     |   92 +-
 downunder/py_src/magtel1d.py                       |  451 ++++
 downunder/py_src/magtel2d.py                       | 1005 ++++++++
 downunder/py_src/mappings.py                       |    3 +-
 downunder/py_src/minimizers.py                     |   81 +-
 downunder/py_src/regularizations.py                |    2 +
 downunder/py_src/seismic.py                        |  385 ++--
 downunder/py_src/splitinversioncostfunctions.py    |  382 +++-
 downunder/py_src/splitminimizers.py                |  406 +++-
 downunder/py_src/splitregularizations.py           |   73 +-
 downunder/test/python/inversion_acoustictest_2d.py |    2 +
 downunder/test/python/ref_data/dip.geo             |   53 +
 downunder/test/python/ref_data/pole.geo            |   45 +
 downunder/test/python/ref_data/schlum.geo          |   53 +
 downunder/test/python/run_comm1.py                 |  615 +++++
 downunder/test/python/run_comm4.py                 |  692 ++++++
 downunder/test/python/run_coordinates.py           |    2 +
 downunder/test/python/run_datasources.py           |    3 +-
 downunder/test/python/run_dcforward.py             |  244 +-
 downunder/test/python/run_domainbuilder.py         |    3 +-
 downunder/test/python/run_forward.py               |    3 +-
 downunder/test/python/run_gravity.py               |    3 +-
 downunder/test/python/run_inversion_gravmag_2d.py  |    3 +-
 downunder/test/python/run_inversioncostfunction.py |    3 +-
 downunder/test/python/run_magnetic.py              |    3 +-
 downunder/test/python/run_mappings.py              |    3 +-
 downunder/test/python/run_minimizers.py            |    8 +-
 downunder/test/python/run_regularization.py        |    3 +-
 downunder/test/python/run_seismic.py               |    3 +-
 dudley/benchmarks/dudleybench.py                   |    3 +-
 dudley/benchmarks/runbenchmark.py                  |    3 +-
 dudley/py_src/__init__.py                          |    8 +-
 dudley/py_src/factorywrappers.py                   |    3 +-
 dudley/py_src/readers.py                           |    2 +
 dudley/src/CPPAdapter/MeshAdapter.cpp              |    5 +-
 dudley/src/CPPAdapter/MeshAdapter.h                |    9 +-
 dudley/src/CPPAdapter/dudleycpp.cpp                |   24 +-
 dudley/src/generateReferenceElementList.py         |    7 +-
 dudley/test/python/FCT_benchmark.py                |    2 +
 dudley/test/python/FCT_test1.py                    |    4 +-
 dudley/test/python/FCT_test2.py                    |    8 +-
 dudley/test/python/OutTest.py                      |    2 +
 dudley/test/python/PoissonSolverTest.py            |    2 +
 dudley/test/python/RT2D.py                         |   33 +-
 dudley/test/python/RecTest.py                      |    2 +
 dudley/test/python/axisymm-splitB.py               |   14 +-
 dudley/test/python/blocktest.py                    |   27 +-
 dudley/test/python/brick.py                        |    2 +
 dudley/test/python/convection.py                   |    3 +
 dudley/test/python/fixme_run_generators.py         |    2 +
 dudley/test/python/generate_dumps.py               |    2 +
 dudley/test/python/generate_meshes.py              |    2 +
 dudley/test/python/linearElastic.py                |    2 +
 dudley/test/python/rayleigh_taylor_instabilty.py   |    2 +
 dudley/test/python/rectangle.py                    |    2 +
 dudley/test/python/run_escriptOnDudley.py          |    2 +
 dudley/test/python/run_inputOutput.py              |    2 +-
 dudley/test/python/run_linearPDEsOnDudley1.py      |    2 +
 dudley/test/python/run_linearPDEsOnDudley2.py      |    2 +
 dudley/test/python/run_models.py                   |  367 +--
 dudley/test/python/run_nlpde2dOnDudley.py          |    2 +
 dudley/test/python/run_nlpde3dOnDudley.py          |    2 +
 dudley/test/python/run_simplesolve.py              |    2 +
 .../test/python/run_splitworldOnDudley.py          |   33 +-
 dudley/test/python/run_utilOnDudley.py             |    2 +
 dudley/test/python/seismic_wave.py                 |    6 +-
 dudley/test/python/slip_stress_mesh_old.py         |    2 +
 dudley/test/python/slip_stress_old.py              |    2 +
 dudley/test/python/stokes_problems.py              |    2 +
 dudley/test/python/subduction1.py                  |    2 +
 dudley/test/python/subduction1_gen.py              |    2 +
 dudley/test/python/time_chunks.py                  |    2 +-
 dudley/test/python/tp.py                           |    2 +
 escript/py_src/__init__.py                         |    5 +
 escript/py_src/datamanager.py                      |   16 +
 escript/py_src/linearPDEs.py                       |   17 +-
 escript/py_src/modelframe.py                       |   17 +-
 escript/py_src/models.py                           |   17 +-
 escript/py_src/pdetools.py                         |   19 +-
 escript/py_src/symbolic.py                         |   16 +-
 escript/py_src/unitsSI.py                          |   18 +-
 escript/py_src/util.py                             |   17 +-
 escriptcore/py_src/__init__.py                     |    2 +
 escriptcore/py_src/benchmark.py                    |    2 +
 escriptcore/py_src/datamanager.py                  |    8 +-
 escriptcore/py_src/domainCouplers.py               |    2 +
 escriptcore/py_src/faultsystems.py                 |    2 +
 escriptcore/py_src/flows.py                        |    2 +
 escriptcore/py_src/gmshrunner.py                   |    2 +
 escriptcore/py_src/heat.py                         |    2 +
 escriptcore/py_src/levelset.py                     |    2 +
 escriptcore/py_src/linearPDEs.py                   |   92 +-
 escriptcore/py_src/modelframe.py                   |   21 +-
 escriptcore/py_src/models.py                       |    2 +
 escriptcore/py_src/mountains.py                    |    2 +
 escriptcore/py_src/nonlinearPDE.py                 |   10 +-
 escriptcore/py_src/pdetools.py                     |    6 +-
 escriptcore/py_src/rheologies.py                   |    2 +
 escriptcore/py_src/runmodel.py                     |    2 +
 escriptcore/py_src/splitworld.py                   |  173 +-
 escriptcore/py_src/start.py                        |    2 +
 escriptcore/py_src/symbolic/__init__.py            |    2 +
 escriptcore/py_src/symbolic/evaluator.py           |    4 +-
 escriptcore/py_src/symbolic/functions.py           |    2 +
 escriptcore/py_src/symbolic/pretty.py              |    2 +
 escriptcore/py_src/symbolic/symbol.py              |    2 +
 escriptcore/py_src/symbolic/symconstants.py        |   18 +
 escriptcore/py_src/symbolic/utils.py               |    2 +
 escriptcore/py_src/testing.py                      |    2 +
 escriptcore/py_src/unitsSI.py                      |    2 +
 escriptcore/py_src/utestselect.py                  |    2 +
 escriptcore/py_src/util.py                         |   10 +-
 escriptcore/src/AbstractReducer.cpp                |   10 +
 escriptcore/src/AbstractReducer.h                  |   20 +-
 escriptcore/src/Data.cpp                           |   34 +-
 escriptcore/src/Data.h                             |   17 +-
 escriptcore/src/DataAbstract.cpp                   |    4 +
 escriptcore/src/DataAbstract.h                     |   19 +-
 escriptcore/src/DataExpanded.h                     |   18 +-
 escriptcore/src/LocalOps.h                         |   24 +-
 escriptcore/src/MPIDataReducer.cpp                 |  212 +-
 escriptcore/src/MPIDataReducer.h                   |   12 +-
 escriptcore/src/MPIScalarReducer.cpp               |   79 +-
 escriptcore/src/MPIScalarReducer.h                 |   12 +-
 escriptcore/src/NonReducedVariable.cpp             |   16 +-
 escriptcore/src/NonReducedVariable.h               |    6 +-
 escriptcore/src/SolverOptions.cpp                  |   14 +-
 escriptcore/src/SplitWorld.cpp                     |   40 +-
 escriptcore/src/SplitWorld.h                       |    4 +
 escriptcore/src/SubWorld.cpp                       |  292 ++-
 escriptcore/src/SubWorld.h                         |   23 +-
 escriptcore/src/escriptcpp.cpp                     |   27 +-
 escriptcore/test/DataTaggedTestCase.cpp            |   13 +
 escriptcore/test/DataTestCase.cpp                  |    3 +
 escriptcore/test/python/run_data_access.py         |    2 +
 escriptcore/test/python/run_symbolic.py            |    2 +
 escriptcore/test/python/run_testdomain.py          |    2 +
 escriptcore/test/python/run_units.py               |    2 +
 escriptcore/test/python/run_xml.py                 |    2 +
 escriptcore/test/python/test_assemblage.py         |    2 +
 escriptcore/test/python/test_assemblage_2Do1.py    |    2 +
 escriptcore/test/python/test_assemblage_2Do2.py    |    2 +
 escriptcore/test/python/test_assemblage_3Do1.py    |    2 +
 escriptcore/test/python/test_assemblage_3Do2.py    |    2 +
 escriptcore/test/python/test_condEval.py           |    2 +
 escriptcore/test/python/test_linearPDEs.py         |  100 +-
 escriptcore/test/python/test_modulefns.py          |    2 +
 escriptcore/test/python/test_nonLinearPDE.py       |    2 +
 escriptcore/test/python/test_objects.py            |    2 +
 escriptcore/test/python/test_pdetools.py           |    2 +
 escriptcore/test/python/test_shared.py             |    4 +-
 escriptcore/test/python/test_splitworld.py         |  590 ++++-
 escriptcore/test/python/test_symfuncs.py           |    2 +
 escriptcore/test/python/test_util.py               |    2 +
 escriptcore/test/python/test_util_NaN_funcs.py     |   20 +-
 escriptcore/test/python/test_util_base.py          |    2 +
 .../test/python/test_util_binary_no_tagged_data.py |    2 +
 .../python/test_util_binary_with_tagged_data.py    |    2 +
 .../test_util_overloaded_binary_no_tagged_data.py  |    2 +
 ...test_util_overloaded_binary_with_tagged_data.py |    2 +
 .../python/test_util_reduction_no_tagged_data.py   |    2 +
 .../python/test_util_reduction_with_tagged_data.py |    4 +-
 .../python/test_util_slicing_no_tagged_data.py     |    2 +
 .../python/test_util_slicing_with_tagged_data.py   |    2 +
 .../test/python/test_util_spatial_functions1.py    |    2 +
 .../test/python/test_util_spatial_functions2.py    |    2 +
 .../test/python/test_util_spatial_functions3.py    |    2 +
 .../test/python/test_util_unary_no_tagged_data.py  |    2 +
 .../python/test_util_unary_with_tagged_data.py     |    2 +
 esysUtils/src/Esys_MPI.cpp                         |   10 +-
 esysUtils/src/Esys_MPI.h                           |    8 +-
 esysUtils/src/pyerr.cpp                            |   37 +-
 esysUtils/src/pyerr.h                              |    3 +-
 finley/benchmarks/finleybench.py                   |    3 +-
 finley/benchmarks/runbenchmark.py                  |    3 +-
 finley/py_src/__init__.py                          |    6 +-
 finley/py_src/factorywrappers.py                   |   10 +-
 finley/py_src/readers.py                           |    2 +
 finley/src/Assemble.h                              |   58 +-
 finley/src/Assemble_CopyNodalData.cpp              |   72 +-
 finley/src/Assemble_LumpedSystem.cpp               |   12 +-
 finley/src/Assemble_PDE_Points.cpp                 |    4 +-
 finley/src/Assemble_PDE_Single_1D.cpp              |    4 +-
 finley/src/Assemble_PDE_Single_2D.cpp              |    4 +-
 finley/src/Assemble_PDE_Single_3D.cpp              |    4 +-
 finley/src/Assemble_PDE_Single_C.cpp               |    4 +-
 finley/src/Assemble_PDE_System_1D.cpp              |    4 +-
 finley/src/Assemble_PDE_System_2D.cpp              |    4 +-
 finley/src/Assemble_PDE_System_3D.cpp              |    4 +-
 finley/src/Assemble_PDE_System_C.cpp               |    4 +-
 finley/src/Assemble_addToSystemMatrix.cpp          |  137 +-
 finley/src/Assemble_getNormal.cpp                  |    2 +-
 finley/src/Assemble_gradient.cpp                   |   54 +-
 finley/src/Assemble_interpolate.cpp                |   10 +-
 finley/src/Assemble_jacobians.cpp                  |   74 +-
 finley/src/CPPAdapter/MeshAdapter.cpp              |  170 +-
 finley/src/CPPAdapter/MeshAdapter.h                |  123 +-
 finley/src/CPPAdapter/MeshAdapterFactory.cpp       |  925 ++++----
 finley/src/CPPAdapter/MeshAdapterFactory.h         |  305 ++-
 finley/src/CPPAdapter/finleycpp.cpp                |   21 +-
 finley/src/ElementFile.cpp                         |   78 +-
 finley/src/ElementFile.h                           |   24 +-
 finley/src/IndexList.cpp                           |   24 +-
 finley/src/IndexList.h                             |    8 +-
 finley/src/Mesh.cpp                                |  174 +-
 finley/src/Mesh.h                                  |   38 +-
 finley/src/Mesh_getPattern.cpp                     |    2 +-
 finley/src/Mesh_glueFaces.cpp                      |   20 +-
 finley/src/Mesh_hex20.cpp                          | 1112 ++++-----
 finley/src/Mesh_hex8.cpp                           |  683 +++---
 finley/src/Mesh_optimizeDOFDistribution.cpp        |   42 +-
 finley/src/Mesh_read.cpp                           |   12 +-
 finley/src/Mesh_readGmsh.cpp                       |    2 +-
 finley/src/Mesh_rec4.cpp                           |  422 ++--
 finley/src/Mesh_rec8.cpp                           |  547 +++--
 finley/src/Mesh_write.cpp                          |  428 ++--
 finley/src/NodeFile.cpp                            |  440 ++--
 finley/src/NodeFile.h                              |  136 +-
 finley/src/NodeMapping.h                           |   18 +-
 finley/src/RectangularMesh.h                       |   25 +-
 finley/src/Util.cpp                                |   63 +-
 finley/src/Util.h                                  |   16 +-
 finley/src/generateReferenceElementList.py         |    2 +
 finley/test/python/FCT_benchmark.py                |    2 +
 finley/test/python/FCT_test0.py                    |    5 +-
 finley/test/python/FCT_test1.py                    |    5 +-
 finley/test/python/FCT_test2.py                    |    6 +-
 finley/test/python/OutTest.py                      |    2 +
 finley/test/python/PoissonSolverTest.py            |    2 +
 finley/test/python/RT2D.py                         |   26 +-
 finley/test/python/RecTest.py                      |    2 +
 finley/test/python/axisymm-splitB.py               |   14 +-
 finley/test/python/blocktest.py                    |    2 +
 finley/test/python/brick.py                        |    2 +
 finley/test/python/coalgas.py                      |    3 +
 finley/test/python/convection.py                   |    3 +
 finley/test/python/damage.py                       |   15 +-
 finley/test/python/generate_dumps.py               |    2 +
 finley/test/python/generate_meshes.py              |    2 +
 finley/test/python/linearElastic.py                |    2 +
 finley/test/python/localization.py                 |    3 +
 finley/test/python/lumping_advection_test.py       |   12 +-
 finley/test/python/lumping_wave_test.py            |   11 +-
 finley/test/python/rayleigh_taylor_instabilty.py   |    2 +
 finley/test/python/rectangle.py                    |    2 +
 finley/test/python/run_amg.py                      |    2 +
 finley/test/python/run_darcy.py                    |    2 +
 finley/test/python/run_escriptOnFinley.py          |   10 +-
 finley/test/python/run_generators.py               |    2 +
 finley/test/python/run_inputOutput.py              |    2 +-
 finley/test/python/run_linearPDEsOnFinley1_2D1.py  |    2 +
 finley/test/python/run_linearPDEsOnFinley1_2D2.py  |    2 +
 finley/test/python/run_linearPDEsOnFinley1_3D1.py  |    2 +
 .../python/run_linearPDEsOnFinley1_3D2_part1.py    |   21 +-
 .../python/run_linearPDEsOnFinley1_3D2_part2.py    |   22 +-
 .../python/run_linearPDEsOnFinley1_3D2_part3-1.py  |   18 +-
 .../python/run_linearPDEsOnFinley1_3D2_part3-2.py  |   18 +-
 .../python/run_linearPDEsOnFinley1_3D2_part3-3.py  |   19 +-
 .../python/run_linearPDEsOnFinley1_3D2_part3-4.py  |   18 +-
 .../python/run_linearPDEsOnFinley1_3D2_part4.py    |   20 +-
 finley/test/python/run_linearPDEsOnFinley2.py      |    2 +
 finley/test/python/run_linearPDEsOnFinley3.py      |    2 +
 finley/test/python/run_linearPDEsOnFinleyMacro.py  |    2 +
 finley/test/python/run_models.py                   |   13 +-
 finley/test/python/run_nlpde2dOnFinley.py          |    2 +
 finley/test/python/run_nlpde3dOnFinley.py          |    2 +
 finley/test/python/run_simplesolve.py              |    2 +
 finley/test/python/run_splitworldOnFinley.py       |   28 +-
 finley/test/python/run_utilOnFinley.py             |  123 +-
 finley/test/python/run_visualization_interface.py  |    2 +
 finley/test/python/runcoalgas.py                   |    3 +
 finley/test/python/seismic_wave.py                 |    7 +-
 finley/test/python/slip_stress_mesh_old.py         |    2 +
 finley/test/python/slip_stress_old.py              |    2 +
 finley/test/python/stokes_problems.py              |    2 +
 finley/test/python/subduction1.py                  |    2 +
 finley/test/python/subduction1_gen.py              |    2 +
 finley/test/python/time_chunks.py                  |    2 +-
 finley/test/python/tp.py                           |    2 +
 modellib/py_src/__init__.py                        |   18 +
 modellib/py_src/flow.py                            |    2 +
 modellib/py_src/geometry.py                        |    4 +-
 modellib/py_src/input.py                           |    2 +
 modellib/py_src/materials.py                       |    2 +
 modellib/py_src/mechanics.py                       |    2 +
 modellib/py_src/probe.py                           |    2 +
 modellib/py_src/temperature.py                     |    2 +
 modellib/py_src/visualization.py                   |    2 +
 modellib/test/python/drucker_prager.py             |    2 +
 modellib/test/python/run_convection.py             |    4 +-
 modellib/test/python/run_domainreaders.py          |    3 +-
 modellib/test/python/run_flow.py                   |    7 +-
 modellib/test/python/run_temp.py                   |    2 +
 paso/src/Options.cpp                               |    6 +
 paso/src/ReactiveSolver.cpp                        |    2 +-
 pasowrap/py_src/__init__.py                        |    2 +
 pasowrap/py_src/pasowrap.py                        |    4 +-
 pasowrap/src/SystemMatrixAdapter.cpp               |    2 +
 pasowrap/src/TransportProblemAdapter.cpp           |    1 +
 pycad/py_src/Triangle.py                           |   10 +-
 pycad/py_src/__init__.py                           |    2 +
 pycad/py_src/design.py                             |   18 +-
 pycad/py_src/extras.py                             |    2 +
 pycad/py_src/gmsh.py                               |    2 +
 pycad/py_src/primitives.py                         |    2 +
 pycad/py_src/shapes.py                             |    2 +
 pycad/py_src/transformations.py                    |    2 +
 pycad/test/python/run_pycad_test.py                |    2 +
 ripley/generators/lamebuilder.py                   |   18 +-
 ripley/generators/lamesource.py                    |   18 +
 ripley/py_src/__init__.py                          |    6 +-
 ripley/src/Brick.cpp                               |   10 +-
 ripley/src/MultiBrick.cpp                          |   11 +-
 ripley/src/MultiRectangle.cpp                      |   12 +-
 ripley/src/Rectangle.cpp                           |   10 +-
 ripley/src/generate_assamblage.py                  |   19 +
 ripley/src/generate_assemblage_cpp.py              |    2 +
 .../test/python/run_customAssemblersOnMultiRes.py  |    2 +-
 ripley/test/python/run_escriptOnMultiResolution.py |    2 +
 ripley/test/python/run_escriptOnRipley.py          |    2 +
 ripley/test/python/run_linearPDEsOnMultiRes.py     |    2 +
 ripley/test/python/run_linearPDEsOnRipley.py       |    2 +
 ripley/test/python/run_nonlinearPDEOnMultiRes.py   |    2 +
 ripley/test/python/run_nonlinearPDEOnRipley.py     |    2 +
 ripley/test/python/run_readWriteOnMultiRes.py      |    2 +
 ripley/test/python/run_readWriteOnRipley.py        |    2 +
 ripley/test/python/run_simplesolveOnMultiRes.py    |    2 +
 ripley/test/python/run_simplesolveOnRipley.py      |    2 +
 ripley/test/python/run_splitworldOnRipley.py       |   98 +
 ripley/test/python/run_utilOnMultiRes.py           |   12 +-
 ripley/test/python/run_utilOnRipley.py             |    2 +
 run-escript.in                                     |    4 +-
 scons/badger_options.py                            |    4 +-
 scons/{badger_options.py => badger_py3_options.py} |    8 +-
 scons/guineapig_py3_options.py                     |    3 +
 scons/sage_options.py                              |  221 +-
 scons/sage_py3_options.py                          |  229 +-
 scons/squirrel_options.py                          |    3 +-
 scons/templates/__init__.py                        |   16 +
 scons/templates/freebsd10_0_options.py             |    6 +-
 scons/templates/homebrew_10.10_options.py          |    1 +
 .../vivid_mpi_options.py}                          |   11 +-
 .../{freebsd10_0_options.py => vivid_options.py}   |   34 +-
 .../vivid_py3_mpi_options.py}                      |    3 +-
 .../vivid_py3_options.py}                          |    2 +-
 scripts/makesrc.sh                                 |   17 +
 scripts/py27+3_64.valgrind                         |  222 ++
 scripts/py_comp.py                                 |   17 +-
 site_scons/dependencies.py                         |   47 +-
 site_scons/extractdebbuild.py                      |    4 +-
 site_scons/grouptest.py                            |    2 +
 site_scons/site_init.py                            |   16 +-
 site_scons/site_tools/nvcc.py                      |   18 +
 speckley/py_src/__init__.py                        |    4 +-
 speckley/src/Brick.cpp                             |    6 +-
 speckley/src/Rectangle.cpp                         |    6 +-
 speckley/src/SpeckleyDomain.cpp                    |    1 +
 speckley/test/python/run_SpeckleyRipleyCoupler.py  |    4 +-
 speckley/test/python/run_readWriteOnSpeckley.py    |    2 +
 speckley/test/python/run_specialOnSpeckley.py      |    3 +-
 svn_version                                        |    1 +
 tools/testrunner.py                                |   19 +-
 weipa/py_src/__init__.py                           |   10 +-
 weipa/src/FinleyElements.cpp                       |   15 +-
 weipa/src/FinleyNodes.cpp                          |   28 +-
 weipa/src/RipleyNodes.cpp                          |    3 +-
 weipa/src/SpeckleyNodes.cpp                        |    3 +-
 weipa/test/python/run_savesilo_tests.py            |   11 +
 weipa/test/python/run_savevtk_tests.py             |    2 +
 489 files changed, 14782 insertions(+), 7166 deletions(-)

diff --git a/SConstruct b/SConstruct
index 70d155e..f136186 100644
--- a/SConstruct
+++ b/SConstruct
@@ -140,7 +140,7 @@ vars.AddVariables(
   ('escript_opts_version', 'Version of options file (do not specify on command line)'),
   ('SVN_VERSION', 'Do not use from options file', -2),
   ('pythoncmd', 'which python to compile with','python'),
-  ('usepython3', 'Is this a python3 build? (experimental)', False),
+  ('usepython3', 'Is this a python3 build?', False),
   ('pythonlibname', 'Name of the python library to link. (This is found automatically for python2.X.)', ''),
   ('pythonlibpath', 'Path to the python library. (You should not need to set this unless your python has moved)',''),
   ('pythonincpath','Path to python include files. (You should not need to set this unless your python has moved',''),
@@ -723,7 +723,7 @@ def print_summary():
     print("Escript/Finley revision %s"%global_revision)
     print("  Install prefix:  %s"%env['prefix'])
     print("          Python:  %s"%sysconfig.PREFIX)
-    print("           boost:  %s"%env['boost_prefix'])
+    print("           boost:  %s (Version %s)"%(env['boost_prefix'],env['boost_version']))
     if env['numpy_h']:
         print("           numpy:  YES (with headers)")
     else:
@@ -732,6 +732,10 @@ def print_summary():
         print("             MPI:  YES (flavour: %s)"%env['mpi'])
     else:
         print("             MPI:  NO")
+    if env['parmetis']:
+        print("        ParMETIS:  %s (Version %s)"%(env['parmetis_prefix'],env['parmetis_version']))
+    else:
+        print("        ParMETIS:  NO")
     if env['uselapack']:
         print("          LAPACK:  YES (flavour: %s)"%env['lapack'])
     else:
@@ -742,17 +746,13 @@ def print_summary():
         print("            CUDA:  NO")
     d_list=[]
     e_list=[]
-    for i in 'debug','openmp','boomeramg','gdal','mkl','netcdf','papi','parmetis','pyproj','scipy','silo','sympy','umfpack','visit':
+    for i in 'debug','openmp','boomeramg','cppunit','gdal','mkl','netcdf','papi','pyproj','scipy','silo','sympy','umfpack','visit':
         if env[i]: e_list.append(i)
         else: d_list.append(i)
     for i in e_list:
         print("%16s:  YES"%i)
     for i in d_list:
         print("%16s:  NO"%i)
-    if env['cppunit']:
-        print("         CppUnit:  YES")
-    else:
-        print("         CppUnit:  NO")
     if env['gmshpy']:
         gmshpy=" + python module"
     else:
diff --git a/cusplibrary/build/build-env.py b/cusplibrary/build/build-env.py
index 603b003..40948b7 100644
--- a/cusplibrary/build/build-env.py
+++ b/cusplibrary/build/build-env.py
@@ -1,3 +1,20 @@
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+from __future__ import print_function, division
+
 EnsureSConsVersion(1,2)
 
 import os
@@ -56,13 +73,13 @@ def get_mkl_paths():
     lib_base = os.environ['MKLROOT'] + '/lib'
     dirs = os.listdir(lib_base)
     for dir in dirs :
-	# select 64/32 bit MKL library path based on architecture
-	if arch64 == True and dir.find('64') > -1 :
-    	  lib_path = lib_base + '/' + dir
-	  break
-	elif arch64 == False and dir.find('64') == -1 :
-    	  lib_path = lib_base + '/' + dir
-	  break
+        # select 64/32 bit MKL library path based on architecture
+        if arch64 == True and dir.find('64') > -1 :
+          lib_path = lib_base + '/' + dir
+          break
+        elif arch64 == False and dir.find('64') == -1 :
+          lib_path = lib_base + '/' + dir
+          break
 
     if lib_path == lib_base :
       raise ValueError, 'Could not find MKL library directory which matches the arctitecture.'
@@ -306,7 +323,7 @@ def Environment():
   if env['hostspblas'] == 'mkl':
     intel_lib = 'mkl_intel'
     if platform.machine()[-2:] == '64':
-	intel_lib += '_lp64'
+        intel_lib += '_lp64'
     
     (mkl_lib_path,mkl_inc_path) = get_mkl_paths()
     env.Append(CPPPATH = [mkl_inc_path])
@@ -327,7 +344,7 @@ def Environment():
   # XXX we should probably just copy the entire environment
   if os.name == 'posix':
     if ('DYLD_LIBRARY_PATH' in os.environ) and (env['PLATFORM'] == "darwin") :
-      	env['ENV']['DYLD_LIBRARY_PATH'] = os.environ['DYLD_LIBRARY_PATH']
+        env['ENV']['DYLD_LIBRARY_PATH'] = os.environ['DYLD_LIBRARY_PATH']
     elif 'LD_LIBRARY_PATH' in os.environ:
       env['ENV']['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH']
 
diff --git a/cusplibrary/build/nvcc.py b/cusplibrary/build/nvcc.py
index 0596f93..f155ba1 100644
--- a/cusplibrary/build/nvcc.py
+++ b/cusplibrary/build/nvcc.py
@@ -1,3 +1,18 @@
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
 """SCons.Tool.nvcc
 
 Tool-specific initialization for NVIDIA CUDA Compiler.
@@ -7,6 +22,8 @@ It will usually be imported through the generic SCons.Tool.Tool()
 selection method.
 
 """
+from __future__ import print_function, division
+
 
 import SCons.Tool
 import SCons.Scanner.C
diff --git a/cusplibrary/performance/spmv/scripts/benchmark.py b/cusplibrary/performance/spmv/scripts/benchmark.py
index d903a9c..1df1bcb 100644
--- a/cusplibrary/performance/spmv/scripts/benchmark.py
+++ b/cusplibrary/performance/spmv/scripts/benchmark.py
@@ -1,4 +1,22 @@
 #!/usr/bin/env python 
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+from __future__ import print_function, division
+
 import os,csv
 
 device_id = '0'  # index of the device to use
diff --git a/cusplibrary/testing/data/random_10x10/generator.py b/cusplibrary/testing/data/random_10x10/generator.py
index b911bf7..fd22c66 100644
--- a/cusplibrary/testing/data/random_10x10/generator.py
+++ b/cusplibrary/testing/data/random_10x10/generator.py
@@ -1,3 +1,21 @@
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+from __future__ import print_function, division
+
 from scipy.sparse import coo_matrix
 from scipy.io import mmwrite
 from numpy.random import permutation
diff --git a/doc/SConscript b/doc/SConscript
index 4dc9129..da27e8c 100644
--- a/doc/SConscript
+++ b/doc/SConscript
@@ -89,6 +89,7 @@ sortOutExample('usersguide/poisson.py')
 sortOutExample('usersguide/diffusion.py')
 sortOutExample('usersguide/poisson_vtk.py')
 sortOutExample('usersguide/darcy.py')
+sortOutExample('usersguide/dirac.py')
 sortOutExample('usersguide/slip.py')
 sortOutExample('usersguide/int_save.py')
 sortOutExample('usersguide/wave.py', needsMPL=True)
@@ -97,6 +98,7 @@ sortOutExample('usersguide/quad.py', needsGMSH=True)
 sortOutExample('usersguide/brick.py', needsGMSH=True)
 sortOutExample('usersguide/refine.py', needsGMSH=True)
 sortOutExample('usersguide/poisson_matplotlib.py', needsGD=True, allowsMPI=False)
+sortOutExample('usersguide/voxet_reader.py')
 
 sortOutExample('geotutorial/steadystate_variablek.py')
 sortOutExample('geotutorial/steadystate.py')
@@ -142,6 +144,11 @@ sortOutExample('inversion/mag_netcdf.py')
 sortOutExample('inversion/plot_ermapper.py', needsMPL=True, allowsMPI=False)
 sortOutExample('inversion/plot_netcdf.py', needsMPL=True, allowsMPI=False)
 sortOutExample('inversion/dc_forward.py', needsGMSH=True)
+
+
+sortOutExample('inversion/test_commemi1.py', needsMPL=True, allowsMPI=True)
+sortOutExample('inversion/test_commemi4.py', needsMPL=True, allowsMPI=True)
+
 example_deps.append('inversion/content.txt')
 example_deps.append('inversion/data/GravitySmall')
 example_deps.append('inversion/data/MagneticSmall')
@@ -165,7 +172,16 @@ if len(skipped_tests)>0:
 
 example_files = example_files_allow_mpi + example_files_no_mpi + example_deps
 
-ex2=[os.path.join("examples", str(x)) for x in example_files]
+wave_examples = ['inversion/synthetic_HTI.py',
+    'inversion/synthetic_VTI.py',
+    'inversion/synthetic_TTI.py',
+    'inversion/synthetic_sonic.py',
+    'inversion/synthetic_sonicHTI.py']
+
+for i in wave_examples:
+    sortOutExample(i)
+
+ex2=[os.path.join("examples", str(x)) for x in example_files]#+wave_examples]
 
 #=============================================================================
 
diff --git a/doc/cookbook/example01.tex b/doc/cookbook/example01.tex
index fd1cb2f..024b539 100644
--- a/doc/cookbook/example01.tex
+++ b/doc/cookbook/example01.tex
@@ -611,7 +611,7 @@ Two types will be demonstrated in this cookbook;
 The \mpl package is a component of SciPy\footnote{\url{http://www.scipy.org}}
 and is good for basic graphs and plots. 
 For more complex visualisation tasks, in particular two and three dimensional
-problems we recommend the use of more advanced tools. For instance,  \mayavi
+problems we recommend the use of more advanced tools. For instance, \mayavi
 \footnote{\url{http://code.enthought.com/projects/mayavi/}}
 which is based upon the \verb|VTK| toolkit. The usage of \verb|VTK| based 
 visualisation is discussed in Chapter~\ref{Sec:2DHD} which focuses on a two
diff --git a/doc/cookbook/example07.tex b/doc/cookbook/example07.tex
index e3eb512..c42bf13 100644
--- a/doc/cookbook/example07.tex
+++ b/doc/cookbook/example07.tex
@@ -213,7 +213,7 @@ u_m1=u
 \subsection{Visualising the Source}
 There are two options for visualising the source. The first is to export the
 initial conditions of the model to VTK, which can be interpreted as a scalar
-surface in Mayavi2. The second is to take a cross section of the model which
+surface in \mayavi. The second is to take a cross section of the model which
 will require the \textit{Locator} function. 
 First \verb!Locator! must be imported;
 \begin{python}
diff --git a/doc/cookbook/example10.tex b/doc/cookbook/example10.tex
index e059bc5..df1e406 100644
--- a/doc/cookbook/example10.tex
+++ b/doc/cookbook/example10.tex
@@ -19,7 +19,7 @@ In this chapter the gravitational potential field is developed for \esc.
 Gravitational fields are present in many modelling scenarios, including
 geophysical investigations, planetary motion and attraction and micro-particle
 interactions. Gravitational fields also present an opportunity to demonstrate
-the saving and visualisation of vector data for Mayavi, and the construction of
+the saving and visualisation of vector data for \mayavi, and the construction of
 variable sized meshes.
 
 The gravitational potential $U$ at a point $P$ due to a region with a mass
@@ -93,7 +93,7 @@ ser to \verb!r=0.0!. This is a Dirichlet boundary condition.
 \sslist{example10a.py}
 A gravity pole is used in this example to demonstrate the vector characteristics
 of gravity, and also to demonstrate how this information can be exported for
-visualisation to Mayavi or an equivalent using the VTK data format.
+visualisation to \mayavi or an equivalent using the VTK data format.
 
 The solution script for this section is very simple. First the domain is
 constructed, then the parameters of the model are set, and finally the steady
@@ -127,11 +127,11 @@ saveVTK(os.path.join(save_path,"ex10a.vtu"),\
         grav_pot=sol,g_field=g_field,g_fieldz=g_fieldz,gz=gz)
 \end{python}
 
-It is quite simple to visualise the data from the gravity solution in Mayavi2.
-With Mayavi2 open go to File, Load data, Open file \ldots as in
+It is quite simple to visualise the data from the gravity solution in \mayavi.
+With \mayavi open go to File, Load data, Open file \ldots as in
 \autoref{fig:mayavi2openfile} and select the saved data file. The data will
 have then been loaded and is ready for visualisation. Notice that under the data
-object in the Mayavi2 navigation tree the 4 values saved to the VTK file are
+object in the \mayavi navigation tree the 4 values saved to the VTK file are
 available (\autoref{fig:mayavi2data}). There are two vector values,
 \verb|gfield| and \verb|gfieldz|. Note that to plot both of these on the same
 chart requires that the data object be imported twice.
@@ -149,7 +149,7 @@ potential and gravitational field vectors is illustrated in
 \begin{figure}[ht]
 \centering
 \includegraphics[width=0.75\textwidth]{figures/mayavi2_openfile.png}
-\caption{Open a file in Mayavi2}
+\caption{Open a file in \mayavi}
 \label{fig:mayavi2openfile}
 \end{figure}
 
diff --git a/doc/cookbook/verinfo.tex b/doc/cookbook/verinfo.tex
deleted file mode 100644
index e509a29..0000000
--- a/doc/cookbook/verinfo.tex
+++ /dev/null
@@ -1,7 +0,0 @@
-
-\newcommand{\relver}{development}
-\newcommand{\reldate}{\today}
-
-
-%\newcommand{\relver}{4.0}
-%\newcommand{\reldate}{\today}
diff --git a/doc/cookbook/verinfo.tex b/doc/cookbook/verinfo.tex
new file mode 120000
index 0000000..256af4d
--- /dev/null
+++ b/doc/cookbook/verinfo.tex
@@ -0,0 +1 @@
+../verinfo.tex
\ No newline at end of file
diff --git a/doc/doxygen/doxygen_esys b/doc/doxygen/doxygen_esys
index 8d94b0d..ea6a3db 100644
--- a/doc/doxygen/doxygen_esys
+++ b/doc/doxygen/doxygen_esys
@@ -1,110 +1,129 @@
-# Doxyfile 1.8.1.2
+# Doxyfile 1.8.8
 
 # This file describes the settings to be used by the documentation system
 # doxygen (www.doxygen.org) for a project.
 #
-# All text after a hash (#) is considered a comment and will be ignored.
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
 # The format is:
-#       TAG = value [value, ...]
-# For lists items can also be appended using:
-#       TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ").
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
 
 #---------------------------------------------------------------------------
 # Project related configuration options
 #---------------------------------------------------------------------------
 
 # This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the
-# iconv built into libc) for the transcoding. See
-# http://www.gnu.org/software/libiconv for the list of possible encodings.
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
 
 DOXYFILE_ENCODING      = UTF-8
 
-# The PROJECT_NAME tag is a single word (or sequence of words) that should
-# identify the project. Note that if you do not use Doxywizard you need
-# to put quotes around the project name if it contains spaces.
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
 
 PROJECT_NAME           = escript
 
-# The PROJECT_NUMBER tag can be used to enter a project or revision number.
-# This could be handy for archiving the generated documentation or
-# if some version control system is used.
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
 
 PROJECT_NUMBER         = Revision$Revision:1215$
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer
-# a quick idea about the purpose of the project. Keep the description short.
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
 
 PROJECT_BRIEF          =
 
-# With the PROJECT_LOGO tag one can specify an logo or icon that is
-# included in the documentation. The maximum height of the logo should not
-# exceed 55 pixels and the maximum width should not exceed 200 pixels.
-# Doxygen will copy the logo to the output directory.
+# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
+# the documentation. The maximum height of the logo should not exceed 55 pixels
+# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
+# to the output directory.
 
 PROJECT_LOGO           =
 
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
-# base path where the generated documentation will be put.
-# If a relative path is entered, it will be relative to the location
-# where doxygen was started. If left blank the current directory will be used.
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
 
 OUTPUT_DIRECTORY       = release/doc/doxygen
 
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
-# 4096 sub-directories (in 2 levels) under the output directory of each output
-# format and will distribute the generated files over these directories.
-# Enabling this option can be useful when feeding doxygen a huge amount of
-# source files, where putting all generated files in the same directory would
-# otherwise cause performance problems for the file system.
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise causes
+# performance problems for the file system.
+# The default value is: NO.
 
 CREATE_SUBDIRS         = NO
 
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES    = NO
+
 # The OUTPUT_LANGUAGE tag is used to specify the language in which all
 # documentation generated by doxygen is written. Doxygen will use this
 # information to generate all constant output in the proper language.
-# The default language is English, other supported languages are:
-# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
-# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
-# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
-# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
-# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
-# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
 
 OUTPUT_LANGUAGE        = English
 
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
-# include brief member descriptions after the members that are listed in
-# the file and class documentation (similar to JavaDoc).
-# Set to NO to disable this.
+# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
 
 BRIEF_MEMBER_DESC      = YES
 
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
-# the brief description of a member or function before the detailed description.
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
 # brief descriptions will be completely suppressed.
+# The default value is: YES.
 
 REPEAT_BRIEF           = YES
 
-# This tag implements a quasi-intelligent brief description abbreviator
-# that is used to form the text in various listings. Each string
-# in this list, if found as the leading text of the brief description, will be
-# stripped from the text and the result after processing the whole list, is
-# used as the annotated text. Otherwise, the brief description is used as-is.
-# If left blank, the following values are used ("$name" is automatically
-# replaced with the name of the entity): "The $name class" "The $name widget"
-# "The $name file" "is" "provides" "specifies" "contains"
-# "represents" "a" "an" "the"
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
 
 ABBREVIATE_BRIEF       =
 
 # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# Doxygen will generate a detailed section even if there is only a brief
+# doxygen will generate a detailed section even if there is only a brief
 # description.
+# The default value is: NO.
 
 ALWAYS_DETAILED_SEC    = YES
 
@@ -112,169 +131,207 @@ ALWAYS_DETAILED_SEC    = YES
 # inherited members of a class in the documentation of that class as if those
 # members were ordinary class members. Constructors, destructors and assignment
 # operators of the base classes will not be shown.
+# The default value is: NO.
 
 INLINE_INHERITED_MEMB  = NO
 
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
-# path before files name in the file list and in the header files. If set
-# to NO the shortest path that makes the file name unique will be used.
+# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
 
 FULL_PATH_NAMES        = NO
 
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
-# can be used to strip a user-defined part of the path. Stripping is
-# only done if one of the specified strings matches the left-hand part of
-# the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the
-# path to strip.
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
 
 STRIP_FROM_PATH        =
 
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
-# the path mentioned in the documentation of a class, which tells
-# the reader which header file to include in order to use a class.
-# If left blank only the name of the header file containing the class
-# definition is used. Otherwise one should specify the include paths that
-# are normally passed to the compiler using the -I flag.
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
 
 STRIP_FROM_INC_PATH    =
 
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
-# (but less readable) file names. This can be useful if your file system
-# doesn't support long names like on DOS, Mac, or CD-ROM.
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful is your file systems doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
 
 SHORT_NAMES            = NO
 
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
-# will interpret the first line (until the first dot) of a JavaDoc-style
-# comment as the brief description. If set to NO, the JavaDoc
-# comments will behave just like regular Qt-style comments
-# (thus requiring an explicit @brief command for a brief description.)
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
 
 JAVADOC_AUTOBRIEF      = NO
 
-# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
-# interpret the first line (until the first dot) of a Qt-style
-# comment as the brief description. If set to NO, the comments
-# will behave just like regular Qt-style comments (thus requiring
-# an explicit \brief command for a brief description.)
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
 
 QT_AUTOBRIEF           = NO
 
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
-# treat a multi-line C++ special comment block (i.e. a block of //! or ///
-# comments) as a brief description. This used to be the default behaviour.
-# The new default is to treat a multi-line C++ comment block as a detailed
-# description. Set this tag to YES if you prefer the old behaviour instead.
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
 
 MULTILINE_CPP_IS_BRIEF = NO
 
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
-# member inherits the documentation from any documented member that it
-# re-implements.
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
 
 INHERIT_DOCS           = YES
 
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
-# a new page for each member. If set to NO, the documentation of a member will
-# be part of the file/class/namespace that contains it.
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
+# new page for each member. If set to NO, the documentation of a member will be
+# part of the file/class/namespace that contains it.
+# The default value is: NO.
 
 SEPARATE_MEMBER_PAGES  = NO
 
-# The TAB_SIZE tag can be used to set the number of spaces in a tab.
-# Doxygen uses this value to replace tabs by spaces in code fragments.
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
 
 TAB_SIZE               = 4
 
-# This tag can be used to specify a number of aliases that acts
-# as commands in the documentation. An alias has the form "name=value".
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to
-# put the command \sideeffect (or @sideeffect) in the documentation, which
-# will result in a user-defined paragraph with heading "Side Effects:".
-# You can put \n's in the value part of an alias to insert newlines.
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
 
 ALIASES                =
 
 # This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding
-# "class=itcl::class" will allow you to use the command class in the
-# itcl::class meaning.
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
 
 TCL_SUBST              =
 
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
-# sources only. Doxygen will then generate output that is more tailored for C.
-# For instance, some of the names that are used will be different. The list
-# of all members will be omitted, etc.
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
 
 OPTIMIZE_OUTPUT_FOR_C  = NO
 
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
-# sources only. Doxygen will then generate output that is more tailored for
-# Java. For instance, namespaces will be presented as packages, qualified
-# scopes will look different, etc.
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
 
 OPTIMIZE_OUTPUT_JAVA   = NO
 
 # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources only. Doxygen will then generate output that is more tailored for
-# Fortran.
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
 
 OPTIMIZE_FOR_FORTRAN   = NO
 
 # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for
-# VHDL.
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
 
 OPTIMIZE_OUTPUT_VHDL   = NO
 
 # Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given extension.
-# Doxygen has a built-in mapping, but you can override or extend it using this
-# tag. The format is ext=language, where ext is a file extension, and language
-# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
-# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
-# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
-# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
-# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
+# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
+# Fortran. In the later case the parser tries to guess whether the code is fixed
+# or free formatted code, this is the default for Fortran type files), VHDL. For
+# instance to make doxygen treat .inc files as Fortran files (default is PHP),
+# and .f files as C (default is Fortran), use: inc=Fortran f=C.
+#
+# Note For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
 
 EXTENSION_MAPPING      =
 
-# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
-# comments according to the Markdown format, which allows for more readable
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
 # documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you
-# can mix doxygen, HTML, and XML commands with Markdown formatting.
-# Disable only in case of backward compatibilities issues.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
 
 MARKDOWN_SUPPORT       = YES
 
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT       = YES
+
 # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should
-# set this tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
-# func(std::string) {}). This also makes the inheritance and collaboration
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also make the inheritance and collaboration
 # diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
 
 BUILTIN_STL_SUPPORT    = YES
 
 # If you use Microsoft's C++/CLI language, you should set this option to YES to
 # enable parsing support.
+# The default value is: NO.
 
 CPP_CLI_SUPPORT        = NO
 
-# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
-# Doxygen will parse them like normal C++ but will assume all classes use public
-# instead of private inheritance when no explicit protection keyword is present.
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
 
 SIP_SUPPORT            = NO
 
-# For Microsoft's IDL there are propget and propput attributes to indicate getter
-# and setter methods for a property. Setting this option to YES (the default)
-# will make doxygen replace the get and set methods by a property in the
-# documentation. This will only work if the methods are indeed getting or
-# setting a simple type. If this is not the case, or you want to show the
-# methods anyway, you should set this option to NO.
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen to replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
 
 IDL_PROPERTY_SUPPORT   = YES
 
@@ -282,67 +339,61 @@ IDL_PROPERTY_SUPPORT   = YES
 # tag is set to YES, then doxygen will reuse the documentation of the first
 # member in the group (if any) for the other members of the group. By default
 # all members of a group must be documented explicitly.
+# The default value is: NO.
 
 DISTRIBUTE_GROUP_DOC   = NO
 
-# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
-# the same type (for instance a group of public functions) to be put as a
-# subgroup of that type (e.g. under the Public Functions section). Set it to
-# NO to prevent subgrouping. Alternatively, this can be done per class using
-# the \nosubgrouping command.
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
 
 SUBGROUPING            = YES
 
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
-# unions are shown inside the group in which they are included (e.g. using
-# @ingroup) instead of on a separate page (for HTML and Man pages) or
-# section (for LaTeX and RTF).
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
 
 INLINE_GROUPED_CLASSES = NO
 
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
-# unions with only public data fields will be shown inline in the documentation
-# of the scope in which they are defined (i.e. file, namespace, or group
-# documentation), provided this scope is documented. If set to NO (the default),
-# structs, classes, and unions are shown on a separate page (for HTML and Man
-# pages) or section (for LaTeX and RTF).
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
 
 INLINE_SIMPLE_STRUCTS  = NO
 
-# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
-# is documented as struct, union, or enum with the name of the typedef. So
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
 # typedef struct TypeS {} TypeT, will appear in the documentation as a struct
 # with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically
-# be useful for C code in case the coding convention dictates that all compound
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
 # types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
 
 TYPEDEF_HIDES_STRUCT   = NO
 
-# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
-# determine which symbols to keep in memory and which to flush to disk.
-# When the cache is full, less often used symbols will be written to disk.
-# For small to medium size projects (<1000 input files) the default value is
-# probably good enough. For larger projects a too small cache size can cause
-# doxygen to be busy swapping symbols to and from disk most of the time
-# causing a significant performance penalty.
-# If the system has enough physical memory increasing the cache will improve the
-# performance by keeping more symbols in memory. Note that the value works on
-# a logarithmic scale so increasing the size by one will roughly double the
-# memory usage. The cache size is given by this formula:
-# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols.
-
-SYMBOL_CACHE_SIZE      = 0
-
-# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
-# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
-# their name and scope. Since this can be an expensive process and often the
-# same symbol appear multiple times in the code, doxygen keeps a cache of
-# pre-resolved symbols. If the cache is too small doxygen will become slower.
-# If the cache is too large, memory is wasted. The cache size is given by this
-# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols.
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
 
 LOOKUP_CACHE_SIZE      = 0
 
@@ -351,309 +402,356 @@ LOOKUP_CACHE_SIZE      = 0
 #---------------------------------------------------------------------------
 
 # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available.
-# Private class members and static file members will be hidden unless
-# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
 
 EXTRACT_ALL            = YES
 
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
-# will be included in the documentation.
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
 
 EXTRACT_PRIVATE        = YES
 
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal scope will be included in the documentation.
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
 
 EXTRACT_PACKAGE        = NO
 
-# If the EXTRACT_STATIC tag is set to YES all static members of a file
-# will be included in the documentation.
+# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
 
 EXTRACT_STATIC         = YES
 
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
-# defined locally in source files will be included in the documentation.
-# If set to NO only classes defined in header files are included.
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
 
 EXTRACT_LOCAL_CLASSES  = YES
 
-# This flag is only useful for Objective-C code. When set to YES local
-# methods, which are defined in the implementation section but not in
-# the interface are included in the documentation.
-# If set to NO (the default) only methods in the interface are included.
+# This flag is only useful for Objective-C code. When set to YES local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO only methods in the interface are
+# included.
+# The default value is: NO.
 
 EXTRACT_LOCAL_METHODS  = NO
 
 # If this flag is set to YES, the members of anonymous namespaces will be
 # extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base
-# name of the file that contains the anonymous namespace. By default
-# anonymous namespaces are hidden.
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespace
+# are hidden.
+# The default value is: NO.
 
 EXTRACT_ANON_NSPACES   = NO
 
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
-# undocumented members of documented classes, files or namespaces.
-# If set to NO (the default) these members will be included in the
-# various overviews, but no documentation section is generated.
-# This option has no effect if EXTRACT_ALL is enabled.
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
 
 HIDE_UNDOC_MEMBERS     = NO
 
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy.
-# If set to NO (the default) these classes will be included in the various
-# overviews. This option has no effect if EXTRACT_ALL is enabled.
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO these classes will be included in the various overviews. This option has
+# no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
 
 HIDE_UNDOC_CLASSES     = NO
 
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
-# friend (class|struct|union) declarations.
-# If set to NO (the default) these declarations will be included in the
-# documentation.
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO these declarations will be
+# included in the documentation.
+# The default value is: NO.
 
 HIDE_FRIEND_COMPOUNDS  = NO
 
-# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
-# documentation blocks found inside the body of a function.
-# If set to NO (the default) these blocks will be appended to the
-# function's detailed documentation block.
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
 
 HIDE_IN_BODY_DOCS      = NO
 
-# The INTERNAL_DOCS tag determines if documentation
-# that is typed after a \internal command is included. If the tag is set
-# to NO (the default) then the documentation will be excluded.
-# Set it to YES to include the internal documentation.
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
 
 INTERNAL_DOCS          = NO
 
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
-# file names in lower-case letters. If set to YES upper-case letters are also
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES upper-case letters are also
 # allowed. This is useful if you have classes or files whose names only differ
 # in case and if your file system supports case sensitive file names. Windows
 # and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
 
 CASE_SENSE_NAMES       = YES
 
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
-# will show members with their full class and namespace scopes in the
-# documentation. If set to YES the scope will be hidden.
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES the
+# scope will be hidden.
+# The default value is: NO.
 
 HIDE_SCOPE_NAMES       = NO
 
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
-# will put a list of the files that are included by a file in the documentation
-# of that file.
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
 
 SHOW_INCLUDE_FILES     = YES
 
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
-# will list include files with double quotes in the documentation
-# rather than with sharp brackets.
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC  = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
 
 FORCE_LOCAL_INCLUDES   = NO
 
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
-# is inserted in the documentation for inline members.
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
 
 INLINE_INFO            = YES
 
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
-# will sort the (detailed) documentation of file and class members
-# alphabetically by member name. If set to NO the members will appear in
-# declaration order.
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order.
+# The default value is: YES.
 
 SORT_MEMBER_DOCS       = YES
 
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
-# brief documentation of file, namespace and class members alphabetically
-# by member name. If set to NO (the default) the members will appear in
-# declaration order.
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
 
 SORT_BRIEF_DOCS        = NO
 
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
-# will sort the (brief and detailed) documentation of class members so that
-# constructors and destructors are listed first. If set to NO (the default)
-# the constructors will appear in the respective orders defined by
-# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
-# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
-# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
 
 SORT_MEMBERS_CTORS_1ST = NO
 
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
-# hierarchy of group names into alphabetical order. If set to NO (the default)
-# the group names will appear in their defined order.
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
 
 SORT_GROUP_NAMES       = NO
 
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
-# sorted by fully-qualified names, including namespaces. If set to
-# NO (the default), the class list will be sorted only by class name,
-# not including the namespace part.
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
 # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the
-# alphabetical list.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
 
 SORT_BY_SCOPE_NAME     = NO
 
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
-# do proper type resolution of all parameters of a function it will reject a
-# match between the prototype and the implementation of a member function even
-# if there is only one candidate or it is obvious which candidate to choose
-# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
-# will still accept a match between prototype and implementation in such cases.
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
 
 STRICT_PROTO_MATCHING  = NO
 
-# The GENERATE_TODOLIST tag can be used to enable (YES) or
-# disable (NO) the todo list. This list is created by putting \todo
-# commands in the documentation.
+# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
+# todo list. This list is created by putting \todo commands in the
+# documentation.
+# The default value is: YES.
 
 GENERATE_TODOLIST      = YES
 
-# The GENERATE_TESTLIST tag can be used to enable (YES) or
-# disable (NO) the test list. This list is created by putting \test
-# commands in the documentation.
+# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
+# test list. This list is created by putting \test commands in the
+# documentation.
+# The default value is: YES.
 
 GENERATE_TESTLIST      = YES
 
-# The GENERATE_BUGLIST tag can be used to enable (YES) or
-# disable (NO) the bug list. This list is created by putting \bug
-# commands in the documentation.
+# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
 
 GENERATE_BUGLIST       = YES
 
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
-# disable (NO) the deprecated list. This list is created by putting
-# \deprecated commands in the documentation.
+# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
 
 GENERATE_DEPRECATEDLIST= YES
 
-# The ENABLED_SECTIONS tag can be used to enable conditional
-# documentation sections, marked by \if sectionname ... \endif.
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
 
 ENABLED_SECTIONS       =
 
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
-# the initial value of a variable or macro consists of for it to appear in
-# the documentation. If the initializer consists of more lines than specified
-# here it will be hidden. Use a value of 0 to hide initializers completely.
-# The appearance of the initializer of individual variables and macros in the
-# documentation can be controlled using \showinitializer or \hideinitializer
-# command in the documentation regardless of this setting.
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
 
 MAX_INITIALIZER_LINES  = 30
 
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
-# at the bottom of the documentation of classes and structs. If set to YES the
-# list will mention the files that were used to generate the documentation.
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES the list
+# will mention the files that were used to generate the documentation.
+# The default value is: YES.
 
 SHOW_USED_FILES        = YES
 
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
-# This will remove the Files entry from the Quick Index and from the
-# Folder Tree View (if specified). The default is YES.
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
 
 SHOW_FILES             = YES
 
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
-# Namespaces page.
-# This will remove the Namespaces entry from the Quick Index
-# and from the Folder Tree View (if specified). The default is YES.
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
 
 SHOW_NAMESPACES        = YES
 
 # The FILE_VERSION_FILTER tag can be used to specify a program or script that
 # doxygen should invoke to get the current version for each file (typically from
 # the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of
-# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
-# provided by doxygen. Whatever the program writes to standard output
-# is used as the file version. See the manual for examples.
+# popen()) the command command input-file, where command is the value of the
+# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
 
 FILE_VERSION_FILTER    =
 
 # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
 # by doxygen. The layout file controls the global structure of the generated
 # output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option.
-# You can optionally specify a file name after the option, if omitted
-# DoxygenLayout.xml will be used as the name of the layout file.
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
 
 LAYOUT_FILE            =
 
-# The CITE_BIB_FILES tag can be used to specify one or more bib files
-# containing the references data. This must be a list of .bib files. The
-# .bib extension is automatically appended if omitted. Using this command
-# requires the bibtex tool to be installed. See also
-# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
-# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
-# feature you need bibtex and perl available in the search path.
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. See also \cite for info how to create references.
 
 CITE_BIB_FILES         =
 
 #---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
+# Configuration options related to warning and progress messages
 #---------------------------------------------------------------------------
 
-# The QUIET tag can be used to turn on/off the messages that are generated
-# by doxygen. Possible values are YES and NO. If left blank NO is used.
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
 
 QUIET                  = NO
 
 # The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated by doxygen. Possible values are YES and NO. If left blank
-# NO is used.
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
 
 WARNINGS               = YES
 
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
-# automatically be disabled.
+# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
 
 WARN_IF_UNDOCUMENTED   = YES
 
-# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some
-# parameters in a documented function, or documenting parameters that
-# don't exist or using markup commands wrongly.
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
 
 WARN_IF_DOC_ERROR      = YES
 
-# The WARN_NO_PARAMDOC option can be enabled to get warnings for
-# functions that are documented, but have no documentation for their parameters
-# or return value. If set to NO (the default) doxygen will only warn about
-# wrong or incomplete parameter documentation, but not about the absence of
-# documentation.
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO doxygen will only warn about wrong or incomplete parameter
+# documentation, but not about the absence of documentation.
+# The default value is: NO.
 
 WARN_NO_PARAMDOC       = NO
 
-# The WARN_FORMAT tag determines the format of the warning messages that
-# doxygen can produce. The string should contain the $file, $line, and $text
-# tags, which will be replaced by the file and line number from which the
-# warning originated and the warning text. Optionally the format may contain
-# $version, which will be replaced by the version of the file (if it could
-# be obtained via FILE_VERSION_FILTER)
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
 
 WARN_FORMAT            =
 
-# The WARN_LOGFILE tag can be used to specify a file to which warning
-# and error messages should be written. If left blank the output is written
-# to stderr.
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
 
 WARN_LOGFILE           =
 
 #---------------------------------------------------------------------------
-# configuration options related to the input files
+# Configuration options related to the input files
 #---------------------------------------------------------------------------
 
-# The INPUT tag can be used to specify the files and/or directories that contain
-# documented source files. You may enter file names like "myfile.cpp" or
-# directories like "/usr/src/myproject". Separate the files or directories
-# with spaces.
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces.
+# Note: If this tag is empty the current directory is searched.
 
 INPUT                  = escriptcore/src \
                          esysUtils/src \
@@ -663,38 +761,41 @@ INPUT                  = escriptcore/src \
                          pasowrap/src \
                          pythonMPI/src \
                          ripley/src \
-			 speckley/src \
+                         speckley/src \
                          weipa/src
 
 # This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
-# also the default input encoding. Doxygen uses libiconv (or the iconv built
-# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
-# the list of possible encodings.
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
 
 INPUT_ENCODING         = UTF-8
 
 # If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank the following patterns are tested:
-# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
-# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
-# *.f90 *.f *.for *.vhd *.vhdl
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank the
+# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+# *.qsf, *.as and *.js.
 
 FILE_PATTERNS          = *.c \
                          *.cpp \
                          *.h
 
-# The RECURSIVE tag can be used to turn specify whether or not subdirectories
-# should be searched for input files as well. Possible values are YES and NO.
-# If left blank NO is used.
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
 
 RECURSIVE              = YES
 
 # The EXCLUDE tag can be used to specify files and/or directories that should be
 # excluded from the INPUT source files. This way you can easily exclude a
 # subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
 # Note that relative paths are relative to the directory from which doxygen is
 # run.
 
@@ -703,14 +804,16 @@ EXCLUDE                = test
 # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
 # directories that are symbolic links (a Unix file system feature) are excluded
 # from the input.
+# The default value is: NO.
 
 EXCLUDE_SYMLINKS       = NO
 
 # If the value of the INPUT tag contains directories, you can use the
 # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories. Note that the wildcards are matched
-# against the file with absolute path, so to exclude all test directories
-# for example use the pattern */test/*
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
 
 EXCLUDE_PATTERNS       = */.svn/* \
                          */.svn \
@@ -721,755 +824,1108 @@ EXCLUDE_PATTERNS       = */.svn/* \
 # output. The symbol name can be a fully qualified name, a word, or if the
 # wildcard * is used, a substring. Examples: ANamespace, AClass,
 # AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
 
 EXCLUDE_SYMBOLS        =
 
-# The EXAMPLE_PATH tag can be used to specify one or more files or
-# directories that contain example code fragments that are included (see
-# the \include command).
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
 
 EXAMPLE_PATH           =
 
 # If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank all files are included.
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
 
 EXAMPLE_PATTERNS       =
 
 # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude
-# commands irrespective of the value of the RECURSIVE tag.
-# Possible values are YES and NO. If left blank NO is used.
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
 
 EXAMPLE_RECURSIVE      = NO
 
-# The IMAGE_PATH tag can be used to specify one or more files or
-# directories that contain image that are included in the documentation (see
-# the \image command).
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
 
 IMAGE_PATH             =
 
 # The INPUT_FILTER tag can be used to specify a program that doxygen should
 # invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command <filter> <input-file>, where <filter>
-# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
-# input file. Doxygen will then use the output that the filter program writes
-# to standard output.
-# If FILTER_PATTERNS is specified, this tag will be
-# ignored.
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
 
 INPUT_FILTER           =
 
 # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis.
-# Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match.
-# The filters are a list of the form:
-# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
-# info on how filters are used. If FILTER_PATTERNS is empty or if
-# non of the patterns match the file name, INPUT_FILTER is applied.
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
 
 FILTER_PATTERNS        =
 
 # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will be used to filter the input files when producing source
-# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# INPUT_FILTER ) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
 
 FILTER_SOURCE_FILES    = NO
 
 # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
-# and it is also possible to disable source filtering for a specific pattern
-# using *.ext= (so without naming a filter). This option only has effect when
-# FILTER_SOURCE_FILES is enabled.
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
 
 FILTER_SOURCE_PATTERNS =
 
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
 #---------------------------------------------------------------------------
-# configuration options related to source browsing
+# Configuration options related to source browsing
 #---------------------------------------------------------------------------
 
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will
-# be generated. Documented entities will be cross-referenced with these sources.
-# Note: To get rid of all source code in the generated output, make sure also
-# VERBATIM_HEADERS is set to NO.
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
 
 SOURCE_BROWSER         = NO
 
-# Setting the INLINE_SOURCES tag to YES will include the body
-# of functions and classes directly in the documentation.
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
 
 INLINE_SOURCES         = NO
 
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
-# doxygen to hide any special comment blocks from generated source code
-# fragments. Normal C, C++ and Fortran comments will always remain visible.
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
 
 STRIP_CODE_COMMENTS    = YES
 
-# If the REFERENCED_BY_RELATION tag is set to YES
-# then for each documented function all documented
-# functions referencing it will be listed.
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
 
 REFERENCED_BY_RELATION = YES
 
-# If the REFERENCES_RELATION tag is set to YES
-# then for each documented function all documented entities
-# called/used by that function will be listed.
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
 
 REFERENCES_RELATION    = YES
 
-# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
-# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
-# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
-# link to the source code.
-# Otherwise they will link to the documentation.
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
 
 REFERENCES_LINK_SOURCE = YES
 
-# If the USE_HTAGS tag is set to YES then the references to source code
-# will point to the HTML generated by the htags(1) tool instead of doxygen
-# built-in source browser. The htags tool is part of GNU's global source
-# tagging system (see http://www.gnu.org/software/global/global.html). You
-# will need version 4.8.6 or higher.
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS        = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
 
 USE_HTAGS              = NO
 
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
-# will generate a verbatim copy of the header file for each class for
-# which an include is specified. Set to NO to disable this.
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
 
 VERBATIM_HEADERS       = YES
 
+# If the CLANG_ASSISTED_PARSING tag is set to YES, then doxygen will use the
+# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
+# cost of reduced performance. This can be particularly helpful with template
+# rich C++ code for which doxygen's built-in parser lacks the necessary type
+# information.
+# Note: The availability of this option depends on whether or not doxygen was
+# compiled with the --with-libclang option.
+# The default value is: NO.
+
+CLANG_ASSISTED_PARSING = NO
+
+# If clang assisted parsing is enabled you can provide the compiler with command
+# line options that you would normally use when invoking the compiler. Note that
+# the include paths will already be set by doxygen for the files and directories
+# specified with INPUT and INCLUDE_PATH.
+# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
+
+CLANG_OPTIONS          =
+
 #---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
+# Configuration options related to the alphabetical class index
 #---------------------------------------------------------------------------
 
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
-# of all compounds will be generated. Enable this if the project
-# contains a lot of classes, structs, unions or interfaces.
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
 
 ALPHABETICAL_INDEX     = YES
 
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-# in which this list will be split (can be a number in the range [1..20])
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
 
 COLS_IN_ALPHA_INDEX    = 5
 
-# In case all classes in a project start with a common prefix, all
-# classes will be put under the same header in the alphabetical index.
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
-# should be ignored while generating the index headers.
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
 
 IGNORE_PREFIX          =
 
 #---------------------------------------------------------------------------
-# configuration options related to the HTML output
+# Configuration options related to the HTML output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
-# generate HTML output.
+# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
+# The default value is: YES.
 
 GENERATE_HTML          = YES
 
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `html' will be used as the default path.
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_OUTPUT            =
 
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
-# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
-# doxygen will generate files with .html extension.
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_FILE_EXTENSION    = .html
 
-# The HTML_HEADER tag can be used to specify a personal HTML header for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard header. Note that when using a custom header you are responsible
-#  for the proper inclusion of any scripts and style sheets that doxygen
-# needs, which is dependent on the configuration options used.
-# It is advised to generate a default header using "doxygen -w html
-# header.html footer.html stylesheet.css YourConfigFile" and then modify
-# that header. Note that the header is subject to change so you typically
-# have to redo this when upgrading to a newer version of doxygen or when
-# changing the value of configuration settings such as GENERATE_TREEVIEW!
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# For valid HTML the header file should include any scripts and style sheets
+# that doxygen needs, which depend on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_HEADER            =
 
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard footer.
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_FOOTER            =
 
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
-# style sheet that is used by each HTML page. It can be used to
-# fine-tune the look of the HTML output. If the tag is left blank doxygen
-# will generate a default style sheet. Note that doxygen will try to copy
-# the style sheet file to the HTML output directory, so don't put your own
-# style sheet in the HTML output directory as well, or it will be erased!
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_STYLESHEET        =
 
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra stylesheet files is of importance (e.g. the last
+# stylesheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET  =
+
 # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
 # other source files which should be copied to the HTML output directory. Note
 # that these files will be copied to the base HTML output directory. Use the
-# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that
-# the files will be copied as-is; there are no commands or markers available.
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_EXTRA_FILES       =
 
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
-# Doxygen will adjust the colors in the style sheet and background images
-# according to this color. Hue is specified as an angle on a colorwheel,
-# see http://en.wikipedia.org/wiki/Hue for more information.
-# For instance the value 0 represents red, 60 is yellow, 120 is green,
-# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
-# The allowed range is 0 to 359.
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the stylesheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_COLORSTYLE_HUE    = 220
 
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
-# the colors in the HTML output. For a value of 0 the output will use
-# grayscales only. A value of 255 will produce the most vivid colors.
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_COLORSTYLE_SAT    = 100
 
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
-# the luminance component of the colors in the HTML output. Values below
-# 100 gradually make the output lighter, whereas values above 100 make
-# the output darker. The value divided by 100 is the actual gamma applied,
-# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
-# and 100 does not change the gamma.
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_COLORSTYLE_GAMMA  = 80
 
 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting
-# this to NO can help when comparing the output of multiple runs.
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_TIMESTAMP         = YES
 
 # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
 # documentation will contain sections that can be hidden and shown after the
 # page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_DYNAMIC_SECTIONS  = NO
 
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
-# entries shown in the various tree structured indices initially; the user
-# can expand and collapse entries dynamically later on. Doxygen will expand
-# the tree to such a level that at most the specified number of entries are
-# visible (unless a fully collapsed tree already exceeds this amount).
-# So setting the number of entries 1 will produce a full collapsed tree by
-# default. 0 is a special value representing an infinite number of entries
-# and will result in a full expanded tree by default.
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries 1 will produce a full collapsed tree by default. 0 is a special value
+# representing an infinite number of entries and will result in a full expanded
+# tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_INDEX_NUM_ENTRIES = 100
 
-# If the GENERATE_DOCSET tag is set to YES, additional index files
-# will be generated that can be used as input for Apple's Xcode 3
-# integrated development environment, introduced with OSX 10.5 (Leopard).
-# To create a documentation set, doxygen will generate a Makefile in the
-# HTML output directory. Running make will produce the docset in that
-# directory and running "make install" will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
-# it at startup.
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
 # for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 GENERATE_DOCSET        = NO
 
-# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
-# feed. A documentation feed provides an umbrella under which multiple
-# documentation sets from a single provider (such as a company or product suite)
-# can be grouped.
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
 
 DOCSET_FEEDNAME        = "Doxygen generated docs"
 
-# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
-# should uniquely identify the documentation set bundle. This should be a
-# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
-# will append .docset to the name.
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
 
 DOCSET_BUNDLE_ID       = org.doxygen.Project
 
-# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
 # the documentation publisher. This should be a reverse domain-name style
 # string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
 
 DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
 
-# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher.
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
 
 DOCSET_PUBLISHER_NAME  = Publisher
 
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files
-# will be generated that can be used as input for tools like the
-# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
-# of the generated HTML documentation.
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 GENERATE_HTMLHELP      = NO
 
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
-# be used to specify the file name of the resulting .chm file. You
-# can add a path in front of the file if the result should not be
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
 # written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 CHM_FILE               =
 
-# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
-# be used to specify the location (absolute path including file name) of
-# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
-# the HTML help compiler on the generated index.hhp.
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler ( hhc.exe). If non-empty
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 HHC_LOCATION           =
 
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
-# controls if a separate .chi index file is generated (YES) or that
-# it should be included in the master .chm file (NO).
+# The GENERATE_CHI flag controls if a separate .chi index file is generated (
+# YES) or that it should be included in the master .chm file ( NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 GENERATE_CHI           = NO
 
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
-# is used to encode HtmlHelp index (hhk), content (hhc) and project file
-# content.
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 CHM_INDEX_ENCODING     =
 
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
-# controls whether a binary table of contents is generated (YES) or a
-# normal table of contents (NO) in the .chm file.
+# The BINARY_TOC flag controls whether a binary table of contents is generated (
+# YES) or a normal table of contents ( NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 BINARY_TOC             = NO
 
-# The TOC_EXPAND flag can be set to YES to add extra items for group members
-# to the contents of the HTML help documentation and to the tree view.
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 TOC_EXPAND             = NO
 
 # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
-# that can be used as input for Qt's qhelpgenerator to generate a
-# Qt Compressed Help (.qch) of the generated HTML documentation.
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 GENERATE_QHP           = NO
 
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
-# be used to specify the file name of the resulting .qch file.
-# The path specified is relative to the HTML output folder.
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
 QCH_FILE               =
 
-# The QHP_NAMESPACE tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#namespace
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
 QHP_NAMESPACE          = org.doxygen.Project
 
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
 QHP_VIRTUAL_FOLDER     = doc
 
-# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
-# add. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#custom-filters
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
 QHP_CUST_FILTER_NAME   =
 
-# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see
-# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
-# Qt Help Project / Custom Filters</a>.
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
 QHP_CUST_FILTER_ATTRS  =
 
 # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's
-# filter section matches.
-# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
-# Qt Help Project / Filter Attributes</a>.
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
 QHP_SECT_FILTER_ATTRS  =
 
-# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
-# be used to specify the location of Qt's qhelpgenerator.
-# If non-empty doxygen will try to run qhelpgenerator on the generated
-# .qhp file.
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
 QHG_LOCATION           =
 
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
-#  will be generated, which together with the HTML files, form an Eclipse help
-# plugin. To install this plugin and make it available under the help contents
-# menu in Eclipse, the contents of the directory containing the HTML and XML
-# files needs to be copied into the plugins directory of eclipse. The name of
-# the directory within the plugins directory should be the same as
-# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
-# the help appears.
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, which together with the HTML files form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 GENERATE_ECLIPSEHELP   = NO
 
-# A unique identifier for the eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have
-# this name.
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
 
 ECLIPSE_DOC_ID         = org.doxygen.Project
 
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
-# at top of each HTML page. The value NO (the default) enables the index and
-# the value YES disables it. Since the tabs have the same information as the
-# navigation tree you can set this option to NO if you already set
-# GENERATE_TREEVIEW to YES.
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 DISABLE_INDEX          = NO
 
 # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information.
-# If the tag value is set to YES, a side panel will be generated
-# containing a tree-like index structure (just like the one that
-# is generated for HTML Help). For this to work a browser that supports
-# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
-# Windows users are probably better off using the HTML help feature.
-# Since the tree basically has the same information as the tab index you
-# could consider to set DISABLE_INDEX to NO when enabling this option.
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 GENERATE_TREEVIEW      = NO
 
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
-# (range [0,1..20]) that doxygen will group on one line in the generated HTML
-# documentation. Note that a value of 0 will completely suppress the enum
-# values from appearing in the overview section.
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 ENUM_VALUES_PER_LINE   = 4
 
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
-# used to set the initial width (in pixels) of the frame in which the tree
-# is shown.
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 TREEVIEW_WIDTH         = 250
 
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
-# links to external symbols imported via tag files in a separate window.
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 EXT_LINKS_IN_WINDOW    = NO
 
-# Use this tag to change the font size of Latex formulas included
-# as images in the HTML documentation. The default is 10. Note that
-# when you change the font size after a successful doxygen run you need
-# to manually remove any form_*.png images from the HTML output directory
-# to force them to be regenerated.
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 FORMULA_FONTSIZE       = 10
 
 # Use the FORMULA_TRANSPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are
-# not supported properly for IE 6.0, but are supported on all modern browsers.
-# Note that when changing this option you need to delete any form_*.png files
-# in the HTML output before the changes have effect.
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 FORMULA_TRANSPARENT    = YES
 
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
-# (see http://www.mathjax.org) which uses client side Javascript for the
-# rendering instead of using prerendered bitmaps. Use this if you do not
-# have LaTeX installed or if you want to formulas look prettier in the HTML
-# output. When enabled you may also need to install MathJax separately and
-# configure the path to it using the MATHJAX_RELPATH option.
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 USE_MATHJAX            = NO
 
-# When MathJax is enabled you need to specify the location relative to the
-# HTML output directory using the MATHJAX_RELPATH option. The destination
-# directory should contain the MathJax.js script. For instance, if the mathjax
-# directory is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to
-# the MathJax Content Delivery Network so you can quickly see the result without
-# installing MathJax.
-# However, it is strongly recommended to install a local
-# copy of MathJax from http://www.mathjax.org before deployment.
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT         = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
 
 MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
 
-# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
-# names that should be enabled during MathJax rendering.
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
 
 MATHJAX_EXTENSIONS     =
 
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box
-# for the HTML output. The underlying search engine uses javascript
-# and DHTML and should work on any modern browser. Note that when using
-# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
-# (GENERATE_DOCSET) there is already a search function so this one should
-# typically be disabled. For large projects the javascript based search engine
-# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE       =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 SEARCHENGINE           = NO
 
 # When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a PHP enabled web server instead of at the web client
-# using Javascript. Doxygen will generate the search PHP script and index
-# file to put on the web server. The advantage of the server
-# based approach is that it scales better to large projects and allows
-# full text search. The disadvantages are that it is more difficult to setup
-# and does not have live searching capabilities.
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
 
 SERVER_BASED_SEARCH    = NO
 
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH        = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL       =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE        = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID     =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
+# to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS  =
+
 #---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
+# Configuration options related to the LaTeX output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
-# generate Latex output.
+# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
+# The default value is: YES.
 
 GENERATE_LATEX         = NO
 
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `latex' will be used as the default path.
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_OUTPUT           =
 
 # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked. If left blank `latex' will be used as the default command name.
-# Note that when enabling USE_PDFLATEX this option is only used for
-# generating bitmaps for formulas in the HTML output, but not in the
-# Makefile that is written to the output directory.
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_CMD_NAME         = latex
 
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
-# generate index for LaTeX. If left blank `makeindex' will be used as the
-# default command name.
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 MAKEINDEX_CMD_NAME     = makeindex
 
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
-# LaTeX documents. This may be useful for small projects and may help to
-# save some trees in general.
+# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 COMPACT_LATEX          = NO
 
-# The PAPER_TYPE tag can be used to set the paper type that is used
-# by the printer. Possible values are: a4, letter, legal and
-# executive. If left blank a4wide will be used.
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 PAPER_TYPE             = a4wide
 
-# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX
-# packages that should be included in the LaTeX output.
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. To get the times font for
+# instance you can specify
+# EXTRA_PACKAGES=times
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 EXTRA_PACKAGES         =
 
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
-# the generated latex document. The header should contain everything until
-# the first chapter. If it is left blank doxygen will generate a
-# standard header. Notice: only use this tag if you know what you are doing!
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty string,
+# for the replacement values of the other commands the user is referred to
+# HTML_HEADER.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_HEADER           =
 
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
-# the generated latex document. The footer should contain everything after
-# the last chapter. If it is left blank doxygen will generate a
-# standard footer. Notice: only use this tag if you know what you are doing!
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer. See
+# LATEX_HEADER for more information on how to generate a default footer and what
+# special commands can be used inside the footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_FOOTER           =
 
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will
-# contain links (just like the HTML output) instead of page references
-# This makes the output suitable for online browsing using a pdf viewer.
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES      =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 PDF_HYPERLINKS         = NO
 
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
-# plain latex in the generated Makefile. Set this option to YES to get a
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES to get a
 # higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 USE_PDFLATEX           = NO
 
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
-# command to the generated LaTeX files. This will instruct LaTeX to keep
-# running if errors occur, instead of asking the user for help.
-# This option is also used when generating formulas in HTML.
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_BATCHMODE        = NO
 
-# If LATEX_HIDE_INDICES is set to YES then doxygen will not
-# include the index chapters (such as File Index, Compound Index, etc.)
-# in the output.
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_HIDE_INDICES     = NO
 
-# If LATEX_SOURCE_CODE is set to YES then doxygen will include
-# source code with syntax highlighting in the LaTeX output.
-# Note that which sources are shown also depends on other settings
-# such as SOURCE_BROWSER.
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_SOURCE_CODE      = NO
 
 # The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
-# http://en.wikipedia.org/wiki/BibTeX for more info.
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_BIB_STYLE        = plain
 
 #---------------------------------------------------------------------------
-# configuration options related to the RTF output
+# Configuration options related to the RTF output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
-# The RTF output is optimized for Word 97 and may not look very pretty with
-# other RTF readers or editors.
+# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
 
 GENERATE_RTF           = NO
 
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `rtf' will be used as the default path.
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
 
 RTF_OUTPUT             =
 
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
-# RTF documents. This may be useful for small projects and may help to
-# save some trees in general.
+# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
 
 COMPACT_RTF            = NO
 
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
-# will contain hyperlink fields. The RTF file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using WORD or other
-# programs which support those fields.
-# Note: wordpad (write) and others do not support links.
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
 
 RTF_HYPERLINKS         = NO
 
-# Load style sheet definitions from file. Syntax is similar to doxygen's
-# config file, i.e. a series of assignments. You only have to provide
-# replacements, missing definitions are set to their default value.
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements,
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
 
 RTF_STYLESHEET_FILE    =
 
-# Set optional variables used in the generation of an rtf document.
-# Syntax is similar to doxygen's config file.
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
 
 RTF_EXTENSIONS_FILE    =
 
 #---------------------------------------------------------------------------
-# configuration options related to the man page output
+# Configuration options related to the man page output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
-# generate man pages
+# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
 
 GENERATE_MAN           = NO
 
-# The MAN_OUTPUT tag is used to specify where the man pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `man' will be used as the default path.
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
 
 MAN_OUTPUT             =
 
-# The MAN_EXTENSION tag determines the extension that is added to
-# the generated man pages (default is the subroutine's section .3)
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
 
 MAN_EXTENSION          =
 
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
-# then it will generate one additional man file for each entity
-# documented in the real man page(s). These additional files
-# only source the real man page, but without them the man command
-# would be unable to find the correct page. The default is NO.
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
+# MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR             =
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
 
 MAN_LINKS              = NO
 
 #---------------------------------------------------------------------------
-# configuration options related to the XML output
+# Configuration options related to the XML output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_XML tag is set to YES Doxygen will
-# generate an XML file that captures the structure of
-# the code including all documentation.
+# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
 
 GENERATE_XML           = NO
 
-# The XML_OUTPUT tag is used to specify where the XML pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `xml' will be used as the default path.
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
 
 XML_OUTPUT             = xml
 
-# The XML_SCHEMA tag can be used to specify an XML schema,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
+# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
 
-XML_SCHEMA             =
+XML_PROGRAMLISTING     = YES
 
-# The XML_DTD tag can be used to specify an XML DTD,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
 
-XML_DTD                =
+# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
 
-# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
-# dump the program listings (including syntax highlighting
-# and cross-referencing information) to the XML output. Note that
-# enabling this will significantly increase the size of the XML output.
+GENERATE_DOCBOOK       = NO
 
-XML_PROGRAMLISTING     = YES
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT         = docbook
+
+# If the DOCBOOK_PROGRAMLISTING tag is set to YES doxygen will include the
+# program listings (including syntax highlighting and cross-referencing
+# information) to the DOCBOOK output. Note that enabling this will significantly
+# increase the size of the DOCBOOK output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_PROGRAMLISTING = NO
 
 #---------------------------------------------------------------------------
-# configuration options for the AutoGen Definitions output
+# Configuration options for the AutoGen Definitions output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
-# generate an AutoGen Definitions (see autogen.sf.net) file
-# that captures the structure of the code including all
-# documentation. Note that this feature is still experimental
-# and incomplete at the moment.
+# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
+# Definitions (see http://autogen.sf.net) file that captures the structure of
+# the code including all documentation. Note that this feature is still
+# experimental and incomplete at the moment.
+# The default value is: NO.
 
 GENERATE_AUTOGEN_DEF   = NO
 
 #---------------------------------------------------------------------------
-# configuration options related to the Perl module output
+# Configuration options related to the Perl module output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_PERLMOD tag is set to YES Doxygen will
-# generate a Perl module file that captures the structure of
-# the code including all documentation. Note that this
-# feature is still experimental and incomplete at the
-# moment.
+# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
 
 GENERATE_PERLMOD       = NO
 
-# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
-# the necessary Makefile rules, Perl scripts and LaTeX code to be able
-# to generate PDF and DVI output from the Perl module output.
+# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
 
 PERLMOD_LATEX          = NO
 
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
-# nicely formatted so it can be parsed by a human reader.
-# This is useful
-# if you want to understand what is going on.
-# On the other hand, if this
-# tag is set to NO the size of the Perl module output will be much smaller
-# and Perl will parse it just the same.
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
 
 PERLMOD_PRETTY         = YES
 
-# The names of the make variables in the generated doxyrules.make file
-# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
-# This is useful so different doxyrules.make files included by the same
-# Makefile don't overwrite each other's variables.
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
 
 PERLMOD_MAKEVAR_PREFIX =
 
@@ -1477,106 +1933,128 @@ PERLMOD_MAKEVAR_PREFIX =
 # Configuration options related to the preprocessor
 #---------------------------------------------------------------------------
 
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
-# evaluate all C-preprocessor directives found in the sources and include
-# files.
+# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
 
 ENABLE_PREPROCESSING   = YES
 
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
-# names in the source code. If set to NO (the default) only conditional
-# compilation will be performed. Macro expansion can be done in a controlled
-# way by setting EXPAND_ONLY_PREDEF to YES.
+# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
+# in the source code. If set to NO only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 MACRO_EXPANSION        = YES
 
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
-# then the macro expansion is limited to the macros specified with the
-# PREDEFINED and EXPAND_AS_DEFINED tags.
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 EXPAND_ONLY_PREDEF     = NO
 
-# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
-# pointed to by INCLUDE_PATH will be searched when a #include is found.
+# If the SEARCH_INCLUDES tag is set to YES the includes files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 SEARCH_INCLUDES        = YES
 
 # The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by
-# the preprocessor.
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
 
 INCLUDE_PATH           =
 
 # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
 # patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will
-# be used.
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 INCLUDE_FILE_PATTERNS  =
 
-# The PREDEFINED tag can be used to specify one or more macro names that
-# are defined before the preprocessor is started (similar to the -D option of
-# gcc). The argument of the tag is a list of macros of the form: name
-# or name=definition (no spaces). If the definition and the = are
-# omitted =1 is assumed. To prevent a macro definition from being
-# undefined via #undef or recursively expanded use the := operator
-# instead of the = operator.
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 PREDEFINED             =
 
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
-# this tag can be used to specify a list of macro names that should be expanded.
-# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition that
-# overrules the definition found in the source code.
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 EXPAND_AS_DEFINED      =
 
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
-# doxygen's preprocessor will remove all references to function-like macros
-# that are alone on a line, have an all uppercase name, and do not end with a
-# semicolon, because these will confuse the parser if not removed.
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have
+# an all uppercase name, and do not end with a semicolon. Such function macros
+# are typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 SKIP_FUNCTION_MACROS   = YES
 
 #---------------------------------------------------------------------------
-# Configuration::additions related to external references
+# Configuration options related to external references
 #---------------------------------------------------------------------------
 
-# The TAGFILES option can be used to specify one or more tagfiles. For each
-# tag file the location of the external documentation should be added. The
-# format of a tag file without this location is as follows:
-#
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
 # TAGFILES = file1 file2 ...
 # Adding location for the tag files is done as follows:
-#
 # TAGFILES = file1=loc1 "file2 = loc2" ...
-# where "loc1" and "loc2" can be relative or absolute paths
-# or URLs. Note that each tag file must have a unique name (where the name does
-# NOT include the path). If a tag file is not located in the directory in which
-# doxygen is run, you must also specify the path to the tagfile here.
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have a unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
 
 TAGFILES               =
 
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create
-# a tag file that is based on the input files it reads.
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
 
 GENERATE_TAGFILE       =
 
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed
-# in the class index. If set to NO only the inherited external classes
-# will be listed.
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed in the
+# class index. If set to NO only the inherited external classes will be listed.
+# The default value is: NO.
 
 ALLEXTERNALS           = NO
 
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will
-# be listed.
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
+# the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
 
 EXTERNAL_GROUPS        = YES
 
+# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES         = YES
+
 # The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of `which perl').
+# interpreter (i.e. the result of 'which perl').
+# The default file (with absolute path) is: /usr/bin/perl.
 
 PERL_PATH              =
 
@@ -1584,222 +2062,304 @@ PERL_PATH              =
 # Configuration options related to the dot tool
 #---------------------------------------------------------------------------
 
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
-# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base
-# or super classes. Setting the tag to NO turns the diagrams off. Note that
-# this option also works with HAVE_DOT disabled, but it is recommended to
-# install and use dot, since it yields more powerful graphs.
+# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
 
 CLASS_DIAGRAMS         = YES
 
 # You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see
-# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# command. Doxygen will then run the mscgen tool (see:
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
 # documentation. The MSCGEN_PATH tag allows you to specify the directory where
 # the mscgen tool resides. If left empty the tool is assumed to be found in the
 # default search path.
 
 MSCGEN_PATH            =
 
-# If set to YES, the inheritance and collaboration graphs will hide
-# inheritance and usage relations if the target is undocumented
-# or is not a class.
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
+
+DIA_PATH               =
+
+# If set to YES, the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
 
 HIDE_UNDOC_RELATIONS   = YES
 
 # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz, a graph visualization
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section
-# have no effect if this option is set to NO (the default)
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO
+# The default value is: YES.
 
 HAVE_DOT               = NO
 
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
-# allowed to run in parallel. When set to 0 (the default) doxygen will
-# base this on the number of processors available in the system. You can set it
-# explicitly to a value larger than 0 to get control over the balance
-# between CPU load and processing speed.
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_NUM_THREADS        = 0
 
-# By default doxygen will use the Helvetica font for all dot files that
-# doxygen generates. When you want a differently looking font you can specify
-# the font name using DOT_FONTNAME. You need to make sure dot is able to find
-# the font, which can be done by putting it in a standard location or by setting
-# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
-# directory containing the font.
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_FONTNAME           = FreeSans
 
-# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
-# The default size is 10pt.
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_FONTSIZE           = 10
 
-# By default doxygen will tell dot to use the Helvetica font.
-# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
-# set the path where dot can find it.
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_FONTPATH           =
 
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect inheritance relations. Setting this tag to YES will force the
-# CLASS_DIAGRAMS tag to NO.
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 CLASS_GRAPH            = YES
 
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect implementation dependencies (inheritance, containment, and
-# class references variables) of the class with other documented classes.
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 COLLABORATION_GRAPH    = YES
 
-# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for groups, showing the direct groups dependencies
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 GROUP_GRAPHS           = YES
 
 # If the UML_LOOK tag is set to YES doxygen will generate inheritance and
 # collaboration diagrams in a style similar to the OMG's Unified Modeling
 # Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 UML_LOOK               = NO
 
-# If the UML_LOOK tag is enabled, the fields and methods are shown inside
-# the class node. If there are many fields or methods and many nodes the
-# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
-# threshold limits the number of items for each type to make the size more
-# managable. Set this to 0 for no limit. Note that the threshold may be
-# exceeded by 50% before the limit is enforced.
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 UML_LIMIT_NUM_FIELDS   = 10
 
-# If set to YES, the inheritance and collaboration graphs will show the
-# relations between templates and their instances.
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 TEMPLATE_RELATIONS     = NO
 
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
-# tags are set to YES then doxygen will generate a graph for each documented
-# file showing the direct and indirect include dependencies of the file with
-# other documented files.
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 INCLUDE_GRAPH          = YES
 
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
-# documented header file showing the documented files that directly or
-# indirectly include this file.
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 INCLUDED_BY_GRAPH      = YES
 
-# If the CALL_GRAPH and HAVE_DOT options are set to YES then
-# doxygen will generate a call dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable call graphs
-# for selected functions only using the \callgraph command.
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 CALL_GRAPH             = NO
 
-# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
-# doxygen will generate a caller dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable caller
-# graphs for selected functions only using the \callergraph command.
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 CALLER_GRAPH           = NO
 
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
-# will generate a graphical hierarchy of all classes instead of a textual one.
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a graphical
+# hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 GRAPHICAL_HIERARCHY    = YES
 
-# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
-# then doxygen will show the dependencies a directory has on other directories
-# in a graphical way. The dependency relations are determined by the #include
-# relations between the files in the directories.
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DIRECTORY_GRAPH        = YES
 
 # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot. Possible values are svg, png, jpg, or gif.
-# If left blank png will be used. If you choose svg you need to set
-# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible in IE 9+ (other browsers do not have this requirement).
+# generated by dot.
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, png:cairo, png:cairo:cairo, png:cairo:gd, png:gd,
+# png:gd:gd, jpg, jpg:cairo, jpg:cairo:gd, jpg:gd, jpg:gd:gd, gif, gif:cairo,
+# gif:cairo:gd, gif:gd, gif:gd:gd and svg.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_IMAGE_FORMAT       = png
 
 # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
 # enable generation of interactive SVG images that allow zooming and panning.
-# Note that this requires a modern browser other than Internet Explorer.
-# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
-# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible. Older versions of IE do not have SVG support.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 INTERACTIVE_SVG        = NO
 
-# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
 # found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_PATH               =
 
 # The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the
-# \dotfile command).
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOTFILE_DIRS           =
 
 # The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the
-# \mscfile command).
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
 
 MSCFILE_DIRS           =
 
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
-# nodes that will be shown in the graph. If the number of nodes in a graph
-# becomes larger than this value, doxygen will truncate the graph, which is
-# visualized by representing a node as a red box. Note that doxygen if the
-# number of direct children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
-# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS           =
+
+# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
+# path where java can find the plantuml.jar file. If left blank, it is assumed
+# PlantUML is not used or called during a preprocessing step. Doxygen will
+# generate a warning when it encounters a \startuml command in this case and
+# will not generate output for the diagram.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+PLANTUML_JAR_PATH      =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_GRAPH_MAX_NODES    = 50
 
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
-# graphs generated by dot. A depth value of 3 means that only nodes reachable
-# from the root by following a path via at most 3 edges will be shown. Nodes
-# that lay further from the root node will be omitted. Note that setting this
-# option to 1 or 2 may greatly reduce the computation time needed for large
-# code bases. Also note that the size of a graph can be further restricted by
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lay
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
 # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 MAX_DOT_GRAPH_DEPTH    = 0
 
 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not
-# seem to support this out of the box. Warning: Depending on the platform used,
-# enabling this option may lead to badly anti-aliased labels on the edges of
-# a graph (i.e. they become hard to read).
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_TRANSPARENT        = YES
 
 # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
 # files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10)
-# support this, this feature is disabled by default.
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_MULTI_TARGETS      = NO
 
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
-# generate a legend page explaining the meaning of the various boxes and
-# arrows in the dot generated graphs.
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 GENERATE_LEGEND        = YES
 
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
-# remove the intermediate dot files that are used to generate
-# the various graphs.
+# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_CLEANUP            = YES
diff --git a/doc/examples/cookbook/cblib.py b/doc/examples/cookbook/cblib.py
index e2ff712..4202274 100644
--- a/doc/examples/cookbook/cblib.py
+++ b/doc/examples/cookbook/cblib.py
@@ -1,4 +1,3 @@
-from __future__ import division
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +13,7 @@ from __future__ import division
 #
 ##############################################################################
 
+from __future__ import division, print_function
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -30,6 +30,12 @@ from esys.escript.pdetools import Locator
 import numpy as np
 import pylab as pl
 
+try:
+    from mpl_toolkits.natgrid import _natgrid
+    HAVE_NATGRID=True
+except ImportError:
+    HAVE_NATGRID=False
+
 def toXYTuple(coords):
     """
     extracts the X and Y coordinates as two ```numpy`` arrays from the escript coordinates ```coords``` as produced by a ``.getX`` call.
diff --git a/doc/examples/cookbook/cblib1.py b/doc/examples/cookbook/cblib1.py
index cd33ece..63dd144 100644
--- a/doc/examples/cookbook/cblib1.py
+++ b/doc/examples/cookbook/cblib1.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-from __future__ import division
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import division
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/cookbook/cerjen.py b/doc/examples/cookbook/cerjen.py
index 29e4326..05fed0a 100644
--- a/doc/examples/cookbook/cerjen.py
+++ b/doc/examples/cookbook/cerjen.py
@@ -1,3 +1,21 @@
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+from __future__ import division, print_function
+
 import pylab as pl
 import numpy as np
 
diff --git a/doc/examples/cookbook/example01c.py b/doc/examples/cookbook/example01c.py
index 5327ee0..4c109ea 100644
--- a/doc/examples/cookbook/example01c.py
+++ b/doc/examples/cookbook/example01c.py
@@ -50,9 +50,9 @@ except ImportError:
 
 ########################################################MPI WORLD CHECK
 if getMPISizeWorld() > 1:
-	import sys
-	print("This example will not run in an MPI world.")
-	sys.exit(0)
+    import sys
+    print("This example will not run in an MPI world.")
+    sys.exit(0)
 
 if HAVE_FINLEY:
     #################################################ESTABLISHING VARIABLES
diff --git a/doc/examples/cookbook/example03a.py b/doc/examples/cookbook/example03a.py
index b706b5c..4099bb7 100644
--- a/doc/examples/cookbook/example03a.py
+++ b/doc/examples/cookbook/example03a.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
@@ -47,7 +46,7 @@ from esys.escript.unitsSI import *
 import pylab as pl #Plotting package.
 import numpy as np #Array package.
 import os #This package is necessary to handle saving our data.
-from cblib import toXYTuple
+from cblib import toXYTuple, HAVE_NATGRID
 try:
     from esys.finley import Rectangle
     HAVE_FINLEY = True
@@ -60,12 +59,8 @@ if getMPISizeWorld() > 1:
         print("This example will not run in an MPI world.")
         sys.exit(0)
 
-try:
-    from mpl_toolkits.natgrid import _natgrid
-    HAVE_NATGRID=True
-except ImportError:
-    HAVE_NATGRID=False
-
+if not HAVE_NATGRID:
+    print("This example requires that natgrid is available to matplotlib")
 
 if HAVE_FINLEY and HAVE_NATGRID:
     #################################################ESTABLISHING VARIABLES
diff --git a/doc/examples/cookbook/example03b.py b/doc/examples/cookbook/example03b.py
index 09a25c5..626a16d 100644
--- a/doc/examples/cookbook/example03b.py
+++ b/doc/examples/cookbook/example03b.py
@@ -67,7 +67,7 @@ if HAVE_FINLEY:
     Ti=2273.*Celsius # Kelvin -the starting temperature of our RHS Block
     rhoi = 2750*kg/m**3 #kg/m^{3} density of granite
     cpi = 790.*J/(kg*K) #j/Kg.K thermal capacity
-    rhocpi = rhoi*cpi	#DENSITY * SPECIFIC HEAT
+    rhocpi = rhoi*cpi   #DENSITY * SPECIFIC HEAT
     kappai=2.2*W/m/K #watts/m.K thermal conductivity
     ## Country Rock Variables - Sandstone
     Tc = 473*Celsius # Kelvin #the starting temperature of our country rock
diff --git a/doc/examples/cookbook/example04a.py b/doc/examples/cookbook/example04a.py
index 8f378ef..c47edd7 100644
--- a/doc/examples/cookbook/example04a.py
+++ b/doc/examples/cookbook/example04a.py
@@ -47,9 +47,9 @@ except ImportError:
 
 ########################################################MPI WORLD CHECK
 if getMPISizeWorld() > 1:
-	import sys
-	print("This example will not run in an MPI world.")
-	sys.exit(0)
+    import sys
+    print("This example will not run in an MPI world.")
+    sys.exit(0)
 
 if HAVE_FINLEY:
     # make sure path exists 
diff --git a/doc/examples/cookbook/example04b.py b/doc/examples/cookbook/example04b.py
index e1b5188..43e4db5 100644
--- a/doc/examples/cookbook/example04b.py
+++ b/doc/examples/cookbook/example04b.py
@@ -1,4 +1,3 @@
-from __future__ import division, print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +13,7 @@ from __future__ import division, print_function
 #
 ##############################################################################
 
+from __future__ import division, print_function
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -39,7 +39,7 @@ from esys.escript import *
 from esys.escript.unitsSI import *
 from esys.escript.linearPDEs import LinearPDE
 import pylab as pl #Plotting package
-from cblib import toRegGrid
+from cblib import toRegGrid, HAVE_NATGRID
 import os
 try:
     # This imports the rectangle domain function 
@@ -51,9 +51,9 @@ except ImportError:
 
 ########################################################MPI WORLD CHECK
 if getMPISizeWorld() > 1:
-	import sys
-	print("This example will not run in an MPI world.")
-	sys.exit(0)
+    import sys
+    print("This example will not run in an MPI world.")
+    sys.exit(0)
 
 try:
     from mpl_toolkits.natgrid import _natgrid
@@ -61,6 +61,9 @@ try:
 except ImportError:
     HAVE_NATGRID=False
 
+if not HAVE_NATGRID:
+    print("This example requires that natgrid is available to matplotlib")
+
 if HAVE_FINLEY and HAVE_NATGRID:
     # make sure path exists
     save_path= os.path.join("data","example04")
diff --git a/doc/examples/cookbook/example05a.py b/doc/examples/cookbook/example05a.py
index e97aa4d..20fc9f4 100644
--- a/doc/examples/cookbook/example05a.py
+++ b/doc/examples/cookbook/example05a.py
@@ -1,4 +1,3 @@
-from __future__ import division, print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +13,7 @@ from __future__ import division, print_function
 #
 ##############################################################################
 
+from __future__ import division, print_function
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -40,7 +40,7 @@ from math import * # math package
 from esys.escript import *
 from esys.escript.unitsSI import *
 from esys.escript.linearPDEs import LinearPDE
-from cblib import toRegGrid
+from cblib import toRegGrid, HAVE_NATGRID
 import pylab as pl #Plotting package
 
 try:
@@ -56,11 +56,8 @@ if getMPISizeWorld() > 1:
         print("This example will not run in an MPI world.")
         sys.exit(0)
 
-try:
-    from mpl_toolkits.natgrid import _natgrid
-    HAVE_NATGRID=True
-except ImportError:
-    HAVE_NATGRID=False
+if not HAVE_NATGRID:
+    print("This example requires that natgrid is available to matplotlib")
 
 if HAVE_FINLEY and HAVE_NATGRID:
     #################################################ESTABLISHING VARIABLES
diff --git a/doc/examples/cookbook/example05b.py b/doc/examples/cookbook/example05b.py
index 027b610..2a93f62 100644
--- a/doc/examples/cookbook/example05b.py
+++ b/doc/examples/cookbook/example05b.py
@@ -41,7 +41,7 @@ from esys.escript import *
 from esys.escript.unitsSI import *
 from esys.escript.linearPDEs import LinearPDE
 from esys.escript.pdetools import Projector
-from cblib import toRegGrid
+from cblib import toRegGrid, HAVE_NATGRID
 import pylab as pl #Plotting package
 
 try:
@@ -56,7 +56,12 @@ if getMPISizeWorld() > 1:
         import sys
         print("This example will not run in an MPI world.")
         sys.exit(0)
-if HAVE_FINLEY:
+
+if not HAVE_NATGRID:
+    print("This example requires natgrid to be available to matplotlib")
+
+
+if HAVE_FINLEY and HAVE_NATGRID:
     #################################################ESTABLISHING VARIABLES
     #set modal to 1 for a syncline or -1 for an anticline structural 
     #configuration
diff --git a/doc/examples/cookbook/example05c.py b/doc/examples/cookbook/example05c.py
index 00c620e..4af2bb6 100644
--- a/doc/examples/cookbook/example05c.py
+++ b/doc/examples/cookbook/example05c.py
@@ -42,7 +42,7 @@ from esys.escript import *
 from esys.escript.unitsSI import *
 from esys.escript.linearPDEs import LinearPDE
 from esys.escript.pdetools import Projector
-from cblib import toRegGrid, subsample
+from cblib import toRegGrid, subsample, HAVE_NATGRID
 import pylab as pl #Plotting package
 import numpy as np
 
@@ -59,7 +59,10 @@ if getMPISizeWorld() > 1:
         print("This example will not run in an MPI world.")
         sys.exit(0)
 
-if HAVE_FINLEY:
+if not HAVE_NATGRID:
+    print("This example requires that natgrid is available to matplotlib")
+
+if HAVE_FINLEY and HAVE_NATGRID:
     #################################################ESTABLISHING VARIABLES
     #set modal to 1 for a syncline or -1 for an anticline structural 
     #configuration
diff --git a/doc/examples/cookbook/example06.py b/doc/examples/cookbook/example06.py
index 247c102..7449fb2 100644
--- a/doc/examples/cookbook/example06.py
+++ b/doc/examples/cookbook/example06.py
@@ -1,4 +1,3 @@
-from __future__ import division, print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +13,7 @@ from __future__ import division, print_function
 #
 ##############################################################################
 
+from __future__ import division, print_function
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -37,7 +37,7 @@ from esys.pycad.gmsh import Design
 from esys.escript import *
 import numpy as np
 import pylab as pl #Plotting package
-from cblib import toRegGrid, subsample
+from cblib import toRegGrid, subsample, HAVE_NATGRID
 from esys.escript.unitsSI import *
 from esys.escript.linearPDEs import LinearPDE
 import os, sys
@@ -55,7 +55,10 @@ if getMPISizeWorld() > 1:
         print("This example will not run in an MPI world.")
         sys.exit(0)
 
-if HAVE_FINLEY:
+if not HAVE_NATGRID:
+    print("This example requires that natgrid is available to matplotlib")
+
+if HAVE_FINLEY and HAVE_NATGRID:
     #################################################ESTABLISHING VARIABLES
     # where to put output files
     save_path= os.path.join("data","example06")
diff --git a/doc/examples/cookbook/example08b.py b/doc/examples/cookbook/example08b.py
index b853fa4..ff2eb24 100644
--- a/doc/examples/cookbook/example08b.py
+++ b/doc/examples/cookbook/example08b.py
@@ -87,12 +87,6 @@ if HAVE_FINLEY:
     print("Time step size= ",h, "Expected number of outputs= ",tend/h)
 
     U0=0.1 # amplitude of point source
-    ls=500   # length of the source
-    source=np.zeros(ls,'float') # source array
-    decay1=np.zeros(ls,'float') # decay curve one
-    decay2=np.zeros(ls,'float') # decay curve two
-    time=np.zeros(ls,'float')   # time values
-    g=np.log(0.01)/ls
 
     dfeq=50 #Dominant Frequency
     a = 2.0 * (np.pi * dfeq)**2.0
@@ -100,7 +94,13 @@ if HAVE_FINLEY:
     srclength = 5. * t0
     ls = int(srclength/h)
     print('source length',ls)
+
     source=np.zeros(ls,'float') # source array
+    decay1=np.zeros(ls,'float') # decay curve one
+    decay2=np.zeros(ls,'float') # decay curve two
+    time=np.zeros(ls,'float')   # time values
+    g=np.log(0.01)/ls
+
     ampmax=0
     for it in range(0,ls):
         t = it*h
@@ -113,7 +113,7 @@ if HAVE_FINLEY:
         #source[t]=np.exp(g*t)*U0*np.sin(2.*np.pi*t/(0.75*ls))*(np.exp(-.1*g*t)-1)
         #decay1[t]=np.exp(g*t)
         #decay2[t]=(np.exp(-.1*g*t)-1)
-        time[t]=t*h
+        time[it]=t*h
     #tdecay=decay1*decay2*U0
     #decay1=decay1*U0; decay2=decay2*U0
     pl.clf(); 
diff --git a/doc/examples/cookbook/example08c.py b/doc/examples/cookbook/example08c.py
index 13d213e..af95fbb 100644
--- a/doc/examples/cookbook/example08c.py
+++ b/doc/examples/cookbook/example08c.py
@@ -110,20 +110,20 @@ if HAVE_FINLEY:
     print("Time step size= ",h, "Expected number of outputs= ",tend/h)
 
     U0=0.1 # amplitude of point source
-    ls=500   # length of the source
-    source=np.zeros(ls,'float') # source array
-    decay1=np.zeros(ls,'float') # decay curve one
-    decay2=np.zeros(ls,'float') # decay curve two
-    time=np.zeros(ls,'float')   # time values
-    g=np.log(0.01)/ls
-
     dfeq=50 #Dominant Frequency
     a = 2.0 * (np.pi * dfeq)**2.0
     t0 = 5.0 / (2.0 * np.pi * dfeq)
     srclength = 5. * t0
+
     ls = int(srclength/h)
     print('source length',ls)
+
     source=np.zeros(ls,'float') # source array
+    decay1=np.zeros(ls,'float') # decay curve one
+    decay2=np.zeros(ls,'float') # decay curve two
+    time=np.zeros(ls,'float')   # time values
+    g=np.log(0.01)/ls
+
     ampmax=0
     for it in range(0,ls):
         t = it*h
@@ -132,7 +132,7 @@ if HAVE_FINLEY:
         source[it] = -2. * a * tt * dum1
         if (abs(source[it]) > ampmax):
             ampmax = abs(source[it])
-        time[t]=t*h
+        time[it]=t*h
 
     ####################################################DOMAIN CONSTRUCTION
     # Domain Corners
diff --git a/doc/examples/cookbook/example09a.py b/doc/examples/cookbook/example09a.py
index 4d7d539..158adfb 100644
--- a/doc/examples/cookbook/example09a.py
+++ b/doc/examples/cookbook/example09a.py
@@ -97,20 +97,20 @@ if HAVE_FINLEY:
 
     ####################################################CREATING THE SOURCE FUNCTION
     U0=0.1 # amplitude of point source
-    ls=500   # length of the source
-    source=np.zeros(ls,'float') # source array
-    decay1=np.zeros(ls,'float') # decay curve one
-    decay2=np.zeros(ls,'float') # decay curve two
-    time=np.zeros(ls,'float')   # time values
-    g=np.log(0.01)/ls
-
     dfeq=50 #Dominant Frequency
     a = 2.0 * (np.pi * dfeq)**2.0
     t0 = 5.0 / (2.0 * np.pi * dfeq)
     srclength = 5. * t0
+
     ls = int(srclength/h)
     print('source length',ls)
+
     source=np.zeros(ls,'float') # source array
+    decay1=np.zeros(ls,'float') # decay curve one
+    decay2=np.zeros(ls,'float') # decay curve two
+    time=np.zeros(ls,'float')   # time values
+    g=np.log(0.01)/ls
+
     ampmax=0
     for it in range(0,ls):
         t = it*h
@@ -119,7 +119,7 @@ if HAVE_FINLEY:
         source[it] = -2. * a * tt * dum1
         if (abs(source[it]) > ampmax):
             ampmax = abs(source[it])
-        time[t]=t*h
+        time[it]=t*h
 
     # will introduce a spherical source at middle left of bottom face
     xc=[mx/2,my/2,0]
diff --git a/doc/examples/cookbook/example09b.py b/doc/examples/cookbook/example09b.py
index 9b0269d..e8b779c 100644
--- a/doc/examples/cookbook/example09b.py
+++ b/doc/examples/cookbook/example09b.py
@@ -96,20 +96,20 @@ if HAVE_FINLEY:
 
     ####################################################CREATING THE SOURCE FUNCTION
     U0=0.1 # amplitude of point source
-    ls=500   # length of the source
-    source=np.zeros(ls,'float') # source array
-    decay1=np.zeros(ls,'float') # decay curve one
-    decay2=np.zeros(ls,'float') # decay curve two
-    time=np.zeros(ls,'float')   # time values
-    g=np.log(0.01)/ls
-
     dfeq=50 #Dominant Frequency
     a = 2.0 * (np.pi * dfeq)**2.0
     t0 = 5.0 / (2.0 * np.pi * dfeq)
     srclength = 5. * t0
+
     ls = int(srclength/h)
     print('source length',ls)
+
     source=np.zeros(ls,'float') # source array
+    decay1=np.zeros(ls,'float') # decay curve one
+    decay2=np.zeros(ls,'float') # decay curve two
+    time=np.zeros(ls,'float')   # time values
+    g=np.log(0.01)/ls
+
     ampmax=0
     for it in range(0,ls):
         t = it*h
@@ -118,7 +118,7 @@ if HAVE_FINLEY:
         source[it] = -2. * a * tt * dum1
         if (abs(source[it]) > ampmax):
             ampmax = abs(source[it])
-        time[t]=t*h
+        time[it]=t*h
 
     # will introduce a spherical source at middle left of bottom face
     xc=[mx/2,my/2,0]
diff --git a/doc/examples/cookbook/example09c.py b/doc/examples/cookbook/example09c.py
index fa7befc..58cf640 100644
--- a/doc/examples/cookbook/example09c.py
+++ b/doc/examples/cookbook/example09c.py
@@ -110,20 +110,20 @@ if HAVE_FINLEY:
     print("Time step size= ",h, "Expected number of outputs= ",tend/h)
 
     U0=0.1 # amplitude of point source
-    ls=500   # length of the source
-    source=np.zeros(ls,'float') # source array
-    decay1=np.zeros(ls,'float') # decay curve one
-    decay2=np.zeros(ls,'float') # decay curve two
-    time=np.zeros(ls,'float')   # time values
-    g=np.log(0.01)/ls
-
     dfeq=50 #Dominant Frequency
     a = 2.0 * (np.pi * dfeq)**2.0
     t0 = 5.0 / (2.0 * np.pi * dfeq)
     srclength = 5. * t0
+
     ls = int(srclength/h)
     print('source length',ls)
+
     source=np.zeros(ls,'float') # source array
+    decay1=np.zeros(ls,'float') # decay curve one
+    decay2=np.zeros(ls,'float') # decay curve two
+    time=np.zeros(ls,'float')   # time values
+    g=np.log(0.01)/ls
+
     ampmax=0
     for it in range(0,ls):
         t = it*h
@@ -132,7 +132,7 @@ if HAVE_FINLEY:
         source[it] = -2. * a * tt * dum1
         if (abs(source[it]) > ampmax):
             ampmax = abs(source[it])
-        time[t]=t*h
+        time[it]=t*h
 
     # will introduce a spherical source at middle left of bottom face
     xc=[150,0]
diff --git a/doc/examples/cookbook/example09m.py b/doc/examples/cookbook/example09m.py
index df81b25..e25a9c8 100644
--- a/doc/examples/cookbook/example09m.py
+++ b/doc/examples/cookbook/example09m.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
@@ -151,6 +150,8 @@ if HAVE_FINLEY:
 
     d.setScriptFileName(os.path.join(save_path,"example09m.geo"))
     d.setMeshFileName(os.path.join(save_path,"example09m.msh"))
+    if testing:
+        d.setOptions(optimize_quality=0)
     #
     #  make the domain:
     #
@@ -169,6 +170,8 @@ if HAVE_FINLEY:
     cmplx_domain=layer_cake(domaindes,xwidth,ywidth,intfaces)
     cmplx_domain.setScriptFileName(os.path.join(save_path,"example09lc.geo"))
     cmplx_domain.setMeshFileName(os.path.join(save_path,"example09lc.msh"))
+    if testing:
+        cmplx_domain.setOptions(optimize_quality=0)
     dcmplx=MakeDomain(cmplx_domain)
     dcmplx.write(os.path.join(save_path,"example09lc.fly"))
 
diff --git a/doc/examples/cookbook/example09n.py b/doc/examples/cookbook/example09n.py
index b4eea32..17d86de 100644
--- a/doc/examples/cookbook/example09n.py
+++ b/doc/examples/cookbook/example09n.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/cookbook/example10a.py b/doc/examples/cookbook/example10a.py
index 45ba148..db362b4 100644
--- a/doc/examples/cookbook/example10a.py
+++ b/doc/examples/cookbook/example10a.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
@@ -44,7 +43,7 @@ from math import pi, sqrt, sin, cos
 
 from esys.escript.pdetools import Projector
 
-from cblib import toRegGrid
+from cblib import toRegGrid, HAVE_NATGRID
 import pylab as pl #Plotting package
 import numpy as np
 
@@ -59,12 +58,8 @@ if getMPISizeWorld() > 1:
     print("This example will not run in an MPI world.")
     sys.exit(0)
 
-try:
-    from mpl_toolkits.natgrid import _natgrid
-    HAVE_NATGRID=True
-except ImportError:
-    HAVE_NATGRID=False
-
+if not HAVE_NATGRID:
+    print("This example requires that natgrid is available to matplotlib")
 
 if HAVE_FINLEY and HAVE_NATGRID:
     #################################################ESTABLISHING VARIABLES
@@ -122,7 +117,7 @@ if HAVE_FINLEY and HAVE_NATGRID:
 
     pl.clf()
 
-    r=np.linspace(0.0000001,mx/2,100)	# starting point would be 0 but that would cause division by zero later
+    r=np.linspace(0.0000001,mx/2,100)   # starting point would be 0 but that would cause division by zero later
     m=2*pl.pi*10*10*200*-G/(r*r)
 
     pl.plot(xi,zi[:,cut])
diff --git a/doc/examples/cookbook/example10b.py b/doc/examples/cookbook/example10b.py
index 72a656c..c7c80eb 100644
--- a/doc/examples/cookbook/example10b.py
+++ b/doc/examples/cookbook/example10b.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/cookbook/example10c_0.py b/doc/examples/cookbook/example10c_0.py
index f549599..f6202ca 100644
--- a/doc/examples/cookbook/example10c_0.py
+++ b/doc/examples/cookbook/example10c_0.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/cookbook/example10c_1.py b/doc/examples/cookbook/example10c_1.py
index 430b883..44ef5ae 100644
--- a/doc/examples/cookbook/example10c_1.py
+++ b/doc/examples/cookbook/example10c_1.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/cookbook/example10d.py b/doc/examples/cookbook/example10d.py
index 5a5bc17..96d1854 100644
--- a/doc/examples/cookbook/example10d.py
+++ b/doc/examples/cookbook/example10d.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/cookbook/example10e.py b/doc/examples/cookbook/example10e.py
index fd61ba9..daa4cac 100644
--- a/doc/examples/cookbook/example10e.py
+++ b/doc/examples/cookbook/example10e.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/cookbook/example10m.py b/doc/examples/cookbook/example10m.py
index bbd484e..efaa282 100644
--- a/doc/examples/cookbook/example10m.py
+++ b/doc/examples/cookbook/example10m.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/cookbook/example10p.py b/doc/examples/cookbook/example10p.py
index 841dc08..759eb64 100644
--- a/doc/examples/cookbook/example10p.py
+++ b/doc/examples/cookbook/example10p.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/cookbook/example11a.py b/doc/examples/cookbook/example11a.py
index c913422..e045570 100644
--- a/doc/examples/cookbook/example11a.py
+++ b/doc/examples/cookbook/example11a.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/cookbook/example11b.py b/doc/examples/cookbook/example11b.py
index bf86ada..4ddfdaf 100644
--- a/doc/examples/cookbook/example11b.py
+++ b/doc/examples/cookbook/example11b.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/cookbook/example11c.py b/doc/examples/cookbook/example11c.py
index 037c8cd..ba92a39 100644
--- a/doc/examples/cookbook/example11c.py
+++ b/doc/examples/cookbook/example11c.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/cookbook/example11m.py b/doc/examples/cookbook/example11m.py
index 2f7d14e..a1717e6 100644
--- a/doc/examples/cookbook/example11m.py
+++ b/doc/examples/cookbook/example11m.py
@@ -45,9 +45,9 @@ except ImportError:
     HAVE_FINLEY = False
 ########################################################MPI WORLD CHECK
 if getMPISizeWorld() > 1:
-	import sys
-	print("This example will not run in an MPI world.")
-	sys.exit(0)
+    import sys
+    print("This example will not run in an MPI world.")
+    sys.exit(0)
 
 if HAVE_FINLEY:
     # make sure path exists 
diff --git a/doc/examples/cookbook/wave_stab.py b/doc/examples/cookbook/wave_stab.py
index a27e570..1a2c840 100644
--- a/doc/examples/cookbook/wave_stab.py
+++ b/doc/examples/cookbook/wave_stab.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import division, print_function
+
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -39,9 +41,9 @@ mtim= np.zeros(len(ndx),'float')
 nvel= np.arange(500.,5000.,500.)
 
 for vel in nvel:
-	mtim=ndx/vel
-	pl.plot(ndx,mtim,label='%d m/s'%vel)
-	
+    mtim=ndx/vel
+    pl.plot(ndx,mtim,label='%d m/s'%vel)
+
 pl.title('Maximum time steps calculations by velocity')
 pl.xlabel('Minimum grid spacing (m)')
 pl.ylabel('Maximum stable time step (s)')
diff --git a/doc/examples/cookbook/wavesolver2d001.py b/doc/examples/cookbook/wavesolver2d001.py
index 2ba90d0..8464a91 100644
--- a/doc/examples/cookbook/wavesolver2d001.py
+++ b/doc/examples/cookbook/wavesolver2d001.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/cookbook/wavesolver2d002.py b/doc/examples/cookbook/wavesolver2d002.py
index 1fc43b4..38e49a5 100644
--- a/doc/examples/cookbook/wavesolver2d002.py
+++ b/doc/examples/cookbook/wavesolver2d002.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/cookbook/wavesolver2d003.py b/doc/examples/cookbook/wavesolver2d003.py
index 554481e..576b0c4 100644
--- a/doc/examples/cookbook/wavesolver2d003.py
+++ b/doc/examples/cookbook/wavesolver2d003.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/cookbook/wavesolver2d004.py b/doc/examples/cookbook/wavesolver2d004.py
index d327cba..e7162ae 100644
--- a/doc/examples/cookbook/wavesolver2d004.py
+++ b/doc/examples/cookbook/wavesolver2d004.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/geotutorial/backward_euler.py b/doc/examples/geotutorial/backward_euler.py
index dff5f08..c3e6576 100644
--- a/doc/examples/geotutorial/backward_euler.py
+++ b/doc/examples/geotutorial/backward_euler.py
@@ -1,6 +1,3 @@
-from __future__ import division
-from __future__ import print_function
-
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -15,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/geotutorial/forward_euler.py b/doc/examples/geotutorial/forward_euler.py
index 9eaebad..b976d98 100644
--- a/doc/examples/geotutorial/forward_euler.py
+++ b/doc/examples/geotutorial/forward_euler.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/inversion/create_netcdf.py b/doc/examples/inversion/create_netcdf.py
index 087a33b..66d308f 100644
--- a/doc/examples/inversion/create_netcdf.py
+++ b/doc/examples/inversion/create_netcdf.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -19,6 +17,7 @@ from __future__ import print_function
 This example shows how to create a netCDF input file that is suitable for
 inversions in esys.downunder. 
 """
+from __future__ import division, print_function
 
 import sys
 from datetime import datetime
@@ -46,7 +45,7 @@ try:
     NY=10
 
     # Dummy value (for unset areas)
-    MISSING=-9999
+    MISSING=np.nan
 
     # Data error (can be constant or variable over the data points)
     SIGMA = 3.
diff --git a/doc/examples/inversion/dc_forward.py b/doc/examples/inversion/dc_forward.py
index 1475f9d..4b41023 100644
--- a/doc/examples/inversion/dc_forward.py
+++ b/doc/examples/inversion/dc_forward.py
@@ -1,4 +1,19 @@
-from __future__ import print_function
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
 # -------------------------------------------------------------------------------------------------
 # DESCRIPTION:
 # ------------
@@ -12,6 +27,8 @@ from __future__ import print_function
 # Carsten Rucker, Thomas Gunther and Klaus Spitzer
 # -------------------------------------------------------------------------------------------------
 
+from __future__ import division, print_function
+
 import esys.finley      as finley
 import esys.escript     as escript
 from esys.downunder     import *
diff --git a/doc/examples/inversion/grav_ermapper.py b/doc/examples/inversion/grav_ermapper.py
index 5b2bac1..52e18b3 100644
--- a/doc/examples/inversion/grav_ermapper.py
+++ b/doc/examples/inversion/grav_ermapper.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -16,6 +14,7 @@ from __future__ import print_function
 ##############################################################################
 
 """3D gravity inversion example using ER Mapper data"""
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/inversion/grav_netcdf.py b/doc/examples/inversion/grav_netcdf.py
index 845c4bb..11edaec 100644
--- a/doc/examples/inversion/grav_netcdf.py
+++ b/doc/examples/inversion/grav_netcdf.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -16,6 +14,7 @@ from __future__ import print_function
 ##############################################################################
 
 """3D gravity inversion example using netCDF data"""
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/inversion/gravmag_netcdf.py b/doc/examples/inversion/gravmag_netcdf.py
index 8535fb3..00da435 100644
--- a/doc/examples/inversion/gravmag_netcdf.py
+++ b/doc/examples/inversion/gravmag_netcdf.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -16,6 +14,7 @@ from __future__ import print_function
 ##############################################################################
 
 """3D gravity/magnetic joint inversion example using netCDF data"""
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/inversion/gravmag_nodriver.py b/doc/examples/inversion/gravmag_nodriver.py
index b31cbc4..049b3fa 100644
--- a/doc/examples/inversion/gravmag_nodriver.py
+++ b/doc/examples/inversion/gravmag_nodriver.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2012-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 """
 Advanced 3D gravity/magnetic joint inversion example without using any
diff --git a/doc/examples/inversion/gravmag_wgs84_nodriver.py b/doc/examples/inversion/gravmag_wgs84_nodriver.py
index 95157eb..792ab5a 100644
--- a/doc/examples/inversion/gravmag_wgs84_nodriver.py
+++ b/doc/examples/inversion/gravmag_wgs84_nodriver.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2012-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 """
 Advanced 3D gravity/magnetic joint inversion example without using any
diff --git a/doc/examples/inversion/mag_netcdf.py b/doc/examples/inversion/mag_netcdf.py
index d6b4ddb..656118a 100644
--- a/doc/examples/inversion/mag_netcdf.py
+++ b/doc/examples/inversion/mag_netcdf.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -16,6 +14,7 @@ from __future__ import print_function
 ##############################################################################
 
 """3D magnetic inversion example using netCDF data"""
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/inversion/mag_wgs84_netcdf.py b/doc/examples/inversion/mag_wgs84_netcdf.py
index 0ee0621..098f761 100644
--- a/doc/examples/inversion/mag_wgs84_netcdf.py
+++ b/doc/examples/inversion/mag_wgs84_netcdf.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 """3D magnetic inversion example using netCDF data"""
 
diff --git a/doc/examples/inversion/plot_ermapper.py b/doc/examples/inversion/plot_ermapper.py
index 710f4e8..b58e453 100644
--- a/doc/examples/inversion/plot_ermapper.py
+++ b/doc/examples/inversion/plot_ermapper.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -15,7 +13,8 @@ from __future__ import print_function
 #
 ##############################################################################
 
-"""This example show how to display ER Mapper raster data with matplotlib"""
+"""This example shows how to display ER Mapper raster data with matplotlib"""
+from __future__ import division, print_function
 
 import matplotlib
 # The following line is here to allow automated testing. Remove or comment if
diff --git a/doc/examples/inversion/plot_netcdf.py b/doc/examples/inversion/plot_netcdf.py
index 11a728b..c292785 100644
--- a/doc/examples/inversion/plot_netcdf.py
+++ b/doc/examples/inversion/plot_netcdf.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -15,8 +13,9 @@ from __future__ import print_function
 #
 ##############################################################################
 
-"""This example show how to display netCDF input data with matplotlib"""
+"""This example shows how to display netCDF input data with matplotlib"""
 
+from __future__ import division, print_function
 import matplotlib
 # The following line is here to allow automated testing. Remove or comment if
 # you would like to display the final plot in a window instead.
diff --git a/doc/examples/inversion/strong_gravmag_netcdf.py b/doc/examples/inversion/strong_gravmag_netcdf.py
index 43e0297..4b9259f 100644
--- a/doc/examples/inversion/strong_gravmag_netcdf.py
+++ b/doc/examples/inversion/strong_gravmag_netcdf.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2009-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 """3D gravity/magnetic joint inversion example using netCDF data"""
 
diff --git a/doc/examples/inversion/synthetic_HTI.py b/doc/examples/inversion/synthetic_HTI.py
index 6096f28..af8cecf 100644
--- a/doc/examples/inversion/synthetic_HTI.py
+++ b/doc/examples/inversion/synthetic_HTI.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -13,9 +11,9 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
-from __future__ import print_function
+from __future__ import print_function, division
 
-__copyright__="""Copyright (c) 2003-2015 by The University of Queensland
+__copyright__="""Copyright (c) 2003-2015 by University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
 __license__="""Licensed under the Open Software License version 3.0
@@ -25,98 +23,135 @@ __url__="https://launchpad.net/escript-finley"
 from esys.escript import *
 from esys.escript import unitsSI as U
 from esys.escript.pdetools import Locator
-from esys.finley import Brick, Rectangle
+from esys.speckley import Brick, Rectangle
 from esys.weipa import saveSilo
-from esys.downunder import Ricker, HTIWave, SimpleSEGYWriter
+from esys.downunder import Ricker, SimpleSEGYWriter, HTIWave
 from math import ceil
+from time import time
+
 
 DIM=2          # spatial dimension
 
-ne_z=550.
 
-absorption_zone=500*U.m
+ORDER = 5
+ne_z= 20
 
 # layers from the bottom up:
-layers=[absorption_zone, 100*U.m, 200 * U.m,  300*U.m ]
-v_Ps=[ 3*U.km/U.sec, 3*U.km/U.sec, 1.5*U.km/U.sec, 1.5*U.km/U.sec ]
-v_Ss=[1.4*U.km/U.sec, 1.4*U.km/U.sec, 0.7*U.km/U.sec, 0.7*U.km/U.sec ]
-rhos=[2000*U.kg/U.m**3, 2000*U.kg/U.m**3, 2000*U.kg/U.m**3,  2000*U.kg/U.m**3]
-epss=[0., 0., 0.1*0, 0. ]
-gammas=[0.,0.,  0.03*0, 0.]
-deltas=[0.,0., -0.1*0, 0.]        
+layers=[20*U.m, 180*U.m ]
+v_Ps=[i*U.km/U.sec for i in [3, 2.5]]
+v_Ss= [i*U.km/U.sec for i in [3, 2]]
+rhos=[i*U.kg/U.m**3 for i in [2.6, 2.1]]
+epss=[0, .110]
+gammas=[0, 0.035]
+deltas=[0, 0.255]
 src_dir=[0,0,1]
 
-t_end=3.0*U.sec
-frq=20.*U.Hz
-sampling_interval=4*U.msec
+t_end=0.01*U.sec #increase this end time as desired
+frq=50.*U.Hz
+sampling_interval=2*U.msec
 numRcvPerLine=101
-rangeRcv=800*U.m
+rangeRcv=200*U.m
 
 
-# location of source in crossing array lines with in 0..numRcvInLine one needs to be None
-srcEW=numRcvPerLine//2
-srcNS=None
+# location of source
+if DIM == 2:
+    src_locations = [(0, 0)]
+else:
+    src_locations = [(0, 0, 0)]
 
-# dommain dimension
-width_x=rangeRcv + 4*absorption_zone
+# domain dimensions
+width_x=rangeRcv
 width_y=width_x
 depth=sum(layers)
 #
-# create array 
+# create array
 #
-receiver_line=[2*absorption_zone + i * (rangeRcv/(numRcvPerLine-1)) for i in range(numRcvPerLine) ]
+receiver_line=[i * (rangeRcv/(numRcvPerLine-1)) for i in range(numRcvPerLine)]
 #
 #   set source location with tag "source""
 #
 src_tags=["source"]
 
-if srcEW:
-      srcNS=numRcvPerLine//2
-elif srcNS:
-      srcEW=numRcvPerLine//2
-else:
-    raise ValueError("on of the variables srcEW or srcNS must be None!")
-if DIM == 2:    
-    src_locations  = [ (receiver_line[srcEW], depth) ]
-    src_loc_2D=(receiver_line[srcEW], 0.)
-else:
-    src_locations  = [ (receiver_line[srcEW], receiver_line[srcNS], depth)]
-    src_loc_2D=(receiver_line[srcEW], receiver_line[srcNS])
+src_loc_2D=(0, 0)
+
+
 
 #
 #   create sensor arrays:
 #
-# East-west line of receiver
-rcv_locations=[]
-rg=[]
+# East-west line of receivers
+rcvEW_locations=[]
+# North-south line of receivers (if 3 dimensional problem)
+rcvNS_locations=[]
+rgEW=[]
+rgNS=[]
 mid_point=receiver_line[len(receiver_line)//2]
 
 for ix in range(len(receiver_line)):
-        if DIM == 2:
-            rcv_locations.append((receiver_line[ix],  depth))
-            rg.append( ( receiver_line[ix], 0.) ) 
-        else:
-           rcv_locations.append((receiver_line[ix], mid_point, depth))
-           rg.append( ( receiver_line[ix], mid_point) ) 
-# North-south line of receiver
+    rgEW.append((receiver_line[ix], 0))
+    if DIM == 2:
+        rcvEW_locations.append((receiver_line[ix],  0))
+    else:
+        rcvEW_locations.append((receiver_line[ix], 0, 0))
+        rcvNS_locations.append((0, receiver_line[ix], 0))
+        rgNS.append((0, receiver_line[ix]))
+# North-south line of receivers
 if DIM == 3:
      for iy in range(len(receiver_line)):
-            rcv_locations.append((mid_point, receiver_line[iy],  depth))
-            rg.append( (  mid_point, receiver_line[iy]) ) 
+            rcvNS_locations.append((mid_point, receiver_line[iy],  0))
+            rgNS.append( (  mid_point, receiver_line[iy]) )
 #
 # create domain:
 #
 if DIM == 2:
-   domain=Rectangle(ceil(ne_z*width_x/depth), ne_z ,l0=width_x, l1=depth, 
-        diracPoints=src_locations, diracTags=src_tags)
+    domain = Rectangle(ORDER,
+            ceil(ne_z*width_x/depth), ne_z ,l0=width_x, l1=(-depth,0),
+            diracPoints=src_locations, diracTags=src_tags)
+    #suppress the x-component on the x boundary
+    q = whereZero(domain.getX()[0])*[1,0]
 else:
-   domain=Brick(ceil(ne_z*width_x/depth),ceil(ne_z*width_y/depth),ne_z,l0=width_x,l1=width_y,l2=depth, 
-        diracPoints=src_locations, diracTags=src_tags)
-wl=Ricker(frq)
+    domain=Brick(ORDER,
+            ceil(ne_z*width_x/depth), ceil(ne_z*width_y/depth), ne_z,
+            l0=width_x, l1=width_y, l2=(-depth,0),
+            diracPoints=src_locations, diracTags=src_tags)
+    q = wherePositive(
+            #suppress the x-component on the x boundary
+            whereZero(domain.getX()[0])*[1,0,0]
+            + #logical or
+            #suppress the y-component on the y boundary at the source
+            whereZero(domain.getX()[1])*[0,1,0])
+
+# set up reciever locations
+locEW=Locator(domain,rcvEW_locations)
+tracerEW_x=SimpleSEGYWriter(receiver_group=rgEW, source=src_loc_2D,
+        sampling_interval=sampling_interval,
+        text='x-displacement - east-west line')
+tracerEW_z=SimpleSEGYWriter(receiver_group=rgEW, source=src_loc_2D,
+        sampling_interval=sampling_interval,
+        text='z-displacement - east-west line')
+if DIM==3:
+    locNS=Locator(domain,rcvNS_locations)
+    tracerEW_y=SimpleSEGYWriter(receiver_group=rgEW, source=src_loc_2D,
+            sampling_interval=sampling_interval,
+            text='y-displacement - east-west line')
+    tracerNS_x=SimpleSEGYWriter(receiver_group=rgNS, source=src_loc_2D,
+            sampling_interval=sampling_interval,
+            text='x-displacement - north-south line')
+    tracerNS_y=SimpleSEGYWriter(receiver_group=rgNS, source=src_loc_2D,
+            sampling_interval=sampling_interval,
+            text='y-displacement - north-south line')
+    tracerNS_z=SimpleSEGYWriter(receiver_group=rgNS, source=src_loc_2D,
+            sampling_interval=sampling_interval,
+            text='z-displacement - north-south line')
+if not tracerEW_x.obspy_available():
+    print("\nWARNING: obspy not available, SEGY files will not be written\n")
+elif getMPISizeWorld() > 1:
+    print("\nWARNING: SEGY files cannot be written with multiple processes\n")
+
 
 #======================================================================
-z=Function(domain).getX()[DIM-1]
-z_bottom=0
+z=ReducedFunction(domain).getX()[DIM-1]
+z_bottom=-depth
 v_p=0
 v_s=0
 delta=0
@@ -124,43 +159,55 @@ vareps=0
 gamma=0
 rho=0
 for l in range(len(layers)):
-       m=wherePositive(z-z_bottom)*whereNonPositive(z-(z_bottom+layers[l]))
-       v_p=v_p*(1-m)+v_Ps[l]*m
-       v_s=v_s*(1-m)+v_Ss[l]*m
-       rho=rho*(1-m)+rhos[l]*m
-       vareps=vareps*(1-m)+epss[l]*m
-       gamma=gamma*(1-m)+gammas[l]*m
-       delta=delta*(1-m)+deltas[l]*m
-       z_bottom+=layers[l]
-
-sw=HTIWave(domain, v_p, v_s, wl, src_tags[0], source_vector = src_dir, eps=vareps, gamma=gamma, delta=delta, rho=rho,  \
-                     absorption_zone=300*U.m, absorption_cut=1e-2, lumping=True)
-
-
-loc=Locator(domain,rcv_locations)
-tracer_x=SimpleSEGYWriter(receiver_group=rg, source=src_loc_2D, sampling_interval=sampling_interval, text='x-displacement')
-tracer_z=SimpleSEGYWriter(receiver_group=rg, source=src_loc_2D, sampling_interval=sampling_interval, text='z-displacement')
-if DIM==3:
-   tracer_y=SimpleSEGYWriter(receiver_group=rg, source=src_loc_2D, sampling_interval=sampling_interval, text='y-displacement')
+    m=wherePositive(z-z_bottom)*whereNonPositive(z-(z_bottom+layers[l]))
+    v_p=v_p*(1-m)+v_Ps[l]*m
+    v_s=v_s*(1-m)+v_Ss[l]*m
+    rho=rho*(1-m)+rhos[l]*m
+    vareps=vareps*(1-m)+epss[l]*m
+    gamma=gamma*(1-m)+gammas[l]*m
+    delta=delta*(1-m)+deltas[l]*m
+    z_bottom+=layers[l]
+
+wl=Ricker(frq)
+dt=min((1./5.)*min(inf(domain.getSize()/v_p), inf(domain.getSize()/v_s)), wl.getTimeScale())
+
+sw=HTIWave(domain, v_p, v_s, wl, src_tags[0], source_vector = src_dir,
+        eps=vareps, gamma=gamma, delta=delta, rho=rho,
+        absorption_zone=None, absorption_cut=1e-2, lumping=True, dt=dt)
+sw.setQ(q)
+
+locEW=Locator(domain, rcvEW_locations)
+if DIM == 3:
+    locNS=Locator(domain, rcvNS_locations)
+
+mkDir('output')
 
 t=0.
-mkDir('tmp')
 n=0
 k=0
+u=None
 while t < t_end:
+    start = time()
     t,u = sw.update(t+sampling_interval)
-    tracer_x.addRecord(loc(u[0]))
-    tracer_y.addRecord(loc(u[1]))
+    tracerEW_x.addRecord(locEW(u[0]))
+    tracerEW_z.addRecord(locEW(u[DIM-1]))
     if DIM==3:
-        tracer_z.addRecord(loc(u[2]))
-    print(t, loc(u[0])[len(rg)//2-4:len(rg)//2+1], wl.getValue(t))
-    #if n%5 == 0 : saveSilo("tmp/u_%d.silo"%(n/5,), u=u)
-    if t>0.3 and t< 0.5: 
-        saveSilo("tmp/u_%d.silo"%(k,), u=u)
-        k+=1
-        n+=1
-tracer_x.write('line_x.sgy')
-tracer_z.write('line_z.sgy')
-if DIM == 3: 
-        tracer_y.write('line_y.sgy')
-
+           tracerEW_y.addRecord(locEW(u[1]))
+           tracerNS_x.addRecord(locNS(u[0]))
+           tracerNS_y.addRecord(locNS(u[1]))
+           tracerNS_z.addRecord(locNS(u[2]))
+    print(t, locEW(u[DIM-1])[len(rgEW)//2-4:len(rgEW)//2+1], wl.getValue(t))
+    k+=1
+    if k%5 == 0:
+        saveSilo("output/normalHTI_%d.silo"%(n,), v_p=v_p, u=u, cycle=k, time=t)
+        n += 1
+if k%5 != 0:
+    saveSilo("output/normalHTI_%d.silo"%(n,), v_p=v_p, u=u, cycle=k, time=t)
+if tracerEW_x.obspy_available() and getMPISizeWorld() == 1:
+    tracerEW_x.write('output/lineEW_x.sgy')
+    tracerEW_z.write('output/lineEW_z.sgy')
+    if DIM == 3: 
+        tracerEW_y.write('output/lineEW_y.sgy')
+        tracerNS_x.write('output/lineNS_x.sgy')
+        tracerNS_y.write('output/lineNS_y.sgy')
+        tracerNS_z.write('output/lineNS_z.sgy')
diff --git a/doc/examples/inversion/synthetic_TTI.py b/doc/examples/inversion/synthetic_TTI.py
index 71d10e9..1be4358 100644
--- a/doc/examples/inversion/synthetic_TTI.py
+++ b/doc/examples/inversion/synthetic_TTI.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -13,7 +11,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
-from __future__ import print_function
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
@@ -25,7 +23,7 @@ __url__="https://launchpad.net/escript-finley"
 from esys.escript import *
 from esys.escript import unitsSI as U
 from esys.escript.pdetools import Locator
-from esys.ripley import Rectangle
+from esys.speckley import Rectangle
 from esys.weipa import saveSilo
 from esys.downunder import Ricker, TTIWave, SimpleSEGYWriter
 from math import ceil
@@ -42,25 +40,25 @@ rho=     [ 2000 * U.kg/U.m**3  , 2000 * U.kg/U.m**3, 2000 * U.kg/U.m**3 ]
 #
 #   other input:
 #
-t_end=3.0*U.sec                     #  length of the record
+t_end=0.008*U.sec                   #only this low for testing purposes
 frq=10.*U.Hz                         #  dominant frequnce in the Ricker (maximum frequence ~ 2 * frq)
 sampling_interval=4*U.msec          # sampling interval
 ne_z=None                           # number of elements in vertical direction, if none it is guessed 
 n_out = 5                         # a silo file is created every n_out's sample
-absorption_zone=300*U.m             # absorbtion zone to be added in horizantal direction to the area covered by receiver line 
+absorption_zone=100*U.m             # absorbtion zone to be added in horizantal direction to the area covered by receiver line 
                                     # and subtracted from the lowest layer.
 # defines the receiver line 
 rangeRcv=800*U.m                    # width of the receveiver line
 numRcvPerLine=101                   # total number of receiver
-src_id=numRcvPerLine/2              # location of source in crossing array lines with in 0..numRcvInLine 
+src_id=numRcvPerLine//2              # location of source in crossing array lines with in 0..numRcvInLine 
 lumping = True
 src_dir=[0,1]
 
-# dommain dimension
+# domain dimension
 width_x=rangeRcv + 4*absorption_zone
 depth=sum(layers)
 if ne_z is None:
-    ne_z=int(ceil(depth*(2*frq)/min(v_P)*10))
+    ne_z=int(ceil(depth*(2*frq)/min(v_P)))
 ne_x=int(ceil(ne_z*width_x/depth))
 #
 # create receiver array 
@@ -97,8 +95,9 @@ for i in range(len(layers)):
 #
 # create domain:
 #
-domain=Rectangle(ne_x,ne_z,l0=width_x,l1=depth, 
-                diracPoints=src_locations, diracTags=src_tags)
+order = 5
+domain=Rectangle(order, ne_x,ne_z, l0=width_x, l1=depth, 
+            diracPoints=src_locations, diracTags=src_tags, d0=getMPISizeWorld())
 #
 # create the wavelet:
 #
@@ -108,7 +107,7 @@ wl=Ricker(frq)
 #
 #  set 
 #
-z=Function(domain).getX()[1]
+z=ReducedFunction(domain).getX()[1]
 z_top=0
 V_P=0
 V_S=0
@@ -119,14 +118,14 @@ Rho=0
 z_top=depth
 
 for l in range(len(layers)):
-       m=whereNonPositive(z-z_top)*wherePositive(z-(z_top-layers[l]))
-       V_P = V_P     * (1-m)  + v_P[l]  * m
-       V_S = V_S     * (1-m)  + v_S[l]  * m
-       Delta = Delta * (1-m)  + delta[l]* m
-       Eps = Eps     * (1-m)  + eps[l]  * m
-       Tilt = Tilt   * (1-m)  + tilt[l] * m
-       Rho = Rho     * (1-m)  + rho[l]  * m
-       z_top-=layers[l]
+    m=whereNonPositive(z-z_top)*wherePositive(z-(z_top-layers[l]))
+    V_P = V_P     * (1-m)  + v_P[l]  * m
+    V_S = V_S     * (1-m)  + v_S[l]  * m
+    Delta = Delta * (1-m)  + delta[l]* m
+    Eps = Eps     * (1-m)  + eps[l]  * m
+    Tilt = Tilt   * (1-m)  + tilt[l] * m
+    Rho = Rho     * (1-m)  + rho[l]  * m
+    z_top-=layers[l]
 
 sw=TTIWave(domain, V_P, V_S, wl, src_tags[0], source_vector = src_dir,
                 eps=Eps, delta=Delta, rho=Rho, theta=Tilt,
@@ -138,6 +137,11 @@ grploc=[ (x[0], 0.) for x in srclog.getX() ]
 tracer_x=SimpleSEGYWriter(receiver_group=grploc, source=srcloc, sampling_interval=sampling_interval, text='x-displacement')
 tracer_z=SimpleSEGYWriter(receiver_group=grploc, source=srcloc, sampling_interval=sampling_interval, text='z-displacement')
 
+if not tracer_x.obspy_available():
+    print("\nWARNING: obspy not available, SEGY files will not be written\n")
+elif getMPISizeWorld() > 1:
+    print("\nWARNING: SEGY files cannot be written with multiple processes\n")
+
 t=0.
 mkDir('output')
 n=0
@@ -149,10 +153,11 @@ while t < t_end:
         tracer_z.addRecord(srclog(u[1]))
         print("t=%s, src=%s: \t %s \t %s \t %s"%(t, wl.getValue(t),srclog(u[1])[0], srclog(u[1])[src_id], srclog(u[1])[-1]))
         if not n_out is None and n%n_out == 0:
-            print("time step %s writen to file %s."%(n_out, k_out))
+            print("time step %s written to file %s"%(n_out, "output/u_%d.silo"%(k_out,)))
             saveSilo("output/u_%d.silo"%(k_out,), u=u)
             k_out+=1
         n+=1
-tracer_x.write('output/lineX.sgy')
-tracer_z.write('output/lineZ.sgy')
+if tracer_x.obspy_available() and getMPISizeWorld() == 1:
+    tracer_x.write('output/lineX.sgy')
+    tracer_z.write('output/lineZ.sgy')
 print("calculation completed @ %s"%(time.asctime(),))
diff --git a/doc/examples/inversion/synthetic_VTI.py b/doc/examples/inversion/synthetic_VTI.py
index ea9fb5f..67de9b4 100644
--- a/doc/examples/inversion/synthetic_VTI.py
+++ b/doc/examples/inversion/synthetic_VTI.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -13,7 +11,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
-from __future__ import print_function
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
@@ -25,7 +23,7 @@ __url__="https://launchpad.net/escript-finley"
 from esys.escript import *
 from esys.escript import unitsSI as U
 from esys.escript.pdetools import Locator
-from esys.ripley import Brick, Rectangle
+from esys.speckley import Brick, Rectangle
 from esys.weipa import saveSilo
 from esys.downunder import Ricker, VTIWave, SimpleSEGYWriter
 from math import ceil
@@ -36,13 +34,13 @@ DIM=2          # spatial dimension
 depth=1*U.km    # depth 
 v_p_top=1.5*U.km/U.sec
 v_p_bottom=3*U.km/U.sec
-absorption_zone=300*U.m
-ne_z=500.
+absorption_zone=100*U.m
+ne_z=50.
 
 reflector_at=0.5*depth
 
 
-t_end=0.4*U.sec
+t_end=0.008*U.sec #only this low for testing purposes
 frq=8.*U.Hz
 sampling_interval=4*U.msec
 numRcvPerLine=101
@@ -101,11 +99,13 @@ if DIM == 3:
 #
 # create domain:
 #
+order = 5
 if DIM == 2:
-   domain=Rectangle(ceil(ne_z*width_x/depth),ne_z,l0=width_x,l1=depth, 
+    domain=Rectangle(order, ceil(ne_z*width_x/depth),ne_z,l0=width_x,l1=depth, 
                 diracPoints=src_locations, diracTags=src_tags)
 else:
-   domain=Brick(ceil(ne_z*width_x/depth),ceil(ne_z*width_y/depth),ne_z,l0=width_x,l1=width_y,l2=depth,
+    domain=Brick(order, ceil(ne_z*width_x/depth), ceil(ne_z*width_y/depth),
+                ne_z, l0=width_x, l1=width_y, l2=depth,
                 diracPoints=src_locations, diracTags=src_tags)
 wl=Ricker(frq)
 
@@ -128,12 +128,23 @@ locEW=Locator(domain,rcvEW_locations)
 tracerEW_x=SimpleSEGYWriter(receiver_group=rgEW, source=src_loc_2D, sampling_interval=sampling_interval, text='x-displacement - east-west line')
 tracerEW_z=SimpleSEGYWriter(receiver_group=rgEW, source=src_loc_2D, sampling_interval=sampling_interval, text='z-displacement - east-west line')
 if DIM==3:
-   locNS=Locator(domain,rcvNS_locations)
-   tracerEW_y=SimpleSEGYWriter(receiver_group=rgEW, source=src_loc_2D, sampling_interval=sampling_interval, text='x-displacement - east-west line')
-   tracerNS_x=SimpleSEGYWriter(receiver_group=rgNS, source=src_loc_2D, sampling_interval=sampling_interval, text='x-displacement - north-south line')
-   tracerNS_y=SimpleSEGYWriter(receiver_group=rgNS, source=src_loc_2D, sampling_interval=sampling_interval, text='y-displacement - north-south line')
-   tracerNS_z=SimpleSEGYWriter(receiver_group=rgNS, source=src_loc_2D, sampling_interval=sampling_interval, text='z-displacement - north-south line')
-
+    locNS=Locator(domain,rcvNS_locations)
+    tracerEW_y=SimpleSEGYWriter(receiver_group=rgEW, source=src_loc_2D,
+        sampling_interval=sampling_interval,
+        text='y-displacement - east-west line')
+    tracerNS_x=SimpleSEGYWriter(receiver_group=rgNS, source=src_loc_2D,
+        sampling_interval=sampling_interval,
+        text='x-displacement - north-south line')
+    tracerNS_y=SimpleSEGYWriter(receiver_group=rgNS, source=src_loc_2D,
+        sampling_interval=sampling_interval,
+        text='y-displacement - north-south line')
+    tracerNS_z=SimpleSEGYWriter(receiver_group=rgNS, source=src_loc_2D,
+        sampling_interval=sampling_interval,
+        text='z-displacement - north-south line')
+if not tracerEW_x.obspy_available():
+    print("\nWARNING: obspy not available, SEGY files will not be written\n")
+elif getMPISizeWorld() > 1:
+    print("\nWARNING: SEGY files cannot be written with multiple processes\n")
 
 t=0.
 mkDir('tmp')
@@ -149,11 +160,12 @@ while t < t_end:
                tracerNS_z.addRecord(locNS(u[2]))
         print(t, locEW(u[DIM-1])[len(rgEW)//2-4:len(rgEW)//2+1], wl.getValue(t))
         #if n%5 == 0 : saveSilo("tmp/u_%d.silo"%(n/5,), u=u)
-        saveSilo("tmp/u_%d.silo"%(n,), u=u)
+        saveSilo("tmp/u_%d.silo"%(n,), u=u, cycle=n, time=t)
         n+=1
-tracerEW_x.write('lineEW_x.sgy')
-tracerEW_z.write('lineEW_z.sgy')
-if DIM == 3: 
+if tracerEW_x.obspy_available() and getMPISizeWorld() == 1:
+    tracerEW_x.write('lineEW_x.sgy')
+    tracerEW_z.write('lineEW_z.sgy')
+    if DIM == 3: 
         tracerEW_y.write('lineEW_y.sgy')
         tracerNS_x.write('lineNS_x.sgy')
         tracerNS_y.write('lineNS_y.sgy')
diff --git a/doc/examples/inversion/synthetic_sonic.py b/doc/examples/inversion/synthetic_sonic.py
index 5c1d4e3..9d3faf2 100644
--- a/doc/examples/inversion/synthetic_sonic.py
+++ b/doc/examples/inversion/synthetic_sonic.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -13,7 +11,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
-from __future__ import print_function
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
@@ -25,7 +23,7 @@ __url__="https://launchpad.net/escript-finley"
 from esys.escript import *
 from esys.escript import unitsSI as U
 from esys.escript.pdetools import Locator
-from esys.finley import Brick, Rectangle
+from esys.speckley import Brick, Rectangle
 from esys.weipa import saveSilo
 from esys.downunder import Ricker, SonicWave, SimpleSEGYWriter
 from math import ceil
@@ -37,12 +35,12 @@ depth=1*U.km    # depth
 v_p_top=1.5*U.km/U.sec
 v_p_bottom=3*U.km/U.sec
 absorption_zone=300*U.m
-ne_z=400
+ne_z=40
 
 reflector_at=0.5*depth
 
 
-t_end=1.*U.sec
+t_end=0.008*U.sec #only this low for testing purposes
 frq=20.*U.Hz
 sampling_interval=4*U.msec
 numRcvPerLine=101
@@ -101,11 +99,12 @@ if DIM == 3:
 #
 # create domain:
 #
+order = 5
 if DIM == 2:
-   domain=Rectangle(ceil(ne_z*width_x/depth),ne_z,l0=width_x,l1=depth, 
+   domain=Rectangle(order, ceil(ne_z*width_x/depth),ne_z,l0=width_x,l1=depth, 
         diracPoints=src_locations, diracTags=src_tags)
 else:
-   domain=Brick(ceil(ne_z*width_x/depth),ceil(ne_z*width_y/depth),ne_z,l0=width_x,l1=width_y,l2=depth, 
+   domain=Brick(order, ceil(ne_z*width_x/depth),ceil(ne_z*width_y/depth),ne_z,l0=width_x,l1=width_y,l2=depth, 
         diracPoints=src_locations, diracTags=src_tags)
 wl=Ricker(frq)
 m=whereNegative(Function(domain).getX()[DIM-1]-reflector_at)
@@ -119,6 +118,10 @@ if DIM==3:
    locNS=Locator(domain,rcvNS_locations)
    tracerNS=SimpleSEGYWriter(receiver_group=rgNS, source=src_loc_2D, sampling_interval=sampling_interval)
 
+if not tracerEW.obspy_available():
+    print("\nWARNING: obspy not available, SEGY files will not be written\n")
+elif getMPISizeWorld() > 1:
+    print("\nWARNING: SEGY files cannot be written with multiple processes\n")
 
 t=0.
 mkDir('tmp')
@@ -130,5 +133,7 @@ while t < t_end:
     print(t, locEW(p)[:4], wl.getValue(t))
     if n%5 == 0 : saveSilo("tmp/u_%d.silo"%(n//5,), p=p)
     n+=1
-tracerEW.write('lineEW.sgy')
-if DIM == 3: tracerNS.write('lineNS.sgy')
+if tracerEW.obspy_available() and getMPISizeWorld() == 1:
+    tracerEW.write('lineEW.sgy')
+    if DIM == 3:
+        tracerNS.write('lineNS.sgy')
diff --git a/doc/examples/inversion/synthetic_sonicHTI.py b/doc/examples/inversion/synthetic_sonicHTI.py
index 53909e4..8cff398 100644
--- a/doc/examples/inversion/synthetic_sonicHTI.py
+++ b/doc/examples/inversion/synthetic_sonicHTI.py
@@ -43,12 +43,11 @@ azmths=[  0.,0.,0,  0, 0.]
 
 dt=0.5*U.msec
 
-ne_z=400
+ne_z=40
 
-ne_z=800
 dt=0.5*U.msec
 
-t_end=3.0*U.sec
+t_end=0.008*U.sec #only this low for testing purposes
 frq=15.*U.Hz
 tcenter=None
 sampling_interval=4*U.msec
@@ -156,6 +155,11 @@ loc=Locator(domain,rcv_locations)
 tracerP=SimpleSEGYWriter(receiver_group=rg, source=src_loc_2D, sampling_interval=sampling_interval, text='P')
 tracerQ=SimpleSEGYWriter(receiver_group=rg, source=src_loc_2D, sampling_interval=sampling_interval, text='Q')
 
+if not tracerP.obspy_available():
+    print("\nWARNING: obspy not available, SEGY files will not be written\n")
+elif getMPISizeWorld() > 1:
+    print("\nWARNING: SEGY files cannot be written with multiple processes\n")
+
 t=0.
 OUT_DIR="out%sm%smus"%(int(width_x/ne_x),int(sw.getTimeStepSize()*1000000))
 mkDir(OUT_DIR)
@@ -171,5 +175,7 @@ while t < t_end:
     print(t, wl.getValue(t)," :", Plog[0], Plog[srcEW], Plog[-1])
 timer1=time.time()-timer1
 print("time= %e sec; %s sec per step"%(timer1,timer1/max(sw.n,1)))
-tracerP.write(os.path.join(OUT_DIR,'lineP.sgy'))
-tracerQ.write(os.path.join(OUT_DIR,'lineQ.sgy'))
+
+if tracerP.obspy_available() and getMPISizeWorld() == 1:
+    tracerP.write(os.path.join(OUT_DIR,'lineP.sgy'))
+    tracerQ.write(os.path.join(OUT_DIR,'lineQ.sgy'))
diff --git a/doc/examples/inversion/test_commemi1.py b/doc/examples/inversion/test_commemi1.py
new file mode 100644
index 0000000..fa7051d
--- /dev/null
+++ b/doc/examples/inversion/test_commemi1.py
@@ -0,0 +1,450 @@
+##############################################################################
+#
+# Copyright (c) 2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+
+"""
+Test script to run test model COMMEMI-1
+"""
+
+from __future__ import print_function, division
+
+import matplotlib
+# The following line is here to allow automated testing. Remove or comment if
+# you would like to display the final plot in a window instead.
+matplotlib.use('agg')
+
+
+import esys.downunder.magtel2d as mt2d
+import numpy
+import datetime
+import esys.escript            as escript
+import esys.finley             as finley
+import esys.escript.pdetools   as pdetools
+
+#-------------------------------------------------------------
+# The following functions create the mesh used by this example
+#-------------------------------------------------------------
+
+
+def makeLayerCake(x_start,x_extent,z_layers):
+    # --------------------------------------------------------------------------
+    # DESCRIPTION:
+    # -----------
+    # This is a utility function which sets up a 2D model with N layers.
+    # 
+    # ARGUMENTS:                                                              
+    # ----------
+    # x_start             :: start coordinate of mesh.
+    # x_extent            :: horizontal extent of mesh.
+    # z_layers            :: list with interface coordinates.
+    #
+    # RETURNS:
+    # --------
+    # borders             :: borders of layers. 
+    # air_earth_interface :: line at the air/earth interface.
+    #
+    # AUTHOR:
+    # -------
+    # Ralf Schaa, 
+    # University of Queensland
+    #
+    #
+    # HISTORY:
+    # --------
+    #
+    # --------------------------------------------------------------------------
+
+    import esys.pycad   as pycad     # @UnresolvedImport
+    import esys.weipa   as weipa     # @UnresolvedImport    
+    import esys.finley  as finley    # @UnresolvedImport
+    import esys.escript as escript   # @UnresolvedImport
+
+         
+    # --------------------------------------------------------------------------
+    # Point definitions.
+    # --------------------------------------------------------------------------
+         
+    # Loop through all layers and define the vertices at all interfaces.
+    scale = 1.0
+    points = []
+    for i in range(0,len(z_layers)):
+            # Adjust scale at corners of air/earth interface:
+            if z_layers[i] == 0:
+                scale = 0.15
+            else:
+                scale = 1.0
+            points.append( pycad.Point(x_start           , z_layers[i], 0.0, local_scale = scale) ) # Left-Corner.     
+            points.append( pycad.Point(x_start + x_extent, z_layers[i], 0.0, local_scale = scale) ) # Right-Corner. 
+
+
+    # --------------------------------------------------------------------------
+    # Line definitions.
+    # --------------------------------------------------------------------------
+
+    # Now connect the points to define the horizontal lines for all interfaces:
+    hlines = []
+    for i in range(0,len(points),2):
+        if i <= len(points)-1:
+            hlines.append( pycad.Line(points[i],points[i+1]) )     
+    
+    # Now connect the points to define the vertical lines for all interfaces:
+    vlines_left = []
+    for i in range(0,len(points),2):
+        if i <= len(points)-3:
+            vlines_left.append( pycad.Line(points[i],points[i+2]) )     
+
+    vlines_right = []
+    for i in range(0,len(points),2):
+        if i <= len(points)-4:
+            vlines_right.append( pycad.Line(points[i+1],points[i+3]) )     
+
+
+
+    # --------------------------------------------------------------------------
+    # Curveloop and Area definitions.
+    # --------------------------------------------------------------------------
+
+    # Join line segments for each layer.          
+    borders = []
+    for i in range(0,len(z_layers)-1):
+        border = [ hlines[i],vlines_right[i],-hlines[i+1],-vlines_left[i] ]
+        borders.append( pycad.CurveLoop( border) )       
+
+
+
+    # --------------------------------------------------------------------------
+    # Return values.
+    # --------------------------------------------------------------------------
+
+    # Explicitly specify the air-earth-boundary:
+    air_earth_interface = hlines[1]
+    
+    return borders, air_earth_interface
+                                      
+#_______________________________________________________________________________
+
+
+
+
+def setupMesh(mode, x_start, x_extent, a_extent, z_layers, anomaly_coord, elem_sizes):
+    # --------------------------------------------------------------------------
+    # DESCRIPTION:
+    # -----------
+    # This is a utility function which sets up the COMMEMI-1 mesh.
+    # 
+    #
+    # ARGUMENTS:                                                              
+    # ----------
+    # mode           :: TE or TM mode.
+    # x_start        :: horizontal start-point mesh.
+    # x_extent       :: horizontal extent of mesh.
+    # a_extent       :: vertical extent of air-layer.
+    # z_layers       :: list with coordinates of top-interfaces in Z-direction, incl. basement.
+    # anomaly_coord  :: dictionary with coordinate tuples of anomalies, counterclockwise.
+    # elem_sizes     :: mesh element sizes, large, normal, small. 
+    #
+    # RETURNS:
+    # --------
+    # <Nothing> A mesh file is written to the output folder.
+    # 
+    #
+    # AUTHOR:
+    # -------
+    # Ralf Schaa, 
+    # The University of Queensland
+    #
+    #
+    # HISTORY:
+    # --------
+    #
+    # --------------------------------------------------------------------------
+
+
+
+    # --------------------------------------------------------------------------
+    # Imports.
+    # --------------------------------------------------------------------------
+    
+    # System imports.
+    import math
+    
+    # Escript modules.
+    import esys.pycad              as pycad     # @UnresolvedImport   
+    import esys.finley             as finley    # @UnresolvedImport
+    import esys.escript            as escript   # @UnresolvedImport
+    import esys.weipa              as weipa     # @UnresolvedImport    
+    # <Note>: "@UnresolvedImport" ignores any warnings in Eclipse/PyDev (PyDev has trouble with external libraries).
+    
+    # Warn about magnetotelluric TM mode:
+    if mode.lower() == 'tm':
+        print("TM mode not yet supported")
+        return None
+        
+    # --------------------------------------------------------------------------
+    # Anomaly border.
+    # --------------------------------------------------------------------------
+     
+    #<Note>: define the anomaly which must be 'cut out' in the main mesh. 
+    
+    
+    # Prepare list to store the anomaly borders:
+    border_anomaly = []
+                
+    # Cycle anomaly dictionary and define the border for each.
+    for anomaly in anomaly_coord:
+        
+        # Extract the coordinates for current key:
+        coord = anomaly_coord[anomaly]
+            
+        # Points defining the anomaly from left-top.
+        points0 = []
+        for i in range( 0, len(coord) ):            
+            points0.append(pycad.Point(coord[i][0], coord[i][1], 0.0))
+    
+        # Define the line segments connecting the points.
+        lines0 = []
+        for i in range( 0, len(points0)-1 ):
+            lines0.append(pycad.Line(points0[i],points0[i+1]))
+        # Connect the last segment from end to start:    
+        lines0.append(pycad.Line(points0[-1], points0[0])) 
+        
+        # And define the border of the anomalous area.
+        border_anomaly.append( pycad.CurveLoop(*lines0) ) 
+    #___________________________________________________________________________
+
+
+
+
+    # --------------------------------------------------------------------------
+    # Get the borders for each layer (air & host).
+    # --------------------------------------------------------------------------
+
+    # Borders around layers and the air/earth interface.
+    borders, air_earth_interface = makeLayerCake(x_start,x_extent,z_layers)
+
+
+
+
+
+    # --------------------------------------------------------------------------
+    # Specification of number of elements in domains.
+    # --------------------------------------------------------------------------
+        
+    #<Note>: specifying the number of mesh elements is somewhat heuristic 
+    #        and is dependent on the mesh size and the anomaly sizes. 
+
+    coord = anomaly_coord["anomaly_1"]
+     
+    # First get the max-length of the anomaly to specify the number of elements.
+    length = max(( abs(coord[2][0]-coord[0][0]) ),  # X-length
+                 ( abs(coord[2][1]-coord[0][1]) ))  # Y-length                 
+    
+    # Specify number of elements in air, anomaly and on air/earth interface:        
+    nr_elements_air       = 1 * x_extent / elem_sizes["large"]
+    nr_elements_anomaly   = 2 * length   / elem_sizes["small"]
+    nr_elements_interface = 4 * x_extent / elem_sizes["small"]
+    #___________________________________________________________________________
+    
+    
+    
+     
+    # --------------------------------------------------------------------------
+    # Domain definitions.
+    # --------------------------------------------------------------------------
+
+    # Define the air & layer areas; note the 'holes' specifiers.
+    domain_air     = pycad.PlaneSurface( borders[0] )   
+    domain_host    = pycad.PlaneSurface( borders[1] , holes = [ border_anomaly[0] ] )    
+    domain_anomaly = pycad.PlaneSurface( border_anomaly[0] )    
+        
+    # Specify the element sizes in the domains and along the interface.
+    #<Note>: Sizes must be assigned in the order as they appear below:    
+    domain_air.setElementDistribution( nr_elements_air )         
+    domain_anomaly.setElementDistribution( nr_elements_anomaly ) 
+    air_earth_interface.setElementDistribution( nr_elements_interface )
+
+    # Ready to define the mesh-design..
+    design2D = pycad.gmsh.Design(dim=2, element_size=elem_sizes["normal"] , keep_files=False)
+    # ..and also specify the domains for tagging with property values later on:
+    design2D.addItems( pycad.PropertySet("domain_air"    , domain_air),
+                       pycad.PropertySet("domain_host"   , domain_host),
+                       pycad.PropertySet("domain_anomaly", domain_anomaly) ) 
+    
+    # Now define the unstructured finley-mesh..
+    model2D = finley.MakeDomain(design2D)
+    #___________________________________________________________________________
+
+
+    return model2D    
+    #___________________________________________________________________________
+    
+def generateCommemi1Mesh():
+    # --------------------------------------------------------------------------
+    # Geometric mesh parameters.
+    # --------------------------------------------------------------------------
+
+    # Mesh extents.
+    a_extent = 20000    # 20km - Vertical extent of air-layer in (m).
+    z_extent = 20000    # 20km - Vertical extent of subsurface in (m).
+    x_extent = 40000    # 40km - Horizontal extent of mesh in (m).
+
+    # Start point of mesh.
+    x_start = 0 #-x_extent/2.0
+
+    # Define interface locations in z-direction: top, air/earth, basement. 
+    z_layers    = [   a_extent, 0, -z_extent]
+
+    # Mesh elements sizes.
+    elem_sizes = { 
+                'large' : 10.00 * x_extent/100.0, # 10.00% of x_extent.
+                'normal': 05.00 * x_extent/100.0, # 5.00% of x_extent.
+                'small' : 00.50 * x_extent/100.0  # 0.50% of x_extent.
+                }
+    #___________________________________________________________________________
+
+
+
+
+    # --------------------------------------------------------------------------
+    # Geometric anomaly parameters.
+    # --------------------------------------------------------------------------
+
+    # Extents of the rectangular 2D anomaly.
+    x_anomaly = 1000    # 1km - Horizontal extent of anomaly in (m).
+    z_anomaly = 2000    # 2km - Vertical extent of anomaly in (m).
+
+    # Coordinates of the rectangular 2D anomaly.
+    ya1 = -250                                    # Top
+    ya2 = -z_anomaly + ya1                        # Bottom
+    xa1 = x_start + x_extent/2.0 - x_anomaly/2.0  # Left
+    xa2 = x_start + x_extent/2.0 + x_anomaly/2.0  # Right
+
+    # Save in dictionary as a list of tuples from left-top corner, counterclockwise.
+    anomaly_coord = { 
+                    'anomaly_1': ([xa1,ya1],[xa1,ya2],[xa2,ya2],[xa2,ya1]) 
+                    }
+    #___________________________________________________________________________
+
+
+
+
+    # --------------------------------------------------------------------------
+    # Setup the COMMEMI-1 mesh.
+    # --------------------------------------------------------------------------
+
+    # This creates the mesh and saves it to the output folder.
+    return setupMesh("TE", x_start, x_extent, a_extent, z_layers,  anomaly_coord, elem_sizes)
+    #___________________________________________________________________________
+
+
+
+#-------------------------------------------------------------
+# End of mesh set up functions
+#-------------------------------------------------------------
+
+
+
+# ---
+# Initialisations
+# ---
+
+# Get timing:
+startTime = datetime.datetime.now()
+
+# Mode (TE includes air-layer, whereas TM does not):
+mode = 'TE'
+
+# Read the mesh file and define the 'finley' domain:
+#mesh_file = "data/commemi1_te.fly"
+#domain = finley.ReadMesh(mesh_file, numDim=2)
+if escript.getEscriptParamInt('GMSH_SUPPORT'):
+    domain = generateCommemi1Mesh()
+
+# Sounding frequencies (in Hz):
+freq_def = {"high":1.0e+1,"low":1.0e+1,"step":1}
+# Frequencies will be mapped on a log-scale from
+# 'high' to 'low' with 'step' points per decade.
+# (also only one frequency must be passed via dict)
+
+# Step sizes for sampling along vertical and horizontal axis (in m):
+xstep=400
+zstep=200
+
+
+
+# ---
+# Resistivity model
+# ---
+
+# Resistivity values assigned to tagged regions (in Ohm.m):
+rho  = [
+        1.0e+14, # 0: air
+        100.0  , # 1: host
+          0.5    # 2: anomaly
+       ]
+
+# Tags must match those in the file:
+tags = ["domain_air", "domain_host", "domain_anomaly"]
+
+
+# ---
+# Layer definitions for 1D response at boundaries.
+# ---
+
+# List with resistivity values for left and right boundary.
+rho_1d_left  = [ rho[0], rho[1] ]
+rho_1d_rght  = [ rho[0], rho[1] ]
+
+# Associated interfaces for 1D response left and right (must match the mesh file).
+ifc_1d_left = [ 20000, 0, -20000]
+ifc_1d_rght = [ 20000, 0, -20000]
+
+# Save in dictionary with layer interfaces and resistivities left and right:
+ifc_1d = {"left":ifc_1d_left , "right":ifc_1d_rght}
+rho_1d = {"left":rho_1d_left , "right":rho_1d_rght}
+
+
+
+# ---
+# Run MT_2D
+# ---
+
+# Class options:
+mt2d.MT_2D._solver = "DIRECT"
+mt2d.MT_2D._debug   = False
+
+if mt2d.MT_2D._solver == "DIRECT" and escript.getMPISizeWorld() > 1:
+    print("Direct solvers and multiple MPI processes are not currently supported")
+elif mt2d.MT_2D._solver == "DIRECT" and not escript.getEscriptParamInt('PASO_DIRECT'):
+    print("escript was not built with support for direct solvers, aborting")
+elif not escript.getEscriptParamInt('GMSH_SUPPORT'):
+    print("This example requires gmsh")
+else:
+    # Instantiate an MT_2D object with required & optional parameters:
+    obj_mt2d = mt2d.MT_2D(domain, mode, freq_def, tags, rho, rho_1d, ifc_1d,
+            xstep=xstep ,zstep=zstep, maps=None, plot=True)
+
+    # Solve for fields, apparent resistivity and phase:
+    mt2d_fields, arho_2d, aphi_2d = obj_mt2d.pdeSolve()
+
+
+    #
+    print(datetime.datetime.now()-startTime)
+
+
+    print("Done!")
+
+
+
diff --git a/doc/examples/inversion/test_commemi4.py b/doc/examples/inversion/test_commemi4.py
new file mode 100644
index 0000000..cafbebf
--- /dev/null
+++ b/doc/examples/inversion/test_commemi4.py
@@ -0,0 +1,553 @@
+##############################################################################
+#
+# Copyright (c) 2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+"""
+Test script to run test model COMMEMI-4
+"""
+
+from __future__ import print_function, division
+
+import matplotlib
+# The following line is here to allow automated testing. Remove or comment if
+# you would like to display the final plot in a window instead.
+matplotlib.use('agg')
+
+
+import esys.downunder.magtel2d as mt2d
+import numpy
+import datetime
+import esys.escript            as escript
+import esys.finley             as finley
+import esys.escript.pdetools   as pdetools
+
+
+
+
+
+
+def setupMesh(mode, coord, elem_sizes):         
+    #---------------------------------------------------------------------------
+    # DESCRIPTION:
+    # -----------
+    # This is a utility function which sets up the COMMEMI-4 mesh.
+    # 
+    #
+    # ARGUMENTS:                                                              
+    # ----------
+    # mode       :: TE or TM mode.
+    # coord      :: dictionary with coordinate tuples.
+    # elem_sizes :: mesh element sizes, large, normal, small. 
+    #
+    # RETURNS:
+    # --------
+    # <Nothing> A mesh file is written to the output folder.
+    # 
+    #
+    # AUTHOR:
+    # -------
+    # Ralf Schaa, 
+    # University of Queensland
+    #
+    #---------------------------------------------------------------------------
+
+
+
+    #---------------------------------------------------------------------------
+    # Imports.
+    #---------------------------------------------------------------------------
+        
+    import esys.pycad              as pycad     # @UnresolvedImport   
+    import esys.finley             as finley    # @UnresolvedImport
+    import esys.escript            as escript   # @UnresolvedImport
+    import esys.weipa              as weipa     # @UnresolvedImport    
+    # <Note>: "@UnresolvedImport" ignores any warnings in Eclipse/PyDev (PyDev has trouble with external libraries).
+
+
+
+    model = "COMMEMI-4"
+
+    print("Preparing the mesh " + model + " ...")
+    print("")
+    
+    # Warn about magnetotelluric TM mode:
+    if mode.lower() == 'tm':
+        print("TM mode not yet supported")
+        return
+
+
+        
+    # Path to write the mesh:
+    outpath = "../out/commemi4"
+    
+    
+        
+     
+    # --------------------------------------------------------------------------
+    # Initialisations.
+    # --------------------------------------------------------------------------
+
+    # Get coordinates from dictionary as list of tuples  
+    a0 = coord["air"]   
+    l1 = coord["lyr1"]  
+    s1 = coord["slab"]  
+    b1 = coord["basin"] 
+    l2 = coord["lyr2"]  
+    l3 = coord["lyr3"]  
+    
+    # Mesh length from top-boundary.
+    x_extent = abs(a0[3][0]-a0[0][0])
+    
+    
+
+        
+    # --------------------------------------------------------------------------
+    # Point definitions.
+    # --------------------------------------------------------------------------
+    
+    #<Note>: define all points spanning the mesh, anomalies and layers; 
+    #        note also shared domain points must be defined only once.
+ 
+ 
+    # Mesh top boundary.    
+    air = []
+    air.append( pycad.Point( *a0[0] ) )    # 0: left  , top    (@ boundary)
+    air.append( pycad.Point( *a0[3] ) )    # 3: right , top    (@ boundary)
+    
+    
+    # First-layer.
+    ly1 = []
+    ly1.append( pycad.Point( *l1[0] ) )    # 0: left  , top    (@ air/earth interface)                       
+    ly1.append( pycad.Point( *l1[1] ) )    # 1: left  , bottom (@ boundary)                       
+    ly1.append( pycad.Point( *l1[2] ) )    # 2: right , bottom (@ slab/basin)   
+    ly1.append( pycad.Point( *l1[3] ) )    # 3: right , bottom (@ boundary)     
+    ly1.append( pycad.Point( *l1[4] ) )    # 4: right , top    (@ air/earth interface)                 
+
+   
+    # Slab.
+    sl1 = []
+    sl1.append( ly1[1]                )    # 0: left  , top    (@ boundary)                       
+    sl1.append( pycad.Point( *s1[1] ) )    # 1: left  , bottom (@ boundary)                       
+    sl1.append( pycad.Point( *s1[2] ) )    # 2: right , bottom (@ slab/basin)                         
+    sl1.append( ly1[2]                )    # 3: right , top    (@ slab/basin)                       
+    
+    
+    # Basin.
+    bs1 = []
+    bs1.append( ly1[2]                )    # 0: left  , top    (@ slab/basin)
+    bs1.append( sl1[2]                )    # 1: left  , centre (@ slab/basin) 
+    bs1.append( pycad.Point( *b1[2] ) )    # 2: left  , bottom (@ lyr1/basin)                       
+    bs1.append( pycad.Point( *b1[3] ) )    # 3: centre, bottom (@ lyr1/basin)                       
+    bs1.append( pycad.Point( *b1[4] ) )    # 4: edge  , bottom (@ lyr1/basin)                       
+    bs1.append( pycad.Point( *b1[5] ) )    # 5: right , bottom (@ boundary)
+    bs1.append( ly1[3]                )    # 6: right , top 
+    
+    
+    # Second-Layer.
+    ly2 = []
+    ly2.append( sl1[1]                )    # 0: left  , top    (@ lyr2/slab)
+    ly2.append( pycad.Point( *l2[1] ) )    # 1: left  , bottom (@ boundary) 
+    ly2.append( pycad.Point( *l2[2] ) )    # 2: right , bottom (@ boundary)                       
+    ly2.append( bs1[5]                )    # 3: right , top    (@ basin/boundary)                       
+    ly2.append( bs1[4]                )    # 4: edge  , top    (@ lyr2/basin)                      
+    ly2.append( bs1[3]                )    # 5: centre, top    (@ lyr2/basin)
+    ly2.append( bs1[2]                )    # 6: left  , top    (@ lyr2/basin)
+    ly2.append( sl1[2]                )    # 7: left  , centre (@ slab/basin) 
+    
+    
+    # Basement layer.       
+    ly3 = []    
+    ly3.append( ly2[1]                )    # 0: left  , top    (@ boundary)
+    ly3.append( pycad.Point( *l3[1] ) )    # 1: left  , bottom (@ boundary) 
+    ly3.append( pycad.Point( *l3[2] ) )    # 2: right , bottom (@ boundary) 
+    ly3.append( ly2[2]                )    # 3: right , top    (@ boundary)
+    #___________________________________________________________________________
+
+
+
+
+    # --------------------------------------------------------------------------
+    # Line definitions.
+    # --------------------------------------------------------------------------
+
+    #<Note>: connects the points to define lines counterclockwise;    
+    #        shared lines are re-used to ensure that all domains  
+    #        are recognised as parts of the same mesh. 
+        
+    # Air.
+    ln0 = []
+    ln0.append( pycad.Line(air[0], ly1[0]) ) # 0 left-top     to left-bottom.
+    ln0.append( pycad.Line(ly1[0], ly1[4]) ) # 1 left-bottom  to right-bottom (air-earth interface).
+    ln0.append( pycad.Line(ly1[4], air[1]) ) # 2 right-bottom to right-top.
+    ln0.append( pycad.Line(air[1], air[0]) ) # 3 right-top    to left-top.
+        
+    # Top Layer.
+    ln1 = []
+    ln1.append( pycad.Line(ly1[0], ly1[1]) ) # 0 left-top         to left-bottom.   
+    ln1.append( pycad.Line(ly1[1], ly1[2]) ) # 1 left-bottom      to start-slab/basin.  
+    ln1.append( pycad.Line(ly1[2], ly1[3]) ) # 2 start-slab/basin to basin-boundary 
+    ln1.append( pycad.Line(ly1[3], ly1[4]) ) # 3 basin-boundary   to right-top.     
+    ln1.append( -ln0[1]                    ) # 4 right-top        to left-top.
+
+ 
+    # Slab.
+    ln2 = []
+    ln2.append( pycad.Line(sl1[0], sl1[1]) ) # 0 left-top     to left-bottom.   
+    ln2.append( pycad.Line(sl1[1], sl1[2]) ) # 1 left-bottom  to right-bottom.         
+    ln2.append( pycad.Line(sl1[2], sl1[3]) ) # 2 right-bottom to right-top.            
+    ln2.append( -ln1[1]                    ) # 3 right-top    to left-top
+
+
+    # Basin.
+    ln3 = []
+    ln3.append( -ln2[2]                    ) # 0 left-top         to left-centre.         
+    ln3.append( pycad.Line(bs1[1], bs1[2]) ) # 1 left-centre      to left-bottom.         
+    ln3.append( pycad.Line(bs1[2], bs1[3]) ) # 2 left-bottom      to mid-bottom.          
+    ln3.append( pycad.Line(bs1[3], bs1[4]) ) # 3 mid-bottom       to right-mid-top.       
+    ln3.append( pycad.Line(bs1[4], bs1[5]) ) # 4 right-mid-top    to right-bottom.        
+    ln3.append( pycad.Line(bs1[5], bs1[6]) ) # 5 right-bottom     to right-top.           
+    ln3.append( -ln1[2]                    ) # 6 right-top        to right-slab/basin.    
+    
+    
+    # Layer below.
+    ln4 = []
+    ln4.append( pycad.Line(ly2[0], ly2[1]) ) # 0 left-top      to left-bottom.        
+    ln4.append( pycad.Line(ly2[1], ly2[2]) ) # 1 left-bottom   to right-bottom.        
+    ln4.append( pycad.Line(ly2[2], ly2[3]) ) # 2 right-bottom  to right-top.            
+    ln4.append( -ln3[4]                    ) # 3 right-top     to right-mid-top.       
+    ln4.append( -ln3[3]                    ) # 4 right-mid-top to mid-bottom.          
+    ln4.append( -ln3[2]                    ) # 5 mid-bottom    to left-bottom.         
+    ln4.append( -ln3[1]                    ) # 6 left-bottom   to left-centre.         
+    ln4.append( -ln2[1]                    ) # 7 left-centre   to left-top.            
+        
+    # Basement layer.
+    ln5 = []
+    ln5.append( pycad.Line(ly3[0], ly3[1]) ) # 0 left-top     to left-bottom.
+    ln5.append( pycad.Line(ly3[1], ly3[2]) ) # 1 left-bottom  to right-bottom.
+    ln5.append( pycad.Line(ly3[2], ly3[3]) ) # 2 right-bottom to right-top.
+    ln5.append( -ln4[1]                    ) # 3 right-top    to left-top.
+    #___________________________________________________________________________
+
+
+
+
+    # --------------------------------------------------------------------------
+    # Domain definitions.
+    # --------------------------------------------------------------------------
+    
+       
+    # First define all borders.       
+    borders = []   
+    borders.append( pycad.CurveLoop(*ln0) )   
+    borders.append( pycad.CurveLoop(*ln1) )   
+    borders.append( pycad.CurveLoop(*ln2) )   
+    borders.append( pycad.CurveLoop(*ln3) )    
+    borders.append( pycad.CurveLoop(*ln4) )    
+    borders.append( pycad.CurveLoop(*ln5) )    
+
+    # And next the domains.
+    domains = []
+    for i in range( len(borders) ):        
+        domains.append( pycad.PlaneSurface(borders[i]) ) 
+    #___________________________________________________________________________
+
+
+
+
+    # --------------------------------------------------------------------------
+    # Set element sizes in domains.
+    # --------------------------------------------------------------------------
+    
+    # Horizontal extents of segments along slab and basin:
+    x_extents = []
+    x_extents.append( l1[2][0] - l1[0][0] ) # 0
+    x_extents.append( l1[3][0] - l1[2][0] ) # 1
+
+    # Number of elements in the air-domain, first-layer as well as slab- and basin-domain.
+    domains[0].setElementDistribution(     x_extent / elem_sizes["large"]   )
+    domains[1].setElementDistribution(     x_extent / (elem_sizes["small"]) )
+    domains[2].setElementDistribution( 0.4*x_extent / (elem_sizes["small"]) )
+    domains[3].setElementDistribution( 0.5*x_extent / (elem_sizes["small"]) )
+    #<Note> slab and basin multiplied by approximate ratio of their x_extent.
+    #___________________________________________________________________________
+
+
+
+
+    #---------------------------------------------------------------------------
+    # Now define the gmsh 'design' object. 
+    #---------------------------------------------------------------------------
+
+    design2D = pycad.gmsh.Design(dim=2, element_size=elem_sizes['large'], keep_files=False)
+    
+    # Also specify the domains for tagging with property values later on:
+    design2D.addItems(   
+    pycad.PropertySet( "air"   , domains[0]) ,   
+    pycad.PropertySet( "lyr1"  , domains[1]) ,   
+    pycad.PropertySet( "slab"  , domains[2]) ,   
+    pycad.PropertySet( "basin" , domains[3]) ,
+    pycad.PropertySet( "lyr2"  , domains[4]) ,
+    pycad.PropertySet( "lyr3"  , domains[5]) )   
+    
+    # Now define the unstructured finley-mesh..
+    model2D = finley.MakeDomain(design2D)  
+    #___________________________________________________________________________
+
+
+    return model2D
+
+def generateCommemi4Mesh():
+    #---------------------------------------------------------------------------
+    # DESCRIPTION:
+    # ------------
+    # Script for preparing the COMMEMI-4 2D model.
+    #
+    # The COMMEMI-4 2D model consist of a 3-layered halfspace,
+    # hosting an anomalous horizontal slab and a basin-structure 
+    # in the first layer. 
+    #
+    # References:
+    # -----------
+    # See Franke A., p.89, 2003 (MSc. Thesis).
+    # 
+    # Antje Franke, "Zweidimensionale Finite-Elemente-Modellierung 
+    # niederfrequenter elektromagnetischer Felder in der Fernzone", 
+    # Diplomarbeit (MSc.), 2003, Technische Universitaet Freiberg.
+    #
+    # --------------------------------------------------------------------------
+
+
+    #---------------------------------------------------------------------------
+    # Geometric mesh parameters.
+    # --------------------------------------------------------------------------
+
+    # Horizontal extent and start point of mesh.
+    a_extent = 50000   # 50km - Vertical extent of air-layer in (m).
+    z_extent = 50000   # 50km - Vertical extent of subsurface in (m).
+    x_extent = 60000   # 60km - Horizontal extent of model in (m).
+
+    # Start point of mesh.
+    x_start  = 0 #-x_extent/2.0
+
+    # Mesh elements sizes.
+    elem_sizes = { 
+                'large' : 4.00 * x_extent/100.0, # 
+                'normal': 2.00 * x_extent/100.0, # 
+                'small' : 0.25 * x_extent/100.0  # 
+                }
+   #____________________________________________________________________________
+
+
+
+
+
+    #---------------------------------------------------------------------------
+    # Coordinate definitions.
+    # --------------------------------------------------------------------------
+
+    # X-coordinates of all domain corners (in order of appearance, left to right).
+    x0 = x_start                          # left         (@ boundary)
+    x1 = x_start + 24000                  # centre       (@ slab/basin)
+    x2 = x_start + 24000 + 8000           # edge-bottom  (@ slab/lyr1)
+    x3 = x_start + 24000 + 8000 + 3000    # edge-top     (@ slab/lyr1)
+    x4 = x_start + x_extent               # right        (@ boundary) 
+
+    # Y-coordinates of all domain corners (in order of appearance, top to bottom).
+    y0 = a_extent                         # top          
+    y1 = 0                                # centre       (@ air/earth)
+    y2 =-500                              # lyr1-bottom  (@ boundary-left) 
+    y3 =-1000                             # basin-bottom (@ boundary-right) 
+    y4 =-2000                             # slab-bottom  (@ boundary-left) 
+    y5 =-4000                             # basin-bottom (@ centre)  
+    y6 =-25000                            # lyr1-bottom 
+    y7 =-z_extent                         # bottom
+
+    # Save in dictionary as a list of tuples for each domain, from left-top corner, counterclockwise.
+    coord = {                                 
+            'air'  : ([x0, y0, 0],    # 0: left  , top
+                        [x0, y1, 0],    # 1: left  , bottom (@ air/earth)
+                        [x4, y1, 0],    # 2: right , bottom (@ air/earth)
+                        [x4, y0, 0]),   # 3: right , top
+                                        
+            'lyr1' : ([x0, y1, 0],    # 0: left  , top    
+                        [x0, y2, 0],    # 1: left  , bottom 
+                        [x1, y2, 0],    # 2: right , bottom (@ slab/basin)
+                        [x4, y2, 0],    # 3: right , bottom (@ boundary)
+                        [x4, y1, 0]),   # 4: right , top 
+                                            
+            'slab' : ([x0, y2, 0],    # 0: left  , top    
+                        [x0, y4, 0],    # 1: left  , bottom 
+                        [x1, y4, 0],    # 2: right , bottom (@ slab/basin)
+                        [x1, y2, 0]),   # 3: right , top    (@ slab/basin)
+                                    
+            'basin': ([x1, y2, 0],    # 0: left  , top    (@ slab/basin)
+                        [x1, y4, 0],    # 1: left  , centre (@ slab/basin) 
+                        [x1, y5, 0],    # 2: left  , bottom (@ lyr1/basin) 
+                        [x2, y5, 0],    # 3: centre, bottom (@ lyr1/basin)        
+                        [x3, y3, 0],    # 4: edge  , bottom (@ lyr1/basin)
+                        [x4, y3, 0],    # 5: right , bottom (@ boundary)
+                        [x4, y2, 0]),   # 6: right , top
+                                    
+            'lyr2' : ([x0, y4, 0],    # 0: left  , top    
+                        [x0, y6, 0],    # 1: left  , bottom 
+                        [x4, y6, 0],    # 2: right , bottom 
+                        [x4, y3, 0],    # 3: right , top    (@ basin/boundary)
+                        [x3, y3, 0],    # 4: edge  , top    (@ lyr2/basin)
+                        [x2, y5, 0],    # 5: centre, top    (@ lyr2/basin)
+                        [x1, y5, 0],    # 6: left  , top    (@ lyr2/basin)
+                        [x1, y4, 0]),   # 7: left  , centre (@ slab/basin)
+                                    
+            'lyr3' : ([x0, y6, 0],    # 0: left  , top    
+                        [x0, y7, 0],    # 1: left  , bottom 
+                        [x4, y7, 0],    # 2: right , bottom 
+                        [x4, y6, 0]),   # 3: right , top                   
+            }
+    #___________________________________________________________________________
+
+
+
+
+
+
+
+
+
+    #---------------------------------------------------------------------------
+    # Setup the COMMEMI-4 mesh.
+    #---------------------------------------------------------------------------
+
+    # This creates the mesh and saves it to the output folder.
+    return setupMesh("TE", coord, elem_sizes)
+    #___________________________________________________________________________
+
+
+
+# ---
+# Initialisations
+# ---
+
+# Get timing:
+startTime = datetime.datetime.now()
+
+# Mode (TE includes air-layer, whereas TM does not):
+mode = 'TE'
+
+# Read the mesh file and define the 'finley' domain:
+#mesh_file = "commemi4_tm.fly"
+#domain = finley.ReadMesh(mesh_file, numDim=2)
+if escript.getEscriptParamInt('GMSH_SUPPORT'):
+    domain=generateCommemi4Mesh()
+
+# Sounding frequencies (in Hz):
+freq_def = {"high":1.0e+0,"low":1.0e-0,"step":1}
+# Frequencies will be mapped on a log-scale from
+# 'high' to 'low' with 'step' points per decade.
+# (alternatively, a single frequency may be passed via the dict)
+
+# Step sizes for sampling along vertical and horizontal axis (in m):
+xstep=300
+zstep=250
+
+
+
+# ---
+# Resistivity model
+# ---
+
+# Resistivity values assigned to tagged regions (in Ohm.m):
+rho  = [
+        1.0e+14, # 0: air     1.0e-30
+        25.0   , # 1: lyr1    0.04
+        10.0   , # 2: slab    0.1
+        2.5    , # 3: basin   0.4
+        1000.0 , # 4: lyr2    0.001
+        5.0      # 5: lyr3    0.2
+       ]
+
+# Tags must match those in the file:
+tags = ["air", "lyr1", "slab", "basin", "lyr2", "lyr3"]
+
+# Optional user defined map of resistivity:
+def f4(x,z,r): return escript.sqrt(escript.sqrt(x*x+z*z))/r
+maps = [None, None, None, None, f4, None]
+
+
+
+# ---
+# Layer definitions for 1D response at boundaries.
+# ---
+
+# List with resistivity values for left and right boundary.
+rho_1d_left  = [ rho[0], rho[1], rho[2], rho[4], rho[5] ]
+rho_1d_rght  = [ rho[0], rho[1], rho[3], rho[4], rho[5] ]
+
+# Associated interfaces for 1D response left and right (must match the mesh file).
+ifc_1d_left = [ 50000, 0, -500, -2000, -25000, -50000]
+ifc_1d_rght = [ 50000, 0, -500, -1000, -25000, -50000]
+
+# Save in dictionary with layer interfaces and resistivities left and right:
+ifc_1d = {"left":ifc_1d_left , "right":ifc_1d_rght}
+rho_1d = {"left":rho_1d_left , "right":rho_1d_rght}
+
+
+
+# ---
+# Adjust parameters here for TM mode
+# ---
+
+# Simply delete first element from lists:
+if mode.upper() == 'TM':
+  tags.pop(0)
+  rho.pop(0)
+  rho_1d['left'].pop(0)
+  rho_1d['right'].pop(0)
+  ifc_1d['left'].pop(0)
+  ifc_1d['right'].pop(0)
+  if maps is not None:
+    maps.pop(0)
+
+
+
+# ---
+# Run MT_2D
+# ---
+
+# Class options:
+mt2d.MT_2D._solver = "DIRECT"
+mt2d.MT_2D._debug   = False
+
+if mt2d.MT_2D._solver == "DIRECT" and escript.getMPISizeWorld() > 1:
+    print("Direct solvers and multiple MPI processes are not currently supported")
+elif mt2d.MT_2D._solver == "DIRECT" and not escript.getEscriptParamInt('PASO_DIRECT'):
+    print("escript was not built with support for direct solvers, aborting")
+elif not escript.getEscriptParamInt('GMSH_SUPPORT'):
+    print("This example requires gmsh")
+else:
+
+    # Instantiate an MT_2D object with required & optional parameters:
+    obj_mt2d = mt2d.MT_2D(domain, mode, freq_def, tags, rho, rho_1d, ifc_1d,
+            xstep=xstep ,zstep=zstep, maps=None, plot=True)
+
+    # Solve for fields, apparent resistivity and phase:
+    mt2d_fields, arho_2d, aphi_2d = obj_mt2d.pdeSolve()
+
+
+    #
+    print("Runtime:", datetime.datetime.now()-startTime)
+
+
+    print("Done!")
+
+
+
diff --git a/doc/examples/usersguide/helmholtz.py b/doc/examples/usersguide/dirac.py
similarity index 50%
copy from doc/examples/usersguide/helmholtz.py
copy to doc/examples/usersguide/dirac.py
index 464140c..6997a54 100644
--- a/doc/examples/usersguide/helmholtz.py
+++ b/doc/examples/usersguide/dirac.py
@@ -1,8 +1,7 @@
-from __future__ import division
-from __future__ import print_function
+
 ##############################################################################
 #
-# Copyright (c) 2003-2015 by The University of Queensland
+# Copyright (c) 2012-2015 by The University of Queensland
 # http://www.uq.edu.au
 #
 # Primary Business: Queensland, Australia
@@ -14,42 +13,45 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+"""
+Example demonstrating the use of Dirac delta functions.
+"""
+from __future__ import division, print_function
 
-__copyright__="""Copyright (c) 2003-2015 by The University of Queensland
+__copyright__="""Copyright (c) 2012-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
 __license__="""Licensed under the Open Software License version 3.0
 http://www.opensource.org/licenses/osl-3.0.php"""
 __url__="https://launchpad.net/escript-finley"
 
+
 from esys.escript import *
-from esys.escript.linearPDEs import LinearPDE
+from esys.escript.linearPDEs import LinearSinglePDE
+from esys.weipa import saveVTK
+
 try:
     from esys.finley import Rectangle
     HAVE_FINLEY = True
 except ImportError:
     HAVE_FINLEY = False
-from esys.weipa import saveVTK
-
+# generate domain:
 if not HAVE_FINLEY:
     print("Finley module not available")
 else:
-    #... set some parameters ...
-    kappa=1.
-    omega=0.1
-    eta=10.
-    #... generate domain ...
-    mydomain = Rectangle(l0=5.,l1=1.,n0=50, n1=10)
-    #... open PDE and set coefficients ...
-    mypde=LinearPDE(mydomain)
-    mypde.setSymmetryOn()
-    n=mydomain.getNormal()
-    x=mydomain.getX()
-    mypde.setValue(A=kappa*kronecker(mydomain),D=omega,Y=omega*x[0], \
-                   d=eta,y=kappa*n[0]+eta*x[0])
-    #... calculate error of the PDE solution ...
-    u=mypde.getSolution()
-    print("error is ",Lsup(u-x[0]))
-    # output should be similar to "error is 1.e-7"
-    saveVTK("x0.vtu",sol=u)
- 
+    mydomain=Rectangle(30,30, l0=3, l1=2, 
+                diracPoints=[(1.,1.), (2.,1.)],  diracTags=['in', 'out'])
+    # fix the solution on the boundary
+    x = mydomain.getX()
+    gammaD = whereZero(x[0])+whereZero(x[1])+whereZero(x[0]-3.)+whereZero(x[1]-2.)
+    # define Dirac delta source strengths at the tagged points
+    s=Scalar(0., DiracDeltaFunctions(mydomain))
+    s.setTaggedValue('in', +1.)
+    s.setTaggedValue('out', -1.)
+    # define PDE and get its solution u
+    mypde = LinearSinglePDE(domain=mydomain)
+    mypde.setValue(q=gammaD, A=kronecker(2), y_dirac=s)
+    u = mypde.getSolution()
+    print("Solution = ",str(u))
+    # write u to an external file
+    saveVTK("u.vtu",sol=u)
diff --git a/doc/examples/usersguide/fluid.py b/doc/examples/usersguide/fluid.py
index 419c5f4..0686bba 100644
--- a/doc/examples/usersguide/fluid.py
+++ b/doc/examples/usersguide/fluid.py
@@ -1,4 +1,3 @@
-from __future__ import division, print_function
 ##############################################################################
 #
 # Copyright (c) 2008-2015 by The University of Queensland
@@ -17,6 +16,7 @@ from __future__ import division, print_function
 ########      August 2008      ########
 ##########    Leon Graham    ########## 
 ## Newtonian fluid using StokesProblemCartesian class##
+from __future__ import division, print_function
 
 from esys.escript import *
 try:
diff --git a/doc/examples/usersguide/helmholtz.py b/doc/examples/usersguide/helmholtz.py
index 464140c..1fe67bf 100644
--- a/doc/examples/usersguide/helmholtz.py
+++ b/doc/examples/usersguide/helmholtz.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/usersguide/int_save.py b/doc/examples/usersguide/int_save.py
index 22c925a..d693c2a 100644
--- a/doc/examples/usersguide/int_save.py
+++ b/doc/examples/usersguide/int_save.py
@@ -37,12 +37,12 @@ if not HAVE_FINLEY:
     print("Finley module not available")
 else:
 
-    n=4		#Change this to whatever you like
+    n=4         #Change this to whatever you like
     r=Rectangle(n,n)
     x=r.getX()
     x0=x[0]
     x1=x[1]    #we'll use this later
-    toobig=100	#An exception will be thrown if interpolation produces a value larger than this
+    toobig=100  #An exception will be thrown if interpolation produces a value larger than this
 
     #First one dimensional interpolation
 
@@ -56,7 +56,7 @@ else:
     minval=0
     maxval=1
 
-    step=sup(maxval-minval)/numslices	#The width of the gap between entries in the table
+    step=sup(maxval-minval)/numslices   #The width of the gap between entries in the table
 
     result=interpolateTable(sine_table, x0, minval, step, toobig)
 
diff --git a/doc/examples/usersguide/lame.py b/doc/examples/usersguide/lame.py
index 1f2d749..e7a9ee1 100644
--- a/doc/examples/usersguide/lame.py
+++ b/doc/examples/usersguide/lame.py
@@ -1,3 +1,20 @@
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+from __future__ import division, print_function
+
 #set up domain and symbols
 from esys.escript import *
 from esys.finley import Rectangle
diff --git a/doc/examples/usersguide/poisson_matplotlib.py b/doc/examples/usersguide/poisson_matplotlib.py
index 9d5e306..c43377c 100644
--- a/doc/examples/usersguide/poisson_matplotlib.py
+++ b/doc/examples/usersguide/poisson_matplotlib.py
@@ -1,4 +1,3 @@
-from __future__ import division
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -13,7 +12,7 @@ from __future__ import division
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
-from __future__ import print_function
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/examples/usersguide/split.py b/doc/examples/usersguide/split.py
new file mode 100644
index 0000000..dd343f9
--- /dev/null
+++ b/doc/examples/usersguide/split.py
@@ -0,0 +1,26 @@
+from esys.escript import *
+from esys.escript.linearPDEs import Poisson
+from esys.ripley import Rectangle 
+
+# Each node is in its own world
+sw=SplitWorld(getMPISizeWorld())
+buildDomains(sw, Rectangle, 100, 100)
+
+#describe the work we want to do
+# In this case we solve a Poisson equation
+def task(self, **kwargs):
+    v=kwargs['v']
+    dom=self.domain
+    pde=Poisson(dom)
+    x=dom.getX()
+    gammaD=whereZero(x[0])+whereZero(x[1])
+    pde.setValue(f=v, q=gammaD)
+    soln=pde.getSolution()
+    soln.dump('soln%d.ncdf'%v)
+
+# Now we add some jobs
+for i in range(1,20):
+    addJob(sw, FunctionJob, task, v=i)
+# Run them
+sw.runJobs() 
+
diff --git a/doc/examples/usersguide/voxet_reader.py b/doc/examples/usersguide/voxet_reader.py
new file mode 100644
index 0000000..7971fbc
--- /dev/null
+++ b/doc/examples/usersguide/voxet_reader.py
@@ -0,0 +1,220 @@
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+from __future__ import print_function, division
+
+import os
+from esys.downunder import CartesianReferenceSystem
+from esys.escript import ReducedFunction
+from esys.ripley import readBinaryGrid, BYTEORDER_BIG_ENDIAN, DATATYPE_FLOAT32, DATATYPE_FLOAT64
+
+def readVoxet(domain, filename, voproperty=1, origin=None, fillValue=0.,
+              referenceSystem=CartesianReferenceSystem()):
+    """
+    Reads a single property from a GOCAD Voxet file and returns a data
+    object on the given domain with the property data.
+    Restrictions:
+    - Voxet origin in UVW space (i.e. AXIS_MIN) needs to be [0,0,0]
+    - samples size must be 4 (float32) or 8 (float64)
+    - data type must be IEEE
+    - format must be RAW
+    - domain resolution must be (approximately) a multiple of voxet resolution
+
+    :param domain: the domain to use for data (must be a ripley domain)
+    :type domain: `Domain`
+    :param filename: Voxet header filename (usually ends in .vo)
+    :type filename: ``string``
+    :param voproperty: identifier of the property to read. Either the numeric
+                       property ID, the property name, or the filename of the
+                       property data.
+    :type voproperty: ``int`` or ``string``
+    :param origin: if supplied will override the Voxet origin as read from the
+                   file.
+    :type origin: ``list`` or ``tuple`` or ``None``
+    :param fillValue: value to use for cells that are not covered by property
+                      data (if applicable)
+    :type fillValue: ``float``
+    :param referenceSystem: coordinate system of domain. Used to scale vertical
+                            axis accordingly
+    :type referenceSystem: `ReferenceSystem`
+    """
+    header=open(filename).readlines()
+    if not header[0].startswith('GOCAD Voxet'):
+        raise ValueError("Voxet header not found. Invalid Voxet file?!")
+    NE=None
+    axis_uvw=[None,None,None]
+    axis_min=[0.,0.,0.]
+    axis_max=[1.,1.,1.]
+    # props[id]=[name,file,datatype]
+    props={}
+    for line in header:
+        if line.startswith('AXIS_O '):
+            if origin is None:
+                origin=[float(i) for i in line.split()[1:4]]
+        elif line.startswith('AXIS_U '):
+            u=[float(i) for i in line.split()[1:4]]
+            if (u[1] != 0) or (u[2] != 0):
+                raise ValueError('This coordinate system is not supported')
+            axis_uvw[0]=u[0]
+        elif line.startswith('AXIS_V '):
+            v=[float(i) for i in line.split()[1:4]]
+            if (v[0] != 0) or (v[2] != 0):
+                raise ValueError('This coordinate system is not supported')
+            axis_uvw[1]=v[1]
+        elif line.startswith('AXIS_W '):
+            w=[float(i) for i in line.split()[1:4]]
+            if (w[0] != 0) or (w[1] != 0):
+                raise ValueError('This coordinate system is not supported')
+            axis_uvw[2]=w[2]
+        elif line.startswith('AXIS_MIN '):
+            axis_min=[float(i) for i in line.split()[1:4]]
+            if axis_min != [0,0,0]:
+                raise ValueError('AXIS_MIN != [0,0,0] is not supported')
+        elif line.startswith('AXIS_MAX '):
+            axis_max=[float(i) for i in line.split()[1:4]]
+        elif line.startswith('AXIS_N '):
+            NE=[int(i) for i in line.split()[1:4]]
+        elif line.startswith('PROPERTY '):
+            propid=int(line.split()[1])
+            if not propid in props:
+                props[propid]=[None,None,None]
+            props[propid][0]=line.split()[2].strip()
+        elif line.startswith('PROP_ESIZE '):
+            propid=int(line.split()[1])
+            t=int(line.split()[2])
+            if t==4:
+                props[propid][2]=DATATYPE_FLOAT32
+            elif t==8:
+                props[propid][2]=DATATYPE_FLOAT64
+            else:
+                raise ValueError('Unsupported data size '+t)
+        elif line.startswith('PROP_ETYPE '):
+            t=line.split()[2].strip()
+            if t != 'IEEE':
+                raise ValueError('Unsupported data type '+t)
+        elif line.startswith('PROP_FORMAT '):
+            t=line.split()[2].strip()
+            if t != 'RAW':
+                raise ValueError('Unsupported data format '+t)
+        elif line.startswith('PROP_OFFSET '):
+            dataoffset=int(line.split()[2])
+            if dataoffset != 0:
+                raise ValueError('data offset != 0 not supported')
+        elif line.startswith('PROP_FILE '):
+            propid=int(line.split()[1])
+            props[propid][1]=line.split()[2].strip()
+
+    if (axis_uvw[0] is None) or (axis_uvw[1] is None) or (axis_uvw[2] is None)\
+            or (NE is None) or (origin is None):
+        raise ValueError('Could not determine data configuration. Invalid file?!')
+    if len(props)==0:
+        raise ValueError('No properties found.')
+
+    # voxets have these conventions:
+    # AXIS_N = number of samples (=cells!) in each dimension
+    # AXIS_UVW * AXIS_MAX = voxet length in each dimension
+    # AXIS_O = origin of voxet (cell centres!)
+    # see also http://paulbourke.net/dataformats/gocad/gocad.pdf
+
+    length = [axis_uvw[i]*axis_max[i] for i in range(3)]
+
+    # modify length and origin to account for the fact that Voxet cells are
+    # centred at the data points, i.e.:
+    # BEFORE:                   AFTER:
+    #
+    #       O----length---->|        O------length------>|
+    #      ___________________        ___________________
+    #     | * | * | * | * | * |      | * | * | * | * | * |
+    #      -------------------        -------------------
+
+    for i in range(3):
+        dz = length[i] / (NE[i]-1)
+        origin[i] -= dz/2.
+        length[i] += dz
+
+    if referenceSystem.isCartesian():
+        v_scale=1.
+    else:
+        v_scale=1./referenceSystem.getHeightUnit()
+
+    origin[-1] = origin[-1]*v_scale
+
+    # retrieve domain configuration so we know where to place the voxet data
+    gridorigin, gridspacing, gridNE = domain.getGridParameters()
+
+    # determine base location of this dataset within the domain
+    first=[int((origin[i]-gridorigin[i])/gridspacing[i]) for i in range(domain.getDim())]
+
+    # determine the resolution difference between domain and data.
+    # If domain has twice the resolution we can double up the data etc.
+    multiplier=[int(round((abs(length[i])/NE[i])/gridspacing[i])) for i in range(domain.getDim())]
+
+    # NOTE: Depending on your data you might have to multiply your vertical
+    # multiplier by 1000. to convert km in meters.
+    #multiplier[-1] = int(multiplier[-1] * v_scale * 1000.)
+    multiplier[-1] = int(multiplier[-1] * v_scale)
+
+    datatype=None
+    propfile=None
+    for pid in props.keys():
+        p=props[pid]
+        if (isinstance(voproperty, int) and pid == voproperty) or \
+           (isinstance(voproperty, str) and (p[0]==voproperty or p[1]==voproperty)):
+            datatype=p[2]
+            name=p[1]
+            #remove quotes which GoCAD introduces for filenames with spaces
+            if name.startswith('"') and name.endswith('"'):
+                name=name[1:-1]
+            propfile=os.path.join(os.path.dirname(filename), name)
+            print("Voxet property file: %s"%propfile)
+            break
+
+    if propfile is None or datatype is None:
+        raise ValueError("Invalid property "+str(voproperty))
+
+    reverse=[0]*domain.getDim()
+    if axis_uvw[-1] < 0:
+        reverse[-1]=1
+
+    print("calling readBinaryGrid with first=%s, nValues=%s, multiplier=%s, reverse=%s"%(str(first),str(NE),str(multiplier),str(reverse)))
+    data=readBinaryGrid(propfile, ReducedFunction(domain), shape=(),
+            fill=fillValue, byteOrder=BYTEORDER_BIG_ENDIAN,
+            dataType=p[2], first=first, numValues=NE, multiplier=multiplier,
+            reverse=reverse)
+
+    return data
+
+
+if __name__ == "__main__":
+    from esys.escript import *
+    from esys.escript.linearPDEs import Poisson
+    from esys.ripley import Brick
+    from esys.weipa import saveSilo, saveVoxet
+
+    dom = Brick(l0=1.,l1=1.,n0=9, n1=9, n2=9)
+    x = dom.getX()
+    gammaD = whereZero(x[0])+whereZero(x[1])
+    pde = Poisson(dom)
+    q = gammaD
+    pde.setValue(f=1, q=q)
+    u = pde.getSolution()
+    u=interpolate(u+dom.getX()[2], ReducedFunction(dom))
+    print(u)
+    saveVoxet('/tmp/poisson.vo', u=u)
+    print("-------")
+    dom = Brick(l0=1.,l1=1.,l2=4.,n0=18, n1=18, n2=36)
+    v=readVoxet(dom, '/tmp/poisson.vo', 'u', fillValue=0.5)
+    print(v)
+    #saveSilo('/tmp/poisson', v=v)
diff --git a/doc/examples/usersguide/wave.py b/doc/examples/usersguide/wave.py
index 38df722..f9feea3 100644
--- a/doc/examples/usersguide/wave.py
+++ b/doc/examples/usersguide/wave.py
@@ -1,5 +1,3 @@
-from __future__ import division
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/doc/install/source.tex b/doc/install/source.tex
index a0723c5..d4bf047 100644
--- a/doc/install/source.tex
+++ b/doc/install/source.tex
@@ -36,11 +36,14 @@ However, it also passes our tests when compiled using ``clang++''.
 
 Our current test compilers include:
 \begin{itemize}
- \item g++ 4.7.2, 4.9.1
- \item clang++ (OSX 10.9 default, OSX 10.10 default)
- \item intel icpc v14
+ \item g++ 4.7.2, 4.9.2
+ \item clang++ (OSX 10.10 default)
+ \item intel icpc v15
 \end{itemize}
 
+g++ 5 is known to have an optimisation bug that will cause \escript to
+crash. An alternate compiler will be required until this bug has been fixed.
+
 Note that:
 \begin{itemize}
  \item OpenMP will not function correctly for g++ $\leq$ 4.2.1 (and is not currently supported by clang).
@@ -223,7 +226,7 @@ yum install epel-release.noarch
 
 \noindent Install packages:
 \begin{shellCode}
-yum install netcdf-devel netcdf_cxx_devel gdal-python
+yum install netcdf-devel netcdf-cxx-devel gdal-python
 yum install python-devel numpy scipy scons boost-devel
 yum install python-matplotlib gcc gcc-c++
 yum install boost-python 
@@ -246,7 +249,9 @@ scons -j1 options_file=scons/templates/centos7_0_options.py
 \noindent Now go to Section~\ref{sec:cleanup} for cleanup.
 
 \subsection{Fedora}\label{sec:fedorasrc}
-These instructions were prepared using release $21.5$.
+These instructions were prepared using release $21.5$. Release $22$ by default
+uses gcc 5.2, which currently has an optimisation bug that will cause
+\escript to crash.
 
 \noindent Install packages
 \begin{shellCode}
@@ -445,7 +450,7 @@ Some other packages which might be useful include:
  \item Visit --- visualisation package. Can be used independently but our \texttt{weipa} library can make a Visit 
 plug-in to allow direct visualisation of escript files.
  \item gmsh --- meshing software used by our \texttt{pycad} library.
- \item mayavi --- another visualisation tool.
+ \item Mayavi2 --- another visualisation tool.
 \end{itemize}
 
 
diff --git a/doc/install/srcadditional.tex b/doc/install/srcadditional.tex
index a597f78..117062f 100644
--- a/doc/install/srcadditional.tex
+++ b/doc/install/srcadditional.tex
@@ -17,6 +17,6 @@ To perform visualizations you will need some additional tools.
 Since these do not need to be linked with any of the packages above, you can install versions available for your system, or build them from source.
 \begin{itemize}
 \item \file{ppmtompeg} and \file{jpegtopnm} from the \file{netpbm} suite - to build from source you also need \file{libjpeg} and its headers as well as \file{libpng}\footnote{libpng requires zlib to build} and its headers
-\item A tool to visualize VTK files - for example Mayavi or LLNL's VisIt.
+\item A tool to visualize VTK files - for example Mayavi2 or LLNL's VisIt.
 \end{itemize}
 
diff --git a/doc/install/verinfo.tex b/doc/install/verinfo.tex
deleted file mode 100644
index e509a29..0000000
--- a/doc/install/verinfo.tex
+++ /dev/null
@@ -1,7 +0,0 @@
-
-\newcommand{\relver}{development}
-\newcommand{\reldate}{\today}
-
-
-%\newcommand{\relver}{4.0}
-%\newcommand{\reldate}{\today}
diff --git a/doc/install/verinfo.tex b/doc/install/verinfo.tex
new file mode 120000
index 0000000..256af4d
--- /dev/null
+++ b/doc/install/verinfo.tex
@@ -0,0 +1 @@
+../verinfo.tex
\ No newline at end of file
diff --git a/doc/inversion/CookGravity.tex b/doc/inversion/CookGravity.tex
index 818993f..dbd27e3 100644
--- a/doc/inversion/CookGravity.tex
+++ b/doc/inversion/CookGravity.tex
@@ -509,7 +509,7 @@ execution of the script is aborted with an error message.
 In the final step of script~\ref{code: gravity1} the calculated density
 distribution is written to an external file.
 A popular file format used by several visualization packages such as
-\VisIt~\cite{VISIT} and \mayavi~\cite{MAYAVI} is the \VTK file format.
+\VisIt~\cite{VisIt} and \mayavi~\cite{mayavi} is the \VTK file format.
 The result of the inversion which has been named \verb|rho| can be written to
 the file \file{result.vtu} by adding the statement
 \begin{verbatim}
diff --git a/doc/inversion/defs.tex b/doc/inversion/defs.tex
index 7182adb..57311db 100644
--- a/doc/inversion/defs.tex
+++ b/doc/inversion/defs.tex
@@ -34,7 +34,7 @@
 %\newcommand{\CIHAN}[1]{\textbf{CIHAN: #1} }
 %\newcommand{\LG}[1]{\textbf{LG: #1} }
 \newcommand{\VisIt}{{\it VisIt}\index{visualization!VisIt}\index{VisIt}\xspace}
-\newcommand{\mayavi}{{\it mayavi}\index{visualization!mayavi}\index{mayavi}\xspace}
+\newcommand{\mayavi}{{\it Mayavi2}\index{visualization!mayavi}\index{mayavi}\xspace}
 \newcommand{\GOCAD}{{\it GOCAD}\index{GOCAD}\xspace}
 \newcommand{\VTK}{{\it VTK}\index{visualization!VTK}\index{VTK}\xspace}
 \newcommand{\SILO}{{\it SILO}\index{visualization!SILO}\index{SILO}\xspace}
diff --git a/doc/inversion/verinfo.tex b/doc/inversion/verinfo.tex
deleted file mode 100644
index e509a29..0000000
--- a/doc/inversion/verinfo.tex
+++ /dev/null
@@ -1,7 +0,0 @@
-
-\newcommand{\relver}{development}
-\newcommand{\reldate}{\today}
-
-
-%\newcommand{\relver}{4.0}
-%\newcommand{\reldate}{\today}
diff --git a/doc/inversion/verinfo.tex b/doc/inversion/verinfo.tex
new file mode 120000
index 0000000..256af4d
--- /dev/null
+++ b/doc/inversion/verinfo.tex
@@ -0,0 +1 @@
+../verinfo.tex
\ No newline at end of file
diff --git a/doc/sphinx_api/conf.py b/doc/sphinx_api/conf.py
index 2bd5fc7..f1a6cb7 100644
--- a/doc/sphinx_api/conf.py
+++ b/doc/sphinx_api/conf.py
@@ -11,6 +11,8 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.
 
+from __future__ import division, print_function
+
 import sys, os
 
 # If extensions (or modules to document with autodoc) are in another directory,
@@ -21,7 +23,7 @@ import sys, os
 # -- General configuration -----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = '1.0'	# For the autodoc_default_flags variable
+needs_sphinx = '1.0'    # For the autodoc_default_flags variable
 
 autodoc_default_flags=['show-inheritance', 'inherited-members', 'members', 'undoc-members']
 
diff --git a/doc/sphinx_api/genrst.py b/doc/sphinx_api/genrst.py
index 368eb18..17d01f1 100755
--- a/doc/sphinx_api/genrst.py
+++ b/doc/sphinx_api/genrst.py
@@ -1,5 +1,20 @@
 #!/usr/bin/env python
-from __future__ import print_function
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+from __future__ import division, print_function
 
 import os
 import inspect
diff --git a/doc/user/TutorialPDE.tex b/doc/user/TutorialPDE.tex
index 0097376..718b226 100644
--- a/doc/user/TutorialPDE.tex
+++ b/doc/user/TutorialPDE.tex
@@ -28,5 +28,6 @@ a way to ask questions.
 \input{wave}
 \input{heatedblock}
 \input{stokesflow}
-% \input{levelset}
 \input{slip}
+\input{dirac}
+% \input{levelset}
diff --git a/doc/user/changes.tex b/doc/user/changes.tex
index 43c8190..8c7fdc7 100644
--- a/doc/user/changes.tex
+++ b/doc/user/changes.tex
@@ -16,6 +16,23 @@
 \chapter{Changes from previous releases}
 \label{app:changes}
 
+\subsection*{4.0 to 4.1}
+\begin{itemize}
+  \item Added multi-resolution \ripley domains
+  \item The gmshReader now supports reading with multiple processes
+  \item Using the help() function on some domains is now more informative
+  \item User guide updated with information on use of Dirac points
+  \item Minimizer misfit now available via the callback function
+  \item Specifying use of a direct solver without a direct solver being available now raises an exception rather than silently defaulting to a non-direct solver
+  \item Synthetic seismic examples for various wave types now included in the examples distributed
+  \item Reading NetCDF files when the default value in the file is nan no longer causes issues
+  \item Various documentation updates and fixes in user guide, install guide, and code documentation
+  \item Various compatibility fixes with Python3
+  \item HTI and VTIWave models now support setting a q value for custom boundary conditions
+  \item Using an MPI-enabled gmsh with an MPI build of \escript should now reliably wait for gmsh to end
+  \item Added an example for voxet reading with \ripley
+\end{itemize}
+
 \subsection*{3.4.2 to 4.0}
 \begin{itemize}
   \item New spectral element domain, \speckley
diff --git a/doc/user/diffusion.tex b/doc/user/diffusion.tex
index b293f0e..f40fba5 100644
--- a/doc/user/diffusion.tex
+++ b/doc/user/diffusion.tex
@@ -268,7 +268,7 @@ the \finley PDE solver:
 \end{python}
 To visualize the solution `x0.vtu' you can use the command
 \begin{verbatim}
-mayavi -d x0.vtu -m Surface
+mayavi2 -d x0.vtu -m Surface
 \end{verbatim}
 and it is easy to see that the solution $T=x_{0}$ is calculated.
  
@@ -404,7 +404,7 @@ The files contain the temperature distributions at time steps $1, 2, i,
 \fig{DIFFUSION FIG 2} shows the result for some selected time steps.
 An easy way to visualize the results is the command
 \begin{verbatim}
-mayavi -d T.1.vtu -m Surface
+mayavi2 -d T.1.vtu -m Surface
 \end{verbatim}
-Use the \emph{Configure Data} window in mayavi to move forward and backward in time.
+Use the \emph{Configure Data} window in \mayavi to move forward and backward in time.
 
diff --git a/doc/user/dirac.tex b/doc/user/dirac.tex
new file mode 100644
index 0000000..3ddc342
--- /dev/null
+++ b/doc/user/dirac.tex
@@ -0,0 +1,84 @@
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Copyright (c) 2003-2015 by The University of Queensland
+% http://www.uq.edu.au
+%
+% Primary Business: Queensland, Australia
+% Licensed under the Open Software License version 3.0
+% http://www.opensource.org/licenses/osl-3.0.php
+%
+% Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+% Development 2012-2013 by School of Earth Sciences
+% Development from 2014 by Centre for Geoscience Computing (GeoComp)
+%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Point Sources}
+\label{POINT SOURCES}
+In this chapter we will show the usage of point sources and sinks.
+A simple example is a block of material with a heat source at a location
+$p_{in}$ and a heat sink at a location $p_{out}$. Under the assumption of a constant
+conductivity the steady heat diffusion equation for the temperature $u$
+is given as 
+\begin{equation}
+        -u_{,ii} = s(p_{in}) \; \delta_{p_{in}} + s(p_{out}) \; \delta_{p_{out}}
+        \label{EX:DIRAC1}
+\end{equation}
+where $\delta_{p_{in}}$ and $\delta_{p_{out}}$ refer to the Dirac $\delta$-function and
+$s({p_{in}})$ and $s({p_{out}})$ define the heat production and heat extraction rates at 
+locations ${p_{in}}$ and ${p_{out}}$, respectively.
+
+First the locations of  point sources and sinks need to be added to the 
+domain. This is done at generation time:  
+\begin{python}
+mydomain=Rectangle(30,30, l0=3, l1=2, 
+                diracPoints=[(1.,1.), (2.,1.)],  diracTags=['in', 'out'])
+\end{python}
+In this case the points are located at $p_{in}=(1.,1.)$ and $p_{out}=(2.,1.)$.
+For easier reference the points are tagged with the name \var{in} and \var{out}. 
+
+The values at the point source locations are defined using a \Data object. One possible
+way to define the values at the locations defined through the \var{diracPoint} list is using tagging: 
+\begin{python}
+s=Scalar(0., DiracDeltaFunctions(mydomain))
+s.setTaggedValue('in', +1.)
+s.setTaggedValue('out', -1.)
+\end{python}
+Here we set value $1$ at locations tagged with \var{in} (in this case this is just point $p_{in}=(1.,1.)$ ) and value $-1$ at locations tagged with \var{out} (in this case this is just point $p_{out}=(2.,1.)$). The point source in the right hand side of the PDE~\eqn{EX:DIRAC1} is then set as
+\begin{python}
+mypde = LinearSinglePDE(domain=mydomain)
+mypde.setValue(y_dirac=s)
+\end{python}
+Under the assumption that we fix the temperature to zero on the entire boundary 
+the script to solve the PDE is given as follows:
+\begin{python}
+from esys.escript import *
+from esys.weipa import *
+from esys.finley import Rectangle
+from esys.escript.linearPDEs import LinearSinglePDE
+
+mydomain=Rectangle(30,30, l0=3, l1=2, 
+                diracPoints=[(1.,1.), (2.,1.)],  diracTags=['in', 'out'])
+x = mydomain.getX()
+gammaD = whereZero(x[0])+whereZero(x[1])+whereZero(x[0]-3.)+whereZero(x[1]-2.)
+
+s=Scalar(0., DiracDeltaFunctions(mydomain))
+s.setTaggedValue('in', +1.)
+s.setTaggedValue('out', -1.)
+
+mypde = LinearSinglePDE(domain=mydomain)
+mypde.setValue(q=gammaD, A=kronecker(2), y_dirac=s)
+u = mypde.getSolution()
+saveVTK("u.vtu",sol=u)
+\end{python}
+%
+The result is shown in Figure~\ref{FIG:EX:DIRAC}.
+%
+\begin{figure}[ht]
+\center
+\includegraphics[height=5cm]{diracplot}
+%\includegraphics[scale=0.25]{stokes-fluid-colorbar}
+\caption{Results of the diffusion problem with nodal source and sink.}
+\label{FIG:EX:DIRAC}
+\end{figure}
+
diff --git a/doc/user/escript.tex b/doc/user/escript.tex
index f4c5676..5277a99 100644
--- a/doc/user/escript.tex
+++ b/doc/user/escript.tex
@@ -54,6 +54,7 @@ The following generators for function spaces on a \Domain \var{mydomain} are com
 \item \var{ContinuousFunction(mydomain)}: continuous functions, e.g. a temperature distribution
 \item \var{Function(mydomain)}: general functions which are not necessarily continuous, e.g. a stress field
 \item \var{FunctionOnBoundary(mydomain)}: functions on the boundary of the domain, e.g. a surface pressure
+\item \var{DiracDeltaFunctions(mydomain)}: functions defined on a set of points
 \item \var{FunctionOnContact0(mydomain)}: functions on side $0$ of a discontinuity
 \item \var{FunctionOnContact1(mydomain)}: functions on side $1$ of a discontinuity
 \end{itemize}
diff --git a/doc/user/esys.bib b/doc/user/esys.bib
index d5e8ef5..ccc48cc 100644
--- a/doc/user/esys.bib
+++ b/doc/user/esys.bib
@@ -1,528 +1,528 @@
+ at comment{x-kbibtex-personnameformatting=<%l><, %f>}
+
 Created by Kbib version 0.6.5
-\#Last modified: Wed May 20 10:08:05 2009
+#Last modified: Wed May 20 10:08:05 2009
 
- at Book{ SAAD,
-	author = "Y. Saad",
-	title = "{Iterative Methods for Sparse Linear Systems}",
+ at book{SAAD,
+	address = "20 Park Plaza, Boston, MA 02116, USA",
+	author = "Saad, Y.",
 	publisher = "PWS Publishing Company",
-	year = "1996",
-	address = "20 Park Plaza, Boston, MA 02116, USA"
+	title = "{Iterative Methods for Sparse Linear Systems}",
+	year = "1996"
 }
 
- at Book{ WEISS,
-	author = "R. Weiss",
-	title = "{Parameter-Free Iterative Linear Solvers}",
+ at book{WEISS,
+	address = "Berlin",
+	author = "Weiss, R.",
 	publisher = "Akademie Verlag",
-	year = "1996",
 	series = "{Mathematical Research, vol.\ 97}",
-	address = "Berlin"
+	title = "{Parameter-Free Iterative Linear Solvers}",
+	year = "1996"
 }
 
- at Book{ Multigrid,
-	author = {U. Trottenberg and C. W. Oosterlee and A. Sch{\"u}ller},
-	title = "{Multigrid}",
+ at book{Multigrid,
+	author = "Trottenberg, U. and Oosterlee, C. W. and Sch{\"u}ller, A.",
 	publisher = "Academic Press",
+	title = "{Multigrid}",
 	year = "2001"
 }
 
- at Book{ AMG,
-	author = "Y. Shapira",
-	title = "{Matrix-Based Multigrid}",
+ at book{AMG,
+	author = "Shapira, Y.",
 	publisher = "Springer",
+	title = "{Matrix-Based Multigrid}",
 	year = "2008"
 }
 
- at Book{ NumHand,
-	author = "P.~G. Ciarlet and J.~L. Lions",
-	title = "{Handbook of Numerical Analysis}",
+ at book{NumHand,
+	address = "Amsterdam",
+	author = "Ciarlet, P.~G. and Lions, J.~L.",
 	publisher = "North Holland",
-	year = "1991",
+	title = "{Handbook of Numerical Analysis}",
 	volume = "2",
-	address = "Amsterdam"
+	year = "1991"
 }
 
- at InProceedings{ NUMARRAY,
-	title = "{An Array Module for Python}",
+ at inproceedings{NUMARRAY,
+	author = "Miller, Todd and Hsu, Jin-Chung and Greenfield, Perry and White, Richard L.",
 	booktitle = "{Astronomical Data Analysis Software and Systems XI}",
-	author = "Todd Miller and Jin-Chung Hsu and Perry Greenfield and Richard L. White",
+	title = "{An Array Module for Python}",
 	year = "2001"
 }
 
- at Book{ Zienc,
-	author = "O.~C. Zienkiewicz",
-	title = "{The Finite Element Method in Engineering Science}",
-	publisher = "McGraw-Hill",
-	year = "1971",
+ at book{Zienc,
 	address = "London",
-	edition = "second"
+	author = "Zienkiewicz, O.~C.",
+	edition = "second",
+	publisher = "McGraw-Hill",
+	title = "{The Finite Element Method in Engineering Science}",
+	year = "1971"
 }
 
- at Article{ SUPG2,
-	author = "H.C. Elman and A. Ramage",
-	title = "{An Analysis of Smoothing Effects of Upwinding Strategies For The Convection-Diffusion Equation}",
+ at article{SUPG2,
+	author = "Elman, H.C. and Ramage, A.",
 	journal = "SIAM J. Numer. Anal.",
-	year = "2002",
-	volume = "40",
 	number = "1",
-	pages = "254--281"
+	pages = "254--281",
+	title = "{An Analysis of Smoothing Effects of Upwinding Strategies For The Convection-Diffusion Equation}",
+	volume = "40",
+	year = "2002"
 }
 
- at Book{ TURCOTTE2002,
-	author = "Donald L. Turcotte and Gerald Schubert",
-	title = "{Geodynamics}",
+ at book{TURCOTTE2002,
+	author = "Turcotte, Donald L. and Schubert, Gerald",
+	edition = "Second",
 	publisher = "Cambridge University Press",
-	year = 2002,
-	edition = "Second"
+	title = "{Geodynamics}",
+	year = 2002
 }
 
- at Article{ MORESI2003,
-	title = "{A {Lagrangian} integration point finite element method for large deformation modeling of viscoelastic geomaterials}",
-	author = "L. Moresi and F. Dufour and H.--B. Muhlhaus",
+ at article{MORESI2003,
+	author = "Moresi, L. and Dufour, F. and Muhlhaus, H.--B.",
 	journal = "Journal of Computational Physics",
-	pages = "476--497",
 	number = 184,
+	pages = "476--497",
+	title = "{A {Lagrangian} integration point finite element method for large deformation modeling of viscoelastic geomaterials}",
 	year = 2003
 }
 
- at Article{ GROSZ99c,
+ at article{GROSZ99c,
 	author = "L.~Grosz",
-	title = "{Preconditioning by Incomplete Block Elimination}",
 	journal = "J. of Num. Lin. Alg. with Appl.",
-	year = "2000",
-	note = "to appear"
+	note = "to appear",
+	title = "{Preconditioning by Incomplete Block Elimination}",
+	year = "2000"
 }
 
- at Book{ VTK,
-	author = "Inc {The Kitware}",
-	title = "{Visualization Toolkit User's Guide.}",
-	publisher = "Kitware, Inc publishers.",
-	year = "1971",
+ at book{VTK,
 	address = "London",
-	edition = "second"
+	author = "{The Kitware}, Inc",
+	edition = "second",
+	publisher = "Kitware, Inc publishers.",
+	title = "{Visualization Toolkit User's Guide.}",
+	year = "1971"
 }
 
- at Article{ BOURGOUIN2006,
-	author = "L. Bourgouin and H. Muhlhaus and A. J. Hale and A. Arsac",
-	title = "{Towards realistic simulations of lava dome growth using the level set method}",
+ at article{BOURGOUIN2006,
+	author = "Bourgouin, L. and Muhlhaus, H. and Hale, A. J. and Arsac, A.",
 	journal = "Acta Geotechnica",
-	year = "2006",
-	volume = "1",
 	number = "4",
-	pages = "225--236"
+	pages = "225--236",
+	title = "{Towards realistic simulations of lava dome growth using the level set method}",
+	volume = "1",
+	year = "2006"
 }
 
- at Article{ SUSSMAN1994,
-	author = "M. Sussman and P. Smereka and S. Osher",
-	title = "{A Level Set Approach for Computing Solutions to Incompressible Two-Phase Flow}",
+ at article{SUSSMAN1994,
+	author = "Sussman, M. and Smereka, P. and Osher, S.",
 	journal = "Journal of Computational Physics",
-	year = "1994",
+	pages = "146--159",
+	title = "{A Level Set Approach for Computing Solutions to Incompressible Two-Phase Flow}",
 	volume = "114",
-	pages = "146--159"
+	year = "1994"
 }
 
- at Article{ LIN2005,
-	author = "C. Lin and H. Lee and T. Lee and L. J. Weber",
-	title = "{A Level Set Characteristic Galerkin Finite Element Method for Free Surface Flows}",
+ at article{LIN2005,
+	author = "Lin, C. and Lee, H. and Lee, T. and Weber, L. J.",
 	journal = "International Journal for Numerical Methods in Fluids",
-	year = "2005",
+	pages = "521--547",
+	title = "{A Level Set Characteristic Galerkin Finite Element Method for Free Surface Flows}",
 	volume = "49",
-	pages = "521--547"
+	year = "2005"
 }
 
- at Article{ SUCKALE2008,
-	author = "J. Suckale and J. C. Have",
+ at article{SUCKALE2008,
+	author = "Suckale, J. and Have, J. C.",
+	pages = "1--43",
 	title = "{An alternative numerical model of buoyancy driven flows}",
-	year = "2008",
-	pages = "1--43"
+	year = "2008"
 }
 
- at Article{ GROSS2006,
-	author = "L. Gross and L. Bourgouin and A. J. Hale and H.-B Muhlhaus",
-	title = "{Interface Modeling in Incompressible Media using Level Sets in Escript}",
+ at article{GROSS2006,
+	author = "Gross, L. and Bourgouin, L. and Hale, A. J. and Muhlhaus, H.-B",
+	doi = "10.1016/j.pepi.2007.04.004",
 	journal = "Physics of the Earth and Planetary Interiors",
-	year = 2007,
-	volume = "163",
-	pages = "23--34",
 	month = "Aug.",
-	doi = "doi:10.1016/j.pepi.2007.04.004"
+	pages = "23--34",
+	title = "{Interface Modeling in Incompressible Media using Level Sets in Escript}",
+	volume = "163",
+	year = 2007
 }
 
- at Article{ VANKEKEN1997,
-	author = "P. E. {van Keken} and S. D. King and H. Schmeling and U. R. Christensen and D. Neumeister and M. P. Doin",
-	title = "{A comparison of methods for the modeling of thermochemical convection}",
+ at article{VANKEKEN1997,
+	author = "{van Keken}, P. E. and King, S. D. and Schmeling, H. and Christensen, U. R. and Neumeister, D. and Doin, M. P.",
 	journal = "J. Geophys. Res.",
-	year = "1997",
-	volume = "102",
 	number = "B10",
-	pages = "22477--22495"
+	pages = "22477--22495",
+	title = "{A comparison of methods for the modeling of thermochemical convection}",
+	volume = "102",
+	year = "1997"
 }
 
- at Article{ AAMIRBERKYAN2008,
-	author = "A. Amirberkyan and L. Gross",
-	title = "{Efficient Solvers for Incompressible Fluid Flows in Geosciences}",
+ at article{AAMIRBERKYAN2008,
+	author = "Amirberkyan, A. and Gross, L.",
 	journal = "ANZIAM Journal",
-	year = "2008",
+	pages = "C189--C203",
+	title = "{Efficient Solvers for Incompressible Fluid Flows in Geosciences}",
 	volume = "50",
-	pages = "C189--C203"
+	year = "2008"
 }
 
- at Article{ MBENZI2005,
-	author = "M. Benzi and G. H. Golub and J. Liesen",
-	title = "{Numerical solution of saddle point problems}",
+ at article{MBENZI2005,
+	author = "Benzi, M. and Golub, G. H. and Liesen, J.",
 	journal = "Acta Numerica",
-	year = "2005",
+	pages = "1--137",
+	title = "{Numerical solution of saddle point problems}",
 	volume = "14",
-	pages = "1--137"
+	year = "2005"
 }
 
- at Misc{ OPENDX,
+ at misc{OPENDX,
+	howpublished = "http://www.opendx.org/",
 	key = "opendx",
 	title = "{OpenDX}",
-	howpublished = "http://www.opendx.org/",
 	url = "http://www.opendx.org/"
 }
 
- at Misc{ netCDF,
+ at misc{netCDF,
+	howpublished = "http://www.unidata.ucar.edu/software/netcdf",
 	key = "netcdf",
 	title = "{netCDF}",
-	howpublished = "http://www.unidata.ucar.edu/software/netcdf",
 	url = "http://www.unidata.ucar.edu/software/netcdf"
 }
 
- at Misc{ UMFPACK,
-	key = "umfpack",
-	howpublished = "http://www.cise.ufl.edu/research/sparse/umfpack/",
-	url = "http://www.cise.ufl.edu/research/sparse/umfpack/"
+ at article{UMFPACK,
+	author = "Davis, Timothy A.",
+	issue = 2,
+	journal = "ACM Transactions on Mathematical Software (TOMS)",
+	pages = "196--199",
+	title = "{Algorithm 832: UMFPACK V4.3---an unsymmetric-pattern multifrontal method}",
+	url = "http://www.cise.ufl.edu/research/sparse/umfpack/",
+	volume = 30,
+	year = 2004
 }
 
- at Misc{ MKL,
+ at misc{MKL,
 	key = "MKL",
 	title = "{INTEL's Math Kernel Library}"
 }
 
- at Misc{ VisIt,
+ at misc{VisIt,
+	howpublished = "https://wci.llnl.gov/codes/visit/home.html",
 	key = "visit",
 	title = "{VisIt homepage}",
-	howpublished = "https://wci.llnl.gov/codes/visit/home.html",
 	url = "https://wci.llnl.gov/codes/visit/home.html"
 }
 
- at Misc{ Sympy,
+ at misc{Sympy,
+	howpublished = "http://sympy.org/en/index.html",
 	key = "Sympy",
 	title = "{Sympy homepage}",
-	howpublished = "http://sympy.org/en/index.html",
 	url = "http://sympy.org/en/index.html"
 }
 
- at Misc{ OPENMP,
+ at misc{OPENMP,
+	howpublished = "http://openmp.org",
 	key = "openmp",
 	title = "{OpenMP}",
-	howpublished = "http://openmp.org",
 	url = "http://openmp.org"
 }
 
- at Misc{ MPI,
+ at misc{MPI,
+	howpublished = "http://www.mpi-forum.org",
 	key = "mpi",
 	title = "{MPI}",
-	howpublished = "http://www.mpi-forum.org",
 	url = "http://www.mpi-forum.org"
 }
 
- at Book{ LBB,
-	author = "V. Girault and P. A. Raviart",
-	title = "{Finite Element Methods for Navier-Stokes Equations- Theory and Algorithms}",
+ at book{LBB,
+	author = "Girault, V. and Raviart, P. A.",
 	publisher = "Springer Verlag, Berlin",
+	title = "{Finite Element Methods for Navier-Stokes Equations- Theory and Algorithms}",
 	year = 1986
 }
 
- at Misc{ SCSL,
+ at misc{SCSL,
 	title = "{SCSL (Scientific Computing Software Library)}",
 	url = "http://www.sgi.com/products/software/irix/scsl.html"
 }
 
- at Manual{ GMSH,
-	title = "{Gmsh Reference Manual}",
-	author = "Christophe Geuzaine and Jean-Francois Remacle",
+ at manual{GMSH,
+	author = "Geuzaine, Christophe and Remacle, Jean-Francois",
 	edition = "1.12",
 	month = "Aug",
-	year = 2003,
-	url = "http://www.geuz.org/gmsh"
+	title = "{Gmsh Reference Manual}",
+	url = "http://www.geuz.org/gmsh",
+	year = 2003
 }
 
- at Article{ RILU,
-	title = "{ARMS: an algebraic recursive multilevel solver for general sparse linear systems}",
-	author = "B. Suchomel and Y. Saad",
+ at article{RILU,
+	author = "Suchomel, B. and Saad, Y.",
 	journal = "Numerical Linear Algebra with Applications",
+	number = 5,
 	pages = "1099--1506",
+	title = "{ARMS: an algebraic recursive multilevel solver for general sparse linear systems}",
 	volume = "9",
-	number = 5,
 	year = 2002
 }
 
- at Article{ PASTIX,
-	author = "P. H{\'e}non and P. Ramet and J. Roman",
-	title = "{{PaStiX}: A High-Performance Parallel Direct Solver for Sparse Symmetric Definite Systems}",
+ at article{PASTIX,
+	author = "H{\'e}non, P. and Ramet, P. and Roman, J.",
 	journal = "Parallel Computing",
-	year = "2002",
-	volume = "28",
+	month = jan,
 	number = "2",
 	pages = "301--321",
-	month = jan
+	title = "{{PaStiX}: A High-Performance Parallel Direct Solver for Sparse Symmetric Definite Systems}",
+	volume = "28",
+	year = "2002"
 }
 
- at Article{ Trilinos,
-	author = "Michael A. Heroux and Roscoe A. Bartlett and Vicki E. Howle and Robert J. Hoekstra and Jonathan J. Hu and Tamara G. Kolda and Richard B. Lehoucq and Kevin R. Long and Roger P. Pawlowski and Eric T. Phipps and Andrew G. Salinger and Heidi K. Thornquist and Ray S. Tuminaro and James M. Willenbring and Alan Williams and Kendall S. Stanley",
-	title = "{An overview of the Trilinos project}",
+ at article{Trilinos,
+	address = "New York, NY, USA",
+	author = "Heroux, Michael A. and Bartlett, Roscoe A. and Howle, Vicki E. and Hoekstra, Robert J. and Hu, Jonathan J. and Kolda, Tamara G. and Lehoucq, Richard B. and Long, Kevin R. and Pawlowski, Roger P. and Phipps, Eric T. and Salinger, Andrew G. and Thornquist, Heidi K. and Tuminaro, Ray S. and Willenbring, James M. and Williams, Alan and Stanley, Kendall S.",
 	journal = "ACM Trans. Math. Softw.",
-	volume = "31",
 	number = "3",
-	year = "2005",
 	pages = "397--423",
 	publisher = "ACM Press",
-	address = "New York, NY, USA"
+	title = "{An overview of the Trilinos project}",
+	volume = "31",
+	year = "2005"
 }
 
- at Article{ SuperLU,
-	author = "James W. Demmel and Stanley C. Eisenstat and John R. Gilbert and Xiaoye S. Li and Joseph W. H. Liu",
-	title = "{A supernodal approach to sparse partial pivoting}",
+ at article{SuperLU,
+	author = "Demmel, James W. and Eisenstat, Stanley C. and Gilbert, John R. and Li, Xiaoye S. and Liu, Joseph W. H.",
 	journal = "SIAM J. Matrix Analysis and Applications",
-	year = "1999",
-	volume = "20",
 	number = "3",
-	pages = "720--755"
+	pages = "720--755",
+	title = "{A supernodal approach to sparse partial pivoting}",
+	volume = "20",
+	year = "1999"
 }
 
- at Misc{ RIGHTHANDRULE,
-        key="RHR",
+ at misc{RIGHTHANDRULE,
+	howpublished = "http://en.wikipedia.org/wiki/Right-hand\_rule",
+	key = "RHR",
 	title = "{Right-hand rule}",
-	howpublished = "http://en.wikipedia.org/wiki/Right-hand_rule",
 	url = "http://en.wikipedia.org/wiki/Right-hand_rule"
 }
 
- at Misc{ TETGEN,
-	title = "{TetGen: A Quality Tetrahedral Mesh Generator and Three-Dimensional Delaunay Triangulator}",
-	author = "Hang Si",
-	month = "Jan",
-	year = 2008,
+ at misc{TETGEN,
+	author = "Si, Hang",
 	howpublished = "http://tetgen.berlios.de",
-	url = "http://tetgen.berlios.de"
+	month = "Jan",
+	title = "{TetGen: A Quality Tetrahedral Mesh Generator and Three-Dimensional Delaunay Triangulator}",
+	url = "http://tetgen.berlios.de",
+	year = 2008
 }
 
- at Misc{ NETGEN,
-	title = "{Netgen 1.4}",
-	author = "Tim Edwards",
+ at misc{NETGEN,
+	author = "Edwards, Tim",
 	howpublished = "http://opencircuitdesign.com/netgen/",
+	title = "{Netgen 1.4}",
 	url = "http://opencircuitdesign.com/netgen/"
 }
 
- at Article{ ELMAN,
-title = "Efficient preconditioning of the linearized Navier-Stokes equations for incompressible flow",
-journal = "Journal of Computational and Applied Mathematics",
-volume = "128",
-number = "1--2",
-pages = "261--279",
-year = "2001",
-note = "",
-issn = "0377-0427",
-doi = "DOI: 10.1016/S0377-0427(00)00515-X",
-url = "http://www.sciencedirect.com/science/article/pii/S037704270000515X",
-author = "David Silvester and Howard Elman and David Kay and Andrew Wathen",
-keywords = "Navier-Stokes equations",
-keywords = "Incompressible flow",
-keywords = "Preconditioning",
-keywords = "Multigrid iteration"
-}
-
- at Article{ LEASTSQUARESFEM1994,
-	author = "A. I. Pehlivanov and G. F. Carey and R. D. Lazarov",
-	title = "{Least-Squares Mixed Finite Elements for Second-Order Elliptic Problems}",
+ at article{ELMAN,
+	author = "Silvester, David and Elman, Howard and Kay, David and Wathen, Andrew",
+	doi = "10.1016/S0377-0427(00)00515-X",
+	issn = "0377-0427",
+	journal = "Journal of Computational and Applied Mathematics",
+	keywords = "Navier-Stokes equations; Incompressible flow; Preconditioning; Multigrid iteration",
+	note = "",
+	number = "1--2",
+	pages = "261--279",
+	title = "{Efficient preconditioning of the linearized Navier-Stokes equations for incompressible flow}",
+	url = "http://www.sciencedirect.com/science/article/pii/S037704270000515X",
+	volume = "128",
+	year = "2001"
+}
+
+ at article{LEASTSQUARESFEM1994,
+	author = "Pehlivanov, A. I. and Carey, G. F. and Lazarov, R. D.",
 	journal = "SIAM Journal on Numerical Analysis",
-	year = 1994,
-	volume = "31",
+	month = "October",
 	number = 5,
 	pages = "1368--1377",
-	month = "October"
+	title = "{Least-Squares Mixed Finite Elements for Second-Order Elliptic Problems}",
+	volume = "31",
+	year = 1994
 }
 
- at Article{ Muhlhaus2005,
-	title = "{Towards a self-consistent plate mantle model that includes elasticity: simple benchmarks and application to basic modes of convection}",
-	author = "Hans-Bernd Muhlhaus and Klaus Regenauer-Lieb",
+ at article{Muhlhaus2005,
+	author = "Muhlhaus, Hans-Bernd and Regenauer-Lieb, Klaus",
 	journal = "Geophysical Journal International",
+	month = "November",
+	number = 2,
 	pages = "788--800(13)",
+	title = "{Towards a self-consistent plate mantle model that includes elasticity: simple benchmarks and application to basic modes of convection}",
 	volume = "163",
-	number = 2,
-	month = "November",
 	year = 2005
 }
 
- at Manual{ NUMPY,
-	title = "{Numpy and Scipy Documentation}",
-	url = "http://docs.scipy.org/doc/",
+ at manual{NUMPY,
+	author = "Community, Scipy",
 	howpublished = "http://docs.scipy.org/doc/",
-	author = "Scipy Community"
+	title = "{Numpy and Scipy Documentation}",
+	url = "http://docs.scipy.org/doc/"
 }
 
- at Manual{ matplotlib,
-	title = "{matplotlib}",
-	author = "John Hunter and Michael Droettboom and Darren Dale",
-	month = "July",
-	year = "2009",
+ at manual{matplotlib,
+	author = "Hunter, John and Droettboom, Michael and Dale, Darren",
 	howpublished = "http://matplotlib.sourceforge.net/",
-	url = "http://matplotlib.sourceforge.net/"
+	month = "July",
+	title = "{matplotlib}",
+	url = "http://matplotlib.sourceforge.net/",
+	year = "2009"
 }
 
- at Manual{ mayavi,
-	key="mayavi",
-	title = "{Mayavi2: The next generation scientific data visualization}",
-	year = "2009",
-	howpublished = "https://svn.enthought.com/enthought/wiki/MayaVi",
-	url = "https://svn.enthought.com/enthought/wiki/MayaVi"
+ at manual{mayavi,
+	howpublished = "http://docs.enthought.com/mayavi/mayavi",
+	key = "mayavi",
+	title = "{Mayavi: 3D scientific data visualization and plotting in Python}",
+	year = "2015",
+	howpublished = "http://docs.enthought.com/mayavi/mayavi",
+	url = "http://docs.enthought.com/mayavi/mayavi"
 }
 
- at Misc{ gnuplot,
-	title = "{gnuplot homepage}",
-	author = "Thomas Williams and Colin Kelley",
-	month = mar,
-	year = "2009",
+ at misc{gnuplot,
+	author = "Williams, Thomas and Kelley, Colin",
 	howpublished = "http://www.gnuplot.info/",
-	url = "http://www.gnuplot.info/"
+	month = mar,
+	title = "{gnuplot homepage}",
+	url = "http://www.gnuplot.info/",
+	year = "2009"
 }
 
- at Manual{ SCIPY,
-	title = "{Numpy and Scipy Documentation}",
-	author = "The Scipy community community {community community}",
+ at manual{SCIPY,
+	author = "{The Scipy community}",
 	howpublished = "http://docs.scipy.org/doc/",
+	title = "{Numpy and Scipy Documentation}",
 	url = "http://docs.scipy.org/doc/"
 }
 
- at Misc{ SCSL,
-	title = "{SCSL (Scientific Computing Software Library)}",
+ at misc{SCSL,
 	howpublished = "http://www.sgi.com/products/software/irix/scsl.html",
+	title = "{SCSL (Scientific Computing Software Library)}",
 	url = "http://www.sgi.com/products/software/irix/scsl.html"
 }
 
- at Misc{ IDEAS,
-	key="IDEAS",
-	title = "{I\_DEAS}",
+ at misc{IDEAS,
 	howpublished = "http://www.plm.automation.siemens.com/en\\\_us/products/nx/",
+	key = "IDEAS",
+	title = "{I\_DEAS}",
 	url = "http://www.plm.automation.siemens.com/en_us/products/nx/"
 }
 
- at Misc{ VRML,
-	key="VRML",
-	title = "{VRML}",
+ at misc{VRML,
 	howpublished = "http://www.w3.org/MarkUp/VRML/",
+	key = "VRML",
+	title = "{VRML}",
 	url = "http://www.w3.org/MarkUp/VRML/"
 }
 
- at Misc{ STL,
-	key="STL",
-	title = "{STL}",
+ at misc{STL,
 	howpublished = "http://en.wikipedia.org/wiki/STL\_(file\_format)",
+	key = "STL",
+	title = "{STL}",
 	url = "http://en.wikipedia.org/wiki/STL_(file_format)"
 }
 
- at Misc{ NASTRAN,
-	key="NASTRAN",
-	title = "{Nastran}",
+ at misc{NASTRAN,
 	howpublished = "http://simcompanion.mscsoftware.com/",
+	key = "NASTRAN",
+	title = "{Nastran}",
 	url = "http://simcompanion.mscsoftware.com/"
 }
 
- at Misc{ MEDIT,
-	key="medit",
-	title = "{Medit}",
+ at misc{MEDIT,
 	howpublished = "http://www-rocq.inria.fr/OpenFEM/Doc/",
+	key = "medit",
+	title = "{Medit}",
 	url = "http://www-rocq.inria.fr/OpenFEM/Doc/"
 }
 
- at Misc{ CGNS,
-	key="cgns",
-	title = "{CGNS}",
+ at misc{CGNS,
 	howpublished = "http://cgns.sourceforge.net/",
+	key = "cgns",
+	title = "{CGNS}",
 	url = "http://cgns.sourceforge.net/"
 }
 
- at Misc{ PLOT3D,
-	key="plot3d",
-	title = "{Plot3D}",
+ at misc{PLOT3D,
 	howpublished = "http://www.plot3d.net/",
+	key = "plot3d",
+	title = "{Plot3D}",
 	url = "http://www.plot3d.net/"
 }
 
- at Misc{ DIFFPACK,
-	key="diffpack",
-	title = "{Diffpack}",
+ at misc{DIFFPACK,
 	howpublished = "http://www.diffpack.com/",
+	key = "diffpack",
+	title = "{Diffpack}",
 	url = "http://www.diffpack.com/"
 }
 
- at Misc{ GOCAD,
-    key = "gocad",
-    title = "{Paradigm GOCAD homepage}",
-    howpublished = {\url{http://www.pdgm.com/Products/GOCAD}},
-    url = {http://www.pdgm.com/Products/GOCAD}
+ at misc{GOCAD,
+	howpublished = "\url{http://www.pdgm.com/Products/GOCAD}",
+	key = "gocad",
+	title = "{Paradigm GOCAD homepage}",
+	url = "http://www.pdgm.com/Products/GOCAD"
 }
 
- at InProceedings{ lazyauspdc,
-	author = "Joel Fenwick and Lutz Gross",
-	title = "{Lazy Evaluation of PDE Coefficients in the EScript System}",
+ at inproceedings{lazyauspdc,
+	author = "Fenwick, Joel and Gross, Lutz",
 	booktitle = "{Parallel and Distributed Computing 2010 (AusPDC2010)}",
+	editor = "Chen, Jinjun and Ranjan, Rajiv",
+	issn = "1445-1336",
+	month = "January",
 	pages = "71--76",
-	year = "2010",
-	editor = "Jinjun Chen and Rajiv Ranjan",
-	volume = "107",
 	series = "{Conferences in Research and Practice in Information Technology}",
-	month = "January",
-	issn = "1445-1336"
+	title = "{Lazy Evaluation of PDE Coefficients in the EScript System}",
+	volume = "107",
+	year = "2010"
 }
 
- at InProceedings{ LoulaCorrea2006a,
-	title = "{NUMERICAL ANALYSIS OF STABILIZED MIXED FINITE ELEMENT METHODS FOR DARCY FLOW }",
+ at inproceedings{LoulaCorrea2006a,
+	author = "Loula, Abimael F. D. and Correa, Maicon R.",
 	booktitle = "{III European Conference on Computational Mechanics Solids, Structures and Coupled Problems in Engineering }",
-	author = "Abimael F. D. Loula and Maicon R. Correa",
-	editor = "C.A. Mota Soares and others",
-	year = "2006 ",
-	localfile = "LoulaCorrea2006a.pdf"
+	editor = "Soares, C.A. Mota and others",
+	localfile = "LoulaCorrea2006a.pdf",
+	title = "{NUMERICAL ANALYSIS OF STABILIZED MIXED FINITE ELEMENT METHODS FOR DARCY FLOW }",
+	year = "2006"
 }
 
- at Article{ MasudHughes2002a,
-	title = "{A stabilized mixed finite element method for Darcy flowArif Masud
-
-}",
-	author = "Arif Masud and Thomas J.R. Hughes",
+ at article{MasudHughes2002a,
+	author = "Masud, Arif and Hughes, Thomas J.R.",
 	journal = "Comput. Methods Appl. Mech. Engrg.",
-	pages = "4341--4370",
+	localfile = "MasudHughes2002a.pdf",
 	number = "191",
-	year = "2002",
-	localfile = "MasudHughes2002a.pdf
-"
+	pages = "4341--4370",
+	title = "{A stabilized mixed finite element method for Darcy flowArif Masud }",
+	year = "2002"
 }
 
- at Book{ Kelley2004a,
-	author = "C. T. Kelley",
-	title = "{Iterative Methods for Linear and Nonlinear Equations}",
-	Series= "{Frontiers in Applied Mathematics}",
-	volume="18", 
+ at book{Kelley2004a,
+	author = "Kelley, C. T.",
 	publisher = "Cambridge University Press",
+	series = "{Frontiers in Applied Mathematics}",
+	title = "{Iterative Methods for Linear and Nonlinear Equations}",
+	volume = "18",
 	year = 2004
 }
 
- at ARTICLE{Schoenauer1981a,
-  author = {Sch{\"{o}}nauer, W. and Raith, K. and Glotz, G.},
-  title = {The principle of the difference of difference quotients as a key
-	to the selfadaptive solution of nonlinear partial differential equations},
-  journal = {Computer Methods in Applied Mechanics and Engineering},
-  year = {1981},
-  volume = {28},
-  pages = {327-359}
+ at article{Schoenauer1981a,
+	author = "Sch{\"o}nauer, W. and Raith, K. and Glotz, G.",
+	journal = "Computer Methods in Applied Mechanics and Engineering",
+	pages = "327--359",
+	title = "{The principle of the difference of difference quotients as a key to the selfadaptive solution of nonlinear partial differential equations}",
+	volume = "28",
+	year = "1981"
 }
 
-
- at Misc{gaussfilter,
-    title = {Gaussian blur},
-    howpublished = {\url{http://en.wikipedia.org/wiki/Gaussian_blur}},
-    url = {http://en.wikipedia.org/wiki/Gaussian_blur},
-    urldate = {20130221},
+ at misc{gaussfilter,
+	howpublished = "\url{http://en.wikipedia.org/wiki/Gaussian\_blur}",
+	title = "{Gaussian blur}",
+	url = "http://en.wikipedia.org/wiki/Gaussian_blur",
+	urldate = "20130221"
 }
 
diff --git a/doc/user/figures/EscriptDiagram1.pdf b/doc/user/figures/EscriptDiagram1.pdf
index 7843e7a..ae9c1af 100644
Binary files a/doc/user/figures/EscriptDiagram1.pdf and b/doc/user/figures/EscriptDiagram1.pdf differ
diff --git a/doc/user/figures/EscriptDiagram1.svg b/doc/user/figures/EscriptDiagram1.svg
index baa89fe..4a49bfb 100644
--- a/doc/user/figures/EscriptDiagram1.svg
+++ b/doc/user/figures/EscriptDiagram1.svg
@@ -2,13 +2,47 @@
 <!-- Created with Inkscape (http://www.inkscape.org/) -->
 
 <svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
    xmlns:svg="http://www.w3.org/2000/svg"
    xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
    version="1.1"
-   width="420"
+   width="571"
    height="275"
    id="svg2"
-   xml:space="preserve"><defs
+   xml:space="preserve"
+   inkscape:version="0.48.5 r10040"
+   sodipodi:docname="EscriptDiagram1.svg"><metadata
+     id="metadata78"><rdf:RDF><cc:Work
+         rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1538"
+     inkscape:window-height="1015"
+     id="namedview76"
+     showgrid="false"
+     showguides="true"
+     inkscape:guide-bbox="true"
+     inkscape:zoom="0.84047619"
+     inkscape:cx="212.3796"
+     inkscape:cy="137.5"
+     inkscape:window-x="0"
+     inkscape:window-y="27"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g5692-3"><sodipodi:guide
+       orientation="0,1"
+       position="502.09632,610.36827"
+       id="guide3055" /></sodipodi:namedview><defs
      id="defs6"><marker
        refX="0"
        refY="0"
@@ -18,7 +52,8 @@
          d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
          transform="scale(0.2,0.2)"
          id="path4526"
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none" /></marker><marker
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         inkscape:connector-curvature="0" /></marker><marker
        refX="0"
        refY="0"
        orient="auto"
@@ -27,7 +62,8 @@
          d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
          transform="scale(0.8,0.8)"
          id="path4520"
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none" /></marker><marker
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         inkscape:connector-curvature="0" /></marker><marker
        refX="0"
        refY="0"
        orient="auto"
@@ -36,7 +72,8 @@
          d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
          transform="matrix(-0.3,0,0,-0.3,0.69,0)"
          id="path4410"
-         style="font-size:12px;fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round" /></marker><marker
+         style="font-size:12px;fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         inkscape:connector-curvature="0" /></marker><marker
        refX="0"
        refY="0"
        orient="auto"
@@ -45,7 +82,8 @@
          d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
          transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
          id="path4392"
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none" /></marker><marker
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         inkscape:connector-curvature="0" /></marker><marker
        refX="0"
        refY="0"
        orient="auto"
@@ -54,7 +92,8 @@
          d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
          transform="matrix(-0.8,0,0,-0.8,-10,0)"
          id="path4380"
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none" /></marker><marker
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         inkscape:connector-curvature="0" /></marker><marker
        refX="0"
        refY="0"
        orient="auto"
@@ -63,7 +102,8 @@
          d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
          transform="matrix(0.8,0,0,0.8,10,0)"
          id="path4377"
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none" /></marker><marker
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         inkscape:connector-curvature="0" /></marker><marker
        refX="0"
        refY="0"
        orient="auto"
@@ -72,7 +112,8 @@
          d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
          transform="matrix(-0.4,0,0,-0.4,-4,0)"
          id="path4386"
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none" /></marker><marker
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         inkscape:connector-curvature="0" /></marker><marker
        refX="0"
        refY="0"
        orient="auto"
@@ -81,7 +122,8 @@
          d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
          transform="matrix(0.4,0,0,0.4,4,0)"
          id="path4383"
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none" /></marker><marker
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         inkscape:connector-curvature="0" /></marker><marker
        refX="0"
        refY="0"
        orient="auto"
@@ -90,7 +132,8 @@
          d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
          transform="matrix(-0.8,0,0,-0.8,-10,0)"
          id="path4380-0"
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none" /></marker><marker
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         inkscape:connector-curvature="0" /></marker><marker
        refX="0"
        refY="0"
        orient="auto"
@@ -99,7 +142,8 @@
          d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
          transform="matrix(0.8,0,0,0.8,10,0)"
          id="path4377-1"
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none" /></marker><marker
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         inkscape:connector-curvature="0" /></marker><marker
        refX="0"
        refY="0"
        orient="auto"
@@ -108,7 +152,8 @@
          d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
          transform="matrix(-0.8,0,0,-0.8,-10,0)"
          id="path4380-2"
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none" /></marker><marker
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         inkscape:connector-curvature="0" /></marker><marker
        refX="0"
        refY="0"
        orient="auto"
@@ -117,7 +162,8 @@
          d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
          transform="matrix(-0.8,0,0,-0.8,-10,0)"
          id="path4380-7"
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none" /></marker><marker
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         inkscape:connector-curvature="0" /></marker><marker
        refX="0"
        refY="0"
        orient="auto"
@@ -126,7 +172,8 @@
          d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
          transform="matrix(-0.8,0,0,-0.8,-10,0)"
          id="path4380-00"
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none" /></marker><marker
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         inkscape:connector-curvature="0" /></marker><marker
        refX="0"
        refY="0"
        orient="auto"
@@ -135,7 +182,8 @@
          d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
          transform="scale(0.2,0.2)"
          id="path4526-1"
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none" /></marker><marker
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         inkscape:connector-curvature="0" /></marker><marker
        refX="0"
        refY="0"
        orient="auto"
@@ -144,193 +192,229 @@
          d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
          transform="scale(0.2,0.2)"
          id="path4526-4"
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none" /></marker></defs><g
-     transform="matrix(1.25,0,0,-1.25,-24.423729,753.74313)"
-     id="g12"><g
-       transform="matrix(0.84158922,0,0,0.84038693,8.2252651,77.15955)"
-       id="g4351"><text
-         x="111.30137"
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+         inkscape:connector-curvature="0" /></marker></defs><g
+     id="g4351"
+     transform="matrix(1.0780678,0,0,-1.0504837,51.354809,657.29369)"><text
+       sodipodi:linespacing="125%"
+       style="font-size:9.51262283px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"
+       xml:space="preserve"
+       id="text3351"
+       transform="scale(1,-1)"
+       y="-542.36786"
+       x="111.30137"><tspan
+         id="tspan3353"
          y="-542.36786"
-         transform="scale(1,-1)"
-         id="text3351"
-         xml:space="preserve"
-         style="font-size:9.51262283px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"><tspan
-           x="111.30137"
-           y="-542.36786"
-           id="tspan3353">Reduced</tspan><tspan
-           x="111.30137"
-           y="-530.47705"
-           id="tspan3355">ContinuousFunction()</tspan></text>
+         x="111.30137">Reduced</tspan><tspan
+         id="tspan3355"
+         y="-530.47705"
+         x="111.30137">ContinuousFunction()</tspan></text>
 <rect
-         width="145.73087"
-         height="33.766907"
-         ry="11.551837"
-         x="37.765621"
-         y="-556.26538"
-         transform="scale(1,-1)"
-         id="rect3537"
-         style="fill:none;stroke:#000000;stroke-width:1.90252447;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" /></g><g
-       transform="matrix(0.84158922,0,0,0.84038693,8.2252651,168.63677)"
-       id="g4357"><text
-         x="111.30137"
+       style="fill:none;stroke:#000000;stroke-width:1.90252447;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3537"
+       transform="scale(1,-1)"
+       y="-556.26538"
+       x="37.765621"
+       ry="11.551837"
+       height="33.766907"
+       width="145.73087" /></g><g
+     id="g4357"
+     transform="matrix(1.0780678,0,0,-1.0504837,51.354809,542.94717)"><text
+       sodipodi:linespacing="125%"
+       style="font-size:9.51262283px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"
+       xml:space="preserve"
+       id="text3351-6"
+       transform="scale(1,-1)"
+       y="-491.93771"
+       x="111.30137"><tspan
+         id="tspan3355-8"
          y="-491.93771"
-         transform="scale(1,-1)"
-         id="text3351-6"
-         xml:space="preserve"
-         style="font-size:9.51262283px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"><tspan
-           x="111.30137"
-           y="-491.93771"
-           id="tspan3355-8">ContinuousFunction()</tspan></text>
+         x="111.30137">ContinuousFunction()</tspan></text>
 <rect
-         width="145.73087"
-         height="33.766907"
-         ry="11.551837"
-         x="37.765621"
-         y="-511.83524"
-         transform="scale(1,-1)"
-         id="rect3537-7"
-         style="fill:none;stroke:#000000;stroke-width:1.90252447;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" /></g><g
-       transform="matrix(0.84158922,0,0,0.84038693,6.72959,70.43646)"
-       id="g5637"><text
-         x="321.81552"
+       style="fill:none;stroke:#000000;stroke-width:1.90252447;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3537-7"
+       transform="scale(1,-1)"
+       y="-511.83524"
+       x="37.765621"
+       ry="11.551837"
+       height="33.766907"
+       width="145.73087" /></g><g
+     id="g5637"
+     transform="matrix(1.0780678,0,0,-1.0504837,49.438863,665.69755)"><text
+       sodipodi:linespacing="125%"
+       style="font-size:9.51262283px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"
+       xml:space="preserve"
+       id="text3351-6-9"
+       transform="scale(1,-1)"
+       y="-608.76324"
+       x="321.81552"><tspan
+         id="tspan3355-8-8"
          y="-608.76324"
-         transform="scale(1,-1)"
-         id="text3351-6-9"
-         xml:space="preserve"
-         style="font-size:9.51262283px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"><tspan
-           x="321.81552"
-           y="-608.76324"
-           id="tspan3355-8-8">Solution()</tspan></text>
+         x="321.81552">Solution()</tspan></text>
 <rect
-         width="145.73087"
-         height="33.766907"
-         ry="11.551837"
-         x="248.27975"
-         y="-628.68652"
-         transform="scale(1,-1)"
-         id="rect3537-0"
-         style="fill:none;stroke:#000000;stroke-width:1.90252447;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" /></g><g
-       transform="matrix(0.84158922,0,0,0.84038693,183.89601,210.83142)"
-       id="g4367"><text
-         x="111.29199"
+       style="fill:none;stroke:#000000;stroke-width:1.90252447;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3537-0"
+       transform="scale(1,-1)"
+       y="-628.68652"
+       x="248.27975"
+       ry="11.551837"
+       height="33.766907"
+       width="145.73087" /></g><g
+     id="g4367"
+     transform="matrix(1.0780678,0,0,-1.0504837,276.38735,490.20385)"><text
+       sodipodi:linespacing="125%"
+       style="font-size:9.51262283px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"
+       xml:space="preserve"
+       id="text3351-6-9-3"
+       transform="scale(1,-1)"
+       y="-378.17078"
+       x="111.29199"><tspan
+         id="tspan3355-8-8-4"
          y="-378.17078"
-         transform="scale(1,-1)"
-         id="text3351-6-9-3"
-         xml:space="preserve"
-         style="font-size:9.51262283px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"><tspan
-           x="111.29199"
-           y="-378.17078"
-           id="tspan3355-8-8-4">ReducedSolution()</tspan></text>
+         x="111.29199">ReducedSolution()</tspan></text>
 <rect
-         width="145.73087"
-         height="33.766907"
-         ry="11.551837"
-         x="37.765621"
-         y="-398.09409"
-         transform="scale(1,-1)"
-         id="rect3537-3"
-         style="fill:none;stroke:#000000;stroke-width:1.90252447;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" /></g><path
-       d="m 167.9257,584.58778 42.62681,0"
-       id="path4372"
-       style="fill:none;stroke:#000000;stroke-width:0.79999995px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart);marker-end:url(#Arrow1Lend)" /><path
-       d="m 101.33117,567.64223 0,-18.0559"
-       id="path5392"
-       style="fill:none;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend)" /><path
-       d="m 277.00192,567.6424 0,-18.05623"
-       id="path5392-0"
-       style="fill:none;stroke:#000000;stroke-width:0.79999995px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:none;marker-end:url(#Arrow1Lend)" /><g
-       transform="matrix(0.84158922,0,0,0.84038693,-1.371258,62.93849)"
-       id="g5697"><text
-         x="122.48392"
+       style="fill:none;stroke:#000000;stroke-width:1.90252447;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3537-3"
+       transform="scale(1,-1)"
+       y="-398.09409"
+       x="37.765621"
+       ry="11.551837"
+       height="33.766907"
+       width="145.73087" /></g><path
+     inkscape:connector-curvature="0"
+     style="fill:none;stroke:#000000;stroke-width:1.01232028px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart);marker-end:url(#Arrow1Lend)"
+     id="path4372"
+     d="m 255.92954,23.008405 54.60454,0" /><path
+     inkscape:connector-curvature="0"
+     style="fill:none;stroke:#000000;stroke-width:1.01232028px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend)"
+     id="path5392"
+     d="m 170.62259,44.190342 0,22.569875" /><path
+     inkscape:connector-curvature="0"
+     style="fill:none;stroke:#000000;stroke-width:1.01232028px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:none;marker-end:url(#Arrow1Lend)"
+     id="path5392-0"
+     d="m 395.65513,44.19013 0,22.570288" /><g
+     id="g5697"
+     transform="matrix(1.0780678,0,0,-1.0504837,-22.938245,675.07002)"><text
+       sodipodi:linespacing="125%"
+       style="font-size:9.51262283px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"
+       xml:space="preserve"
+       id="text3351-6-9-3-1"
+       transform="scale(1,-1)"
+       y="-450.23294"
+       x="122.48392"><tspan
+         id="tspan3355-8-8-4-7"
          y="-450.23294"
-         transform="scale(1,-1)"
-         id="text3351-6-9-3-1"
-         xml:space="preserve"
-         style="font-size:9.51262283px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"><tspan
-           x="122.48392"
-           y="-450.23294"
-           id="tspan3355-8-8-4-7">FunctionOnBoundary()</tspan></text>
+         x="122.48392">FunctionOnBoundary()</tspan></text>
 <rect
-         width="145.73087"
-         height="33.766907"
-         ry="11.551837"
-         x="49.16848"
-         y="-469.76483"
-         transform="scale(1,-1)"
-         id="rect3537-0-2"
-         style="fill:none;stroke:#000000;stroke-width:1.90252447;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" /></g><g
-       transform="matrix(0.84158922,0,0,0.84038693,162.88813,116.49536)"
-       id="g5692"><text
-         x="136.04321"
+       style="fill:none;stroke:#000000;stroke-width:1.90252447;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3537-0-2"
+       transform="scale(1,-1)"
+       y="-469.76483"
+       x="49.16848"
+       ry="11.551837"
+       height="33.766907"
+       width="145.73087" /></g><g
+     id="g5692"
+     transform="matrix(1.0780678,0,0,-1.0504837,139.47646,608.12393)"><text
+       sodipodi:linespacing="125%"
+       style="font-size:9.51262283px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"
+       xml:space="preserve"
+       id="text3351-6-9-3-1-2"
+       transform="scale(1,-1)"
+       y="-386.13849"
+       x="136.04321"><tspan
+         id="tspan3355-8-8-4-7-9"
          y="-386.13849"
-         transform="scale(1,-1)"
-         id="text3351-6-9-3-1-2"
-         xml:space="preserve"
-         style="font-size:9.51262283px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"><tspan
-           x="136.04321"
-           y="-386.13849"
-           id="tspan3355-8-8-4-7-9">Function()</tspan></text>
+         x="136.04321">Function()</tspan></text>
 <rect
-         width="145.73087"
-         height="33.766907"
-         ry="11.551837"
-         x="62.727787"
-         y="-406.03601"
-         transform="scale(1,-1)"
-         id="rect3537-0-2-1"
-         style="fill:none;stroke:#000000;stroke-width:1.90252447;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" /></g><g
-       transform="matrix(0.84158922,0,0,0.84038693,-13.923761,118.77437)"
-       id="g5687"><text
-         x="137.39915"
+       style="fill:none;stroke:#000000;stroke-width:1.90252447;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3537-0-2-1"
+       transform="scale(1,-1)"
+       y="-406.03601"
+       x="62.727787"
+       ry="11.551837"
+       height="33.766907"
+       width="145.73087" /></g><g
+     id="g5687"
+     transform="matrix(1.0780678,0,0,-1.0504837,6.982119,605.27517)"><text
+       sodipodi:linespacing="125%"
+       style="font-size:9.51262283px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"
+       xml:space="preserve"
+       id="text3351-6-9-3-1-2-7"
+       transform="scale(1,-1)"
+       y="-330.54526"
+       x="137.39915"><tspan
+         id="tspan3355-8-8-4-7-9-8"
          y="-330.54526"
-         transform="scale(1,-1)"
-         id="text3351-6-9-3-1-2-7"
-         xml:space="preserve"
-         style="font-size:9.51262283px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"><tspan
-           x="137.39915"
-           y="-330.54526"
-           id="tspan3355-8-8-4-7-9-8">FunctionOnContactZero()</tspan></text>
+         x="137.39915">FunctionOnContactZero()</tspan></text>
 <rect
-         width="145.73087"
-         height="33.766907"
-         ry="11.551837"
-         x="64.083717"
-         y="-350.44278"
-         transform="scale(1,-1)"
-         id="rect3537-0-2-8"
-         style="fill:none;stroke:#000000;stroke-width:1.90252447;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" /></g><g
-       transform="matrix(0.84158922,0,0,0.84038693,175.44064,178.02877)"
-       id="g5682"><text
-         x="121.12797"
+       style="fill:none;stroke:#000000;stroke-width:1.90252447;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3537-0-2-8"
+       transform="scale(1,-1)"
+       y="-350.44278"
+       x="64.083717"
+       ry="11.551837"
+       height="33.766907"
+       width="145.73087" /></g><g
+     id="g5682"
+     transform="matrix(1.0780678,0,0,-1.0504837,249.5561,531.20717)"><text
+       sodipodi:linespacing="125%"
+       style="font-size:9.51262283px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"
+       xml:space="preserve"
+       id="text3351-6-9-3-1-2-7-2"
+       transform="scale(1,-1)"
+       y="-260.03677"
+       x="121.12797"><tspan
+         id="tspan3355-8-8-4-7-9-8-3"
          y="-260.03677"
-         transform="scale(1,-1)"
-         id="text3351-6-9-3-1-2-7-2"
-         xml:space="preserve"
-         style="font-size:9.51262283px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"><tspan
-           x="121.12797"
-           y="-260.03677"
-           id="tspan3355-8-8-4-7-9-8-3">FunctionOnContactOne()</tspan></text>
+         x="121.12797">FunctionOnContactOne()</tspan></text>
 <rect
-         width="145.73087"
-         height="33.766907"
-         ry="11.551837"
-         x="47.812534"
-         y="-279.9343"
-         transform="scale(1,-1)"
-         id="rect3537-0-2-87"
-         style="fill:none;stroke:#000000;stroke-width:1.90252447;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" /></g><path
-       d="m 167.9257,399.09327 42.62681,0"
-       id="path4372-1"
-       style="fill:none;stroke:#000000;stroke-width:0.79999995px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart);marker-end:url(#Arrow1Lend)" /><path
-       d="m 24.457468,493.03429 328.955512,0"
-       id="path5736"
-       style="fill:none;stroke:#000000;stroke-width:1.60000002;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:1.60000001, 3.20000002;stroke-dashoffset:0" /><path
-       d="m 272.10539,506.0961 0,-25.3902"
-       id="path5392-6"
-       style="fill:none;stroke:#000000;stroke-width:11.19999981;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#TriangleOutS)" /><path
-       d="m 186.75034,506.0961 0,-25.3902"
-       id="path5392-6-6"
-       style="fill:none;stroke:#000000;stroke-width:11.19999981;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#TriangleOutS)" /><path
-       d="m 101.39528,506.0961 0,-25.3902"
-       id="path5392-6-7"
-       style="fill:none;stroke:#000000;stroke-width:11.19999981;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#TriangleOutS)" /></g></svg>
\ No newline at end of file
+       style="fill:none;stroke:#000000;stroke-width:1.90252447;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3537-0-2-87"
+       transform="scale(1,-1)"
+       y="-279.9343"
+       x="47.812534"
+       ry="11.551837"
+       height="33.766907"
+       width="145.73087" /></g><path
+     inkscape:connector-curvature="0"
+     style="fill:none;stroke:#000000;stroke-width:1.01232028px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart);marker-end:url(#Arrow1Lend)"
+     id="path4372-1"
+     d="m 239.92954,254.87654 54.60454,0" /><path
+     inkscape:connector-curvature="0"
+     style="fill:none;stroke:#000000;stroke-width:0.80567092;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:0.80567093, 1.61134187;stroke-dashoffset:0"
+     id="path5736"
+     d="m 41.388836,136.26047 496.069184,0" /><path
+     inkscape:connector-curvature="0"
+     style="fill:none;stroke:#000000;stroke-width:14.17248344;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#TriangleOutS)"
+     id="path5392-6"
+     d="m 389.38272,121.12301 0,31.73774" /><path
+     inkscape:connector-curvature="0"
+     style="fill:none;stroke:#000000;stroke-width:14.17248344;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#TriangleOutS)"
+     id="path5392-6-6"
+     d="m 280.04372,121.12301 0,31.73774" /><path
+     inkscape:connector-curvature="0"
+     style="fill:none;stroke:#000000;stroke-width:14.17248344;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#TriangleOutS)"
+     id="path5392-6-7"
+     d="m 170.70471,121.12301 0,31.73774" /><g
+     id="g5692-3"
+     transform="matrix(1.0780678,0,0,-1.0504837,313.58902,608.58595)"><text
+       sodipodi:linespacing="125%"
+       style="font-size:9.51262283px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"
+       xml:space="preserve"
+       id="text3351-6-9-3-1-2-5"
+       transform="scale(1,-1)"
+       y="-386.13849"
+       x="136.04321"><tspan
+         id="tspan3355-8-8-4-7-9-0"
+         y="-386.13849"
+         x="136.04321">DiracDeltaFunctions()</tspan></text>
+<rect
+       style="fill:none;stroke:#000000;stroke-width:1.90252447;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       id="rect3537-0-2-1-8"
+       transform="scale(1,-1)"
+       y="-407.93988"
+       x="62.727787"
+       ry="11.551837"
+       height="33.766907"
+       width="145.73087" /></g></svg>
\ No newline at end of file
diff --git a/doc/user/figures/diracplot.png b/doc/user/figures/diracplot.png
new file mode 100644
index 0000000..aa57762
Binary files /dev/null and b/doc/user/figures/diracplot.png differ
diff --git a/doc/user/finley.tex b/doc/user/finley.tex
index 737d224..09758bf 100644
--- a/doc/user/finley.tex
+++ b/doc/user/finley.tex
@@ -17,13 +17,16 @@
 %\declaremodule{extension}{finley}
 %\modulesynopsis{Solving linear, steady partial differential equations using finite elements}
 
-{\it finley} is a C++ library for solving linear, steady partial differential
+The \finley library allows the creation of domains for solving
+linear, steady partial differential
 equations\index{partial differential equations} (PDEs) or systems
 of PDEs using isoparametrical finite elements\index{FEM!isoparametrical}.
 It supports unstructured 1D, 2D and 3D meshes.
-The module \finley provides access to the library through the \LinearPDE class
-of \escript supporting its full functionality.
-{\it finley} is parallelized using the \OPENMP paradigm.
+The PDEs themselves are represented by the \LinearPDE class
+of \escript.
+\finley is parallelized under both \OPENMP and \MPI.
+A more restricted form of this library ({\it dudley}) is described in 
+Section~\ref{sec:dudley}.
 
 \section{Formulation}
 For a single PDE that has a solution with a single component the linear PDE is
@@ -497,7 +500,7 @@ running on multiple CPUs with \MPI.
 \end{funcdesc}
 
 \begin{funcdesc}{Brick}{n0,n1,n2,order=1,l0=1.,l1=1.,l2=1., integrationOrder=-1,
-  periodic0=\False, periodic1=\False, \\ periodic2=\False, useElementsOnFace=\False, optimize=\False}
+  periodic0=\False, periodic1=\False, \\ periodic2=\False, useElementsOnFace=\False,useFullElementOrder=\False, optimize=\False}
 generates a \Domain object representing a three-dimensional brick between
 $(0,0,0)$ and $(l0,l1,l2)$ with orthogonal faces. The brick is filled with
 \var{n0} elements along the $x_0$-axis,
@@ -544,3 +547,15 @@ The corresponding face elements are removed from the mesh.
 \function{JoinFaces} is not supported under \MPI with more than one rank.
 \end{funcdesc}
 
+\section{\dudley}
+\label{sec:dudley}
+The {\it dudley} library is a restricted version of {\it finley}.
+So in many ways it can be used as a ``drop-in'' replacement.
+Dudley domains are simpler in that only triangular (2D), tetrahedral (3D) and line elements are supported.
+Note, this also means that dudley does not support:
+\begin{itemize}
+\item Dirac delta functions
+\item contact elements
+\item macro elements
+\end{itemize}
+
diff --git a/doc/user/firststep.tex b/doc/user/firststep.tex
index de5c81a..7f3b159 100644
--- a/doc/user/firststep.tex
+++ b/doc/user/firststep.tex
@@ -424,7 +424,7 @@ will produce correct results when run on more than one processor under \MPI.}.
 
 As an alternative to \MATPLOTLIB, {\it escript} supports exporting data to
 \VTK and \SILO files which can be read by visualization tools such as
-mayavi\cite{mayavi} and \VisIt~\cite{VisIt}. This method is \MPI safe and
+\mayavi\cite{mayavi} and \VisIt~\cite{VisIt}. This method is \MPI safe and
 works with large 2D and 3D problems.
 
 To write the solution \var{u} of the Poisson problem in the \VTK file format
diff --git a/doc/user/linearPDE.tex b/doc/user/linearPDE.tex
index 9697c91..a1b8f58 100644
--- a/doc/user/linearPDE.tex
+++ b/doc/user/linearPDE.tex
@@ -26,16 +26,18 @@ denotes the outer normal field on $\Gamma$.
 For a single PDE with a solution that has a single component the linear PDE is
 defined in the following form:
 \begin{equation}\label{LINEARPDE.SINGLE.1}
--(A_{jl} u_{,l})_{,j}-(B_{j} u)_{,j}+C_{l} u_{,l}+D u =-X_{j,j}+Y \; .
+-(A_{jl} u_{,l})_{,j}-(B_{j} u)_{,j}+C_{l} u_{,l}+D u + \sum_p d^{dirac}(p) \; u(p) =-X_{j,j}+Y + \sum_p y^{dirac}(p)  \; .
 \end{equation}
 $u_{,j}$ denotes the derivative of $u$ with respect to the $j$-th spatial direction.
 Einstein's summation convention, i.e. summation over indexes appearing twice
-in a term of a sum, is used in this chapter.
+in a term of a sum, is used in this chapter. $y^{dirac}(p)$ represents a nodal source term 
+at point $p$; similarly, $d^{dirac}(p)$ defines a Dirac delta-function 
+term at point $p$. 
 The coefficients $A$, $B$, $C$, $D$, $X$ and $Y$ have to be specified through
 \Data objects in the \Function on the PDE or objects that can be converted
-into such \Data objects.
+into such \Data objects.
 $A$ is a \RankTwo, $B$, $C$ and $X$ are each a \RankOne and $D$ and $Y$ are
-scalars.
+scalars. $y^{dirac}$ and $d^{dirac}$ are each scalars in the \DiracDeltaFunctions. 
 The following natural boundary conditions are considered\index{boundary condition!natural} on $\Gamma$:
 \begin{equation}\label{LINEARPDE.SINGLE.2}
 n_{j}(A_{jl} u_{,l}+B_{j} u)+d u=n_{j}X_{j} + y  \;.
@@ -54,9 +56,11 @@ condition set by \eqn{LINEARPDE.SINGLE.1} or \eqn{LINEARPDE.SINGLE.2}.
 
 For a system of PDEs and a solution with several components the PDE has the form
 \begin{equation}\label{LINEARPDE.SYSTEM.1}
--(A_{ijkl} u_{k,l})_{,j}-(B_{ijk} u_{k})_{,j}+C_{ikl} u_{k,l}+D_{ik} u_{k} =-X_{ij,j}+Y_{i} \; .
+-(A_{ijkl} u_{k,l})_{,j}-(B_{ijk} u_{k})_{,j}+C_{ikl} u_{k,l}+D_{ik} u_{k}+
+ \sum_p d^{dirac}_{ik}(p) \;  u_i(p)
+ =-X_{ij,j}+Y_{i} +\sum_p \; y^{dirac}_i(p)   \; .
 \end{equation}
-$A$ is a \RankFour, $B$ and $C$ are each a \RankThree, $D$ and $X$ are each a \RankTwo and $Y$ is a \RankOne.
+$A$ is a \RankFour, $B$ and $C$ are each a \RankThree, $D$, $d^{dirac}$ and $X$ are each a \RankTwo and $Y$ and $y^{dirac}$ are each a \RankOne.
 The natural boundary conditions\index{boundary condition!natural} take the form:
 \begin{equation}\label{LINEARPDE.SYSTEM.2}
 n_{j}(A_{ijkl} u_{k,l}+B_{ijk} u_{k})+d_{ik} u_{k}=n_{j}X_{ij}+y_{i}  \;.
@@ -201,7 +205,8 @@ coefficient is defined.
 \optional{, X}\optional{, Y}
 \optional{, d}\optional{, y}
 \optional{, d_contact}\optional{, y_contact}
-\optional{, q}\optional{, r}}
+\optional{, d_dirac}\optional{, y_dirac}
+\optional{, q}\optional{, r}}
 assigns new values to coefficients. By default all values are assumed to be
 zero\footnote{In fact, it is assumed they are not present by assigning the
 value \code{escript.Data()}. This can be used by the solver library to reduce
@@ -500,7 +505,8 @@ sets the solver method to be used.
 Use \var{method}=\member{SolverOptions.DIRECT} to indicate that a direct
 rather than an iterative solver should be used and use
 \var{method}=\member{SolverOptions.ITERATIVE} to indicate that an iterative
-rather than a direct solver should be used.
+rather than a direct solver should be used. Note that SolverOptions needs to be
+imported from linearPDEs and is not the same as the object returned by pde.getSolverOptions().
 The value of \var{method} must be one of the constants:\\
  \member{SolverOptions.DEFAULT} -- use default solver depending on other options\\
  \member{SolverOptions.DIRECT} -- use a direct solver if available\\
diff --git a/doc/user/ripley.tex b/doc/user/ripley.tex
index f346289..ff63a3e 100644
--- a/doc/user/ripley.tex
+++ b/doc/user/ripley.tex
@@ -13,12 +13,11 @@
 %
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
-\chapter{The \ripley Module}\label{chap:ripley}
+\chapter{The \ripley Module}\label{chap:ripley}\index{ripley}
 %\declaremodule{extension}{ripley}
 %\modulesynopsis{Solving linear, steady partial differential equations using finite elements}
 
-\ripley is a specialised form of \finley, supporting structured, 
-uniform meshes with rectangular elements in 2D and hexahedral elements 3D.
+\ripley is an alternative domain library to \finley; it supports structured, uniform meshes with rectangular elements in 2D and hexahedral elements in 3D.
 Uniform meshes allow a straightforward division of elements among processes
 with \MPI and allow for a number of optimizations when solving PDEs.
 \ripley also supports fast assemblers for certain types of PDE (specifically
@@ -33,7 +32,12 @@ since only one element type is supported and all elements need to be equally
 sized.
 For the same reasons, \ripley does not allow assigning coordinates via
 \function{setX}.
-Other than that \ripley and \finley are generally interchangeable in a script
+
+While \ripley cannot be used with mesh files, it can be used to read in \GOCAD \index{GOCAD} \index{ripley!GOCAD}
+data. A script with an example of a voxet reader is included in the examples as
+\texttt{voxet_reader.py}.
+
+Other than use of meshfiles, \ripley and \finley are generally interchangeable in a script
 with both modules having the \class{Rectangle} or \class{Brick} functions
 available. Consider the following example which creates a 2D \ripley domain:
 
@@ -42,6 +46,31 @@ available. Consider the following example which creates a 2D \ripley domain:
  dom = Rectangle(9, 9)
 \end{python}
 
+
+
+
+\index{ripley!multi-resolution domains} \index{multi-resolution domains}
+Multi-resolution domains are supported in \ripley via use of \class{MultiBrick}
+and \class{MultiRectangle}. Each level of one of
+these domains has twice the elements in each axis of the next lower resolution.
+The \class{MultiBrick} is not currently supported when running \escript with
+multiple processes using \MPI. Interpolation between these multi-resolution
+domains is possible providing they have matching dimensions and subdivisions,
+along with a compatible number of elements. To simplify these conditions the
+use of \class{MultiResolutionDomain} is highly recommended. The following
+example creates two 2D domains of different resolutions and interpolates
+between them:
+
+\begin{python}
+ from esys.ripley import MultiResolutionDomain
+ mrd = MultiResolutionDomain(2, n0=10, n1=10)
+ ten_by_ten = mrd.getLevel(0)
+ data10 = Vector(..., Function(ten_by_ten))
+ ...
+ forty_by_forty = mrd.getLevel(2)
+ data40 = interpolate(data10, Function(forty_by_forty))
+\end{python}
+
 \section{Formulation}
 For a single PDE that has a solution with a single component the linear PDE is
 defined in the following form:
@@ -125,6 +154,14 @@ case the coordinates will range between \var{x0} and \var{x1}. For example:
 bottom-left node is located at $(5.5, 9.0)$ and the top-right node has
 coordinates $(15.5, 14.0)$, see Figure~\ref{fig:ripleyrect}.
 
+The \class{MultiResolutionDomain} class is available as a wrapper, taking the
+dimension of the domain followed by the same arguments as \class{Brick} (if
+a two-dimensional domain is requested, any extra arguments over those used by
+\class{Rectangle} are ignored). All of these standard arguments to
+\class{MultiResolutionDomain} must be supplied as keyword arguments
+(e.g. \var{d0}=...). The \class{MultiResolutionDomain} can then generate
+compatible domains for interpolation.
+
 \section{Linear Solvers in \SolverOptions}
 Currently direct solvers and GPU-based solvers are not supported under \MPI
 when running with more than one rank.
@@ -138,7 +175,9 @@ ill-posed equations, \ripley uses the \MKL\footnote{If the stiffness matrix is
 non-regular \MKL may return without a proper error code. If you observe
 suspicious solutions when using \MKL, this may be caused by a non-invertible
 operator.} solver package. If \MKL is not available \UMFPACK is used.
-If \UMFPACK is not available a suitable iterative solver from \PASO is used.
+If \UMFPACK is not available a suitable iterative solver from \PASO is used, but
+if a direct solver was requested via the \SolverOptions an exception will be
+raised.
 
 
 
diff --git a/doc/user/speckley.tex b/doc/user/speckley.tex
index 1663023..fc6de9f 100644
--- a/doc/user/speckley.tex
+++ b/doc/user/speckley.tex
@@ -13,7 +13,7 @@
 %
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
-\chapter{The \speckley Module}\label{chap:speckley}
+\chapter{The \speckley Module}\label{chap:speckley}\index{speckley}
 %\declaremodule{extension}{speckley}
 %\modulesynopsis{Solving linear, steady partial differential equations using spectral elements}
 
diff --git a/doc/user/subworlds.tex b/doc/user/subworlds.tex
new file mode 100644
index 0000000..24f7f85
--- /dev/null
+++ b/doc/user/subworlds.tex
@@ -0,0 +1,293 @@
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Copyright (c) 2015 by The University of Queensland
+% http://www.uq.edu.au
+%
+% Primary Business: Queensland, Australia
+% Licensed under the Open Software License version 3.0
+% http://www.opensource.org/licenses/osl-3.0.php
+%
+% Development from 2014 by Centre for Geoscience Computing (GeoComp)
+%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%!TEX root = user.tex
+\chapter{Escript subworlds}
+\label{CHAP:subworld}
+\section{Introduction}
+By default, when \escript is used with multiple processes\footnote{That is, when MPI is employed 
+(probably in a cluster setting).
+This discussion is not concerned with using multiple threads within a single process (OpenMP).
+The functionality described here is compatible with OpenMP but orthogonal to it (in the same sense that MPI and OpenMP are
+orthogonal).}
+, it will use all the resources available to it to solve each PDE.
+With high resolution domains, this is what you want.
+However, in some cases, for example inversion, it may be desirable to solve a number of lower resolution problems but the
+total computational work is still significant.
+In such cases, \escript now allows you to subdivide \escript's processes into smaller groups which can be assigned separate 
+tasks.
+For example, if escript is started with 8 MPI processes\footnote{see the \texttt{run-escript} launcher or 
+your system's mpirun for details.}, then these could be subdivided into 2 groups of 4 processes, 4 groups of 2 processes or 8 groups of 1 process
+\footnote{1 group of 8 processes is also possible, but perhaps not particularly useful aside from debugging scripts.}.
+
+
+
+
+\subsection{Concepts}
+
+\begin{description}
+ \item[Job] describes a task which you wish to perform, the task is represented by a python function.
+There are some restrictions on the type of parameters which can be passed to the function. 
+The same function can be executed many times (possibly with different parameters) but each execution is 
+handled by a separate Job instance.
+\item[Domain object] the representation of the discretized domain the PDEs are defined on.
+Behind the scenes, it also contains information about the processes which share information about the domain.
+Normally in escript, this will be all of them but for subworlds this can change.
+\item[Subworld] the context in which a job executes.
+All the processes which will be executing a particular job belong to the same subworld.
+Each subworld supplies the domain which jobs running on it will use as well as some other variables.
+Python scripts will not normally interact with subworlds directly.
+\item[SplitWorld] a partition of available processes into subworlds; and the python interface to that partition.
+For example:
+\begin{python}
+sw=SplitWorld(4) 
+\end{python}
+will create 4 equal sized subworlds.
+If there are not enough processes to do this or the number of processes is not divisible by the 
+number of subworlds, then an exception will be raised.
+
+Note that creating a SplitWorld object does not prevent escript from solving PDEs normally.
+You can have scripts which use both modes of operation.
+
+Each subworld has its own domain object but those objects must represent the same content (and be created in the same way).
+To do this call \texttt{buildDomains}:
+\begin{python}
+buildDomains(sw, Rectangle,  10, 10, optimize=True)
+\end{python}
+describes the creation of a domain which would normally be constructed with:
+\begin{python}
+Rectangle(10, 10, optimize=True)
+\end{python}.
+Note that you do not create domains and pass them into the SplitWorld\footnote{
+This would not work because such a domain would expect to use all the processes rather than a subset of them.}.
+Also note that \texttt{buildDomains} (and the later \texttt{addJob} and \texttt{addVariable} calls) are not methods of SplitWorld\footnote{
+This has to do with a limitation of the \texttt{boost::python} library used in the development of \escript.}.
+
+The fact that each subworld has its own domain means that objects built directly (or indirectly) on domains 
+can't be passed directly into, out of or between subworlds.
+This includes: Data, FunctionSpace, LinearPDE.
+
+To submit a job for execution, use one of the following calls:
+
+\begin{python}
+addJob(sw, FunctionJob, myfunction, par1=value1, par2=value2)  
+addJobPerWorld(sw, FunctionJob, myfunction, par1=value1, par2=value2)   
+\end{python}
+
+The first call adds a single job to execute on an available subworld.
+The second creates a job instance on each subworld\footnote{
+Note that this is not the same as calling \texttt{addJob} $n$ times where $n$ is the number of subworlds.
+It is better not to make assumptions about how SplitWorld distributes jobs to subworlds.
+}.
+One use for this would be to perform some setup on each subworld. 
+The keyword parameters are illustrative only.
+Since task functions run by jobs must have the parameter list \texttt{(self, **kwargs)}, keyword arguments are the best way
+to get extra information into your function.
+
+To execute submitted jobs:
+\begin{python}
+sw.runJobs() 
+\end{python}
+
+If any of the jobs raised an exception (or there was some other problem), then an exception will be raised in the 
+top level python script.
+This is intended to help diagnose the fault; it is not intended to allow resuming of the calculation under script control\footnote{
+Some things preventing this:
+\begin{itemize}
+ \item Only one of the exceptions will be reported (if multiple jobs raise, you will only see one message).
+ \item The exception does not contain the payload and type of the original exception.
+ \item We do not guarantee that all jobs have been attempted if an exception is raised.
+ \item Variables may be reset and so values may be lost.
+\end{itemize}
+}.
+
+
+
+
+
+\item[Variable] a value declared to persist between jobs and the main way to get complex information into and out of a job.
+The task function for a job can read/write to variables with the \texttt{importValue}/\texttt{exportValue} calls.
+\begin{python}
+def mywork(self, **kwargs):
+   x=self.importValue("x")
+   v=kwargs['translate']
+   self.exportValue('nx', x+v)
+\end{python}
+
+At present, \escript supports three types of variables:
+\begin{python}
+addVariable(sw, name, makeLocalOnly)
+addVariable(sw, name, makeScalarReducer, op) # op=="SET" or "SUM"
+addVariable(sw, name, makeDataReducer, op)
+\end{python}
+. 
+These calls are made in the top level python script and ensure that all subworlds know about all the possible variables.
+The difference between the types is how they interact with other subworlds and what sort of values they can store.
+\begin{itemize}
+ \item ``LocalOnly'' variables can store any python object but each subworld has its own value independent of the others.
+ Because they could be storing anything (and it is not guaranteed to be consistent across worlds), we do not provide an 
+ interface to extract these variables from subworlds.
+ \item ``ScalarReducer'' variables store a floating point value.
+Any values which are exported by jobs during a \texttt{runJobs} call are gathered together and combined (reduced) and then 
+shared with all interested worlds\footnote{
+In this version, this will be all subworlds, but the capability exists to be 
+more selective.}.
+  There are currently two possible reduction operators: \emph{SET} and \emph{SUM}.
+  SUM adds all exported values together.
+  SET only accepts one value per \texttt{runJobs} call and raises an exception if more than one set is attempted.
+It is possible to read these variables outside the splitworld with:
+\begin{python}
+ z=sw.getDoubleVariable(name)
+\end{python}
+\item ``DataReducer'' variables store Data objects and support the same SET and SUM operations as ScalarReducer.
+We do not provide an interface to extract Data objects from subworlds.
+\end{itemize}
+
+Note that all the normal save/load functions will work inside subworlds so while Data objects can't be passed out of SplitWorlds, 
+they can be saved from inside the subworlds.
+
+\end{description}
+
+\section{Example}
+\begin{python}
+from esys.escript import *
+from esys.escript.linearPDEs import Poisson
+from esys.ripley import Rectangle 
+
+# Each node is in its own world
+sw=SplitWorld(getMPISizeWorld())
+buildDomains(sw, Rectangle, 100, 100)
+
+#describe the work we want to do
+# In this case we solve a Poisson equation
+def task(self, **kwargs):
+    v=kwargs['v']
+    dom=self.domain
+    pde=Poisson(dom)
+    x=dom.getX()
+    gammaD=whereZero(x[0])+whereZero(x[1])
+    pde.setValue(f=v, q=gammaD)
+    soln=pde.getSolution()
+    soln.dump('soln%d.ncdf'%v)
+
+# Now we add some jobs
+for i in range(1,20):
+    addJob(sw, FunctionJob, task, v=i)
+# Run them
+sw.runJobs() 
+\end{python}
+
+\section{Classes and Functions}
+
+\begin{methoddesc}[SplitWorld]{SplitWorld}{n}
+Returns a SplitWorld which contains $n$ subworlds; will raise an exception if this is not possible.
+\end{methoddesc}
+
+\begin{funcdesc}{addVariable}{splitworld, name, constructor, args}
+Adds a variable to each of the subworlds built by the function \var{constructor} with arguments \var{args}. 
+\end{funcdesc}
+
+\begin{funcdesc}{makeLocalOnly}{}
+Used to create a variable to store a python object which will be local to each subworld.
+These values will not be transferred between or out of subworlds.
+An example use case is calculating a list of values once on each world and caching them for use by later jobs.
+\end{funcdesc}
+
+\begin{funcdesc}{makeScalarReducer}{name, op}
+Used to create a variable to share and combine floating point values.
+The operation can currently be ``SET'' (allows only one assignment for each \texttt{runJobs} call) or ``SUM''.
+\end{funcdesc}
+
+\begin{funcdesc}{makeDataReducer}{name, op}
+Used to create a variable to share and combine Data values.
+The operation can currently be ``SET'' (allows only one assignment for each \texttt{runJobs} call) or ``SUM''.
+\end{funcdesc}
+
+\begin{funcdesc}{buildDomains}{splitworld, constructor, args}
+Indicates how subworlds should construct their domains.
+\emph{Note that the splitworld code does not support multi-resolution ripley domains yet.}
+\end{funcdesc}
+
+\begin{funcdesc}{addJob}{splitworld, FunctionJob, function, args}
+Submit a job to run \var{function} with \var{args} to be executed in an available subworld.
+\end{funcdesc}
+
+\begin{funcdesc}{addJobPerWorld}{splitworld, FunctionJob, function, args}
+Submit a job to run \var{function} with \var{args} to be executed in each subworld.
+Individual jobs can use properties of \var{self} such as \member{swid} or \member{jobid} to distinguish between
+themselves.
+\end{funcdesc}
+
+\subsection{SplitWorld methods}
+All of these methods are only to be called by the top level python script.
+Do not attempt to use them inside a job.
+
+\begin{methoddesc}[SplitWorld]{removeVariable}{name}
+Removes a variable and its value from all subworlds.
+\end{methoddesc}
+
+\begin{methoddesc}[SplitWorld]{clearVariable}{name}
+Clears the value of the named variable on all worlds.
+The variable no longer has a value but a new value can be exported for it.
+\end{methoddesc}
+
+\begin{methoddesc}[SplitWorld]{getVarList}{}
+Return a python list of pairs \texttt{[name, hasvalue]} (one for each variable).
+\var{hasvalue} is True if the variable currently has a value.
+Mainly useful for debugging.
+\end{methoddesc}
+
+\begin{methoddesc}[SplitWorld]{getDoubleVariable}{name}
+Extracts a floating point value of the named variable to the top level python script.
+If the named variable does not support this an exception will be raised.
+\end{methoddesc}
+
+\begin{methoddesc}[SplitWorld]{copyVariable}{source, dest}
+Copies\footnote{ This is a deep copy for Data objects.} the value into the named variable.
+This avoids the need to create jobs merely to importValue+exportValue into the new name.
+\end{methoddesc}
+
+\begin{methoddesc}[SplitWorld]{getSubWorldCount}{}
+Returns the number of subworlds.
+\end{methoddesc}
+
+\begin{methoddesc}[SplitWorld]{getSubWorldID}{}
+Returns the id of the local world.
+\end{methoddesc}
+
+\subsection{FunctionJob methods and members}
+A FunctionJob instance will be the \var{self} parameter in your task function.
+There are other features of the class but the following are relevant to task functions.
+
+
+\begin{memberdesc}[FunctionJob]{jobid}
+Each job is given an id from an increasing sequence.
+\end{memberdesc}
+
+\begin{memberdesc}[FunctionJob]{swcount}
+The number of subworlds in the SplitWorld. 
+\end{memberdesc}
+
+\begin{memberdesc}[FunctionJob]{swid}
+The id of the subworld this job is running in.
+\end{memberdesc}
+
+\begin{methoddesc}[FunctionJob]{importValue}{name}
+Retrieves the value for the named variable.
+This is a shallow copy so modifications made in the function may affect the variable (LocalOnly). 
+Do not abuse this, use \texttt{exportValue} instead.
+\end{methoddesc}
+
+\begin{methoddesc}[FunctionJob]{exportValue}{name, value}
+Contributes a new value for the named variable.
+\end{methoddesc}
diff --git a/doc/user/user.tex b/doc/user/user.tex
index f9d2b8f..ebcc471 100644
--- a/doc/user/user.tex
+++ b/doc/user/user.tex
@@ -99,6 +99,7 @@ Some relevant references can be found in Appendix~\ref{app:ourrefs}.
 \include{speckley}
 \include{weipa}
 \include{symbolic}
+%\include{subworlds}
 
 \esysappendix %So hyperef builds table of contents links properly 
 
diff --git a/doc/user/user_defs.tex b/doc/user/user_defs.tex
index 7e74e9a..1d8ee0a 100644
--- a/doc/user/user_defs.tex
+++ b/doc/user/user_defs.tex
@@ -103,6 +103,7 @@
 \newcommand{\FunctionOnContactZero}{contact \class{FunctionSpace} on side 0\xspace}
 \newcommand{\FunctionOnContactOne}{contact \class{FunctionSpace} on side 1\xspace}
 \newcommand{\ContinuousFunction}{continuous \class{FunctionSpace}\xspace}
+\newcommand{\DiracDeltaFunctions}{Dirac delta-function \class{FunctionSpace}\xspace}
 \newcommand{\RankOne}{{rank-1 \Data object}\xspace}
 \newcommand{\RankTwo}{{rank-2 \Data object}\xspace}
 \newcommand{\RankThree}{{rank-3 \Data object}\xspace}
@@ -134,7 +135,7 @@
 \newcommand{\MATPLOTLIB}{\module{matplotlib}\index{visualization!matplotlib}\index{matplotlib}\xspace}
 \newcommand{\VTKUrl}{\url{http://www.vtk.org/}\index{visualization!VTK}\index{VTK}\xspace}
 \newcommand{\VisIt}{{\it VisIt}\index{visualization!VisIt}\index{VisIt}\xspace}
-\newcommand{\mayavi}{{\it mayavi}\index{visualization!mayavi}\index{mayavi}\xspace}
+\newcommand{\mayavi}{{\it Mayavi2}\index{visualization!mayavi}\index{mayavi}\xspace}
 \newcommand{\GOCAD}{{\it GOCAD}\index{visualization!GOCAD}\index{GOCAD}\xspace}
 \newcommand{\OpenDX}{{\it OpenDX}\index{visualization!OpenDX}\index{OpenDX}\cite{OPENDX}\xspace}
 
diff --git a/doc/user/verinfo.tex b/doc/user/verinfo.tex
deleted file mode 100644
index e509a29..0000000
--- a/doc/user/verinfo.tex
+++ /dev/null
@@ -1,7 +0,0 @@
-
-\newcommand{\relver}{development}
-\newcommand{\reldate}{\today}
-
-
-%\newcommand{\relver}{4.0}
-%\newcommand{\reldate}{\today}
diff --git a/doc/user/verinfo.tex b/doc/user/verinfo.tex
new file mode 120000
index 0000000..256af4d
--- /dev/null
+++ b/doc/user/verinfo.tex
@@ -0,0 +1 @@
+../verinfo.tex
\ No newline at end of file
diff --git a/doc/verinfo.tex b/doc/verinfo.tex
index e509a29..6097256 100644
--- a/doc/verinfo.tex
+++ b/doc/verinfo.tex
@@ -1,7 +1,7 @@
 
-\newcommand{\relver}{development}
-\newcommand{\reldate}{\today}
+%\newcommand{\relver}{development}
+%\newcommand{\reldate}{\today}
 
 
-%\newcommand{\relver}{4.0}
-%\newcommand{\reldate}{\today}
+\newcommand{\relver}{4.1}
+\newcommand{\reldate}{24 July 2015}
diff --git a/downunder/py_src/__init__.py b/downunder/py_src/__init__.py
index 01aeab7..d23290e 100644
--- a/downunder/py_src/__init__.py
+++ b/downunder/py_src/__init__.py
@@ -16,6 +16,8 @@
 
 """Data inversion module built on escript"""
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -32,11 +34,11 @@ from .splitinversioncostfunctions import *
 from .inversions import *
 from .mappings import *
 from .minimizers import *
+from .splitminimizers import *
 from .regularizations import *
 from .splitregularizations import *
 from .coordinates import *
 from .seismic import *
-from .domaingeneratordcresistivity import *
 from .dcresistivityforwardmodeling import *
 
 import logging
diff --git a/downunder/py_src/coordinates.py b/downunder/py_src/coordinates.py
index 28e246f..4432cad 100644
--- a/downunder/py_src/coordinates.py
+++ b/downunder/py_src/coordinates.py
@@ -16,6 +16,8 @@
 
 """Functions to deal with coordinate systems"""
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/downunder/py_src/costfunctions.py b/downunder/py_src/costfunctions.py
index e19d90a..74cc981 100644
--- a/downunder/py_src/costfunctions.py
+++ b/downunder/py_src/costfunctions.py
@@ -16,6 +16,8 @@
 
 """General cost functions for minimization"""
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/downunder/py_src/datasources.py b/downunder/py_src/datasources.py
index 6b2e882..87dd0b7 100644
--- a/downunder/py_src/datasources.py
+++ b/downunder/py_src/datasources.py
@@ -16,6 +16,8 @@
 
 """Data readers/providers for inversions"""
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -624,7 +626,7 @@ class NetCdfData(DataSource):
             except KeyError:
                 raise ValueError("Invalid data variable name supplied")
         else:
-            for n in f.variables.keys():
+            for n in sorted(f.variables.keys()):
                 dims=f.variables[n].dimensions
                 if (lat_name in dims) and (lon_name in dims):
                     self.__data_name=n
@@ -766,6 +768,12 @@ class NetCdfData(DataSource):
                     FS, shape=(), fill=0., first=first, numValues=nValues,
                     multiplier=multiplier, reverse=reverse)
         else:
+            # arithmetics with NaN produces undesired results so we replace
+            # NaNs by a large positive number which (hopefully) is not present
+            # in the real dataset
+            if np.isnan(self.__null_value):
+                data.replaceNaN(1.e300)
+                self.__null_value = 1.e300
             sigma = self.__error_value * whereNonZero(data-self.__null_value)
 
         data = data * self.__scale_factor
diff --git a/downunder/py_src/dcresistivityforwardmodeling.py b/downunder/py_src/dcresistivityforwardmodeling.py
index 8bf160e..b607e65 100644
--- a/downunder/py_src/dcresistivityforwardmodeling.py
+++ b/downunder/py_src/dcresistivityforwardmodeling.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -14,6 +13,8 @@ from __future__ import print_function
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 from esys.escript import Data, kronecker, whereZero,inf,sup,ContinuousFunction,grad,Function,Lsup, Scalar, DiracDeltaFunctions
 from esys.escript.linearPDEs import LinearPDE
 from esys.escript.pdetools import Locator
diff --git a/downunder/py_src/domainbuilder.py b/downunder/py_src/domainbuilder.py
index 9b37da4..8187c18 100644
--- a/downunder/py_src/domainbuilder.py
+++ b/downunder/py_src/domainbuilder.py
@@ -16,6 +16,8 @@
 
 """Domain construction from survey data for inversions"""
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/downunder/py_src/forwardmodels/__init__.py b/downunder/py_src/forwardmodels/__init__.py
index 619f5e2..f92c981 100644
--- a/downunder/py_src/forwardmodels/__init__.py
+++ b/downunder/py_src/forwardmodels/__init__.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
 """Collection of forward models that define the inversion problem"""
+
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
diff --git a/downunder/py_src/forwardmodels/acoustic.py b/downunder/py_src/forwardmodels/acoustic.py
index 2640ff8..7503581 100644
--- a/downunder/py_src/forwardmodels/acoustic.py
+++ b/downunder/py_src/forwardmodels/acoustic.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-from __future__ import division
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -16,6 +14,7 @@ from __future__ import division
 ##############################################################################
 
 """Forward model for acoustic wave forms"""
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/downunder/py_src/forwardmodels/base.py b/downunder/py_src/forwardmodels/base.py
index fe94a72..a797757 100644
--- a/downunder/py_src/forwardmodels/base.py
+++ b/downunder/py_src/forwardmodels/base.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-from __future__ import division
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -17,6 +15,8 @@ from __future__ import division
 
 """Base classes for forward models"""
 
+from __future__ import division, print_function
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/downunder/py_src/forwardmodels/dcresistivity.py b/downunder/py_src/forwardmodels/dcresistivity.py
index d39f4a6..9313155 100644
--- a/downunder/py_src/forwardmodels/dcresistivity.py
+++ b/downunder/py_src/forwardmodels/dcresistivity.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-from __future__ import division
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -16,6 +14,7 @@ from __future__ import division
 ##############################################################################
 
 """Forward model for DC Resistivity"""
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/downunder/py_src/forwardmodels/gravity.py b/downunder/py_src/forwardmodels/gravity.py
index e9691f1..20c7ea7 100644
--- a/downunder/py_src/forwardmodels/gravity.py
+++ b/downunder/py_src/forwardmodels/gravity.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-from __future__ import division
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -16,6 +14,7 @@ from __future__ import division
 ##############################################################################
 
 """Forward model for gravity (Bouguer) anomaly"""
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/downunder/py_src/forwardmodels/magnetic.py b/downunder/py_src/forwardmodels/magnetic.py
index e3ddd81..2fd349e 100644
--- a/downunder/py_src/forwardmodels/magnetic.py
+++ b/downunder/py_src/forwardmodels/magnetic.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-from __future__ import division
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -14,6 +12,7 @@ from __future__ import division
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 """Forward models for magnetic fields"""
 
diff --git a/downunder/py_src/forwardmodels/magnetotelluric2d.py b/downunder/py_src/forwardmodels/magnetotelluric2d.py
index 2594723..9a328fc 100644
--- a/downunder/py_src/forwardmodels/magnetotelluric2d.py
+++ b/downunder/py_src/forwardmodels/magnetotelluric2d.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-from __future__ import division
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -17,6 +15,8 @@ from __future__ import division
 
 """Forward models for 2D MT (TE and TM mode)"""
 
+from __future__ import division, print_function
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -64,11 +64,12 @@ class MT2DBase(ForwardModel):
                      `fixAboveLevel`
         :type Ftop: ``float``, ``complex`` or ``Data`` of shape (2,)
         :param fixAtTop: if true F is set to Ftop at the top of the domain.
-                         Use fixAtTop *or* fixAboveLevel, not both.
+                         If both `fixAtTop` and `fixAboveLevel` are set, then
+                         `fixAboveLevel` takes precedence.
         :type fixAtTop: ``bool``
         :param fixAboveLevel: level above which F is set to Ftop (typically
                               the level of the air layer).
-                              Use fixAtTop *or* fixAboveLevel, not both.
+                              If both `fixAtTop` and `fixAboveLevel` are set, `fixAboveLevel` takes precedence.
         :type fixAboveLevel : ``float`` or ``None``
         :param Fbottom: value of field at base of the domain
         :type Fbottom: ``float``, ``complex`` or ``Data`` of shape (2,)
@@ -137,11 +138,11 @@ class MT2DBase(ForwardModel):
         self._r=Vector(0.,Solution(domain))
         #====================================
         if fixAtTop or fixAboveLevel is not None:
-            if fixAtTop:
-                m=whereZero(z-self._ztop)
-            else:
+            if fixAboveLevel is not None:
                 m=whereNonNegative(z-fixAboveLevel)
-            if isinstance(Ftop, float) or isinstance(Ftop, int) :
+            else:
+                m=whereZero(z-self._ztop)
+            if isinstance(Ftop, float) or isinstance(Ftop, int):
                 d = Data((Ftop,0), Solution(domain))
             elif isinstance(Ftop, tuple):
                 d = Data((Ftop[0],Ftop[1]), Solution(domain))
@@ -452,7 +453,7 @@ class MT2DModelTMMode(MT2DBase):
         D[1,0] =  f
         
         A= pde.getCoefficient('A')
-        for i in xrange(DIM):
+        for i in range(DIM):
             A[0,i,0,i]=rho
             A[1,i,1,i]=rho
         
@@ -516,7 +517,7 @@ class MT2DModelTMMode(MT2DBase):
         Y=pde.getCoefficient('Y')
         X=pde.getCoefficient('X')
 
-        for i in xrange(DIM):
+        for i in range(DIM):
             A[0,i,0,i]=rho
             A[1,i,1,i]=rho
 
diff --git a/downunder/py_src/forwardmodels/pressure.py b/downunder/py_src/forwardmodels/pressure.py
index 72fe202..ec50c00 100644
--- a/downunder/py_src/forwardmodels/pressure.py
+++ b/downunder/py_src/forwardmodels/pressure.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-from __future__ import division
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -16,6 +14,7 @@ from __future__ import division
 ##############################################################################
 
 """Isostatic Pressure calculation"""
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
@@ -38,7 +37,7 @@ class IsostaticPressure(object):
     """
     class to calculate isostatic pressure field correction due to gravity forces
     """
-    def __init__(self, domain, p0=0., level0=0, gravity0=-9.81*U.m*U.sec**(-3),
+    def __init__(self, domain, p0=0., level0=0, gravity0=-9.81*U.m*U.sec**(-2),
                  background_density=2670* U.kg*U.m**(-3),
                  gravity_constant=U.Gravitational_Constant,
                  coordinates=None, tol=1e-8):
diff --git a/downunder/py_src/forwardmodels/subsidence.py b/downunder/py_src/forwardmodels/subsidence.py
index e264e3a..70a3607 100644
--- a/downunder/py_src/forwardmodels/subsidence.py
+++ b/downunder/py_src/forwardmodels/subsidence.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-from __future__ import division
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -16,6 +14,7 @@ from __future__ import division
 ##############################################################################
 
 """Forward model for Subsidence modelling"""
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/downunder/py_src/inversioncostfunctions.py b/downunder/py_src/inversioncostfunctions.py
index 94b169d..87093d3 100644
--- a/downunder/py_src/inversioncostfunctions.py
+++ b/downunder/py_src/inversioncostfunctions.py
@@ -16,6 +16,8 @@
 
 """Cost functions for inversions with one or more forward models"""
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/downunder/py_src/inversions.py b/downunder/py_src/inversions.py
index 7ae990f..7fb4a99 100644
--- a/downunder/py_src/inversions.py
+++ b/downunder/py_src/inversions.py
@@ -16,6 +16,8 @@
 
 """Higher-level classes that allow running inversions with minimal set up"""
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -28,7 +30,7 @@ __all__ = ['InversionDriver', 'GravityInversion','MagneticInversion', 'JointGrav
 import logging
 import numpy as np
 
-from esys.escript import *
+import esys.escript as es
 from esys.escript import unitsSI as U
 from esys.weipa import createDataset
 
@@ -285,11 +287,11 @@ class GravityInversion(InversionDriver):
         g=[]
         w=[]
         for g_i,sigma_i in surveys:
-            w_i=safeDiv(1., sigma_i)
+            w_i=es.safeDiv(1., sigma_i)
             if g_i.getRank()==0:
-                g_i=g_i*kronecker(DIM)[DIM-1]
+                g_i=g_i*es.kronecker(DIM)[DIM-1]
             if w_i.getRank()==0:
-                w_i=w_i*kronecker(DIM)[DIM-1]
+                w_i=w_i*es.kronecker(DIM)[DIM-1]
             g.append(g_i)
             w.append(w_i)
             self.logger.debug("Added gravity survey:")
@@ -319,20 +321,20 @@ class GravityInversion(InversionDriver):
         else:
             super(GravityInversion,self).setInitialGuess()
 
-    def siloWriterCallback(self, k, m, Jm, g_Jm):
+    def siloWriterCallback(self, k, x, Jx, g_Jx, norm_dJ=None, norm_dx=None):
         """
         callback function that can be used to track the solution
 
         :param k: iteration count
-        :param m: current m approximation
-        :param Jm: value of cost function
-        :param g_Jm: gradient of f at x
+        :param x: current approximation
+        :param Jx: value of cost function
+        :param g_Jx: gradient of f at x
         """
         fn='inv.%d'%k
-        ds=createDataset(density=self.getCostFunction().getProperties(m))
+        ds=createDataset(density=self.getCostFunction().getProperties(x))
         ds.setCycleAndTime(k,k)
         ds.saveSilo(fn)
-        self.logger.debug("J(m) = %e"%Jm)
+        self.logger.debug("J(m) = %e"%Jx)
 
 class MagneticInversion(InversionDriver):
     """
@@ -391,12 +393,12 @@ class MagneticInversion(InversionDriver):
 
         #====================================================================
         self.logger.info("Retrieving magnetic field surveys...")
-        d_b=normalize(domainbuilder.getBackgroundMagneticFluxDensity())
+        d_b=es.normalize(domainbuilder.getBackgroundMagneticFluxDensity())
         surveys=domainbuilder.getMagneticSurveys()
         B=[]
         w=[]
         for B_i,sigma_i in surveys:
-            w_i=safeDiv(1., sigma_i)
+            w_i=es.safeDiv(1., sigma_i)
             if B_i.getRank()==0:
                 B_i=B_i*d_b
             if w_i.getRank()==0:
@@ -432,20 +434,20 @@ class MagneticInversion(InversionDriver):
         else:
             super(MagneticInversion,self).setInitialGuess()
 
-    def siloWriterCallback(self, k, m, Jm, g_Jm):
+    def siloWriterCallback(self, k, x, Jx, g_Jx, norm_dJ=None, norm_dx=None):
         """
         callback function that can be used to track the solution
 
         :param k: iteration count
-        :param m: current m approximation
-        :param Jm: value of cost function
-        :param g_Jm: gradient of f at x
+        :param x: current approximation
+        :param Jx: value of cost function
+        :param g_Jx: gradient of f at x
         """
         fn='inv.%d'%k
-        ds=createDataset(susceptibility=self.getCostFunction().getProperties(m))
+        ds=createDataset(susceptibility=self.getCostFunction().getProperties(x))
         ds.setCycleAndTime(k,k)
         ds.saveSilo(fn)
-        self.logger.debug("J(m) = %e"%Jm)
+        self.logger.debug("J(m) = %e"%Jx)
 
 class JointGravityMagneticInversion(InversionDriver):
     """
@@ -488,9 +490,9 @@ class JointGravityMagneticInversion(InversionDriver):
         :param k_beta: exponent for  depth weighting for susceptibility, see `SusceptibilityMapping`. If not specified, zero is used.
         :type k_beta: ``float`` or `Scalar`
         :param w0: weighting factors for level set term regularization, see `Regularization`. If not set zero is assumed.
-        :type w0: `Data` or ``ndarray`` of shape (2,)
+        :type w0: `es.Data` or ``ndarray`` of shape (2,)
         :param w1: weighting factor for the gradient term in the regularization see `Regularization`. If not set zero is assumed
-        :type w1: `Data` or ``ndarray`` of shape (2,DIM)
+        :type w1: `es.Data` or ``ndarray`` of shape (2,DIM)
         :param w_gc: weighting factor for the cross gradient term in the regularization, see `Regularization`. If not set one is assumed
         :type w_gc: `Scalar` or `float`
         :param k_at_depth: value for susceptibility at depth, see `DomainBuilder`.
@@ -532,13 +534,13 @@ class JointGravityMagneticInversion(InversionDriver):
         if w1 is None:
             w1=np.ones((2,DIM))
 
-        wc=Data(0.,(2,2), Function(dom))
+        wc=es.Data(0.,(2,2), es.Function(dom))
         if w_gc is  None:
             wc[0,1]=1
         else:
             wc[0,1]=w_gc
 
-        reg_mask=Data(0.,(2,), Solution(dom))
+        reg_mask=es.Data(0.,(2,), es.Solution(dom))
         reg_mask[self.DENSITY] = rho_mask
         reg_mask[self.SUSCEPTIBILITY] = k_mask
         regularization=Regularization(dom, numLevelSets=2,\
@@ -549,11 +551,11 @@ class JointGravityMagneticInversion(InversionDriver):
         g=[]
         w=[]
         for g_i,sigma_i in surveys:
-            w_i=safeDiv(1., sigma_i)
+            w_i=es.safeDiv(1., sigma_i)
             if g_i.getRank()==0:
-                g_i=g_i*kronecker(DIM)[DIM-1]
+                g_i=g_i*es.kronecker(DIM)[DIM-1]
             if w_i.getRank()==0:
-                w_i=w_i*kronecker(DIM)[DIM-1]
+                w_i=w_i*es.kronecker(DIM)[DIM-1]
             g.append(g_i)
             w.append(w_i)
             self.logger.debug("Added gravity survey:")
@@ -566,12 +568,12 @@ class JointGravityMagneticInversion(InversionDriver):
         gravity_model.rescaleWeights(rho_scale=rho_scale_mapping)
         #====================================================================
         self.logger.info("Retrieving magnetic field surveys...")
-        d_b=normalize(domainbuilder.getBackgroundMagneticFluxDensity())
+        d_b=es.normalize(domainbuilder.getBackgroundMagneticFluxDensity())
         surveys=domainbuilder.getMagneticSurveys()
         B=[]
         w=[]
         for B_i,sigma_i in surveys:
-            w_i=safeDiv(1., sigma_i)
+            w_i=es.safeDiv(1., sigma_i)
             if B_i.getRank()==0:
                 B_i=B_i*d_b
             if w_i.getRank()==0:
@@ -606,21 +608,21 @@ class JointGravityMagneticInversion(InversionDriver):
         """
         super(JointGravityMagneticInversion,self).setInitialGuess(rho, k)
 
-    def siloWriterCallback(self, k, m, Jm, g_Jm):
+    def siloWriterCallback(self, k, x, Jx, g_Jx, norm_dJ=None, norm_dx=None):
         """
         callback function that can be used to track the solution
 
         :param k: iteration count
-        :param m: current m approximation
-        :param Jm: value of cost function
-        :param g_Jm: gradient of f at x
+        :param x: current approximation
+        :param Jx: value of cost function
+        :param g_Jx: gradient of f at x
         """
         fn='inv.%d'%k
-        p=self.getCostFunction().getProperties(m)
+        p=self.getCostFunction().getProperties(x)
         ds=createDataset(density=p[self.DENSITY], susceptibility=p[self.SUSCEPTIBILITY])
         ds.setCycleAndTime(k,k)
         ds.saveSilo(fn)
-        self.logger.debug("J(m) = %e"%Jm)
+        self.logger.debug("J(m) = %e"%Jx)
 
 class StrongJointGravityMagneticInversion(InversionDriver):
     """
@@ -678,7 +680,7 @@ class StrongJointGravityMagneticInversion(InversionDriver):
         :type w0: ``Scalar`` or ``float``
         :param w1: weighting factor for the gradient term in the regularization
                    see `Regularization`.  If not set zero is assumed.
-        :type w1: `Data` or ``ndarray`` of shape (DIM,)
+        :type w1: `es.Data` or ``ndarray`` of shape (DIM,)
         :param w_gc: weighting factor for the cross gradient term in the
                      regularization, see `Regularization`. If not set one is
                      assumed.
@@ -728,11 +730,11 @@ class StrongJointGravityMagneticInversion(InversionDriver):
         g=[]
         w=[]
         for g_i,sigma_i in surveys:
-            w_i=safeDiv(1., sigma_i)
+            w_i=es.safeDiv(1., sigma_i)
             if g_i.getRank()==0:
-                g_i=g_i*kronecker(DIM)[DIM-1]
+                g_i=g_i*es.kronecker(DIM)[DIM-1]
             if w_i.getRank()==0:
-                w_i=w_i*kronecker(DIM)[DIM-1]
+                w_i=w_i*es.kronecker(DIM)[DIM-1]
             g.append(g_i)
             w.append(w_i)
             self.logger.debug("Added gravity survey:")
@@ -745,12 +747,12 @@ class StrongJointGravityMagneticInversion(InversionDriver):
         gravity_model.rescaleWeights(rho_scale=rho_scale_mapping)
         #====================================================================
         self.logger.info("Retrieving magnetic field surveys...")
-        d_b=normalize(domainbuilder.getBackgroundMagneticFluxDensity())
+        d_b=es.normalize(domainbuilder.getBackgroundMagneticFluxDensity())
         surveys=domainbuilder.getMagneticSurveys()
         B=[]
         w=[]
         for B_i,sigma_i in surveys:
-            w_i=safeDiv(1., sigma_i)
+            w_i=es.safeDiv(1., sigma_i)
             if B_i.getRank()==0:
                 B_i=B_i*d_b
             if w_i.getRank()==0:
@@ -784,19 +786,19 @@ class StrongJointGravityMagneticInversion(InversionDriver):
         """
         super(StrongJointGravityMagneticInversion,self).setInitialGuess(rho, k)
 
-    def siloWriterCallback(self, k, m, Jm, g_Jm):
+    def siloWriterCallback(self, k, x, Jx, g_Jx, norm_dJ=None, norm_dx=None):
         """
         callback function that can be used to track the solution
 
         :param k: iteration count
-        :param m: current m approximation
-        :param Jm: value of cost function
-        :param g_Jm: gradient of f at x
+        :param x: current approximation
+        :param Jx: value of cost function
+        :param g_Jx: gradient of f at x
         """
         fn='inv.%d'%k
-        p=self.getCostFunction().getProperties(m)
+        p=self.getCostFunction().getProperties(x)
         ds=createDataset(density=p[self.DENSITY], susceptibility=p[self.SUSCEPTIBILITY])
         ds.setCycleAndTime(k,k)
         ds.saveSilo(fn)
-        self.logger.debug("J(m) = %e"%Jm)
+        self.logger.debug("J(m) = %e"%Jx)
 
diff --git a/downunder/py_src/magtel1d.py b/downunder/py_src/magtel1d.py
new file mode 100644
index 0000000..383edb8
--- /dev/null
+++ b/downunder/py_src/magtel1d.py
@@ -0,0 +1,451 @@
+# -*- coding: utf-8 -*-
+##############################################################################
+#
+# Copyright (c) 2015 by University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+from __future__ import print_function, division
+
+__copyright__="""Copyright (c) 2015 by University of Queensland
+http://www.uq.edu.au
+Primary Business: Queensland, Australia"""
+__license__="""Licensed under the Open Software License version 3.0
+http://www.opensource.org/licenses/osl-3.0.php"""
+__url__="https://launchpad.net/escript-finley"
+
+"""
+1D magnetotelluric modelling for a layered earth
+
+:var __author__: name of author
+:var __copyright__: copyrights
+:var __license__: licence agreement
+:var __url__: url entry point on documentation
+:var __version__: version
+:var __date__: date of the version
+"""
+
+__author__="Ralf Schaa, r.schaa at uq.edu.au"
+
+import sys
+import numpy
+import cmath
+
+class MT_1D(object):
+  """
+  Calculates the electromagnetic fields in the subsurface for a 1D layered earth.
+
+  Partly based on Fortran code by  Phil Wannamaker in MT2D
+  (http://marineemlab.ucsd.edu/Projects/Occam/2DMT/index.html)
+
+  """
+  def __init__(self, freq, depths, rho, zcoord):
+    """
+    DESCRIPTION:
+    -----------
+    Constructor which initialises the 1D magnetotelluric class:
+    (*) check for argument type
+    (*) check for valid argument values
+    (*) initialises required data lists
+
+    ARGUMENTS:
+    ----------
+    param freq        :: sounding frequency
+    type  freq        :: ``float``
+    param depths      :: layer depth interfaces
+    type  depths      :: ``list`` (number)
+    param rho         :: layer resistivities
+    type  rho         :: ``list`` (number)
+    param zcoord      :: sample coordinate points
+    type  zcoord      :: ``list`` (number)
+
+
+    DATA ATTRIBUTES:
+    ---------------
+    self.f  = freq    :: sounding frequency
+    self.z  = zcoord  :: sample coordinate points
+    self.zl = zl      :: layer depths
+    self.dl = dl      :: layer thicknesses
+    self.rl = rl      :: layer resistivities
+    """
+
+    # ---
+    # Check input types:
+    # ---
+
+    #make python3 compatible, since long disappeared in python 3
+    if sys.version_info[0] == 3:
+        long_type = int
+    else:
+        long_type = long
+
+    if not isinstance(freq, (int,long_type,float) ):
+      raise ValueError("Input parameter FREQ must be a number")
+    if not isinstance(depths, list) or not all(isinstance(d,(int,long_type,float)) for d in depths):
+      raise ValueError("Input parameter DEPTHS must be a list of numbers")
+    if not isinstance(rho, list) or not all(isinstance(d,(int,long_type,float)) for d in rho):
+      raise ValueError("Input parameter RHO must be a list of numbers")
+    if not isinstance(zcoord, list) or not all(isinstance(d,(int,long_type,float)) for d in zcoord):
+      raise ValueError("Input parameter ZCOORD must be a list of numbers")
+
+
+    # ---
+    # Check valid input values:
+    # ---
+
+    if not freq > 0:
+      raise ValueError("Input parameter FREQ must be larger than 0")
+    if not all(x>y for x, y in zip(depths, depths[1:])):
+      raise ValueError("Input parameter DEPTHS must be all strictly decreasing")
+    if not len(depths) > 1:
+      raise ValueError("Input parameter DEPTHS must have more than 1 element")
+    if not len(rho) == len(depths)-1:
+      raise ValueError("Input parameter RHO must be exactly the size of DEPTHS minus 1")
+    if not all(x>0 for x in rho):
+      raise ValueError("Input parameter RHO must be all positive")
+    if not all(x<y for x, y in zip(zcoord, zcoord[1:])):
+      raise ValueError("Input parameter ZCOORD must be all strictly increasing")
+
+
+    # ---
+    # Now initialise the required lists for mt1d
+    # ---
+
+    # Setup layer thicknesses from interface coordinates.
+    dl = []
+    for i in range(0,len(depths)-1):
+        # Don't include air-layer:
+        if rho[i] < 1.0e+10:
+            dl.append( abs(depths[i+1] - depths[i]) )
+
+    # Setup list for cumulative layer depths:
+    zl = [0] * (len(dl)) ; zl[0] = dl[0]
+    if len(dl)-1 >=1:
+        for n in range(1,len(dl)):
+            zl[n] = zl[n-1] + dl[n]
+
+    # Setup resistivity list without air-layer.
+    rl = list(rho)
+    if rl[0] > 1.0e+10:
+          rl.pop(0)
+
+
+    # ---
+    # initialise all required variables as data attributes
+    # ---
+
+    self.f  = freq
+    self.z  = zcoord
+    self.zl = zl
+    self.dl = dl
+    self.rl = rl
+
+#__________________________________________________________________________________________________
+
+
+
+  def mt1d(self):
+    """
+    DESCRIPTION:
+    -----------
+    Public method to calculate the MT-1D EM-fields at sample coordinates.
+
+    USES:
+    -----
+    self.f  :: sounding frequency
+    self.z  :: sample coordinate points
+    self.zl :: layer depths
+    self.dl :: layer thicknesses
+    self.rl :: layer resistivities
+
+    """
+
+    # Compute the transmission & reflection coefficients;
+    an, rn = self.__coeff(self.f, self.dl, self.rl)
+
+    # Number of evaluation sample points:
+    nz = len(self.z)
+
+    # Initialise output arrays.
+    te = numpy.zeros( nz, dtype=complex )
+    tm = numpy.zeros( nz, dtype=complex )
+
+    # Calculate the fields at the sample points:
+    for i in range( nz ):
+        z = self.z[i]
+        te[i], tm[i] = self.__field(z, an, rn, self.f, self.zl, self.dl, self.rl)
+
+    #<Note>: return reverse list -> [::-1] so that the first value is the bottom value:
+    return te[::-1], tm[::-1]
+
+#__________________________________________________________________________________________________
+
+
+
+  def __coeff(self, f, dl, rl):
+    """
+    DESCRIPTION:
+    -----------
+    Computes the transmission and reflection coefficients.
+    Based on Wannamaker's subroutine 'COAMP' in 'MT2D'
+
+    ARGUMENTS:
+    -----------
+    f    :: sounding frequency.
+    dl   :: layer thicknesses.
+    rl   :: layer resistivities.
+    """
+
+    # ---
+    # Initialise (return) lists for coefficients.
+    # ---
+
+    # Number of layers.
+    nl = len(rl)
+
+    # Transmission and Reflection coefficients "an" and "rn"
+    an = [ complex(0.0,00) ]*(nl)
+    rn = [ complex(0.0,00) ]*(nl)
+
+
+
+    # ---
+    # Constant values.
+    # ---
+
+    pi = cmath.pi    # Ratio of circle circumference to its diameter.
+    ra = 1.0e+14     # Resistivity of air.
+    mu = 4*pi*1e-7   # Free space permeability.
+    w  = 2*pi*f      # Angular frequency.
+    wm = w*mu        # Shortcut of product.
+
+
+
+    # ---
+    # Calculate intrinsic wave numbers <quasi-static>.
+    # ---
+
+    # Wave number of air:
+    k0 = cmath.sqrt( -1j*wm/ra )
+
+    # Cycle layers and compute wave numbers of other layers:
+    k = [None]*nl
+    for i in range(nl):
+        k[i] = cmath.sqrt( -1j*wm/rl[i] )
+
+
+
+    # ---
+    # Reflection & transmission coefficients for half-space.
+    # ---
+
+    # Half-space case:
+    if nl == 1:
+        an[0] = 2*k0/(k[0] + k0) # = 1+Ro
+        rn[0] = (k0 - k[0])/(k0 + k[0])
+
+        # All done, return the coefficients.
+        return an, rn
+
+
+
+
+    # ---
+    # Prepare calculations for layers.
+    # ---
+
+    # Initialise lists for computed values with complex zeros.
+    arg = [ complex(0.0,00) ]*(nl-1)
+    exp = [ complex(0.0,00) ]*(nl-1)
+    ex2 = [ complex(0.0,00) ]*(nl-1)
+    tnh = [ complex(0.0,00) ]*(nl-1)
+
+    # Setup arguments for the exponential for each layer..
+    # .. and compute the tanh function and also exp(-2kh).
+    for j in range(nl-1):
+        arg[j] = 1j*k[j]*dl[j]
+        tnh[j]= cmath.tanh(arg[j])
+        # Save also exponentials for coefficient calculations later:
+        exp[j] = cmath.exp( -arg[j] )
+        ex2[j] = exp[j]*exp[j]
+
+    # ---
+    # Reflection & transmission coefficients for layers.
+    # ---
+
+    #<Note>: Following section is based on the formulae by Wannamaker's code.
+
+
+    # Initialise recursion with intrinsic impedance of basement.
+    zn = wm/k[nl-1]
+
+    # Compute the reflection coefficients for all sub-surface layers..
+    # ..start the loop at the basement and cycle up to the first layer:
+    for j in range(nl-1,0,-1):
+        # Wave impedance of next layer-up:
+        zu = wm/k[j-1]
+        # Ratio of layer impedances of current-layer and layer-up::
+        rn[j] = (zn - zu)/(zn + zu)
+        # New apparent impedance for up-layer via Wait's formula:
+        zn = zu*(zn + zu*tnh[j-1])/(zu + zn*tnh[j-1])
+        # <Note>: "zn" is the surface impedance when finishing the loop.
+
+    # For the first sub-surface layer, we also ..
+    # ..have to mind the air-layer at index '0':
+    zu = wm/k0 ; rn[0] = (zn - zu)/(zn + zu)
+
+
+    # Transmission coefficient of first layer takes into account air-layer:
+    an[0] = (1+rn[0]) / (1+rn[1]*ex2[0]) # exp[0]*
+    #<Note>: Wannamaker does not multiply with exp!
+
+    # And now compute the transmission coefficients for rest of the layers:
+    if (nl-1) > 1:
+        for n in range(1,nl-1):
+            #<Note>: Wannamaker uses num: ~ exp[n-1]!
+            num   = (1+rn[n] )*exp[n-1]
+            den   = (1+rn[n+1]*ex2[n])
+            an[n] = an[n-1]*num/den
+    # And mind the basement as well (numerator is 1):
+    an[nl-1] = an[nl-2]*exp[nl-2]*(1+rn[nl-1])
+
+
+    # Return the coefficients.
+    return an, rn
+#__________________________________________________________________________________________________
+
+
+
+  def __field(self, z, an, rn, f, zl, dl, rl):
+    """
+    DESCRIPTION:
+    -----------
+    Computes the electric and magnetic field for 1D-MT.
+    Based on Wannamaker's subroutine 'ZLFLD' in 'MT2D'
+
+    ARGUMENTS:
+    -----------
+    z    :: sample coordinate
+    an   :: transmission coefficients
+    rn   :: reflection coefficients
+    f    :: sounding frequency.
+    zl   :: layer depths
+    dl   :: layer thicknesses
+    rl   :: layer resistivities
+    """
+
+    # ---------------------------------------------------------------------------------------------
+    # Initialisations.
+    # ---------------------------------------------------------------------------------------------
+
+    # Number of layers.
+    nl = len(rl)
+
+    # Return values.
+    ex = complex(0.0, 0.0)
+    hy = complex(0.0, 0.0)
+
+    # Constant values.
+    pi = cmath.pi    # Ratio of circle circumference to its diameter.
+    ra = 1.0e+14     # Resistivity of air.
+    mu = 4*pi*1e-7   # Free space permeability.
+    w  = 2*pi*f      # Angular frequency.
+    wm = w*mu        # Shortcut of product.
+
+
+
+    # ---------------------------------------------------------------------------------------------
+    # Calculate intrinsic wave numbers <quasi-static>.
+    # ---------------------------------------------------------------------------------------------
+
+    # Free space wave number & amplitude factor of E-field:
+    k0 = cmath.sqrt( -1j*wm*1/ra )
+    e0 = wm/(2*k0)
+
+    # Cycle layers and compute wave numbers of other layers:
+    k = [None]*nl
+    for i in range(nl):
+        k[i] = cmath.sqrt( -1j*wm/rl[i] )
+
+
+
+    # ---------------------------------------------------------------------------------------------
+    # Air-layer EM fields.
+    # ---------------------------------------------------------------------------------------------
+
+    if z < 0:
+        # Compute the argument and fields.
+        kz = 1j*k0*z
+        ex = e0*cmath.exp(-kz)*(1+rn[0]*cmath.exp(2*kz))
+        hy = e0*cmath.exp(-kz)*(1-rn[0]*cmath.exp(2*kz))*(k0/wm)
+
+        # All done, leave.
+        return ex, hy
+
+
+
+
+    # ---------------------------------------------------------------------------------------------
+    # Uniform half-space EM fields.
+    # ---------------------------------------------------------------------------------------------
+    if nl == 1:
+
+        # Compute the argument and fields; <Note>:z<0.
+        kz = 1j*k[0]*z
+        ex = e0*an[0]*cmath.exp(-kz)
+        hy = e0*an[0]*cmath.exp(-kz)*(k[0]/wm)
+
+        # All done, leave.
+        return ex, hy
+
+
+
+
+    # ---------------------------------------------------------------------------------------------
+    # Layered half-space EM fields.
+    # ---------------------------------------------------------------------------------------------
+
+    if nl > 1:
+
+        # First get the layer index for the current
+        # depth 'z' via cycling over layer depths:
+        n = 0
+        for i in range(1,nl):
+            if z > zl[i-1]:
+                n = i
+
+        # This handles the case when 'z' is in the basement layer (no reflection):
+        if n == nl-1:
+
+            # Compute the fields.
+            kh = 1j*k[n]*(z-zl[n-1])
+            ex = e0*an[n]*cmath.exp(-kh)
+            hy = e0*an[n]*cmath.exp(-kh)*(k[n]/wm)
+
+            # All done, leave.
+            return ex, hy
+
+        else: # Other layers:
+
+            # Compute the fields.
+            kh = 1j*k[n]*(z-zl[n])
+            kd = 1j*k[n]*dl[n]
+            exp = cmath.exp(-kh)
+            aex = cmath.exp(-kd)
+            ex  = e0*an[n]*(exp + rn[n+1]/exp)*aex
+            hy  = e0*an[n]*(exp - rn[n+1]/exp)*aex*(k[n]/wm)
+
+            # All done, leave.
+            return ex, hy
+
+    return ex, hy
+#__________________________________________________________________________________________________
diff --git a/downunder/py_src/magtel2d.py b/downunder/py_src/magtel2d.py
new file mode 100644
index 0000000..249f4e1
--- /dev/null
+++ b/downunder/py_src/magtel2d.py
@@ -0,0 +1,1005 @@
+# -*- coding: utf-8 -*-
+##############################################################################
+#
+# Copyright (c) 2015 by University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+from __future__ import print_function, division
+
+__copyright__="""Copyright (c) 2015 by University of Queensland
+http://www.uq.edu.au
+Primary Business: Queensland, Australia"""
+__license__="""Licensed under the Open Software License version 3.0
+http://www.opensource.org/licenses/osl-3.0.php"""
+__url__="https://launchpad.net/escript-finley"
+
+"""
+2D Magnetotelluric modelling for TE and TM mode.
+
+:var __author__: name of author
+:var __copyright__: copyrights
+:var __license__: licence agreement
+:var __url__: url entry point on documentation
+:var __version__: version
+:var __date__: date of the version
+"""
+
+__author__="Ralf Schaa, r.schaa at uq.edu.au"
+
+import os, sys
+import numpy
+import math
+import cmath
+import types
+from . import magtel1d         as mt1d
+import esys.weipa              as weipa
+import esys.escript            as escript
+import esys.finley             as finley
+import esys.escript.pdetools   as pdetools
+import esys.escript.linearPDEs as pde
+
+class MT_2D(object):
+
+   # class options:
+  _debug   = False    #
+  _solver = "DEFAULT" #
+
+   # 'private' field:
+  __version = 0.1     #
+
+  """
+  DESCRIPTION:
+  ------------
+  solves the scalar 2-D electromagnetic diffusion equation,
+  (where 'u' is the electric field E or magnetic field H).
+
+  [1]  -div( k*grad(u) ) + q*u = 0  (+ Boundary Conditions)
+
+  In 2D the equation is solved for the transverse electric
+  field (TE mode) or transverse magnetic field (TM mode).
+  These fields are parallel to the 2D strike direction.
+  Based on the actual mode, the coefficients are given by:
+
+  TE: k = 1/mu   , q = i*w*sigma
+  TM: k = 1/sigma, q = i*w*mu
+
+  'mu'    is the vacuum permeability,
+  'i'     is the imaginary unit
+  'w'     is the angular frequency
+  'sigma' is the conductivity
+
+  The EM diffusion equation is complex and is solved as
+  a coupled PDE for the real and imaginary parts. The
+  coupled PDE is given by the following equations, with
+  Er, Ei and Hr, Hi are the real and imaginary components
+  of the electric and magnetic field, respectively:
+
+  TE:
+  [2] div( grad(Er) ) + w*mu*sigma*Ei = 0
+  [3] div( grad(Ei) ) - w*mu*sigma*Er = 0
+
+     the complementary magnetic fields
+     are calculated via Faraday's Law:
+
+  [4] Hr =-d/dz(Ei) / (w*mu)
+  [5] Hi = d/dz(Er) / (w*mu)
+
+
+  TM:
+  [6] div( rho*grad(Hr) ) + w*mu*Hi = 0
+  [7] div( rho*grad(Hi) ) - w*mu*Hr = 0
+
+     (resistivity 'rho' is 1/sigma)
+     the complementary electric fields
+     are calculated via Ampere's Law:
+
+  [8] Er = d/dz(Hr) * rho
+  [9] Ei = d/dz(Hi) * rho
+
+
+  Based on the ratio of electric to magnetic field
+  apparent resistivity and phase is calculated, viz:
+
+  rho_a = (1/w*mu) * [ (Er)^2 + (Ei)^2 ] / [ (Hr)^2 + (Hi)^2 ]
+  phase = arctan( [Ei*Hr - Er*Hi] / [Er*Hr + Ei*Hi] )
+
+
+  Boundary conditions:
+  --------------------
+  the source term on the right-hand-side of equation [1] is zero,
+  i.e. no artificial source is employed but instead the 'source'
+  is provided via the boundary conditions of the PDE which are
+  given as Dirichlet conditions at all boundaries. To calculate
+  the Dirichlet values, a 1D response is calculated at the left
+  and right boundary (based on the 1D recursion formula for MT).
+  Interpolation from the left to the right sides then provides
+  the values at the top and bottom boundary. See module 'mt1d'
+  for details of the computation of the 1D response. Once the
+  values on the boundaries have been calculated, the values
+  inside the domain are solved in this class.
+  """
+
+  def __init__(self, domain, mode, freq_def, tags, rho, rho_1d, ifc_1d,
+        xstep=100, zstep=100, maps=None, plot=False, limits=None):
+    """
+    DESCRIPTION:
+    -----------
+    Constructor which initialises the 2D magnetotelluric class:
+    (*) check for argument type
+    (*) check for valid argument values
+    (*) initialises required values
+
+    ARGUMENTS:
+    ----------
+    param  domain       :: the 2d mesh domain
+    type   domain       :: ``escript data object``
+
+    param  mode         :: TE or TM mode
+    type   mode         :: ``string``
+
+    param  freq_def     :: highest/lowest frequency & points per decade
+    type   freq_def     :: ``dictionary``
+
+    param  tags         :: the tag names of the regions defined in the mesh
+    type   tags         :: ``list``
+
+    param  rho          :: the resistivity values of the regions in the mesh
+    type   rho          :: ``list``
+
+    param  rho_1d       :: the resistivity values at the left & right boundary
+    type   rho_1d       :: ``dictionary``
+
+    param  ifc_1d       :: the layer interface depths of the left & right boundary
+    type   ifc_1d       :: ``dictionary``
+
+    param  xstep        :: user-defined step size for horizontal sample list
+    type   xstep        :: ``number``  (optional)
+
+    param  zstep        :: user-defined step size for vertical sample list
+    type   zstep        :: ``number``  (optional)
+
+    param  maps         :: list with user-defined  functions which map the resistivity for each region
+    type   maps         :: ``list``    (optional)
+
+    param  plot         :: user-defined flag to show a plot of apparent resistivity and phase at each frequency
+    type   plot         :: ``boolean`` (optional)
+
+
+
+    DATA ATTRIBUTES:
+    ---------------
+    self.domain         :: escript data object of mesh
+    self.X              :: escript data object with all mesh coordinates
+    self.mode           :: string with TE or TM mode
+    self.xmin           :: float with x-coordinate minimum
+    self.xmax           :: float with x-coordinate maximum
+    self.zmin           :: float with z-coordinate minimum
+    self.zmax           :: float with z-coordinate maximum
+    self.zstep          :: number with sample step in vertical direction
+    self.xstep          :: number with sample step in horizontal direction
+    self.rho            :: list with resistivity values of all regions
+    self.rho_1d         :: dictionary with resistivity values at boundaries left/right
+    self.ifc_1d         :: dictionary with interface depths at boundaries left/right
+    self.plot           :: boolean flag to show plots of apparent resistivity and phase
+    self.sigma          :: escript data object with the conductivity model (based on 'rho' and 'maps')
+    self.frequencies    :: list of sounding frequencies
+    self.boundary_mask  :: Dirichlet mask at boundaries
+    """
+
+    #make python3 compatible, since long disappeared in python 3
+    if sys.version_info[0] == 3:
+        long_type = int
+    else:
+        long_type = long
+    # ---
+    # Checks
+    # ---
+
+    # Types:
+    if not isinstance(domain, finley.finleycpp.MeshAdapter ):
+      raise ValueError("Input parameter DOMAIN must be an Escript mesh")
+    if not isinstance(mode, str):
+      raise ValueError("Input parameter MODE must be a string")
+    if not isinstance(freq_def, dict) or len(freq_def) != 3:
+      raise ValueError("Input parameter FREQ_DEF must be a dictionary with length 3")
+    if not isinstance(tags, list) or not all(isinstance(t,str) for t in tags):
+      raise ValueError("Input parameter TAGS must be a list of strings")
+    if not isinstance(rho, list) or not all(isinstance(d,(int,long_type,float)) for d in rho):
+      raise ValueError("Input parameter RHO must be a list of numbers")
+    if not isinstance(rho_1d, dict) or len(rho_1d) != 2:
+      raise ValueError("Input parameter RHO_1D must be a dictionary with length 2")
+    if not isinstance(ifc_1d, dict) or len(ifc_1d) != 2:
+      raise ValueError("Input parameter IFC_1D must be a dictionary with length 2")
+    if not isinstance(xstep, (int,long_type,float)):
+        raise ValueError("Optional input parameter XSTEP must be a number")
+    if not isinstance(zstep, (int,long_type,float)):
+        raise ValueError("Optional input parameter ZSTEP must be a number")
+    if maps is not None:
+      if not isinstance(maps, list) or not all(isinstance(m,(types.FunctionType, types.NoneType)) for m in maps):
+        raise ValueError("Optional input parameter MAPS must be a list of Functions or Nones")
+    if plot is not None:
+      if not isinstance(plot, bool):
+        raise ValueError("Optional input parameter PLOT must be True or False")
+
+    # Values:
+    if mode.upper() != "TE" and mode.upper() != "TM": # Check mode:
+      raise ValueError("Input parameter mode must be either 'TE' or 'TM'")
+    if not 'high' in freq_def and not 'low' in freq_def and not 'step' in freq_def:
+       raise ValueError("Input dictionary FREQ_DEF must have keys 'high', 'low' and 'step' defined" )
+    if freq_def['high'] < freq_def['low']:
+      raise ValueError("High frequency value is smaller than low frequency value in input parameter FREQ_DEF")
+    if freq_def['step'] < 1:
+      raise ValueError("Step frequency value is smaller than 1 in input parameter FREQ_DEF")
+    if not all(r>0 for r in rho): # Check resistivity values:
+      raise ValueError("Input parameter RHO must be all positive")
+    if len(rho) != len(tags): # Check resistivity list-length:
+      raise ValueError("Input parameter RHO must have the same length as input parameter TAGS")
+    if not 'left' in rho_1d and not 'right' in rho_1d:
+       raise ValueError("Input dictionary RHO_1D must have keys 'left' and 'right' defined" )
+    if not 'left' in ifc_1d and not 'right' in ifc_1d:
+      raise ValueError("Input dictionary IFC_1D must have keys 'left' and 'right' defined" )
+    if len(ifc_1d['left'])-1 != len(rho_1d['left']) and len(ifc_1d['right'])-1 != len(rho_1d['right']):
+      raise ValueError("Lists with values in input dictionary RHO_1D must have length equal to IFC_1D" )
+    if xstep < 0.5: # Step size should be non-zero but should not be smaller than 0.5m:
+      raise ValueError("Input parameter XSTEP must be at least 0.5" )
+    if zstep < 0.5: # Step size should be non-zero but should not be smaller than 0.5m:
+      raise ValueError("Input parameter ZSTEP must be at least 0.5" )
+
+
+
+    # ---
+    # Domain coordinates & tags:
+    # ---
+
+    # Extract the model coordinates..
+    X = escript.Solution(domain).getX()
+
+    # Get the Min/Max coordinates:
+    xmin = escript.inf(X[0])
+    xmax = escript.sup(X[0])
+    zmin = escript.inf(X[1])
+    zmax = escript.sup(X[1])
+
+    # Get the tag names from the mesh file
+    mesh_tags = escript.getTagNames(domain)
+
+    if xmin >= xmax or zmin >= zmax: raise ValueError("The mesh limits are not valid (min >= max)" )
+    if zmin >= 0                   : raise ValueError("The mesh must be defined with a negative vertical axis" )
+    if not set(mesh_tags) == set(tags)       :
+        print("user-tags:", tags)
+        print("mesh-tags:", mesh_tags)
+        raise ValueError("Input parameter TAGS does not match the tags defined in the mesh")
+
+
+
+    # ---
+    # Define the boundary mask:
+    # ---
+
+    boundary_mask = self.__setBoundaryMask(X)
+
+
+    # ---
+    # Calculate list of sounding frequencies:
+    # ---
+
+    frequencies = self.__getSoundingFrequencies(freq_def)
+
+
+
+    # ---
+    # Tag the domain with conductivity maps:
+    # ---
+
+    sigma_model = self.__tagDomain(domain, X, tags, rho, maps)
+
+    # Check for valid values
+    if  escript.inf(sigma_model) < 0 or escript.sup(sigma_model) < 0:
+       raise ValueError("Negative conductivity encountered" )
+    if cmath.isnan( escript.inf(sigma_model) ) or \
+       cmath.isnan( escript.sup(sigma_model) ) or \
+       cmath.isinf( escript.sup(sigma_model) ):
+       raise ValueError("The conductivity model contains NaNs or INFs" )
+
+
+
+    # ---
+    # Projector and Locator objects.
+    # ---
+
+    print("Setting up Escript Locator and Projector objects...")
+
+    # Setup a list with sample points along the vertical mesh extent, bottom to top:
+    xsample = self.__getSamplePoints(escript.inf(X[0]),escript.sup(X[0]),xstep, constant=0.0)
+
+    # Get the locations of mesh points at the surface via the Locator object
+    # operating on the continuous function-space (i.e. nodes) of the domain.
+    loc  = pdetools.Locator(escript.ContinuousFunction(domain),xsample )
+
+    # Instantiate the Projector class with smoothing on (fast=False);
+    # the Projector is used to calculate the gradient correctly.
+    proj = pdetools.Projector(domain, reduce=False, fast=False)
+
+
+
+
+    # ---
+    # Print information:
+    # ---
+
+    print("")
+    print("="*72)
+    print("Escript MT2D, version", self.__version)
+    print("="*72)
+    print("Mesh XMin/XMax       : ", xmin, xmax)
+    print("Mesh ZMin/ZMax       : ", zmin, zmax)
+    print("Number of Tags       : ", len( tags ))
+    print("Mapping              : ", {True: 'Yes', False: 'No'}[maps is not None])
+    print("Conductivity Model   : ", sigma_model)
+    print("Nr of Frequencies    : ", len( frequencies ))
+    print("Start/End/Step (Hz)  : ", freq_def["high"], freq_def["low"], freq_def["step"])
+    print("Mode                 : ", mode.upper())
+    print("Solver               : ", MT_2D._solver)
+    print("Show plots           : ", plot)
+    print("="*72)
+    print("")
+
+    if self._debug:
+      print("Mesh-Info     : ")
+      print(domain.print_mesh_info(full=False))
+
+
+
+    # ---
+    # Set all required variables as data attributes
+    # ---
+
+    self.domain         = domain
+    self.X              = X
+    self.mode           = mode
+    self.xmin           = xmin
+    self.xmax           = xmax
+    self.zmin           = zmin
+    self.zmax           = zmax
+    self.zstep          = zstep
+    self.xstep          = xstep
+    self.rho            = rho
+    self.rho_1d         = rho_1d
+    self.ifc_1d         = ifc_1d
+    self.plot           = plot
+    self.limits         = limits
+    self.sigma          = sigma_model
+    self.frequencies    = frequencies
+    self.boundary_mask  = boundary_mask
+    self.proj           = proj
+    self.loc            = loc
+
+
+#_______________________________________________________________________________
+
+
+  def __interpolLinear(self,dx,x0,x1,y0,y1):
+    """
+    DESCRIPTION:
+    -----------
+    Function for simple 1D interpolation using the line-equation.
+
+    ARGUMENTS:
+    ----------
+    dx :: interpolation step.
+    x0 :: first coordinate point of known value y0.
+    x1 :: last coordinate point of known value y1.
+    y0 :: known value at first coordinate.
+    y1 :: known value at last coordinate.
+
+    RETURNS:
+    --------
+    y  :: list with interpolated values
+    """
+    # Initialise return lists.
+    y = []
+
+    # Test for long enough interval.
+    if abs(x1-x0) <= dx: return y
+    # Test for correct abscissae.
+    if x0 >= x1: return y
+
+    x = x0
+    while x <= x1:
+        y.append( y0 + (y1-y0)*(x-x0)/(x1-x0)  )
+        x = x + dx
+
+    return y
+
+#_______________________________________________________________________________
+
+
+  def __getSamplePoints(self, min,max,step,constant=None):
+    """
+    DESCRIPTION:
+    -----------
+    Function to setup a list with sample points. If a
+    constant value was passed a 2D list is returned
+    where the second column is set to the constant.
+
+    ARGUMENTS:
+    ----------
+    min        :: minimum value.
+    max        :: maximum value.
+    step       :: step value.
+    constant   :: optional constant value for 2nd column.
+
+    RETURNS:
+    --------
+    sample     :: list with samples.
+    """
+
+    # Initialise return list.
+    sample = []
+
+    # Cycle with step-size and fill sample list.
+    dp = min
+    while dp <= max:
+        if constant is not None:
+            sample.append([dp,constant])
+        else:
+            sample.append(dp)
+        # Increment the step.
+        dp = dp + step
+
+    # Return the list:
+    return sample
+    #___________________________________________________________________________
+
+
+  def __getSoundingFrequencies(self, frequencies):
+    """
+    DESCRIPTION:
+    -----------
+    Defines the sounding frequencies in Hz.
+
+    ARGUMENTS:
+    ----------
+    frequencies :: dictionary with frequency start/stop/step
+
+    RETURNS:
+    --------
+    sounding_frequencies  :: list with frequency values
+    """
+    # Output list with frequencies in Hertz:
+    sounding_frequencies = []
+
+    # Period definition (from freq to time):
+    tme_1 = 1.0/frequencies["high"]
+    tme_n = 1.0/frequencies["low"]
+
+    # Number of points per decade:
+    tme_p = frequencies["step"]
+
+    # Number of periods in range:
+    nt = int(math.log10(tme_n/tme_1) * tme_p) + 1
+    # Fill list with times:
+    for n in range(nt):
+      # Sounding period in seconds:
+      period = tme_1*10**( (n)/float(tme_p))
+      # And store as frequency in Hertz:
+      sounding_frequencies.append( 1.0/period )
+
+    return sounding_frequencies
+
+#_______________________________________________________________________________
+
+
+  def __getGradField(self, proj, mt2d_field, wm):
+    """
+    DESCRIPTION:
+    -----------
+    Calculates the complementary fields via Faraday's Law (TE-mode)
+    or via Ampere's Law (TM-mode). Partial derivative w.r.t. the
+    vertical coordinate are taken at the surface for which an Escript
+    'Projector' object is used to calculate the gradient.
+
+    ARGUMENTS:
+    ----------
+    proj       :: escript Projection object
+    mt2d_field :: calculated magnetotelluric field
+    wm         :: number with actual angular sounding frequency * mu
+
+    RETURNS:
+    --------
+    mt2d_grad  :: dictionary with computed gradient fields
+    """
+
+    # Define the derived gradient fields:
+    if self.mode.upper() == 'TE':
+      # H = -(dE/dz) / iwm
+      grad_real =-proj.getValue( escript.grad(mt2d_field["imag"])/wm )
+      grad_imag = proj.getValue( escript.grad(mt2d_field["real"])/wm )
+       #<Note the coupled dependency on real/imaginary part>:
+    else:
+      # E = (dH/dz) / sigma
+      grad_real = proj.getValue( escript.grad(mt2d_field["real"])/self.sigma )
+      grad_imag = proj.getValue( escript.grad(mt2d_field["imag"])/self.sigma )
+      #<'sigma' is an Escript data-object and as such the division
+      # will use the tagged sigma values of the associated domains>
+
+
+    # And return as dictionary for real and imaginary parts:
+    mt2d_grad = {"real": grad_real[1], "imag":grad_imag[1] }
+    #<Note>: the derivative w.r.t. 'z' is used (i.e. '[1]').
+
+    return mt2d_grad
+
+#_______________________________________________________________________________
+
+
+  def __tagDomain(self, domain, X, tags, rho, maps):
+    """
+    DESCRIPTION:
+    -----------
+    Defines the conductivity model. Conductivities of tagged regions can be mapped
+    via user-defined functions passed in 'maps'. If no user-defined functions are
+    passed, a constant value is applied as provided in list 'rho' for each region.
+    User-defined functions have 3 arguments: x-coordinate, z-coordinate, resistivity.
+
+    ARGUMENTS:
+    ----------
+    domain  :: escript object of mesh
+    X       :: escript object with all coordinates
+    tags    :: list with domain tags
+    rho     :: list with domain resistivities
+    maps    :: list with user-defined resistivity mappings
+
+    RETURNS:
+    --------
+    sigma   :: escript object of conductivity model
+
+    """
+    # Setup the conductivity structure (acts on elements and can be discontinuous).
+    sigma = escript.Scalar(0, escript.Function(domain))
+
+    # Setup conductivity domains.
+    for i in range( len(tags) ):
+
+      # Default: assign conductivity which is the inverse of resistivity:
+      m = 1.0/rho[i]
+
+      # Map a user-defined conductivity distribution if given:
+      if maps is not None:
+            # Guard against undefined elements:
+        if maps[i] is not None:
+          # Map the conductivity according to the defined functions:
+          m = maps[i]( X[0], X[1], rho[i] )
+
+      # Tag the mesh with the conductivity distributions at each iteration:
+      sigma += m * escript.insertTaggedValues(escript.Scalar(0,escript.Function(domain)),**{ tags[i] : 1})
+
+
+    if self._debug == True:
+      sigma.expand()
+      mydir = os.getcwd()
+      dbgfl = mydir + os.sep + "mt2d_sigma_dbg.silo"
+      print("")
+      print("DEBUG: writing SILO debug output of conductivity model:")
+      print(dbgfl)
+      print("")
+      weipa.saveSilo(dbgfl, sigma = sigma)
+
+
+    # All done:
+    return sigma
+
+#_______________________________________________________________________________
+
+
+  def __setBoundaryMask(self, X):
+    """
+    DESCRIPTION:
+    -----------
+    Define Dirichlet model boundaries conditions.
+
+    ARGUMENTS:
+    ----------
+    X :: escript object with all coordinates
+
+    RETURNS:
+    --------
+    boundary_mask :: escript object with mask values at boundaries
+
+    """
+    # Boundaries are defined as masks (1 or 0) for all mesh coordinates;
+    # values at the boundary are '1', whereas all other values are '0'.
+    mask_l = escript.whereZero( X[0] - escript.inf(X[0]) )
+    mask_r = escript.whereZero( X[0] - escript.sup(X[0]) )
+    mask_t = escript.whereZero( X[1] - escript.inf(X[1]) )
+    mask_b = escript.whereZero( X[1] - escript.sup(X[1]) )
+
+    # Combine the mask for all boundaries:
+    boundary_mask = mask_t + mask_b + mask_l + mask_r
+
+    return boundary_mask
+    #<Note>: this boundary mask is used later on as PDE coefficient 'q'.
+
+#_______________________________________________________________________________
+
+
+  def __getBoundaryValues(self, mode, X, rho_1d, ifc_1d, xstep, zstep, frequency):
+    """
+    DESCRIPTION:
+    -----------
+    Returns a list with boundary values along each Dirichlet boundary.
+    Values at the left and right side of the domain are evaluated at
+    sample points and interpolated across the domain. The subroutine
+    expects that layers at the right- and left-hand-side are defined.
+
+    ARGUMENTS:
+    ----------
+    mode      :: string with TE or TM mode
+    X         :: escript object with all coordinates
+    rho_1d    :: dictionary with resistivities at the left/right boundary
+    ifc_1d    :: dictionary with layer interfaces at the left/right boundary
+    xstep     :: number with step size for horizontal sample list
+    zstep     :: number with step size for vertical sample list
+    frequency :: number with actual sounding frequency
+
+    RETURNS:
+    --------
+    bondary_value :: dictionary with lists of boundary values at sample points
+    """
+
+    # ---
+    # Sample lists at vertical and horizontal boundaries.
+    # ---
+
+    # Horizontal extents:
+    xmin = escript.inf(X[0])
+    xmax = escript.sup(X[0])
+
+    # Vertical extents:
+    zmin = escript.inf(X[1])
+    zmax = escript.sup(X[1])
+
+    # Setup a list with sample points along the vertical mesh extent, bottom to top:
+    zsamples = self.__getSamplePoints(-zmax,-zmin,zstep)
+
+
+    # ---
+    # Calculate the 1D response at the left- and right-hand-side boundaries
+    # ---
+
+    # Instantiate an 'mt1d' object for the left- and right-hand-sides:
+    mt1d_left = mt1d.MT_1D( frequency, ifc_1d['left'] , rho_1d['left'] , zsamples )
+    mt1d_rght = mt1d.MT_1D( frequency, ifc_1d['right'], rho_1d['right'], zsamples )
+
+    # Compute the 1D field values at the sample nodes:
+    te1d_left, tm1d_left  = mt1d_left.mt1d(  )
+    te1d_rght, tm1d_rght  = mt1d_rght.mt1d(  )
+
+    # Distinguish TE and TM mode and save 1D values in dictionary:
+    if mode.upper() == "TE":
+      mt_1d = {"left":te1d_left, "right":te1d_rght}
+    else:
+      mt_1d = {"left":tm1d_left, "right":tm1d_rght}
+
+
+    # ---
+    # Interpolation across mesh.
+    # ---
+
+    # Now setup a 2D-table from left to right at each sampled depth for mesh-interpolation.
+    table2d_real = []
+    table2d_imag = []
+
+     # 1D-interpolation of values from left to right at different depths 'i':
+    for i in range( len(zsamples)):
+      table2d_real.append( self.__interpolLinear(xstep, xmin, xmax, mt_1d["left"].real[i], mt_1d["right"].real[i]) )
+      table2d_imag.append( self.__interpolLinear(xstep, xmin, xmax, mt_1d["left"].imag[i], mt_1d["right"].imag[i]) )
+
+    # 2D-interpolation to map the values on the mesh coordinates:
+    bondary_value_real = escript.interpolateTable( table2d_real, X, (xmin,zmin), (xstep,zstep) )
+    bondary_value_imag = escript.interpolateTable( table2d_imag, X, (xmin,zmin), (xstep,zstep) )
+
+    # Return the real and imaginary values as a dictionary:
+    boundary_value = {"real":bondary_value_real, "imag":bondary_value_imag}
+
+
+    return boundary_value
+
+#_______________________________________________________________________________
+
+
+  def __getAppResPhase(self, mt2d_field, mt2d_grad, wm):
+    """
+    DESCRIPTION:
+    -----------
+    Calculates the apparent resistivity and phase.
+
+    ARGUMENTS:
+    ----------
+    mt2d_field :: dictionary with real/imaginary field values
+    mt2d_grad  :: dictionary with real/imaginary gradient values
+
+    RETURNS:
+    --------
+    apparent resistivity and phase
+    """
+
+    # Define the associated modelled fields in readable variables:
+    if self.mode.upper() == 'TE':
+      # Transverse electric field:
+      Er = mt2d_field["real"]
+      Ei = mt2d_field["imag"]
+      Hr = mt2d_grad["real"]
+      Hi = mt2d_grad["imag"]
+    else:
+      # Transverse magnetic field :
+      Hr = mt2d_field["real"]
+      Hi = mt2d_field["imag"]
+      Er = mt2d_grad["real"]
+      Ei = mt2d_grad["imag"]
+
+
+    # Return apparent Resistivity and Phase:
+    arho_2d = ( (Er**2 + Ei**2)/(Hr**2 + Hi**2) ) / wm
+    aphi_2d = escript.atan( (Ei*Hr - Er*Hi)/(Er*Hr + Ei*Hi) ) * 180.0/cmath.pi
+
+    return arho_2d, aphi_2d
+#_______________________________________________________________________________
+
+
+  def __showPlot(self, loc, rho_2d, phi_2d, f, **kwargs):
+    """
+    DESCRIPTION:
+    -----------
+    Plot of apparent resistivity and phase. Requires matplotlib to be available.
+
+    ARGUMENTS:
+    ----------
+    loc     :: escript Locator object
+    rho_2d  :: list with computed apparent resistivities
+    phi_2d  :: list with computed phase values
+    f       :: sounding frequency
+
+    RETURNS:
+    --------
+    Plot in window.
+
+    """
+    try:
+        import matplotlib.pyplot as plt
+    except ImportError:
+        print("Warning: matplotlib not available, plot will not be shown")
+        return
+
+    # Abscissas/Ordinates:
+    x  = numpy.array( loc.getX() )[:,0]
+    y0 = numpy.array( loc.getValue(rho_2d) )
+    y1 = numpy.array( loc.getValue(phi_2d) )
+
+    # Plot labels:
+    title = 'Escript MT-2D ' + '(' + self.mode.upper() + ')' + ' freq: ' + str(f) + ' Hz'
+    ylbl0 = r'Apparent Resistivity $(\Omega\cdot\,m)$'
+    ylbl1 = r'Phase $(^{\circ})$'
+    xlbl1 = 'Easting (m)'
+
+
+    # Setup the plot window with app. res. on top and phase on bottom:
+    f, ax = plt.subplots(2, figsize=(8,8), sharex=True) # Mind shared axis
+    f.subplots_adjust(top=0.9)  # Little extra space for 'suptitle'
+    f.suptitle(title)           # This is actually the plot-title
+
+    # Top: apparent resistivity on semi-log plot
+    ax[0].plot(x,y0, color='red') # semilogy
+    ax[0].grid(b=True, which='both', color='grey',linestyle=':')
+    ax[0].set_title( ylbl0 )
+    # Plot limits in **kwargs:
+    if 'limits' in kwargs:
+        ax[0].set_xlim(kwargs["limits"])
+
+    # Bottom: phase on linear plot
+    ax[1].plot(x,y1, color='blue')
+    ax[1].grid(b=True, which='both', color='grey',linestyle=':')
+    ax[1].set_xlabel( xlbl1 )
+    ax[1].set_title( ylbl1 )
+    # Plot limits in **kwargs:
+    if 'limits' in kwargs:
+        ax[1].set_xlim(kwargs["limits"])
+
+    plt.show()
+
+
+#_______________________________________________________________________________
+
+
+  def __setSolver(self, mode, domain, sigma, boundary_mask, boundary_value, f):
+    """
+    DESCRIPTION:
+    -----------
+    Setups the coupled PDE for real and complex part.
+
+    ARGUMENTS:
+    ----------
+    mode           :: string with TE or TM mode
+    domain         :: escript object with mesh domain
+    sigma          :: escript object with conductivity model
+    boundary_mask  :: escript object with boundary mask
+    boundary_value :: dictionary with real/imag boundary values
+    f              :: sounding frequency
+
+    RETURNS:
+    --------
+    mt2d_fields    :: dictionary with solved PDE, magnetotelluric fields real/imag
+    """
+
+    # Constants:
+    pi  = cmath.pi     # Ratio circle circumference to diameter.
+    mu0 = 4*pi*1e-7    # Free space permeability in V.s/(A.m).
+    wm  = 2*pi*f*mu0   # Angular frequency times mu0.
+
+
+    # ---
+    # Setup the coupled PDE for real/imaginary parts:
+    # ---
+
+    # Initialise the PDE object for two coupled equations (real/imaginary).
+    mtpde = pde.LinearPDE(domain, numEquations=2)
+
+    # If set, solve the 2D case using the direct solver:
+    if MT_2D._solver.upper() == "DIRECT":
+       mtpde.getSolverOptions().setSolverMethod(pde.SolverOptions().DIRECT)
+    else:
+       mtpde.getSolverOptions().setSolverMethod(pde.SolverOptions().DEFAULT)
+
+    # Now initialise the PDE coefficients 'A' and 'D',
+    # as well as the Dirichlet variables 'q' and 'r':
+    A = mtpde.createCoefficient("A")
+    D = mtpde.createCoefficient("D")
+    q = mtpde.createCoefficient("q")
+    r = mtpde.createCoefficient("r")
+
+    # Set the appropriate values for the coefficients depending on the mode:
+    if mode.upper() == "TE":
+        a_val = 1.0
+        d_val = wm*sigma
+    elif mode.upper() == "TM":
+        a_val = 1.0/sigma
+        d_val = wm
+
+
+    # ---
+    # Define the PDE parameters, mind boundary conditions.
+    # ---
+
+
+    # Now define the rank-4 coefficient A:
+    for i in range(domain.getDim()):
+        A[0,i,0,i] = a_val
+        A[1,i,1,i] = a_val
+
+    # And define the elements of 'D' which are decomposed into real/imaginary values:
+    D[0,0] = 0     ; D[1,0] = d_val
+    D[0,1] =-d_val ; D[1,1] = 0
+
+
+    # Set Dirichlet boundaries and values:
+    q[0] = boundary_mask ; r[0] = boundary_value['real']
+    q[1] = boundary_mask ; r[1] = boundary_value['imag']
+
+    # ---
+    # Solve the PDE
+    # ---
+
+    mtpde.setValue(A=A, D=D, q=q, r=r  )
+    pde_solution = mtpde.getSolution()
+
+    # And return the real and imaginary parts individually:
+    mt2d_fields = {"real":pde_solution[0], "imag":pde_solution[1] }
+    #<Note>: the electric field is returned for TE-mode.
+    #        the magnetic field is returned for TM-mode.
+
+    return mt2d_fields
+
+#_______________________________________________________________________________
+
+
+  def pdeSolve(self):
+    """
+    DESCRIPTION:
+    -----------
+    Solves the PDE for either the TE or the TM mode.
+    (TE mode is the transverse Electric field).
+    (TM mode is the transverse Magnetic field).
+
+    ARGUMENTS:
+    ----------
+    (uses `self`)
+
+    RETURNS:
+    --------
+    mt2d  :: list with real/imag fields for each sounding frequency
+    arho  :: list with apparent resistivities for each sounding frequency
+    aphi  :: list with phase values for each sounding frequency
+
+    """
+
+
+    # ---
+    # Constants.
+    # ---
+
+    # Pi & vacuum permeability:
+    pi = cmath.pi
+    mu = 4*pi*1e-7
+
+    # Number of frequencies:
+    nfreq = len(self.frequencies)
+
+
+    # ---
+    # Solve the PDE for all frequencies.
+    # ---
+
+    # Prepare lists to store the values at each frequency:
+    arho = []
+    aphi = []
+    mt2d = []
+
+    # Cycle over all frequencies:
+    print("Solving for frequency: ...")
+    for n in range( nfreq ):
+
+      f = self.frequencies[n] # actual frequency (Hz)
+      wm  = (2*pi*f)*mu       # angular frequency (rad/s)
+      T = 1.0 / f             # sounding period (s)
+
+      print(n+1,":", f, "(Hz)")
+
+      # Calculate 1D Dirichlet boundary values:
+      boundary_value = self.__getBoundaryValues(self.mode.upper(), self.X,
+            self.rho_1d, self.ifc_1d, self.xstep, self.zstep, f)
+
+      # Solve the 2D-MT PDE:
+      fld_2d = self.__setSolver(self.mode.upper(),self.domain, self.sigma,
+            self.boundary_mask, boundary_value, f)
+
+      # Calculate the field gradients:
+      grd_2d = self.__getGradField(self.proj, fld_2d, wm)
+
+      # Calculate the apparent resistivity and phase:
+      rho_2d, phi_2d = self.__getAppResPhase(fld_2d, grd_2d, wm)
+
+      # Save in lists for each frequency:
+      mt2d.append( fld_2d )
+      arho.append( self.loc.getValue(rho_2d) )
+      aphi.append( self.loc.getValue(phi_2d) )
+
+      # Optionally plot the apparent resistivity and phase:
+      if self.plot:
+          self.__showPlot(self.loc, rho_2d, phi_2d, f, limits=self.limits)
+
+
+    # ---
+    # All done
+    # ---
+
+    print("field calculations finished.")
+    return mt2d, arho, aphi
+
+#_______________________________________________________________________________
+
+
+
+
+
+
+
diff --git a/downunder/py_src/mappings.py b/downunder/py_src/mappings.py
index 4a996e9..925392e 100644
--- a/downunder/py_src/mappings.py
+++ b/downunder/py_src/mappings.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -14,6 +13,8 @@ from __future__ import print_function
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 """Collection of parametrizations that map physical values to model parameters
    and back"""
 
diff --git a/downunder/py_src/minimizers.py b/downunder/py_src/minimizers.py
index 385f181..f7a2303 100644
--- a/downunder/py_src/minimizers.py
+++ b/downunder/py_src/minimizers.py
@@ -16,6 +16,8 @@
 
 """Generic minimization algorithms"""
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -214,17 +216,18 @@ class AbstractMinimizer(object):
     def setCallback(self, callback):
         """
         Sets a callback function to be called after every iteration.
-        The arguments to the function are: (k, x, Jx, g_Jxx), where
-        k is the current iteration, x is the current estimate, Jx=f(x) and
-        g_Jxx=grad f(x).
+        It is up to the specific implementation what arguments are passed
+        to the callback. Subclasses should at least pass the current
+        iteration number k, the current estimate x, and possibly f(x),
+        grad f(x), and the current error.
         """
         if callback is not None and not callable(callback):
             raise TypeError("Callback function not callable.")
         self._callback = callback
 
-    def _doCallback(self, *args):
+    def _doCallback(self, **args):
         if self._callback is not None:
-            self._callback(*args)
+            self._callback(**args)
 
     def getResult(self):
         """
@@ -298,6 +301,14 @@ class MinimizerLBFGS(AbstractMinimizer):
 
     def run(self, x):
         """
+        The callback function is called with the following arguments:
+            k       - iteration number
+            x       - current estimate
+            Jx      - value of cost function at x
+            g_Jx    - gradient of cost function at x
+            norm_dJ - |Jx_k - Jx_{k-1}| (only if J_tol is set)
+            norm_dx - ||x_k - x_{k-1}|| (only if m_tol is set)
+
         :param x: Level set function representing our initial guess
         :type x: `Data`
         :return: Level set function representing the solution
@@ -316,8 +327,16 @@ class MinimizerLBFGS(AbstractMinimizer):
         converged = False
         args=self.getCostFunction().getArguments(x)
         g_Jx=self.getCostFunction().getGradient(x, *args)
-        Jx=self.getCostFunction()(x, *args) # equivalent to getValue() for Downunder CostFunctions
+        # equivalent to getValue() for Downunder CostFunctions
+        Jx=self.getCostFunction()(x, *args)
         Jx_0=Jx
+        cbargs = {'k':n_iter, 'x':x, 'Jx':Jx, 'g_Jx':g_Jx}
+        if self._J_tol:
+            cbargs.update(norm_dJ=None)
+        if self._m_tol:
+            cbargs.update(norm_dx=None)
+
+        self._doCallback(**cbargs)
 
         while not converged and not non_curable_break_down and n_iter < self._imax:
           k=0
@@ -325,7 +344,6 @@ class MinimizerLBFGS(AbstractMinimizer):
           s_and_y=[]
           # initial step length for line search
           alpha=1.0
-          self._doCallback(n_iter, x, Jx, g_Jx)
 
           while not converged and not break_down and k < self._restart and n_iter < self._imax:
                 #self.logger.info("\033[1;31miteration %d\033[1;30m"%n_iter)
@@ -362,23 +380,27 @@ class MinimizerLBFGS(AbstractMinimizer):
 
                 converged = True
                 if self._J_tol:
-                    flag=abs(Jx_new-Jx) <= self._J_tol * abs(Jx_new-Jx_0)
+                    dJ = abs(Jx_new-Jx)
+                    JJtol = self._J_tol * abs(Jx_new-Jx_0)
+                    flag = dJ <= JJtol
                     if self.logger.isEnabledFor(logging.DEBUG):
                         if flag:
-                            self.logger.debug("Cost function has converged: dJ, J*J_tol = %e, %e"%(Jx-Jx_new,abs(Jx_new-Jx_0)*self._J_tol))
+                            self.logger.debug("Cost function has converged: dJ=%e, J*J_tol=%e"%(dJ,JJtol))
                         else:
-                            self.logger.debug("Cost function checked: dJ, J*J_tol = %e, %e"%(Jx-Jx_new,abs(Jx_new)*self._J_tol))
-
+                            self.logger.debug("Cost function checked: dJ=%e, J*J_tol=%e"%(dJ,JJtol))
+                    cbargs.update(norm_dJ=dJ)
                     converged = converged and flag
+
                 if self._m_tol:
                     norm_x = self.getCostFunction().getNorm(x_new)
                     norm_dx = self.getCostFunction().getNorm(delta_x)
                     flag = norm_dx <= self._m_tol * norm_x
                     if self.logger.isEnabledFor(logging.DEBUG):
                         if flag:
-                            self.logger.debug("Solution has converged: dx, x*m_tol = %e, %e"%(norm_dx,norm_x*self._m_tol))
+                            self.logger.debug("Solution has converged: dx=%e, x*m_tol=%e"%(norm_dx, norm_x*self._m_tol))
                         else:
-                            self.logger.debug("Solution checked: dx, x*m_tol = %e, %e"%(norm_dx,norm_x*self._m_tol))
+                            self.logger.debug("Solution checked: dx=%e, x*m_tol=%e"%(norm_dx, norm_x*self._m_tol))
+                    cbargs.update(norm_dx=norm_dx)
                     converged = converged and flag
 
                 x=x_new
@@ -404,7 +426,8 @@ class MinimizerLBFGS(AbstractMinimizer):
 
                 k+=1
                 n_iter+=1
-                self._doCallback(n_iter, x, Jx, g_Jx)
+                cbargs.update(k=n_iter, x=x, Jx=Jx, g_Jx=g_Jx)
+                self._doCallback(**cbargs)
 
                 # delete oldest vector pair
                 if k>self._truncation: s_and_y.pop(0)
@@ -483,6 +506,15 @@ class MinimizerBFGS(AbstractMinimizer):
                 raise KeyError("Invalid option '%s'"%o)
 
     def run(self, x):
+        """
+        The callback function is called with the following arguments:
+            k     - iteration number
+            x     - current estimate
+            Jx    - value of cost function at x
+            g_Jx  - gradient of cost function at x
+            gnorm - norm of g_Jx (stopping criterion)
+        """
+
         args=self.getCostFunction().getArguments(x)
         g_Jx=self.getCostFunction().getGradient(x, *args)
         Jx=self.getCostFunction()(x, *args)
@@ -494,7 +526,7 @@ class MinimizerBFGS(AbstractMinimizer):
         I=np.eye(n)
         H=self._initial_H*I
         gnorm=Lsup(g_Jx)
-        self._doCallback(k, x, Jx, g_Jx)
+        self._doCallback(k=k, x=x, Jx=Jx, g_Jx=g_Jx, gnorm=gnorm)
 
         while gnorm > self._m_tol and k < self._imax:
             self.logger.debug("iteration %d, gnorm=%e"%(k,gnorm))
@@ -519,8 +551,8 @@ class MinimizerBFGS(AbstractMinimizer):
             delta_g=g_Jx_new-g_Jx
             g_Jx=g_Jx_new
             k+=1
-            self._doCallback(k, x, Jx, g_Jx)
             gnorm=Lsup(g_Jx)
+            self._doCallback(k=k, x=x, Jx=Jx, g_Jx=g_Jx, gnorm=gnorm)
             self._result=x
             if (gnorm<=self._m_tol): break
 
@@ -550,6 +582,15 @@ class MinimizerNLCG(AbstractMinimizer):
     """
 
     def run(self, x):
+        """
+        The callback function is called with the following arguments:
+            k     - iteration number
+            x     - current estimate
+            Jx    - value of cost function at x
+            g_Jx  - gradient of cost function at x
+            gnorm - norm of g_Jx (stopping criterion)
+        """
+
         i=0
         k=0
         args=self.getCostFunction().getArguments(x)
@@ -558,9 +599,10 @@ class MinimizerNLCG(AbstractMinimizer):
         d=r
         delta=self.getCostFunction().getDualProduct(r,r)
         delta0=delta
-        self._doCallback(i, x, Jx, -r)
+        gnorm=Lsup(r)
+        self._doCallback(k=i, x=x, Jx=Jx, g_Jx=-r, gnorm=gnorm)
 
-        while i<self._imax and Lsup(r)>self._m_tol:
+        while i < self._imax and gnorm > self._m_tol:
             self.logger.debug("iteration %d"%i)
             self.logger.debug("grad f(x) = %s"%(-r))
             self.logger.debug("d = %s"%d)
@@ -583,7 +625,8 @@ class MinimizerNLCG(AbstractMinimizer):
                 d=r
                 k=0
             i+=1
-            self._doCallback(i, x, Jx, g_Jx_new)
+            gnorm=Lsup(r)
+            self._doCallback(k=i, x=x, Jx=Jx, g_Jx=g_Jx_new, gnorm=gnorm)
             self._result=x
 
         if i >= self._imax:
diff --git a/downunder/py_src/regularizations.py b/downunder/py_src/regularizations.py
index 40b7293..4266fd1 100644
--- a/downunder/py_src/regularizations.py
+++ b/downunder/py_src/regularizations.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/downunder/py_src/seismic.py b/downunder/py_src/seismic.py
index d6977e5..8a207f8 100644
--- a/downunder/py_src/seismic.py
+++ b/downunder/py_src/seismic.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -21,7 +23,9 @@ __license__="""Licensed under the Open Software License version 3.0
 http://www.opensource.org/licenses/osl-3.0.php"""
 __url__="https://launchpad.net/escript-finley"
 
-__all__ = ['SimpleSEGYWriter', 'Ricker', 'WaveBase', 'SonicWave', 'VTIWave', 'HTIWave', 'createAbsorbtionLayerFunction', 'SonicHTIWave' , "TTIWave"]
+__all__ = ['SimpleSEGYWriter', 'Ricker', 'WaveBase', 'SonicWave', 'VTIWave',
+'HTIWave', 'createAbsorbtionLayerFunction', 'createAbsorptionLayerFunction',
+'SonicHTIWave' , "TTIWave"]
 
 
 from math import pi
@@ -32,6 +36,15 @@ from esys.escript import *
 import esys.escript.unitsSI as U
 from esys.escript.linearPDEs import LinearSinglePDE, LinearPDESystem, WavePDE, SolverOptions
 
+OBSPY_AVAILABLE = False
+try:
+    from obspy import Trace, Stream, UTCDateTime
+    from obspy.segy.segy import SEGYTraceHeader, SEGYBinaryFileHeader
+    from obspy.core import AttribDict
+    OBSPY_AVAILABLE = True
+except:
+    pass
+
 class Wavelet(object):
         """
         place holder for source wavelet
@@ -44,8 +57,8 @@ class Ricker(Wavelet):
         """
         def __init__(self, f_dom=40, t_dom=None):
                 """
-                Sets up a Ricker wavelet wih dominant frequence `f_dom` and 
-                center at time `t_dom`. If `t_dom` is not given an estimate 
+                Sets up a Ricker wavelet with dominant frequency `f_dom` and
+                center at time `t_dom`. If `t_dom` is not given an estimate
                 for suitable `t_dom` is calculated so f(0)~0.
 
                 :note: maximum frequence is about 2 x the dominant frequence.
@@ -66,7 +79,7 @@ class Ricker(Wavelet):
 
         def getTimeScale(self):
                 """
-                Returns the time scale which is the inverse of the largest 
+                Returns the time scale which is the inverse of the largest
                 frequence with a significant spectral component.
                 """
                 return 1/self.__f_max
@@ -148,7 +161,7 @@ class SimpleSEGYWriter(object):
                             rg = [(c[0],c[1])  for c in receiver_group]
                 else:
                     raise TypeError("illegal receiver_group type.")
-     
+
                 self.__source=source
                 self.__receiver_group=rg
                 self.__text=text
@@ -179,6 +192,12 @@ class SimpleSEGYWriter(object):
                """
                return self.__sampling_interval
 
+        def obspy_available(self):
+            """
+            for checking if the obspy module is available
+            """
+            return OBSPY_AVAILABLE
+
         def write(self, filename):
             """
             writes to segy file
@@ -187,11 +206,7 @@ class SimpleSEGYWriter(object):
             :note: the function uses the `obspy` module.
             """
 
-            try:
-                from obspy import Trace, Stream, UTCDateTime
-                from obspy.segy.segy import SEGYTraceHeader, SEGYBinaryFileHeader
-                from obspy.core import AttribDict
-            except ImportError as e:
+            if not OBSPY_AVAILABLE:
                 raise RuntimeError("This feature (SimpleSEGYWriter.write())"+\
                         " depends on obspy, which is not installed, see "+\
                         "https://github.com/obspy/obspy for install guide")
@@ -293,7 +308,12 @@ class WaveBase(object):
              self.t_last=t
              return t, self.u + self.v * (t-self.t)
 
-def createAbsorbtionLayerFunction(x, absorption_zone=300*U.m, absorption_cut=1.e-2, top_absorbation=False):
+def createAbsorbtionLayerFunction(*args, **kwargs):
+    print("WARNING: createAbsorbtionLayerFunction(): function is deprecated, use createAbsorptionLayerFunction")
+    return createAbsorptionLayerFunction(*args, **kwargs)
+
+def createAbsorptionLayerFunction(x, absorption_zone=300*U.m,
+        absorption_cut=1.e-2, top_absorption=False, top_absorbation=None):
     """
     Creates a distribution which is one in the interior of the domain of `x`
     and is falling down to the value 'absorption_cut' over a margin of thickness 'absorption_zone'
@@ -301,14 +321,19 @@ def createAbsorbtionLayerFunction(x, absorption_zone=300*U.m, absorption_cut=1.e
 
     :param x: location of points in the domain
     :type x: `Data`
-    :param absorption_zone: thickness of the aborption zone
+    :param absorption_zone: thickness of the absorption zone
     :param absorption_cut: value of decay function on domain boundary
     :return: function on 'x' which is one in the iterior and decays to almost zero over a margin
              toward the boundary.
     """
+    if top_absorbation is not None:
+        print("WARNING: createAbsorptionLayerFunction(): top_absorbation is deprecated, use top_absorption")
+        if top_absorption is False:
+            top_absorption = top_absorbation
+
     if absorption_zone is None or absorption_zone == 0:
         return 1
-    
+
     dom=x.getDomain()
     bb=boundingBox(dom)
     DIM=dom.getDim()
@@ -319,7 +344,7 @@ def createAbsorbtionLayerFunction(x, absorption_zone=300*U.m, absorption_cut=1.e
         x_l=x_i-(bb[i][0]+absorption_zone)
         m_l=whereNegative(x_l)
         f=f*( (exp(-decay*(x_l*m_l)**2)-1) * m_l+1 )
-        if  top_absorbation or not DIM-1 == i:
+        if  top_absorption or not DIM-1 == i:
             x_r=(bb[i][1]-absorption_zone)-x_i
             m_r=whereNegative(x_r)
             f=f*( (exp(-decay*(x_r*m_r)**2)-1) * m_r+1 )
@@ -352,7 +377,7 @@ class SonicWave(WaveBase):
            :param absorption_cut: boundary value of absorption decay factor
            :param lumping: if True mass matrix lumping is being used. This is accelerates the computing but introduces some diffusion.
            """
-           f=createAbsorbtionLayerFunction(Function(domain).getX(), absorption_zone, absorption_cut)
+           f=createAbsorptionLayerFunction(Function(domain).getX(), absorption_zone, absorption_cut)
            v_p=v_p*f
 
            if p0 == None:
@@ -426,13 +451,14 @@ class VTIWave(WaveBase):
         :type disable_fast_assemblers: `boolean`
         """
         DIM=domain.getDim()
-        f=createAbsorbtionLayerFunction(Function(domain).getX(), absorption_zone, absorption_cut)
+        self.fastAssembler = hasattr(domain, "createAssembler") and not disable_fast_assemblers
+        f=createAbsorptionLayerFunction(Function(domain).getX(), absorption_zone, absorption_cut)
+
+        f = interpolate(f, Function(domain))
 
         v_p=v_p*f
         v_s=v_s*f
 
-
-
         if u0 == None:
           u0=Vector(0.,Solution(domain))
         else:
@@ -450,7 +476,6 @@ class VTIWave(WaveBase):
 
         self.__wavelet=wavelet
 
-        self.fastAssembler = hasattr(domain, "createAssembler") and not disable_fast_assemblers
         self.c33=v_p**2 * rho
         self.c44=v_s**2 * rho
         self.c11=(1+2*eps) * self.c33
@@ -459,14 +484,17 @@ class VTIWave(WaveBase):
         self.c12=self.c11-2*self.c66
 
         if self.fastAssembler:
-            self.__mypde=WavePDE(domain, [("c11", self.c11),
+            C = [("c11", self.c11),
                     ("c12", self.c12), ("c13", self.c13), ("c33", self.c33),
-                    ("c44", self.c44), ("c66", self.c66)])
+                    ("c44", self.c44), ("c66", self.c66)]
+            if "speckley" in domain.getDescription().lower():
+                C = [(n, interpolate(d, ReducedFunction(domain))) for n,d in C]
+            self.__mypde=WavePDE(domain, C)
         else:
             self.__mypde=LinearPDESystem(domain)
             self.__mypde.setValue(X=self.__mypde.createCoefficient('X'))
 
-        if lumping: 
+        if lumping:
             self.__mypde.getSolverOptions().setSolverMethod(SolverOptions.HRZ_LUMPING)
         self.__mypde.setSymmetryOn()
         self.__mypde.setValue(D=rho*kronecker(DIM))
@@ -504,7 +532,7 @@ class VTIWave(WaveBase):
 
                 s=self.c44*(du[2,1]+du[1,2])
                 sigma[1,2]=s
-                sigma[2,1]=s             
+                sigma[2,1]=s
 
                 s=self.c44*(du[2,0]+du[0,2])
                 sigma[0,2]=s
@@ -513,7 +541,7 @@ class VTIWave(WaveBase):
                 s=self.c66*(du[0,1]+du[1,0])
                 sigma[0,1]=s
                 sigma[1,0]=s
-                
+
 
             else:
                 e11=du[0,0]
@@ -532,149 +560,152 @@ class VTIWave(WaveBase):
 
 
 class HTIWave(WaveBase):
-        """
-        Solving the HTI wave equation (along the x_0 axis)
+    """
+    Solving the HTI wave equation (along the x_0 axis)
 
-        :note: In case of a two dimensional domain a horizontal domain is considered, i.e. the depth component is dropped.
-        """
-        
-        def __init__(self, domain, v_p, v_s,   wavelet, source_tag,
-                source_vector = [1.,0.,0.], eps=0., gamma=0., delta=0., rho=1.,
-                dt=None, u0=None, v0=None, absorption_zone=None,
-                absorption_cut=1e-2, lumping=True, disable_fast_assemblers=False):
-           """
-           initialize the VTI wave solver
+    :note: In case of a two dimensional domain a horizontal domain is considered, i.e. the depth component is dropped.
+    """
 
-           :param domain: domain of the problem
-           :type domain: `Domain`
-           :param v_p: vertical p-velocity field
-           :type v_p: `Scalar`
-           :param v_s: vertical s-velocity field
-           :type v_s: `Scalar`
-           :param wavelet: wavelet to describe the time evolution of source term
-           :type wavelet: `Wavelet`
-           :param source_tag: tag of the source location
-           :type source_tag: 'str' or 'int'
-           :param source_vector: source orientation vector
-           :param eps: first Thompsen parameter
-           :param delta: second Thompsen parameter
-           :param gamma: third Thompsen parameter
-           :param rho: density
-           :param dt: time step size. If not present a suitable time step size is calculated.
-           :param u0: initial solution. If not present zero is used.
-           :param v0: initial solution change rate. If not present zero is used.
-           :param absorption_zone: thickness of absorption zone
-           :param absorption_cut: boundary value of absorption decay factor
-           :param lumping: if True mass matrix lumping is being used. This is accelerates the computing but introduces some diffusion.
-           :param disable_fast_assemblers: if True, forces use of slower and more general PDE assemblers
-           """
-           DIM=domain.getDim()
-           f=createAbsorbtionLayerFunction(Function(domain).getX(), absorption_zone, absorption_cut)
+    def __init__(self, domain, v_p, v_s,   wavelet, source_tag,
+            source_vector = [1.,0.,0.], eps=0., gamma=0., delta=0., rho=1.,
+            dt=None, u0=None, v0=None, absorption_zone=None,
+            absorption_cut=1e-2, lumping=True, disable_fast_assemblers=False):
+       """
+       initialize the HTI wave solver
+
+       :param domain: domain of the problem
+       :type domain: `Domain`
+       :param v_p: vertical p-velocity field
+       :type v_p: `Scalar`
+       :param v_s: vertical s-velocity field
+       :type v_s: `Scalar`
+       :param wavelet: wavelet to describe the time evolution of source term
+       :type wavelet: `Wavelet`
+       :param source_tag: tag of the source location
+       :type source_tag: 'str' or 'int'
+       :param source_vector: source orientation vector
+       :param eps: first Thompsen parameter
+       :param delta: second Thompsen parameter
+       :param gamma: third Thompsen parameter
+       :param rho: density
+       :param dt: time step size. If not present a suitable time step size is calculated.
+       :param u0: initial solution. If not present zero is used.
+       :param v0: initial solution change rate. If not present zero is used.
+       :param absorption_zone: thickness of absorption zone
+       :param absorption_cut: boundary value of absorption decay factor
+       :param lumping: if True mass matrix lumping is being used. This is accelerates the computing but introduces some diffusion.
+       :param disable_fast_assemblers: if True, forces use of slower and more general PDE assemblers
+       """
+       DIM=domain.getDim()
+       self.fastAssembler = hasattr(domain, "createAssembler") and not disable_fast_assemblers
+       f=createAbsorptionLayerFunction(v_p.getFunctionSpace().getX(), absorption_zone, absorption_cut)
+
+       v_p=v_p*f
+       v_s=v_s*f
+
+       if u0 == None:
+          u0=Vector(0.,Solution(domain))
+       else:
+          u0=interpolate(u0, Solution(domain ))
 
-           v_p=v_p*f
-           v_s=v_s*f
+       if v0 == None:
+          v0=Vector(0.,Solution(domain))
+       else:
+          v0=interpolate(v0, Solution(domain ))
 
-           if u0 == None:
-              u0=Vector(0.,Solution(domain))
-           else:
-              u0=interpolate(p0, Solution(domain ))
+       if dt == None:
+            dt=min((1./5.)*min(inf(domain.getSize()/v_p), inf(domain.getSize()/v_s)), wavelet.getTimeScale())
 
-           if v0 == None:
-              v0=Vector(0.,Solution(domain))
-           else:
-              v0=interpolate(v0, Solution(domain ))
+       super(HTIWave, self).__init__( dt, u0=u0, v0=v0, t0=0.)
 
-           if dt == None:
-                dt=min((1./5.)*min(inf(domain.getSize()/v_p), inf(domain.getSize()/v_s)), wavelet.getTimeScale())
+       self.__wavelet=wavelet
 
-           super(HTIWave, self).__init__( dt, u0=u0, v0=v0, t0=0.)
+       self.c33 = v_p**2 * rho
+       self.c44 = v_s**2 * rho
+       self.c11 = (1+2*eps) * self.c33
+       self.c66 = (1+2*gamma) * self.c44
+       self.c13 = sqrt(2*self.c33*(self.c33-self.c44) * delta + (self.c33-self.c44)**2)-self.c44
+       self.c23 = self.c33-2*self.c66
 
-           self.__wavelet=wavelet
-           
-           self.fastAssembler = hasattr(domain, "createAssembler") and not disable_fast_assemblers
-           self.c33 = v_p**2 * rho
-           self.c44 = v_s**2 * rho
-           self.c11 = (1+2*eps) * self.c33
-           self.c66 = (1+2*gamma) * self.c44
-           self.c13 = sqrt(2*self.c33*(self.c33-self.c44) * delta + (self.c33-self.c44)**2)-self.c44
-           self.c23 = self.c33-2*self.c66
-
-           if self.fastAssembler:
-                self.__mypde=WavePDE(domain, [("c11", self.c11),
-                    ("c23", self.c23), ("c13", self.c13), ("c33", self.c33),
-                    ("c44", self.c44), ("c66", self.c66)])
-           else:
-                self.__mypde=LinearPDESystem(domain)
-                self.__mypde.setValue(X=self.__mypde.createCoefficient('X'))
-           
-           if lumping: 
-                self.__mypde.getSolverOptions().setSolverMethod(SolverOptions.HRZ_LUMPING)
-           self.__mypde.setSymmetryOn()
-           self.__mypde.setValue(D=rho*kronecker(DIM))
-           self.__source_tag=source_tag
+       if self.fastAssembler:
+            C = [("c11", self.c11),
+                ("c23", self.c23), ("c13", self.c13), ("c33", self.c33),
+                ("c44", self.c44), ("c66", self.c66)]
+            if "speckley" in domain.getDescription().lower():
+                C = [(n, interpolate(d, ReducedFunction(domain))) for n,d in C]
+            self.__mypde=WavePDE(domain, C)
+       else:
+            self.__mypde=LinearPDESystem(domain)
+            self.__mypde.setValue(X=self.__mypde.createCoefficient('X'))
 
-           if DIM == 2:
-              source_vector= [source_vector[0],source_vector[2]]
+       if lumping:
+            self.__mypde.getSolverOptions().setSolverMethod(SolverOptions.HRZ_LUMPING)
+       self.__mypde.setSymmetryOn()
+       self.__mypde.setValue(D=rho*kronecker(DIM))
+       self.__source_tag=source_tag
 
-           self.__r=Vector(0, DiracDeltaFunctions(self.__mypde.getDomain()))
-           self.__r.setTaggedValue(self.__source_tag, source_vector)
+       if DIM == 2:
+          source_vector= [source_vector[0],source_vector[2]]
 
+       self.__r=Vector(0, DiracDeltaFunctions(self.__mypde.getDomain()))
+       self.__r.setTaggedValue(self.__source_tag, source_vector)
 
-        def setQ(self,q):
-            """
-            sets the PDE q value
 
-            :param q: the value to set
-            """
-            self.__mypde.setValue(q=q)
+    def setQ(self,q):
+        """
+        sets the PDE q value
+
+        :param q: the value to set
+        """
+        self.__mypde.setValue(q=q)
+
+    def  _getAcceleration(self, t, u):
+         """
+         returns the acceleration for time `t` and solution `u` at time `t`
+         """
+         du = grad(u)
+         if self.fastAssembler:
+            self.__mypde.setValue(du=du, y_dirac= self.__r * self.__wavelet.getValue(t))
+         else:
+             sigma=self.__mypde.getCoefficient('X')
+
+             if self.__mypde.getDim() == 3:
+                e11=du[0,0]
+                e22=du[1,1]
+                e33=du[2,2]
+
+                sigma[0,0]=self.c11*e11+self.c13*(e22+e33)
+                sigma[1,1]=self.c13*e11+self.c33*e22+self.c23*e33
+                sigma[2,2]=self.c13*e11+self.c23*e22+self.c33*e33
+
+                s=self.c44*(du[2,1]+du[1,2])
+                sigma[1,2]=s
+                sigma[2,1]=s
+
+                s=self.c66*(du[2,0]+du[0,2])
+                sigma[0,2]=s
+                sigma[2,0]=s
+
+                s=self.c66*(du[0,1]+du[1,0])
+                sigma[0,1]=s
+                sigma[1,0]=s
 
-        def  _getAcceleration(self, t, u):
-             """
-             returns the acceleraton for time `t` and solution `u` at time `t`
-             """
-             du = grad(u)
-             if self.fastAssembler:
-                self.__mypde.setValue(du=du, y_dirac= self.__r * self.__wavelet.getValue(t))
              else:
-                 sigma=self.__mypde.getCoefficient('X')
-
-                 if self.__mypde.getDim() == 3:
-                    e11=du[0,0]
-                    e22=du[1,1]
-                    e33=du[2,2]
-
-                    sigma[0,0]=self.c11*e11+self.c13*(e22+e33)
-                    sigma[1,1]=self.c13*e11+self.c33*e22+self.c23*e33
-                    sigma[2,2]=self.c13*e11+self.c23*e22+self.c33*e33
-
-                    s=self.c44*(du[2,1]+du[1,2])
-                    sigma[1,2]=s
-                    sigma[2,1]=s
-
-                    s=self.c66*(du[2,0]+du[0,2])
-                    sigma[0,2]=s
-                    sigma[2,0]=s
-
-                    s=self.c66*(du[0,1]+du[1,0])
-                    sigma[0,1]=s
-                    sigma[1,0]=s
-
-                 else:
-                    e11=du[0,0]
-                    e22=du[1,1]
-                    sigma[0,0]=self.c11*e11+self.c13*e22
-                    sigma[1,1]=self.c13*e11+self.c33*e22
-
-                    s=self.c66*(du[1,0]+du[0,1])
-                    sigma[0,1]=s
-                    sigma[1,0]=s
-                 self.__mypde.setValue(X=-sigma, y_dirac= self.__r * self.__wavelet.getValue(t))
-                 
-             return self.__mypde.getSolution()
+                e11=du[0,0]
+                e22=du[1,1]
+                sigma[0,0]=self.c11*e11+self.c13*e22
+                sigma[1,1]=self.c13*e11+self.c33*e22
+
+                s=self.c66*(du[1,0]+du[0,1])
+                sigma[0,1]=s
+                sigma[1,0]=s
+             self.__mypde.setValue(X=-sigma, y_dirac= self.__r * self.__wavelet.getValue(t))
+
+         return self.__mypde.getSolution()
 
 class TTIWave(WaveBase):
         """
-        Solving the 2D TTI wave equation with 
+        Solving the 2D TTI wave equation with
 
         `sigma_xx= c11*e_xx + c13*e_zz + c15*e_xz`
         `sigma_zz= c13*e_xx + c33*e_zz + c35*e_xz`
@@ -684,7 +715,7 @@ class TTIWave(WaveBase):
 
         :note: currently only the 2D case is supported.
         """
-        
+
         def __init__(self, domain, v_p, v_s,   wavelet, source_tag,
                 source_vector = [0.,1.], eps=0., delta=0., theta=0., rho=1.,
                 dt=None, u0=None, v0=None, absorption_zone=300*U.m,
@@ -717,7 +748,7 @@ class TTIWave(WaveBase):
            DIM=domain.getDim()
            if not DIM == 2:
                 raise ValueError("Only 2D is supported.")
-           f=createAbsorbtionLayerFunction(Function(domain).getX(), absorption_zone, absorption_cut)
+           f=createAbsorptionLayerFunction(Function(domain).getX(), absorption_zone, absorption_cut)
 
            v_p=v_p*f
            v_s=v_s*f
@@ -759,7 +790,7 @@ class TTIWave(WaveBase):
            self.c33= c0_11*sin(theta)**4 - 2*c0_13*cos(theta)**4 + 2*c0_13*cos(theta)**2 + c0_33*cos(theta)**4 - 4*c0_66*cos(theta)**4 + 4*c0_66*cos(theta)**2
            self.c36= (2*c0_11*cos(theta)**2 - 2*c0_11 + 4*c0_13*sin(theta)**2 - 2*c0_13 + 2*c0_33*cos(theta)**2 + 8*c0_66*sin(theta)**2 - 4*c0_66)*sin(theta)*cos(theta)/2
            self.c66= -c0_11*cos(theta)**4 + c0_11*cos(theta)**2 + 2*c0_13*cos(theta)**4 - 2*c0_13*cos(theta)**2 - c0_33*cos(theta)**4 + c0_33*cos(theta)**2 + c0_66*sin(theta)**4 + 3*c0_66*cos(theta)**4 - 2*c0_66*cos(theta)**2
-           
+
         def  _getAcceleration(self, t, u):
              """
              returns the acceleraton for time `t` and solution `u` at time `t`
@@ -787,70 +818,70 @@ class SonicHTIWave(WaveBase):
         Solving the HTI wave equation (along the x_0 axis) with azimuth (rotation around verticle axis)
         under the assumption of zero shear wave velocities
         The unknowns are the transversal (along x_0) and vertial stress (Q, P)
-        
+
         :note: In case of a two dimensional domain the second spatial dimenion is depth.
         """
-        def __init__(self, domain, v_p, wavelet, source_tag, source_vector = [1.,0.], eps=0., delta=0., azimuth=0.,    
+        def __init__(self, domain, v_p, wavelet, source_tag, source_vector = [1.,0.], eps=0., delta=0., azimuth=0.,
                      dt=None, p0=None, v0=None, absorption_zone=300*U.m, absorption_cut=1e-2, lumping=True):
            """
            initialize the HTI wave solver
-           
+
            :param domain: domain of the problem
-           :type domain: `Doamin`        
-           :param v_p: vertical p-velocity field    
+           :type domain: `Domain`
+           :param v_p: vertical p-velocity field
            :type v_p: `Scalar`
-           :param v_s: vertical s-velocity field    
-           :type v_s: `Scalar`          
-           :param wavelet: wavelet to describe the time evolution of source term 
-           :type wavelet: `Wavelet`          
+           :param v_s: vertical s-velocity field
+           :type v_s: `Scalar`
+           :param wavelet: wavelet to describe the time evolution of source term
+           :type wavelet: `Wavelet`
            :param source_tag: tag of the source location
            :type source_tag: 'str' or 'int'
            :param source_vector: source orientation vector
            :param eps: first Thompsen parameter
            :param azimuth: azimuth (rotation around verticle axis)
            :param gamma: third Thompsen parameter
-           :param rho: density           
-           :param dt: time step size. If not present a suitable time step size is calculated.           
-           :param p0: initial solution (Q(t=0), P(t=0)). If not present zero is used.           
-           :param v0: initial solution change rate. If not present zero is used.           
-           :param absorption_zone: thickness of absorption zone           
+           :param rho: density
+           :param dt: time step size. If not present a suitable time step size is calculated.
+           :param p0: initial solution (Q(t=0), P(t=0)). If not present zero is used.
+           :param v0: initial solution change rate. If not present zero is used.
+           :param absorption_zone: thickness of absorption zone
            :param absorption_cut: boundary value of absorption decay factor
-           :param lumping: if True mass matrix lumping is being used. This is accelerates the computing but introduces some diffusion. 
+           :param lumping: if True mass matrix lumping is being used. This accelerates the computation but introduces some diffusion.
            """
            DIM=domain.getDim()
-           f=createAbsorbtionLayerFunction(Function(domain).getX(), absorption_zone, absorption_cut)
+           f=createAbsorptionLayerFunction(v_p.getFunctionSpace().getX(), absorption_zone, absorption_cut)
 
            self.v2_p=v_p**2
            self.v2_t=self.v2_p*sqrt(1+2*delta)
            self.v2_n=self.v2_p*(1+2*eps)
-           
+
            if p0 == None:
               p0=Data(0.,(2,),Solution(domain))
            else:
               p0=interpolate(p0, Solution(domain ))
-              
+
            if v0 == None:
               v0=Data(0.,(2,),Solution(domain))
            else:
               v0=interpolate(v0, Solution(domain ))
-           
+
            if dt == None:
                   dt=min(min(inf(domain.getSize()/sqrt(self.v2_p)), inf(domain.getSize()/sqrt(self.v2_t)), inf(domain.getSize()/sqrt(self.v2_n))) , wavelet.getTimeScale())*0.2
-            
+
            super(SonicHTIWave, self).__init__( dt, u0=p0, v0=v0, t0=0.)
-           
+
            self.__wavelet=wavelet
-           
+
            self.__mypde=LinearPDESystem(domain)
            if lumping: self.__mypde.getSolverOptions().setSolverMethod(SolverOptions.HRZ_LUMPING)
            self.__mypde.setSymmetryOn()
            self.__mypde.setValue(D=kronecker(2), X=self.__mypde.createCoefficient('X'))
            self.__source_tag=source_tag
-           
+
 
            self.__r=Vector(0, DiracDeltaFunctions(self.__mypde.getDomain()))
            self.__r.setTaggedValue(self.__source_tag, source_vector)
- 
+
         def  _getAcceleration(self, t, u):
             """
             returns the acceleraton for time `t` and solution `u` at time `t`
@@ -858,7 +889,7 @@ class SonicHTIWave(WaveBase):
             dQ = grad(u[0])[0]
             dP = grad(u[1])[1:]
             sigma=self.__mypde.getCoefficient('X')
-            
+
             sigma[0,0] = self.v2_n*dQ
             sigma[0,1:] = self.v2_t*dP
             sigma[1,0] = self.v2_t*dQ
diff --git a/downunder/py_src/splitinversioncostfunctions.py b/downunder/py_src/splitinversioncostfunctions.py
index ebf1ef2..948626a 100644
--- a/downunder/py_src/splitinversioncostfunctions.py
+++ b/downunder/py_src/splitinversioncostfunctions.py
@@ -1,4 +1,4 @@
-
+from __future__ import division, print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -29,7 +29,7 @@ from .costfunctions import MeteredCostFunction
 from .mappings import Mapping
 from .forwardmodels import ForwardModel
 from esys.escript.pdetools import ArithmeticTuple
-from esys.escript import Data, inner, addJobPerWorld, FunctionJob
+from esys.escript import Data, inner, FunctionJob, Job
 import numpy as np
 
 
@@ -83,26 +83,69 @@ class SplitInversionCostFunction(MeteredCostFunction):
     # num args, who many of each type
     # splitw is the splitworld jobs are running on
     # worldsinit_fn is run on each world at startup
-    def __init__(self, numLevelSets, numModels, numMappings, sw, worldsinit_fn):
+    def __init__(self, numLevelSets=None, numModels=None, numMappings=None, splitworld=None, worldsinit_fn=None):
         """
         fill this in.
         """
+        import math
+        if numLevelSets==None or numModels==None or numMappings==None or splitworld==None or worldsinit_fn==None:
+            raise ValueError("Please supply all required parameters")
         super(SplitInversionCostFunction, self).__init__()
         if numModels<1 or numModels<1 or numMappings<1:
           raise ValueError("The inversion function requires at least one LevelSet, Mapping and Models.")
         self.numModels=numModels
         self.numMappings=numMappings
         self.numLevelSets=numLevelSets
+        self.splitworld=splitworld
+        
+        splitworld.addVariable("regularization", makeLocalOnly)
+        splitworld.addVariable("mappings", makeLocalOnly)
+        splitworld.addVariable("fwdmodels", makeLocalOnly)
+        splitworld.addVariable("initial_guess", makeLocalOnly)  # Used to load the initial guess
+        splitworld.addVariable("model_args", makeLocalOnly)     # arguments for models stored on that world
+        splitworld.addVariable("props", makeLocalOnly)          # Properties for the current guess
+        splitworld.addVariable("current_point", makeLocalOnly)  # Current approximate solution. Starts out as initial_guess 
+        splitworld.addVariable("mu_model", makeLocalOnly)
+
+        splitworld.addVariable("phi_a", makeScalarReducer, "SUM")
+        splitworld.addVariable("Jx_original", makeScalarReducer,"SET")
+        splitworld.addVariable("Jx", makeScalarReducer, "SUM")
+        splitworld.addVariable("Jx_old", makeScalarReducer,"SET")
+        splitworld.addVariable("g_Jx_0", makeDataReducer, "SUM")
+        splitworld.addVariable("g_Jx_1", makeLocalOnly)        # This component is not merged with values from other worlds
+
+        splitworld.addVariable("old_g_Jx_0", makeDataReducer, "SUM")
+        splitworld.addVariable("old_g_Jx_1", makeLocalOnly)        # This component is not merged with values from other worlds
+        
+        
+        splitworld.addVariable("search_direction", makeDataReducer, "SET")
+
+        splitworld.addVariable("s_and_y", makeLocalOnly)
+        splitworld.addVariable("gphi0", makeLocalOnly)
+        splitworld.addVariable("old_phi_a", makeLocalOnly)
+        splitworld.addVariable("phi0", makeLocalOnly)
+        splitworld.addVariable("base_point", makeLocalOnly)
+        
+        splitworld.addVariable("conv_flag", makeLocalOnly)
+        splitworld.addVariable("dp_result", makeLocalOnly)
+        splitworld.addVariable("break_down", makeLocalOnly)
+        
+        howmany=splitworld.getSubWorldCount()
+        rlen=int(math.ceil(numModels/howmany))
+        rstart=rlen*splitworld.getSubWorldID()
+        extraparams={'rangelen':rlen, 'rangestart':rstart, 'numLevelSets':numLevelSets}        
         # sanity check
-        addJobPerWorld(sw, FunctionJob, worldsinit_fn)
-        sw.runJobs()
-        reqd=["models", "regularization", "mu_model","mappings"]
-        knownvars=sw.getVarList()
-        print(knownvars)
+        splitworld.addJobPerWorld(FunctionJob, worldsinit_fn, **extraparams)
+        splitworld.runJobs()
+        #reqd=["fwdmodels", "regularization", "mappings","mu_model"]
+        reqd=["fwdmodels", "regularization", "mappings", "initial_guess"]     #For our script, mu_model appears not to be used
+        knownvars=splitworld.getVarList()
         for n in reqd:
           if [n,True] not in knownvars:
             raise RuntimeError("Required variable "+n+" was not created by the world init function")
-        self.configured=False
+        if ['mu_model',True] not in knownvars:
+            self.setTradeOffFactorsModels()
+        self.configured=True
 
     # Function to put the (possible list of) forward model(s) into the form expected by the rest of the system
     @staticmethod
@@ -111,7 +154,6 @@ class SplitInversionCostFunction(MeteredCostFunction):
             forward_models = [ forward_models ]
         result=[]
         for i in range(len(forward_models)):
-            print("Doing iteration "+str(i))
             f=forward_models[i]
             if isinstance(f, ForwardModel):
                 idx=[0]
@@ -130,6 +172,46 @@ class SplitInversionCostFunction(MeteredCostFunction):
                 fm=f[0]
             result.append((fm,idx))
         return result      
+
+    # Function to put the (possible list of) forward model(s) into a form expected by the rest of the system
+    @staticmethod
+    def formatMappings(mappings, numLevelSets):
+        if isinstance(mappings, Mapping):
+            mappings = [ mappings ]
+        newmappings = []
+        for i in range(len(mappings)):
+            mm=mappings[i]
+            if isinstance(mm, Mapping):
+                m=mm
+                if numLevelSets>1:
+                    idx=[ p for p in range(numLevelSets)]
+                else:
+                    idx=None
+            elif len(mm) == 1:
+                m=mm[0]
+                if numLevelSets>1:
+                    idx=[ p for p in range(numLevelSets)]
+                else:
+                    idx=None
+            else:
+                m=mm[0]
+                if isinstance(mm[1], int):
+                    idx=[mm[1]]
+                else:
+                    idx=list(mm[1])
+                if numLevelSets>1:
+                    for k in idx:
+                        if  k < 0  or k > numLevelSets-1:
+                            raise ValueError("level set index %s is out of range."%(k,))
+
+                else:
+                    if idx[0] != 0:
+                        raise ValueError("Level set index %s is out of range."%(idx[0],))
+                    else:
+                        idx=None
+            newmappings.append((m,idx))
+        return newmappings
+    
       
     def getDomain(self):
         """
@@ -172,6 +254,18 @@ class SplitInversionCostFunction(MeteredCostFunction):
         else:
           raise RuntimeError("This inversion function has not been configured yet")
 
+    #Written to be executed inside a FunctionJob
+    @staticmethod    
+    def subworld_setMu_model(self, **args):
+          if not isinstance(self, Job):
+             raise RuntimeError("This command should be run inside a Job")
+          extmu=args['mu']
+          chunksize=max(len(extmu)//self.swcount,1)         #In case we have more worlds than models
+          minindex=self.swid*chunksize
+          maxindex=(self.swid+1)*chunksize              # yes this could go off the end but I will slice
+          mymu=extmu[minindex:maxindex]
+          self.exportValue("mu_model", mymu)
+          
     def setTradeOffFactorsModels(self, mu=None):
         """
         sets the trade-off factors for the forward model components.
@@ -195,17 +289,10 @@ class SplitInversionCostFunction(MeteredCostFunction):
                     self.mu_model= [mu, ]
                 else:
                     raise ValueError("Trade-off factor must be positive.")
-        #Now we need to get these values into the subworlds
-        #Getting the mu value in via a closure is safe because it WILL NOT CONTAIN COMPLEX OBJECTS
-        extmu=self.mu_model
-        def setMu(self, **args):
-          chunksize=max(self.worldsize()//len(extmu),1)         #In case we have more worlds than models
-          minindex=self.subworldid()*chunksize
-          maxindex=(self.subworldid()+1)*chunksize              # yes this could go off the end but I will slice
-          mymu=extmu[minindex:maxindex]
-          self.exportValue("mu_model", mymu)
-        addJobPerWorld(sw, setMu)
-        sw.runJobs()
+        self.splitworld.addJobPerWorld( FunctionJob, self.subworld_setMu_model, mu=self.mu_model)
+        self.splitworld.runJobs()
+        
+
         
     def getTradeOffFactorsModels(self):
         """
@@ -239,6 +326,9 @@ class SplitInversionCostFunction(MeteredCostFunction):
         :param mu: list of trade-off factors.
         :type mu: ``list`` of ``float``
         """
+        if not self.configured:
+          raise ValueError("This inversion function has not been configured yet")
+        raise ValueError("setTradeOffFactors not supported yet.")
         if mu is None:
             mu=np.ones((self.__num_tradeoff_factors,))
         self.setTradeOffFactorsModels(mu[:self.numModels])
@@ -256,6 +346,60 @@ class SplitInversionCostFunction(MeteredCostFunction):
         mu2=self.regularization.getTradeOffFactors()
         return [ m for m in mu1] + [ m for m in mu2]
 
+    @staticmethod  
+    def createLevelSetFunctionHelper(self, regularization, mappings, *props):
+        """
+        Returns an object (init-ed) with 0s.
+        Components can be overwritten by physical
+        properties `props`. If present entries must correspond to the
+        `mappings` arguments in the constructor. Use ``None`` for properties
+        for which no value is given.
+        """
+        if not isinstance(self, Job):
+            raise RuntimeError("This function is designed to be run inside a Job.")
+        m=regularization.getPDE().createSolution()
+        if len(props) > 0:
+            numMappings=len(mappings)
+            for i in range(numMappings):
+                if props[i]:
+                    mp, idx=self.mappings[i]
+                    m2=mp.getInverse(props[i])
+                    if idx:
+                        if len(idx) == 1:
+                            m[idx[0]]=m2
+                        else:
+                            for k in range(idx): m[idx[k]]=m2[k]
+                    else:
+                        m=m2
+        return m    
+
+    @staticmethod  
+    def calculatePropertiesHelper(self, m, mappings):
+        """
+        returns a list of the physical properties from a given level set
+        function *m* using the mappings of the cost function.
+
+        :param m: level set function
+        :type m: `Data`
+        :rtype: ``list`` of `Data`        
+        """
+        if not isinstance(self, Job):
+            raise RuntimeError("This function is designed to be run inside a Job.")
+        props=[]
+        for i in range(len(mappings)):
+            mp, idx=mappings[i]
+            if idx:
+                if len(idx)==1:
+                    p=mp.getValue(m[idx[0]])
+                else:
+                    m2=Data(0.,(len(idx),),m.getFunctionSpace())
+                    for k in range(len(idx)): m2[k]=m[idx[k]]
+                    p=mp.getValue(m2)
+            else:
+                p=mp.getValue(m)
+            props.append(p)            
+        return props  
+        
     def createLevelSetFunction(self, *props):
         """
         returns an instance of an object used to represent a level set function
@@ -334,31 +478,65 @@ class SplitInversionCostFunction(MeteredCostFunction):
         raise RuntimeError("Still need to work this one out")        
         return self.regularization.getDualProduct(x, r)
 
-    
+   
+    @staticmethod
+    def update_point_helper(self, newpoint):
+        """
+        Call within a subworld to set 'current_point' to newpoint
+        and update all the cached args info
+        """
+        if not isinstance(self, Job):
+          raise RuntimeError("This function should only be called from within a Job")
+        mods=self.importValue("fwdmodels")
+        reg=self.importValue("regularization")
+        mappings=self.importValue("mappings")
+        props=[]
+        props=SplitInversionCostFunction.calculatePropertiesHelper(self, newpoint, mappings)
+        self.exportValue("props", props)              
+        reg.setPoint(newpoint)
+              #Going to try this - each world stores the args for its
+              #models rather than going the setPoint route.
+        local_args=[]
+        for m,idx in mods:
+            pp=tuple( [props[k] for k in idx] ) # build up collection of properties used by this model
+            local_args.append(m.getArguments(*pp))
+        self.exportValue("current_point", newpoint)
+        self.exportValue("model_args", local_args)
+
     def setPoint(self):
-      self.setPoint()
+      self._setPoint()
     
     def _setPoint(self):
       """
       This should take in a value to set the point to, but that can wait
+      ... It probably shouldn't, actually.   We want all values to 
+      be constructed inside the subworlds, so being able to (easily - we 
+      can't stop closures) pass them 
+      in from outside would defeat the purpose.
+      
+      To modify the point, we probably want a separate move_point()
+      function.
+      
+      There is also the question of how this is expected to get its info.
+      Should it be passed in as a parameter or should it be read from
+      the environment?
+      We can expect the actual initial guess to come from the world init
+      function, but what about later calls?  (or are we hoping they won't
+      actually happen that often and that relative changes will be done instead?)
+      
       """
       if not self.configured:
         raise ValueError("This inversion function has not been configured yet")
 
-      def load_initial_guess(self, **args):
-          initguess=0
-          mods=self.importValue("models")
+      def load_guess_to_subworlds(self, **args):
           reg=self.importValue("regularization")
-          reg.setPoint(initguess)
-          for m,idx in mods:
-            pp=tuple( [props[k] for k in idx] ) # build up collection of properties used by this model
-            m.setPoint(*pp)
-          # We still need to deal with the props feild and where to get it from
-          self.exportValue("props", props)
+          mappings=self.importValue("mappings")
+          # we are not passing in property values here because we don't have any yet
+          initguess=SplitInversionCostFunction.createLevelSetFunctionHelper(self, reg, mappings)
+          SplitInversionCostFunction.update_point_helper(self, initguess)
             
-      for i in range(0, self.sw.swcount):      
-          self.sw.addJob(FunctionJob, load_initial_guess, imports=["models", "regularization"])
-      self.sw.runJobs()
+      self.splitworld.addJobPerWorld( FunctionJob, load_guess_to_subworlds, imports=["fwdmodels", "regularization", "mappings"])
+      self.splitworld.runJobs()
       
     def _getArguments(self, m):
         """
@@ -389,44 +567,57 @@ class SplitInversionCostFunction(MeteredCostFunction):
 
         return props, args_f, args_reg
 
-    def calculateValue(self, vnames):
-        self.calculate(vnames)
+    def calculateValue(self, vname):
+        self._calculateValue(vname)
         
-    def _calculateValue(self, vnames):
+    def _calculateValue(self, vname):
         
        if not self.configured:
           raise ValueError("This inversion function has not been configured yet")
        #The props is already in each world as a variable
-       #Each model already has its point set
-       #regularization already has its point set
+       #Each world has the arguments for the point for all of its models
+       # as a variable.
+       #Regularization already has its point set
         
-       def calculateValueWorker(self, vnames, **args):
+       def calculateValueWorker(self, **args):
           props=self.importValue("props")
-          mods=self.importValue("models")
+          mods=self.importValue("fwdmodels")
           reg=self.importValue("regularization")
           mu_model=self.importValue("mu_model")
-          
+          local_args=self.importValue("model_args")
+          current_point=self.importValue("current_point")
+          try:
+             vnames=args['vname']
+          except KeyError as e:
+             raise RuntimeError("Function requires vname as kwarg")
           J=None
+          if self.swid==0:    # we only want to add the regularization term once
+            J=reg.getValueAtPoint()    # We actually want to get a value here but
+                                        # I want to distinguish it from the other getValue call          
+                                               
           for i in range(len(mods)):    # note: iterating over local models not ones on other worlds
             m,idx=mods[i]
-            z=m.getDefectAtPoint()
-            z*=self.mu_model[i];   
+            args=local_args[i]
+            z=m.getDefect(current_point, *args)
+            z*=mu_model[i];   
             if J is None:          
               J=z
             else:
-              J+=z
-            
-          if self.worldid==0:    # we only want to add the regularization term once
-            J+=reg.getValueAtPoint()    # We actually want to get a value here but
-                                        # I want to distiguish it from the other getValue call
-          if isinstance(vnames, str):
-            self.exportValue(J, vnames)
+              J+=z  
+          print("Final J =", str(J))
+
+          if isinstance(vname, str):
+            self.exportValue(vname, J)
           else:
-            for n in vnames:
-              self.exportValue(J, n)
-       for i in range(0, self.sw.swcount):      
-          self.sw.addJob(FunctionJob, calculateValueWorker, imports=["models", "regularization", "props"])
-       self.sw.runJobs()              
+            raise ValueError("vname must be a string")
+       # End calculateValueWorker
+
+       self.splitworld.addJobPerWorld(FunctionJob, calculateValueWorker, imports=["fwdmodels", "regularization", "props", 
+            "model_args", "mu_model"], vname=vname)
+       self.splitworld.runJobs()   
+       # The result will now be stored in the named variables
+       # The caller will need to execute splitworld.getDoubleVariable to extract them
+       
 
     def _getValue(self, m, *args):
         """
@@ -500,28 +691,59 @@ class SplitInversionCostFunction(MeteredCostFunction):
 
         return result
 
-    def _calculateGradient(self):
+    @staticmethod
+    def getModelArgs(self, fwdmodels):
+        """
+        Attempts to import the arguments for forward models; if they are not available, 
+        computes and exports them.
+        """
+        if not isinstance(self, Job):
+            raise RuntimeError("This function should only be called inside a Job")
+        args=self.importValue("model_args")
+        p=self.importValue("current_point")
+        if args is not None:
+          return args
+        args=[]
+        for mod in fwdmodels:
+            args.append(mod.getArguments(p))
+        self.exportValue("model_arguments",args)
+        return args
+        
+    def calculateGradient(self, vnames1, vnames2):
+        """
+        The gradient operation produces two components (designated (Y^,X) in the non-split version).
+        vnames1 gives the variable name(s) where the first component should be stored.
+        vnames2 gives the variable name(s) where the second component should be stored.
+        """
+        return self._calculateGradient(vnames1, vnames2)
+        
+    def _calculateGradient(self, vnames1, vnames2):
        if not self.configured:
           raise ValueError("This inversion function has not been configured yet")
 
        numLevelSets=self.numLevelSets   # pass in via closure
-       def calculateGradientWorker(self, vnames1, vnames2, **args):
+       def calculateGradientWorker(self, **args):
           """
           vnames1 gives the names to store the first component of the gradient in
           vnames2 gives the names to store the second component of the gradient in
           """
+          vnames1=args['vnames1']
+          vnames2=args['vnames2']
           props=self.importValue("props")
-          mods=self.importValue("models")
+          mods=self.importValue("fwdmodels")
           reg=self.importValue("regularization")
           mu_model=self.importValue("mu_model")
           mappings=self.importValue("mappings")
+          m=self.importValue("current_point")
+          
+          model_args=SplitInversionCostFunction.getModelArgs(self, mods)
           
           g_J = reg.getGradientAtPoint()
           p_diffs=[]
           # Find the derivative for each mapping
           # If a mapping has a list of components (idx), then make a new Data object with only those
           # components, pass it to the mapping and get the derivative.
-          for i in range(len(numMappings)):
+          for i in range(len(mappings)):
               mm, idx=mappings[i]
               if idx and numLevelSets > 1:
                   if len(idx)>1:
@@ -536,20 +758,20 @@ class SplitInversionCostFunction(MeteredCostFunction):
           #Since we are going to be merging Y with other worlds, we need to make sure the the regularization
           #component is only added once.  However most of the ops below are in terms of += so we need to
           #create a zero object to use as a starting point
-          if self.subworldid==0:
+          if self.swid==0:
              Y=g_J[0]    # Because g_J==(Y,X)  Y_k=dKer/dm_k
           else:
              Y=Data(0, g_J[0].getShape(), g_J[0].getForwardModel())
-          for i in range(self.numModels):
-              mu=self.mu_model[i]
+          for i in range(len(mods)):
+              mu=mu_model[i]
               f, idx_f=mods[i]
-              args=tuple( [ props[k] for k in idx_f]  + list( args_f[i] ) )
-              Ys = f.getGradientAtPoint() # this d Jf/d props
+              args=tuple( [ props[k] for k in idx_f]  + list( model_args[i] ) )
+              Ys = f.getGradient(*args) # this d Jf/d props
               # in this case f depends on one parameter props only but this can
               # still depend on several level set components
               if Ys.getRank() == 0:
                   # run through all level sets k prop j is depending on:
-                  idx_m=self.mappings[idx_f[0]][1]
+                  idx_m=mappings[idx_f[0]][1]
                   # tmp[k] = dJ_f/d_prop * d prop/d m[idx_m[k]]
                   tmp=Ys * p_diffs[idx_f[0]] * mu
                   if idx_m:
@@ -566,7 +788,7 @@ class SplitInversionCostFunction(MeteredCostFunction):
                   # run through all props j forward model f is depending on:
                   for j in range(len(idx_f)):
                       # run through all level sets k prop j is depending on:
-                      idx_m=self.mappings[j][1]
+                      idx_m=mappings[j][1]
                       if p_diffs[idx_f[j]].getRank() == 0 :
                           if idx_m: # this case is not needed (really?)
                               raise RuntimeError("something wrong A")
@@ -598,18 +820,19 @@ class SplitInversionCostFunction(MeteredCostFunction):
                               Y+=inner(Yss, p_diffs[idx_f[j]]) * mu
                           s+=l    
           if isinstance(vnames1, str):
-            self.exportValue(Y, vnames1)
+            self.exportValue(vnames1, Y)
           else:
             for n in vnames1:
-              self.exportValue(Y, n)
+              self.exportValue(n, Y)
           if isinstance(vnames2, str):          #The second component should be strictly local 
-            self.exportValue(g_J[1], vnames2)
+            self.exportValue(vnames2, g_J[1])
           else:
             for n in vnames2:
-              self.exportValue(g_J[1], n)
-              
-       addJobPerWorld(sw, FunctionJob, calculateGradientWorker, vnames, imports=["models", "regularization", "props", "mu_models"])
-       self.sw.runJobs()                 
+              self.exportValue(n, g_J[1])              
+       # End CalculateGradientWorker
+
+       self.splitworld.addJobPerWorld( FunctionJob, calculateGradientWorker, vnames1=vnames1, vnames2=vnames2, imports=["models", "regularization", "props", "mu_models", "current_point"])
+       self.splitworld.runJobs()                 
         
     def _getGradient(self, m, *args):
         """
@@ -745,8 +968,9 @@ class SplitInversionCostFunction(MeteredCostFunction):
         notifies the class that the Hessian operator needs to be updated.
         """
         if not self.configured:
-          raise ValueError("This inversion function has not been configured yet")         
-        self.regularization.updateHessian()
+          raise ValueError("This inversion function has not been configured yet")
+        self.splitworld.addJobPerWorld( FunctionJob, updateHessianWorker, imports=["regularization"]) 
+        self.splitworld.runJobs()
 
     def _getNorm(self, m):
         """
@@ -761,3 +985,7 @@ class SplitInversionCostFunction(MeteredCostFunction):
         raise RuntimeError("Need to have this in a subworld --- one or all?")
         return self.regularization.getNorm(m)
 
+def updateHessianWorker(self, **kwargs):
+    reg=self.importValue("regularization")
+    reg.updateHessian()
+    #self.exportValue(reg, "regularization")
diff --git a/downunder/py_src/splitminimizers.py b/downunder/py_src/splitminimizers.py
index 9757422..c651f3e 100644
--- a/downunder/py_src/splitminimizers.py
+++ b/downunder/py_src/splitminimizers.py
@@ -13,9 +13,13 @@
 #
 ##############################################################################
 
-from .minimizers import AbstractMinimizer
-from esys.escriptcore.splitworld import Job
+from __future__ import print_function, division
 
+from .minimizers import AbstractMinimizer
+from esys.escriptcore.splitworld import Job, FunctionJob
+from .splitinversioncostfunctions import SplitInversionCostFunction
+from esys.escript.pdetools import ArithmeticTuple
+import numpy as np
 
 class SplitMinimizerLBFGS(AbstractMinimizer):
     """
@@ -49,12 +53,202 @@ class SplitMinimizerLBFGS(AbstractMinimizer):
             else:
                 raise KeyError("Invalid option '%s'"%o)
 
+        # This function sets current_point=base_point+alpha*search_direction [m=m+p*a]
+    @staticmethod
+    def move_point_from_base(self, **kwargs):
+            m=self.importValue('base_point')
+            p=self.importValue('search_direction')
+            a=kwargs['alpha']
+            newpoint=m+p*a
+            SplitInversionCostFunction.update_point_helper(self, newpoint)
+
     def run(self):
         """
         This version relies on the costfunction already having an initial guess loaded.
         It also does not return the result, meaning a job needs to be submitted to
         get the result out.
         """
+        
+        # First we'll define our own versions of the helper functions
+
+        # Updates "g_Jx_new_0" and "g_Jx_new_1" to the gradient at the current point
+        # then returns f.dualProduct of search_direction and g_Jx_new
+        # Not a splitworld function
+        def grad_phi(f, **kwargs):
+                f.calculateGradient('g_Jx_0', 'g_Jx_1')
+                # need to call dualProduct here
+                def dual_p_g_Jx_new(self, **kwargs):
+                    p=self.importValue("search_direction")
+                    g_Jx_0=self.importValue("g_Jx_0")
+                    g_Jx_1=self.importValue("g_Jx_1")
+                    reg=self.importValue("regularization")
+                        #again, this assumes that only the regularization term is relevant
+                    res=reg.getDualProduct(p, (g_Jx_0, g_Jx_1))
+                    self.exportValue("dp_result",res)
+                # Now we will only run this on one world and rely on getDouble to ship it
+                f.splitworld.addJob( FunctionJob, dual_p_g_Jx_new)
+                f.splitworld.runJobs()
+                res=f.splitworld.getDoubleVariable("dp_result")
+                return res
+        #End of grad_phi
+
+        def _zoom(f, alpha_lo, alpha_hi, phi_lo, phi_hi, c1, c2,
+                phi0, gphi0, IMAX=25):
+            """
+            Helper function for `line_search` below which tries to tighten the range
+            alpha_lo...alpha_hi. See Chapter 3 of 'Numerical Optimization' by
+            J. Nocedal for an explanation.
+            """
+            i=0
+            while True:
+                alpha=alpha_lo+.5*(alpha_hi-alpha_lo) # should use interpolation...
+                f.splitworld.addJobPerWorld( FunctionJob, SplitMinimizerLBFGS.move_point_from_base, alpha=alpha, imports=['base_point', 'search_direction'])
+                f.splitworld.runJobs()
+                f.calculateValue('phi_a')
+                phi_a=f.splitworld.getDoubleVariable('phi_a')
+                #zoomlogger.debug("iteration %d, alpha=%e, phi(alpha)=%e"%(i,alpha,phi_a))
+                if phi_a > phi0+c1*alpha*gphi0 or phi_a >= phi_lo:
+                    alpha_hi=alpha
+                else:
+                    gphi_a=grad_phi(f)
+                    #zoomlogger.debug("\tgrad(phi(alpha))=%e"%(gphi_a))
+                    if np.abs(gphi_a) <= -c2*gphi0:
+                        break
+                    if gphi_a*(alpha_hi-alpha_lo) >= 0:
+                        alpha_hi = alpha_lo
+                    alpha_lo=alpha
+                    phi_lo=phi_a
+                i+=1
+                if i>IMAX:
+                    gphi_a=None
+                    break
+            return alpha, phi_a, gphi_a
+
+        def line_search(f, alpha=1.0, alpha_truncationax=50.0,
+                        c1=1e-4, c2=0.9, IMAX=15):
+            """
+            Line search method that satisfies the strong Wolfe conditions.
+            See Chapter 3 of 'Numerical Optimization' by J. Nocedal for an explanation.
+
+            This version is converted from the line_search from minimizers.py
+            however, it takes fewer parameters because some of the values needed
+            by the original version will be available as subworld variables rather
+            than as parameters.
+            
+            
+            :param f: callable objective function f(x)
+            :param p: search direction
+            :param alpha: initial step length. If g_Jx is properly scaled alpha=1 is a
+                        reasonable starting value.
+            :param alpha_truncationax: algorithm terminates if alpha reaches this value
+            :param c1: value for Armijo condition (see reference)
+            :param c2: value for curvature condition (see reference)
+            :param IMAX: maximum number of iterations to perform
+                        
+            Removed parameters (now in subworld variables instead):
+            x    - The start value for line search: in the variable "current_point".
+            p    - search direction: in the variable "search_direction"
+            g_Jx - value for the gradient of f at x: in the variables "g_Jx_0" and "g_Jx_1"
+            Jx   - value of f(x): in the variable "Jx"
+            """
+            
+            # This will handle subworld side of work
+            def line_search_init_worker(self, **kwargs):
+                x=self.importValue("current_point")
+                p=self.importValue("search_direction")
+                g_Jx=ArithmeticTuple(self.importValue("g_Jx_0"), self.importValue("g_Jx_1"))
+                Jx=self.importValue("Jx")
+                regular=self.importValue("regularization")
+                phi0=Jx
+               
+                print("Type of phi0=", type(phi0),phi0) 
+                # In the original, this part calls getDualProduct on f
+                # However, since that only ends up referring to the 
+                # regularisation term, I've called that directly
+                # If your dual product operation requires access to
+                # the other models, then  this step needs
+                # a rethink since not all models are local
+                gphi0=regular.getDualProduct(p, g_Jx)
+            
+                #Still need to decide what this worker will do
+                old_phi_a=phi0
+                phi_a=phi0
+                self.exportValue("old_phi_a", old_phi_a)
+                self.exportValue("phi_a", phi_a)
+                self.exportValue("gphi0", gphi0)
+                self.exportValue("phi0", phi0)
+                self.exportValue("base_point", x)       # To ensure we can revert if needed
+            #End of line_search_init_worker
+            
+            old_alpha=0
+            i=1
+            f.splitworld.addJobPerWorld( FunctionJob, line_search_init_worker, imports=['search_direction', 'g_Jx_0', 'g_Jx_1', 'Jx', 'regularization'])       
+            f.splitworld.runJobs()
+
+        
+            # Updates "g_Jx_new_0" and "g_Jx_new_1" to the gradient at the current point
+            # then returns f.dualProduct of search_direction and g_Jx_new
+            # Not a splitworld function
+            def grad_phi(f, **kwargs):
+                f.calculateGradient('g_Jx_0', 'g_Jx_1')
+                # need to call dualProduct here
+                def dual_p_g_Jx_new(self, **kwargs):
+                    p=self.importValue("search_direction")
+                    g_Jx_0=self.importValue("g_Jx_0")
+                    g_Jx_1=self.importValue("g_Jx_1")
+                    reg=self.importValue("regularization")
+                        #again, this assumes that only the regularization term is relevant
+                    res=reg.getDualProduct(p, (g_Jx_0, g_Jx_1))
+                    self.exportValue("dp_result",res)
+                # Now we will only run this on one world and rely on getDouble to ship it
+                f.splitworld.addJob( FunctionJob, dual_p_g_Jx_new)
+                f.splitworld.runJobs()
+                res=f.splitworld.getDoubleValue("dp_result")
+                return res
+            #End of grad_phi
+
+            while i<IMAX and alpha>0. and alpha<alpha_truncationax:
+                alpha_at_loop_start=alpha
+                f.splitworld.addJobPerWorld( FunctionJob, SplitMinimizerLBFGS.move_point_from_base, alpha=alpha, imports=['current_point', 'search_direction'])
+                f.splitworld.runJobs()
+                f.calculateValue('phi_a')
+                #lslogger.debug("iteration %d, alpha=%e, phi(alpha)=%e"%(i,alpha,phi_a))
+                phi_a=f.splitworld.getDoubleVariable('phi_a')
+                phi0=f.splitworld.getDoubleVariable('phi0')
+                gphi0=f.splitworld.getDoubleVariable('gphi0')
+                old_phi_a=f.splitworld.getDoubleVariable('old_phi_a')
+                if (phi_a > phi0+c1*alpha*gphi0) or ((phi_a>=old_phi_a) and (i>1)):
+                    alpha, phi_a, gphi_a = _zoom(f, old_alpha, alpha, old_phi_a, phi_a, c1, c2, phi0, gphi0)
+                    break
+
+                   # Need to check if alpha has changed. If it has, we need to move the point again
+                if alpha_at_loop_start!=alpha:
+                   f.splitworld.addJobPerWorld( FunctionJob, SplitMinimizerLBFGS.move_point_from_base, alpha=alpha, imports=['current_point', 'search_direction'])
+                   f.splitworld.runJobs()
+
+                gphi_a=grad_phi(f)
+                if np.abs(gphi_a) <= -c2*gphi0:
+                    break
+                if gphi_a >= 0:
+                    alpha, phi_a, gphi_a = _zoom(phi, gradphi, phiargs, alpha, old_alpha, phi_a, old_phi_a, c1, c2, phi0, gphi0)
+                    break
+
+                old_alpha=alpha
+                # the factor is arbitrary as long as there is sufficient increase
+                alpha=2.*alpha
+                old_phi_a=phi_a
+                i+=1
+            #return alpha, phi_a, g_Jx_new[0]
+            return alpha, phi_a
+        #End of line_search
+        
+        
+        
+        
+        
+        
+        
+        splitworld=self.getCostFunction().splitworld 
         if self.getCostFunction().provides_inverse_Hessian_approximation:
             self.getCostFunction().updateHessian()
             invH_scale = None
@@ -68,39 +262,40 @@ class SplitMinimizerLBFGS(AbstractMinimizer):
         converged = False
         
 
-	    
-	  
-        self.getCostFunction().setPoint()	# Set point to initial guess value (takes the place of a getArgs call)
+            
+          
+        self.getCostFunction().setPoint()       # Set point to initial guess value (takes the place of a getArgs call)
         #args=self.getCostFunction().getArguments(x)
         
-        self.getCostFunction().calculateValue(["Jx","Jx_0"])	#evaluate the function and store the result in the named variables
-        self.getCostFunction().calculateGradient("g_Jx")        #compute the gradient and store the result
-        
-        #g_Jx=self.getCostFunction().getGradient(x, *args)
-        #Jx=self.getCostFunction()(x, *args) # equivalent to getValue() for Downunder CostFunctions
-        
-        
+        self.getCostFunction().calculateValue("Jx")    #evaluate the function and store the result in the named variable
+                      # note that call sets Jx=Jx_original
+        splitworld.copyVariable("Jx", "Jx_original")
+                      
+        self.getCostFunction().calculateGradient("g_Jx_0","g_Jx_1")        #compute the gradient and store the result
         
-        Jx_0=Jx
-
         while not converged and not non_curable_break_down and n_iter < self._imax:
           k=0
           break_down = False
-          s_and_y=[]
+          reset_s_and_y = True
           # initial step length for line search
           alpha=1.0
-          self._doCallback(n_iter, x, Jx, g_Jx)
+          #self._doCallback(n_iter, x, Jx, g_Jx)
 
           while not converged and not break_down and k < self._restart and n_iter < self._imax:
                 #self.logger.info("\033[1;31miteration %d\033[1;30m"%n_iter)
                 self.logger.info("********** iteration %3d **********"%n_iter)
-                self.logger.info("\tJ(x) = %s"%Jx)
+                #self.logger.info("\tJ(x) = %s"%Jx)
                 #self.logger.debug("\tgrad f(x) = %s"%g_Jx)
                 if invH_scale:
                     self.logger.debug("\tH = %s"%invH_scale)
 
+                splitworld.copyVariable("g_Jx_0", "old_g_Jx_0")
+                splitworld.copyVariable("g_Jx_1", "old_g_Jx_1")
+                splitworld.copyVariable("Jx", "Jx_old")
+
                 # determine search direction
-                p = -self._twoLoop(invH_scale, g_Jx, s_and_y, x, *args)
+                self._twoLoop(self.getCostFunction().splitworld, reset_s_and_y)
+                reset_s_and_y = False
 
                 # determine new step length using the last one as initial value
                 # however, avoid using too small steps for too long.
@@ -114,64 +309,107 @@ class SplitMinimizerLBFGS(AbstractMinimizer):
                     # reset alpha for the case that the cost function does not
                     # provide an approximation of inverse H
                     alpha=1.0
-                alpha, Jx_new, g_Jx_new = line_search(self.getCostFunction(), x, p, g_Jx, Jx, alpha)
+                alpha, phi_a = line_search(self.getCostFunction(), alpha)
                 # this function returns a scaling alpha for the search
                 # direction as well as the cost function evaluation and
                 # gradient for the new solution approximation x_new=x+alpha*p
+                print("alpha=",alpha)
                 self.logger.debug("\tSearch direction scaling alpha=%e"%alpha)
 
                 # execute the step
-                delta_x = alpha*p
-                x_new = x + delta_x
+                # This update operation has already been done in the line_search
+                #delta_x = alpha*p
+                #x_new = x + delta_x
 
+                Jx=splitworld.getDoubleVariable("Jx")
                 converged = True
                 if self._J_tol:
-                    flag=abs(Jx_new-Jx) <= self._J_tol * abs(Jx_new-Jx_0)
+                    Jx_old=splitworld.getDoubleVariable("Jx_old")
+                    Jx_original=splitworld.getDoubleVariable("Jx_original")
+                    flag=abs(Jx-Jx_old) <= self._J_tol * abs(Jx-Jx_original)
+                    #flag=abs(Jx_new-Jx) <= self._J_tol * abs(Jx_new-Jx_0)
                     if self.logger.isEnabledFor(logging.DEBUG):
                         if flag:
-                            self.logger.debug("Cost function has converged: dJ, J*J_tol = %e, %e"%(Jx-Jx_new,abs(Jx_new-Jx_0)*self._J_tol))
+                            self.logger.debug("Cost function has converged: dJ, J*J_tol = %e, %e"%(Jx_old-Jx,abs(Jx-Jx_original)*self._J_tol))
                         else:
-                            self.logger.debug("Cost function checked: dJ, J*J_tol = %e, %e"%(Jx-Jx_new,abs(Jx_new)*self._J_tol))
+                            self.logger.debug("Cost function checked: dJ, J*J_tol = %e, %e"%(Jx_old-Jx,abs(Jx)*self._J_tol))
 
                     converged = converged and flag
                 if self._m_tol:
-                    norm_x = self.getCostFunction().getNorm(x_new)
-                    norm_dx = self.getCostFunction().getNorm(delta_x)
-                    flag = norm_dx <= self._m_tol * norm_x
-                    if self.logger.isEnabledFor(logging.DEBUG):
-                        if flag:
-                            self.logger.debug("Solution has converged: dx, x*m_tol = %e, %e"%(norm_dx,norm_x*self._m_tol))
-                        else:
-                            self.logger.debug("Solution checked: dx, x*m_tol = %e, %e"%(norm_dx,norm_x*self._m_tol))
+                    def converged_check(self, **kwargs):
+                        alpha=kwargs["alpha"]
+                        m_tol=kwargs["m_tol"]
+                        reg=self.importValue("regularization")
+                        p=self.importValue("search_direction")
+                        delta_x=alpha*p
+                        x=self.importValue("current_point")
+                        norm_x = reg.getNorm(x)
+                        norm_dx = reg.getNorm(delta_x)
+                        flag = norm_dx <= m_tol * norm_x
+                        #if self.logger.isEnabledFor(logging.DEBUG):
+                            #if flag:
+                                #self.logger.debug("Solution has converged: dx, x*m_tol = %e, %e"%(norm_dx,norm_x*self._m_tol))
+                            #else:
+                                #self.logger.debug("Solution checked: dx, x*m_tol = %e, %e"%(norm_dx,norm_x*self._m_tol))
+                        self.exportValue('conv_flag', flag)
+                    # End of converged_check 
+                    addJobPerWorld(self.getCostFunction().splitworld, FunctionJob, converged_check, alpha=alpha, m_tol=self._m_tol, imports=["regularization", "search_direction", "current_point"])
+                    self.getCostFunction().splitworld.runJobs()
+                    flag=self.getCostFunction().splitworld.getDoubleVariable("conv_flag")>0.001
                     converged = converged and flag
 
-                x=x_new
+                #Already done in the line_search call
+                #x=x_new
                 if converged:
-                    self.logger.info("\tJ(x) = %s"%Jx_new)
+                    self.logger.info("\tJ(x) = %s"%Jx)
                     break
 
                 # unfortunately there is more work to do!
-                if g_Jx_new is None:
-                    args=self.getCostFunction().getArguments(x_new)
-                    g_Jx_new=self.getCostFunction().getGradient(x_new, args)
-                delta_g=g_Jx_new-g_Jx
-
-                rho=self.getCostFunction().getDualProduct(delta_x, delta_g)
-                if abs(rho)>0:
-                    s_and_y.append((delta_x,delta_g, rho ))
-                else:
-                    break_down=True
+                def run_worker(self, **kwargs):
+                    break_down=False
+                    need_trunc=kwargs["need_trunc"]
+                    # Need to do some imports here
+                    alpha=kwargs["alpha"]
+                    g_Jx=ArithmeticTuple(self.importValue("g_Jx_0"), self.importValue("g_Jx_1"))
+                    old_g_Jx=ArithmeticTuple(self.importValue("old_g_Jx_0"), self.importValue("old_g_Jx_1"))
+                    p=self.importValue("search_direction")
+                    reg=self.importValue("regularization")
+                    s_and_y=self.importValue("s_and_y")
 
+                    ##The original code had this check
+                    #if g_Jx_new is None:
+                        #args=self.getCostFunction().getArguments(x_new)
+                        #g_Jx_new=self.getCostFunction().getGradient(x_new, args)
+                    #delta_g=g_Jx_new-g_Jx
+                    delta_g=g_Jx-old_g_Jx
+                    delta_x=alpha*p;
+                    rho=reg.getDualProduct(delta_x, delta_g)
+                    if abs(rho)>0:
+                        s_and_y.append((delta_x,delta_g, rho ))
+                    else:
+                        break_down=True
+                    if need_trunc:
+                        s_and_y.pop(0)
+                    self.exportValue("break_down", break_down)
+                    self.exportValue("s_and_y",s_and_y)
+                # End run_worker
+                # Only one world has s_and_y - so its important that only single jobs be run that manipulate
+                # s_and_y
+                addJob(self.getCostFunction().splitworld, FunctionJob, run_worker, alpha=alpha, need_trunc=(k>=self._truncation))
+                self.getCostFunction().splitworld.runJobs()
+
+                break_down=(splitworld.getDoubleVariable("break_down")>0.001)
                 self.getCostFunction().updateHessian()
-                g_Jx=g_Jx_new
-                Jx=Jx_new
+                #g_Jx=g_Jx_new
+                #Jx=Jx_new
 
                 k+=1
                 n_iter+=1
-                self._doCallback(n_iter, x, Jx, g_Jx)
+                #self._doCallback(n_iter, x, Jx, g_Jx)
 
                 # delete oldest vector pair
-                if k>self._truncation: s_and_y.pop(0)
+                # Already done in the run_worker
+                #if k>self._truncation: s_and_y.pop(0)
 
                 if not self.getCostFunction().provides_inverse_Hessian_approximation and not break_down:
                     # set the new scaling factor (approximation of inverse Hessian)
@@ -193,7 +431,8 @@ class SplitMinimizerLBFGS(AbstractMinimizer):
               self.logger.debug("Iteration is restarted after %d steps."%n_iter)
 
         # case handling for inner iteration:
-        self._result=x
+        #self._result=x
+        self._result=None
         if n_iter >= self._imax:
             self.logger.warn(">>>>>>>>>> Maximum number of iterations reached! <<<<<<<<<<")
             raise MinimizerMaxIterReached("Gave up after %d steps."%n_iter)
@@ -205,27 +444,60 @@ class SplitMinimizerLBFGS(AbstractMinimizer):
         #This version does nor return the result
         #You need to set up a job to extract the result
 
-    def _twoLoop(self, invH_scale, g_Jx, s_and_y, x, *args):
+    def _twoLoop(self, splitworld, reset):
         """
         Helper for the L-BFGS method.
         See 'Numerical Optimization' by J. Nocedal for an explanation.
+
+        This has been converted to use splitworld.
+          As such it doesn't return a result, instead it stores
+          The "p" result in the "search_direction" variable
+
+        Depends on the following splitworld variables:
+          g_Jx_0, g_Jx_1
+          current_point
+          s_and_y 
+
+        Other conversion notes:
+          The current cost function's inverseHessianApproximation and
+          dualProduct only
+          depend on the regularization term so this function can be 
+          done entirely by one world.   If the Hessian/dp ever needed
+          other worlds, this will need to be reworked.
+          Implicit in the above is that the overall cost function
+          has cf.provides_inverse_Hessian_approximation==True
         """
-        q=g_Jx
-        alpha=[]
-        for s,y, rho in reversed(s_and_y):
-            a=self.getCostFunction().getDualProduct(s, q)/rho
-            alpha.append(a)
-            q=q-a*y
+        # Make a fn to push the work into subworld
+        def two_loop_worker(self, **kwargs):
+            reset=kwargs['reset']
+            x=self.importValue("current_point")
+            reg=self.importValue("regularization")
+            g_Jx=ArithmeticTuple(self.importValue("g_Jx_0"), self.importValue("g_Jx_1"))
+            if reset:
+                s_and_y = []
+                self.exportValue('s_and_y', list())
+            else:
+                s_and_y=self.importValue("s_and_y")
+            q=g_Jx
+            alpha=[]
+            for s,y, rho in reversed(s_and_y):
+                a=reg.getDualProduct(s, q)/rho
+                alpha.append(a)
+                q=q-a*y
+
+            r=reg.getInverseHessianApproximationAtPoint(q)
+
+            for s,y,rho in s_and_y:
+                beta = reg.getDualProduct(r, y)/rho
+                a = alpha.pop()
+                r = r + s * (a-beta)
+            # In the original version, the caller negated the result
+            self.exportValue("search_direction", -r)
+        print ("prior to twoloop call ",splitworld.getVarList())
+        # Note: this is only running on world world (of possibly many)
+        # Any jobs which also need the variables exported here
+        # need to be shared or the jobs need to run on the same world
+        addJob(splitworld, FunctionJob, two_loop_worker, reset=reset, imports=["current_point", "regularization", "g_Jx_0", "g_Jx_1"])
+        splitworld.runJobs() 
 
-        if self.getCostFunction().provides_inverse_Hessian_approximation:
-             r = self.getCostFunction().getInverseHessianApproximation(x, q, *args)
-        else:
-             r = invH_scale * q
 
-        for s,y,rho in s_and_y:
-            beta = self.getCostFunction().getDualProduct(r, y)/rho
-            a = alpha.pop()
-            r = r + s * (a-beta)
-        return r
-        
-    
\ No newline at end of file
diff --git a/downunder/py_src/splitregularizations.py b/downunder/py_src/splitregularizations.py
index 01e38bb..c6830e5 100644
--- a/downunder/py_src/splitregularizations.py
+++ b/downunder/py_src/splitregularizations.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -403,18 +405,66 @@ class SplitRegularization(CostFunction):
     def getArguments(self, m):
         """
         """
+        raise RuntimeError("Please use the setPoint interface")
         self.__pre_args = grad(m)
         self.__pre_input = m
-        return grad(m),
+        return self.__pre_args,
 
-    def getValue(self, m, grad_m):
+        
+    def getValueAtPoint(self):
         """
         returns the value of the cost function J with respect to m.
         This equation is specified in the inversion cookbook.
 
         :rtype: ``float``
         """
+        m=self.__pre_input
+        grad_m=self.__pre_args
         
+        mu=self.__mu
+        mu_c=self.__mu_c
+        DIM=self.getDomain().getDim()
+        numLS=self.getNumLevelSets()
+
+        A=0
+        if self.__w0 is not None:
+            r = inner(integrate(m**2 * self.__w0), mu)
+            self.logger.debug("J_R[m^2] = %e"%r)
+            A += r
+
+        if self.__w1 is not None:
+            if numLS == 1:
+                r = integrate(inner(grad_m**2, self.__w1))*mu
+                self.logger.debug("J_R[grad(m)] = %e"%r)
+                A += r
+            else:
+                for k in range(numLS):
+                    r = mu[k]*integrate(inner(grad_m[k,:]**2,self.__w1[k,:]))
+                    self.logger.debug("J_R[grad(m)][%d] = %e"%(k,r))
+                    A += r
+
+        if numLS > 1:
+            for k in range(numLS):
+                gk=grad_m[k,:]
+                len_gk=length(gk)
+                for l in range(k):
+                    gl=grad_m[l,:]
+                    r = mu_c[l,k] * integrate( self.__wc[l,k] * ( ( len_gk * length(gl) )**2 - inner(gk, gl)**2 ) )
+                    self.logger.debug("J_R[cross][%d,%d] = %e"%(l,k,r))
+                    A += r
+        return A/2       
+    
+    
+    def getValue(self, m, grad_m):
+        """
+        returns the value of the cost function J with respect to m.
+        This equation is specified in the inversion cookbook.
+
+        :rtype: ``float``
+        """
+
+        if m!=self.__pre_input:
+            raise RuntimeError("Attempt to change point using getValue")        
         # substituting cached values
         m=self.__pre_input
         grad_m=self.__pre_args
@@ -453,14 +503,17 @@ class SplitRegularization(CostFunction):
                     A += r
         return A/2
 
-    def getGradient(self, m,  grad_m):
+    def getGradient(self):
+        raise RuntimeError("Split versions do not support getGradient. Use getGradientAtPoint instead.")
+      
+    def getGradientAtPoint(self):
         """
         returns the gradient of the cost function J with respect to m.
 
         :note: This implementation returns Y_k=dPsi/dm_k and X_kj=dPsi/dm_kj
         """
 
-        # substituting cached values
+        # Using cached values
         m=self.__pre_input
         grad_m=self.__pre_args        
         
@@ -504,7 +557,7 @@ class SplitRegularization(CostFunction):
 
         return ArithmeticTuple(Y, X)
 
-    def getInverseHessianApproximation(self, m, r, grad_m, solve=True):
+    def getInverseHessianApproximationAtPoint(self, r, solve=True):
         """
         """
 
@@ -587,4 +640,14 @@ class SplitRegularization(CostFunction):
         :rtype: ``float``
         """
         return sqrt(integrate(length(m)**2)/self.__vol_d)
+    
+    def setPoint(self, m):
+        """
+        sets the point which this function will work with
+        
+        :param m: level set function
+        :type m: `Data`
+        """
+        self.__pre_input = m
+        self.__pre_args = grad(m)
 
diff --git a/downunder/test/python/inversion_acoustictest_2d.py b/downunder/test/python/inversion_acoustictest_2d.py
index 0a0794c..8792067 100644
--- a/downunder/test/python/inversion_acoustictest_2d.py
+++ b/downunder/test/python/inversion_acoustictest_2d.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 """this a very simple test for inversion of acoustic data in the freqency domain
 
 
diff --git a/downunder/test/python/ref_data/dip.geo b/downunder/test/python/ref_data/dip.geo
new file mode 100644
index 0000000..6b00d67
--- /dev/null
+++ b/downunder/test/python/ref_data/dip.geo
@@ -0,0 +1,53 @@
+lc=10.000000;
+Point(1)={-350.000000, -350.000000, 0, lc};
+Point(2)={350.000000, -350.000000, 0, lc};
+Point(3)={350.000000, 350.000000, 0, lc};
+Point(4)={-350.000000, 350.000000, 0, lc};
+Line(1) = {1,2} ;
+Line(2) = {3,2} ;
+Line(3) = {3,4} ;
+Line(4) = {4,1} ;
+Line Loop(5) = {4,1,-2,3} ; 
+Plane Surface(6) = {5} ; 
+Point(5)={-22.000000,0.000000,0.000000,1.000000};
+Point(6)={-18.000000,0.000000,0.000000,1.000000};
+Point(7)={-14.000000,0.000000,0.000000,1.000000};
+Point(8)={-10.000000,0.000000,0.000000,1.000000};
+Point(9)={-6.000000,0.000000,0.000000,1.000000};
+Point(10)={-2.000000,0.000000,0.000000,1.000000};
+Point(11)={2.000000,0.000000,0.000000,1.000000};
+Point(12)={6.000000,0.000000,0.000000,1.000000};
+Point(13)={10.000000,0.000000,0.000000,1.000000};
+Point(14)={14.000000,0.000000,0.000000,1.000000};
+Point(15)={18.000000,0.000000,0.000000,1.000000};
+Point(16)={22.000000,0.000000,0.000000,1.000000};
+out0[]=Extrude {0, 0, -400.000000} { Surface {6};};
+Point{5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} In Surface{6};
+Physical Volume("volume-1") = {1} ;
+Physical Surface("Top") = { -6 };
+Physical Surface("Bottom") = { -out0[0] };
+Physical Surface("Left") = { -out0[2] };
+Physical Surface("Right") = { -out0[4] };
+Physical Surface("Front") = { -out0[5] };
+Physical Surface("Back") = { -out0[3] };
+Field[1] = Box;
+Field[1].VIn=lc;
+Field[1].VOut=5*lc;
+Field[1].XMax=50.000000;
+Field[1].XMin=-50.000000;
+Field[1].YMax=50.000000;
+Field[1].YMin=-50.000000;
+Field[1].ZMax=0;
+Field[1].ZMin=-100.000000;
+Field[2] = Attractor;
+Field[2].NodesList = {5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+Field[3] = Threshold;
+Field[3].IField = 2;
+Field[3].LcMin = lc / 5;
+Field[3].LcMax = 100*lc;
+Field[3].DistMin = 50.0;
+Field[3].DistMax = 100.0;
+Field[4] = Min;
+Field[4].FieldsList = {1, 3};
+Background Field = 4;
+Mesh.CharacteristicLengthExtendFromBoundary = 0;
diff --git a/downunder/test/python/ref_data/pole.geo b/downunder/test/python/ref_data/pole.geo
new file mode 100644
index 0000000..ab6827a
--- /dev/null
+++ b/downunder/test/python/ref_data/pole.geo
@@ -0,0 +1,45 @@
+lc=50.000000;
+Point(1)={-3000.000000, -3000.000000, 0, lc};
+Point(2)={4000.000000, -3000.000000, 0, lc};
+Point(3)={4000.000000, 5000.000000, 0, lc};
+Point(4)={-3000.000000, 5000.000000, 0, lc};
+Line(1) = {1,2} ;
+Line(2) = {3,2} ;
+Line(3) = {3,4} ;
+Line(4) = {4,1} ;
+Line Loop(5) = {4,1,-2,3} ; 
+Plane Surface(6) = {5} ; 
+Point(5)={800.000000,1000.000000,0.000000,5.000000};
+Point(6)={200.000000,1000.000000,0.000000,5.000000};
+Point(7)={600.000000,1000.000000,0.000000,5.000000};
+Point(8)={400.000000,1000.000000,0.000000,5.000000};
+out0[]=Extrude {0, 0, -5000.000000} { Surface {6};};
+Point{5, 6, 7, 8} In Surface{6};
+Physical Volume("volume-1") = {1} ;
+Physical Surface("Top") = { -6 };
+Physical Surface("Bottom") = { -out0[0] };
+Physical Surface("Left") = { -out0[2] };
+Physical Surface("Right") = { -out0[4] };
+Physical Surface("Front") = { -out0[5] };
+Physical Surface("Back") = { -out0[3] };
+Field[1] = Box;
+Field[1].VIn=lc;
+Field[1].VOut=5*lc;
+Field[1].XMax=1000.000000;
+Field[1].XMin=0;
+Field[1].YMax=2000.000000;
+Field[1].YMin=0;
+Field[1].ZMax=0;
+Field[1].ZMin=-2000.000000;
+Field[2] = Attractor;
+Field[2].NodesList = {5, 6, 7, 8};
+Field[3] = Threshold;
+Field[3].IField = 2;
+Field[3].LcMin = lc / 5;
+Field[3].LcMax = 100*lc;
+Field[3].DistMin = 50;
+Field[3].DistMax = 100;
+Field[4] = Min;
+Field[4].FieldsList = {1, 3};
+Background Field = 4;
+Mesh.CharacteristicLengthExtendFromBoundary = 0;
diff --git a/downunder/test/python/ref_data/schlum.geo b/downunder/test/python/ref_data/schlum.geo
new file mode 100644
index 0000000..035e11d
--- /dev/null
+++ b/downunder/test/python/ref_data/schlum.geo
@@ -0,0 +1,53 @@
+lc=10.000000;
+Point(1)={-200.000000, -200.000000, 0, lc};
+Point(2)={200.000000, -200.000000, 0, lc};
+Point(3)={200.000000, 200.000000, 0, lc};
+Point(4)={-200.000000, 200.000000, 0, lc};
+Line(1) = {1,2} ;
+Line(2) = {3,2} ;
+Line(3) = {3,4} ;
+Line(4) = {4,1} ;
+Line Loop(5) = {4,1,-2,3} ; 
+Plane Surface(6) = {5} ; 
+Point(5)={-27.500000,0.000000,0.000000,1.000000};
+Point(6)={-22.500000,0.000000,0.000000,1.000000};
+Point(7)={-17.500000,0.000000,0.000000,1.000000};
+Point(8)={-12.500000,0.000000,0.000000,1.000000};
+Point(9)={-7.500000,0.000000,0.000000,1.000000};
+Point(10)={-2.500000,0.000000,0.000000,1.000000};
+Point(11)={2.500000,0.000000,0.000000,1.000000};
+Point(12)={7.500000,0.000000,0.000000,1.000000};
+Point(13)={12.500000,0.000000,0.000000,1.000000};
+Point(14)={17.500000,0.000000,0.000000,1.000000};
+Point(15)={22.500000,0.000000,0.000000,1.000000};
+Point(16)={27.500000,0.000000,0.000000,1.000000};
+out0[]=Extrude {0, 0, -300.000000} { Surface {6};};
+Point{5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} In Surface{6};
+Physical Volume("volume-1") = {1} ;
+Physical Surface("Top") = { -6 };
+Physical Surface("Bottom") = { -out0[0] };
+Physical Surface("Left") = { -out0[2] };
+Physical Surface("Right") = { -out0[4] };
+Physical Surface("Front") = { -out0[5] };
+Physical Surface("Back") = { -out0[3] };
+Field[1] = Box;
+Field[1].VIn=lc;
+Field[1].VOut=5*lc;
+Field[1].XMax=100.000000;
+Field[1].XMin=-100.000000;
+Field[1].YMax=100.000000;
+Field[1].YMin=-100.000000;
+Field[1].ZMax=0;
+Field[1].ZMin=-200.000000;
+Field[2] = Attractor;
+Field[2].NodesList = {5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+Field[3] = Threshold;
+Field[3].IField = 2;
+Field[3].LcMin = lc / 5;
+Field[3].LcMax = 100*lc;
+Field[3].DistMin = 70;
+Field[3].DistMax = 100;
+Field[4] = Min;
+Field[4].FieldsList = {1, 3};
+Background Field = 4;
+Mesh.CharacteristicLengthExtendFromBoundary = 0;
diff --git a/downunder/test/python/run_comm1.py b/downunder/test/python/run_comm1.py
new file mode 100644
index 0000000..1863ed3
--- /dev/null
+++ b/downunder/test/python/run_comm1.py
@@ -0,0 +1,615 @@
+##############################################################################
+#
+# Copyright (c) 2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+from __future__ import print_function, division
+"""
+Test script to run test model COMMEMI-4
+"""
+
+__copyright__="""Copyright (c) 2015 by The University of Queensland
+http://www.uq.edu.au
+Primary Business: Queensland, Australia"""
+__license__="""Licensed under the Open Software License version 3.0
+http://www.opensource.org/licenses/osl-3.0.php"""
+__url__="https://launchpad.net/escript-finley"
+
+try:
+  import matplotlib
+  # The following line is here to allow automated testing. Remove or comment if
+  # you would like to display the final plot in a window instead.
+  matplotlib.use('agg')
+
+  from matplotlib import pyplot
+  HAVE_MPL=True
+except ImportError:
+  HAVE_MPL=False
+
+import logging
+import esys.escriptcore.utestselect as unittest
+from esys.escriptcore.testing import *
+
+import numpy
+import datetime
+import esys.downunder.magtel2d as mt2d
+import esys.escript            as escript
+import esys.finley             as finley
+import esys.escript.pdetools   as pdetools
+
+
+
+
+# this is mainly to avoid warning messages
+logging.basicConfig(format='%(name)s: %(message)s', level=logging.INFO)
+
+try:
+    from esys.finley import Rectangle as fRect, Brick as fBrick
+    HAVE_FINLEY = True
+except ImportError:
+    HAVE_FINLEY = False
+
+
+
+
+def makeLayerCake(x_start,x_extent,z_layers):
+    # ---------------------------------------------------------------------------------------------
+    # DESCRIPTION:
+    # -----------
+    # This is a utility function which sets up a 2D model with N layers.
+    #
+    # ARGUMENTS:
+    # ----------
+    # x_start             :: start coordinate of mesh.
+    # x_extent            :: horizontal extent of mesh.
+    # z_layers            :: list with interface coordinates.
+    #
+    # RETURNS:
+    # --------
+    # borders             :: borders of layers.
+    # air_earth_interface :: line at the air/earth interface.
+    #
+    # AUTHOR:
+    # -------
+    # Ralf Schaa,
+    # University of Queensland
+    #
+    #
+    # HISTORY:
+    # --------
+    #
+    # ---------------------------------------------------------------------------------------------
+
+    import esys.pycad   as pycad     # @UnresolvedImport
+    import esys.weipa   as weipa     # @UnresolvedImport
+    import esys.finley  as finley    # @UnresolvedImport
+    import esys.escript as escript   # @UnresolvedImport
+
+
+    # ---------------------------------------------------------------------------------------------
+    # Point definitions.
+    # ---------------------------------------------------------------------------------------------
+
+    # Loop through all layers and define the vertices at all interfaces.
+    scale = 1.0
+    points = []
+    for i in range(0,len(z_layers)):
+            # Adjust scale at corners of air/earth interface:
+            if z_layers[i] == 0:
+                scale = 0.15
+            else:
+                scale = 1.0
+            points.append( pycad.Point(x_start           , z_layers[i], 0.0, local_scale = scale) ) # Left-Corner.
+            points.append( pycad.Point(x_start + x_extent, z_layers[i], 0.0, local_scale = scale) ) # Right-Corner.
+
+
+    # ---------------------------------------------------------------------------------------------
+    # Line definitions.
+    # ---------------------------------------------------------------------------------------------
+
+    # Now connect the points to define the horizontal lines for all interfaces:
+    hlines = []
+    for i in range(0,len(points),2):
+        if i <= len(points)-1:
+            hlines.append( pycad.Line(points[i],points[i+1]) )
+
+    # Now connect the points to define the vertical lines for all interfaces:
+    vlines_left = []
+    for i in range(0,len(points),2):
+        if i <= len(points)-3:
+            vlines_left.append( pycad.Line(points[i],points[i+2]) )
+
+    vlines_right = []
+    for i in range(0,len(points),2):
+        if i <= len(points)-4:
+            vlines_right.append( pycad.Line(points[i+1],points[i+3]) )
+
+
+
+    # ---------------------------------------------------------------------------------------------
+    # Curveloop and Area definitions.
+    # ---------------------------------------------------------------------------------------------
+
+    # Join line segments for each layer.
+    borders = []
+    for i in range(0,len(z_layers)-1):
+        border = [ hlines[i],vlines_right[i],-hlines[i+1],-vlines_left[i] ]
+        borders.append( pycad.CurveLoop( border) )
+
+
+
+    # ---------------------------------------------------------------------------------------------
+    # Return values.
+    # ---------------------------------------------------------------------------------------------
+
+    # Explicitly specify the air-earth-boundary:
+    air_earth_interface = hlines[1]
+
+    return borders, air_earth_interface
+
+#__________________________________________________________________________________________________
+
+
+
+
+def setupMesh(mode, x_start, x_extent, a_extent, z_layers, anomaly_coord, elem_sizes):
+    # -----------------------------------------------------------------------------------------------------------------
+    # DESCRIPTION:
+    # -----------
+    # This is a utility function which sets up the COMMEMI-1 mesh.
+    #
+    #
+    # ARGUMENTS:
+    # ----------
+    # mode           :: TE or TM mode.
+    # x_start        :: horizontal start-point mesh.
+    # x_extent       :: horizontal extent of mesh.
+    # a_extent       :: vertical extent of air-layer.
+    # z_layers       :: list with coordinates of top-interfaces in Z-direction, incl. basement.
+    # anomaly_coord  :: dictionary with coordinate tuples of anomalies, counterclockwise.
+    # elem_sizes     :: mesh element sizes, large, normal, small.
+    #
+    # RETURNS:
+    # --------
+    # <Nothing> A mesh file is written to the output folder.
+    #
+    #
+    # AUTHOR:
+    # -------
+    # Ralf Schaa,
+    # The University of Queensland
+    #
+    #
+    # HISTORY:
+    # --------
+    #
+    # -----------------------------------------------------------------------------------------------------------------
+
+
+
+    # -----------------------------------------------------------------------------------------------------------------
+    # Imports.
+    # -----------------------------------------------------------------------------------------------------------------
+
+    # System imports.
+    import math
+
+    # Escript modules.
+    import esys.pycad              as pycad     # @UnresolvedImport
+    import esys.finley             as finley    # @UnresolvedImport
+    import esys.escript            as escript   # @UnresolvedImport
+    import esys.weipa              as weipa     # @UnresolvedImport
+    # <Note>: "@UnresolvedImport" ignores any warnings in Eclipse/PyDev (PyDev has trouble with external libraries).
+
+    # Warn about magnetotelluric TM mode:
+    if mode.lower() == 'tm':
+        print("TM mode not yet supported")
+        return None
+
+    # -----------------------------------------------------------------------------------------------------------------
+    # Anomaly border.
+    # -----------------------------------------------------------------------------------------------------------------
+
+    #<Note>: define the anomaly which must be 'cut out' in the main mesh.
+
+
+    # Prepare list to store the anomaly borders:
+    border_anomaly = []
+
+    # Cycle anomaly dictionary and define the border for each.
+    for anomaly in anomaly_coord:
+
+        # Extract the coordinates for current key:
+        coord = anomaly_coord[anomaly]
+
+        # Points defining the anomaly from left-top.
+        points0 = []
+        for i in range( 0, len(coord) ):
+            points0.append(pycad.Point(coord[i][0], coord[i][1], 0.0))
+
+        # Define the line segments connecting the points.
+        lines0 = []
+        for i in range( 0, len(points0)-1 ):
+            lines0.append(pycad.Line(points0[i],points0[i+1]))
+        # Connect the last segment from end to start:
+        lines0.append(pycad.Line(points0[-1], points0[0]))
+
+        # And define the border of the anomalous area.
+        border_anomaly.append( pycad.CurveLoop(*lines0) )
+    #___________________________________________________________________________
+
+
+
+
+    # --------------------------------------------------------------------------
+    # Get the borders for each layer (air & host).
+    # --------------------------------------------------------------------------
+
+    # Borders around layers and the air/earth interface.
+    borders, air_earth_interface = makeLayerCake(x_start,x_extent,z_layers)
+
+
+
+
+
+    # --------------------------------------------------------------------------
+    # Specification of number of elements in domains.
+    # --------------------------------------------------------------------------
+
+    #<Note>: specifying the number of mesh elements is somewhat heuristic
+    #        and is dependent on the mesh size and the anomaly sizes.
+
+    coord = anomaly_coord["anomaly_1"]
+
+    # First get the max-length of the anomaly to specify the number of elements.
+    length = max(( abs(coord[2][0]-coord[0][0]) ),  # X-length
+                 ( abs(coord[2][1]-coord[0][1]) ))  # Y-length
+
+    # Specify number of elements in air, anomaly and on air/earth interface:
+    nr_elements_air       = 1 * x_extent / elem_sizes["large"]
+    nr_elements_anomaly   = 2 * length   / elem_sizes["small"]
+    nr_elements_interface = 4 * x_extent / elem_sizes["small"]
+    #___________________________________________________________________________
+
+
+
+
+    #---------------------------------------------------------------------------
+    # Domain definitions.
+    #---------------------------------------------------------------------------
+
+    # Define the air & layer areas; note the 'holes' specifiers.
+    domain_air     = pycad.PlaneSurface( borders[0] )
+    domain_host    = pycad.PlaneSurface( borders[1] , holes = [ border_anomaly[0] ] )
+    domain_anomaly = pycad.PlaneSurface( border_anomaly[0] )
+
+    # Specify the element sizes in the domains and along the interface.
+    #<Note>: Sizes must be assigned in the order as they appear below:
+    domain_air.setElementDistribution( nr_elements_air )
+    domain_anomaly.setElementDistribution( nr_elements_anomaly )
+    air_earth_interface.setElementDistribution( nr_elements_interface )
+
+    # Ready to define the mesh-design..
+    design2D = pycad.gmsh.Design(dim=2, element_size=elem_sizes["normal"] , keep_files=False)
+    # ..and also specify the domains for tagging with property values later on:
+    design2D.addItems( pycad.PropertySet("domain_air"    , domain_air),
+                       pycad.PropertySet("domain_host"   , domain_host),
+                       pycad.PropertySet("domain_anomaly", domain_anomaly) )
+
+    # Now define the unstructured finley-mesh..
+    model2D = finley.MakeDomain(design2D)
+    #___________________________________________________________________________
+
+
+    return model2D
+    #___________________________________________________________________________
+
+def generateCommemi1Mesh():
+    # --------------------------------------------------------------------------
+    # Geometric mesh parameters.
+    # --------------------------------------------------------------------------
+
+    # Mesh extents.
+    a_extent = 20000    # 20km - Vertical extent of air-layer in (m).
+    z_extent = 20000    # 20km - Vertical extent of subsurface in (m).
+    x_extent = 40000    # 40km - Horizontal extent of mesh in (m).
+
+    # Start point of mesh.
+    x_start = 0 #-x_extent/2.0
+
+    # Define interface locations in z-direction: top, air/earth, basement.
+    z_layers    = [   a_extent, 0, -z_extent]
+
+    # Mesh elements sizes.
+    elem_sizes = {
+                'large' : 10.00 * x_extent/100.0, # 10.00% of x_extent.
+                'normal': 05.00 * x_extent/100.0, # 5.00% of x_extent.
+                'small' : 00.50 * x_extent/100.0  # 0.50% of x_extent.
+                }
+    #___________________________________________________________________________
+
+
+
+
+    # --------------------------------------------------------------------------
+    # Geometric anomaly parameters.
+    # --------------------------------------------------------------------------
+
+    # Extents of the rectangular 2D anomaly.
+    x_anomaly = 1000    # 1km - Horizontal extent of anomaly in (m).
+    z_anomaly = 2000    # 2km - Vertical extent of anomaly in (m).
+
+    # Coordinates of the rectangular 2D anomaly.
+    ya1 = -250                                    # Top
+    ya2 = -z_anomaly + ya1                        # Bottom
+    xa1 = x_start + x_extent/2.0 - x_anomaly/2.0  # Left
+    xa2 = x_start + x_extent/2.0 + x_anomaly/2.0  # Right
+
+    # Save in dictionary as a list of tuples from left-top corner, counterclockwise.
+    anomaly_coord = {
+                    'anomaly_1': ([xa1,ya1],[xa1,ya2],[xa2,ya2],[xa2,ya1])
+                    }
+    #___________________________________________________________________________
+
+
+
+
+    # --------------------------------------------------------------------------
+    # Setup the COMMEMI-1 mesh.
+    # --------------------------------------------------------------------------
+
+    # This creates the mesh and saves it to the output folder.
+    return setupMesh("TE", x_start, x_extent, a_extent, z_layers,  anomaly_coord, elem_sizes)
+    #___________________________________________________________________________
+# ==============================================================================
+# ==============================================================================
+
+
+
+
+
+class Test_COMMEMI1(unittest.TestCase):
+    @unittest.skipIf(not HAVE_FINLEY, "Test requires finley to be available")
+    @unittest.skipIf(not escript.getEscriptParamInt("PASO_DIRECT"), "Missing direct solvers")
+    @unittest.skipIf(escript.getMPISizeWorld() > 1,
+            "Direct solvers and multiple MPI processes are currently incompatible")
+    def test_comm1(self):
+        # ---
+        # Initialisations
+        # ---
+
+        # Get timing:
+        startTime = datetime.datetime.now()
+
+        # Mode (TE includes air-layer, whereas TM does not):
+        mode = 'TE'
+
+        # Read the mesh file and define the 'finley' domain:
+        #mesh_file = "mesh/commemi-1/commemi1_te.fly"
+        #domain = finley.ReadMesh(mesh_file)
+        #mesh_file = "mesh/commemi-1/commemi1_te.msh"
+        #domain = finley.ReadGmsh(mesh_file, numDim=2)
+        domain = generateCommemi1Mesh()
+
+
+        # Sounding frequencies (in Hz):
+        freq_def = {"high":1.0e+1,"low":1.0e+1,"step":1}
+        # Frequencies will be mapped on a log-scale from
+        # 'high' to 'low' with 'step' points per decade.
+        # (also only one frequency must be passed via dict)
+
+        # Step sizes for sampling along vertical and horizontal axis (in m):
+        xstep=100
+        zstep=250
+
+
+
+
+        # ---
+        # Resistivity model
+        # ---
+
+        # Resistivity values assigned to tagged regions (in Ohm.m):
+        rho  = [
+                1.0e+14, # 0: air
+                100.0  , # 1: host
+                0.5    # 2: anomaly
+            ]
+
+        # Tags must match those in the file:
+        tags = ["domain_air", "domain_host", "domain_anomaly"]
+
+
+
+
+        # ---
+        # Layer definitions for 1D response at boundaries.
+        # ---
+
+        # List with resistivity values for left and right boundary.
+        rho_1d_left  = [ rho[0], rho[1] ]
+        rho_1d_rght  = [ rho[0], rho[1] ]
+
+        # Associated interfaces for 1D response left and right (must match the mesh file).
+        ifc_1d_left = [ 20000, 0, -20000]
+        ifc_1d_rght = [ 20000, 0, -20000]
+
+        # Save in dictionary with layer interfaces and resistivities left and right:
+        ifc_1d = {"left":ifc_1d_left , "right":ifc_1d_rght}
+        rho_1d = {"left":rho_1d_left , "right":rho_1d_rght}
+
+
+
+
+        # ---
+        # Adjust parameters here for TM mode
+        # ---
+
+        # Simply delete first element from lists:
+        if mode.upper() == 'TM':
+            tags.pop(0)
+            rho.pop(0)
+            rho_1d['left'].pop(0)
+            rho_1d['right'].pop(0)
+            ifc_1d['left'].pop(0)
+            ifc_1d['right'].pop(0)
+
+
+
+
+        # ---
+        # Run MT_2D
+        # ---
+
+        # Class options:
+        mt2d.MT_2D._solver = "DIRECT" #"ITERATIVE" #"CHOLEVSKY" #"CGLS " #"BICGSTAB" #"DIRECT" "ITERATIVE"
+        mt2d.MT_2D._debug   = False
+
+        # Instantiate an MT_2D object with required & optional parameters:
+        obj_mt2d = mt2d.MT_2D(domain, mode, freq_def, tags, rho, rho_1d, ifc_1d,
+                xstep=xstep ,zstep=zstep, maps=None, plot=False)
+
+        # Solve for fields, apparent resistivity and phase:
+        mt2d_fields, arho_2d, aphi_2d = obj_mt2d.pdeSolve()
+
+        #import random
+
+        #mt2d_fields[0]['real']+=random.random()
+        #mt2d_fields[0]['imag']+=50*random.random()
+
+        #print(arho_2d[0][0])
+        #for i in range(len(aphi_2d[0])):
+            #aphi_2d[0][i]+=(50*random.random())
+
+        #for i in range(len(arho_2d[0])):
+            #arho_2d[0][i]-=17.8*(random.random())
+
+        # ---
+        # User defined plots
+        # ---
+
+        from scipy.interpolate import InterpolatedUnivariateSpline
+
+        # Setup abscissas/Ordinates for escript data:
+        x  = numpy.array( obj_mt2d.loc.getX() )[:,0]
+        y0 = numpy.array( obj_mt2d.loc.getValue(arho_2d[0]) )
+        y1 = numpy.array( obj_mt2d.loc.getValue(aphi_2d[0]) )
+
+        # Values from Weaver -- Model 2D-1 (EP, T=0.1, z=0), see Zhdanov et al, 1997,
+        # "Methods for modelling electromagnetic fields. Results from COMMEMI -- the
+        # international project on the comparison of modelling results for electromag-
+        # netic induction", Journal of Applied Geophysics, 133-271
+        rte = [8.07,   14.10,  51.50,  95.71, 104.00, 100.00, 100.00] # TE rho_a (3 Canada)
+        rtm = [9.86,   46.40,  94.80,  98.30,  99.70, 100.00, 100.00] # TM rho_a (3 Canada)
+        if mode.lower() == 'te':
+            ra = rte
+        else:
+            ra = rtm
+        # Associated stations shifted to match escript coordinates:
+        xs = numpy.array( [0, 500, 1000, 2000, 4000, 8000, 16000] ) + x.max()/2.0
+
+        # Setup interpolation to get values at specified stations (for comparison):
+        fi = InterpolatedUnivariateSpline(x, y0)
+        # Save escript values at comparison points in text file:
+        # re-enable to allow comparisons
+        #numpy.savetxt("commemi1_"+mode.lower()+".dat", numpy.column_stack((xs,fi(xs))), fmt='%g')
+
+
+
+        # X plot-limits:
+        x0lim = [2000,38000]
+        y1lim = [0,120]
+        y2lim = [40,85]
+
+        # Plot labels:
+        title = '    escript COMMEMI-1 MT-2D ' + '(' + mode.upper() + ')' + ' freq: ' + str(obj_mt2d.frequencies[0]) + ' Hz'
+        ylbl0 = r'resistivity $(\Omega m)$'
+        ylbl1 = r'phase $(\circ)$'
+        xlbl1 = 'X (m)'
+        # Setup the plot window with app. res. on top and phase on bottom:
+        if HAVE_MPL:
+            f, ax = pyplot.subplots(2, figsize=(3.33,3.33), dpi=1200,
+                    facecolor='w', edgecolor='k', sharex=True) # Mind shared axis
+            f.subplots_adjust(hspace=0.1, top=0.95, left=0.135, bottom=0.125, right=0.975)
+            f.suptitle(title, y=0.99,fontsize=8) #
+
+            # Top: apparent resistivity and points from Weaver for comparison:
+            ax[0].plot(x, y0, color='red',  label = 'escript')
+            ax[0].plot(xs,ra, linestyle='', markersize=3, marker='o',color='blue',  label = 'Weaver')
+            ax[0].grid(b=True, which='both', color='grey',linestyle=':')
+            ax[0].set_ylabel( ylbl0)
+            ax[0].yaxis.set_label_coords(-0.082, 0.5)
+            # Plot limits:
+            ax[0].set_xlim(x0lim)
+            ax[0].set_ylim(y1lim)
+
+            # Bottom: phase on linear plot
+            ax[1].plot(x,y1, color='blue')
+            ax[1].grid(b=True, which='both', color='grey',linestyle=':')
+            ax[1].set_xlabel( xlbl1 )
+            ax[1].set_ylabel( ylbl1 )
+            # Plot limits:
+            ax[1].set_xlim(x0lim)
+            ax[1].set_ylim(y2lim)
+
+            # ask matplotlib for the plotted objects and their labels
+            lna, la = ax[0].get_legend_handles_labels()
+            ax[0].legend(lna, la, bbox_to_anchor=(0.675, 0.325), loc=2,
+                    borderaxespad=0.,prop={'size':8}, frameon=False)
+
+            pyplot.ticklabel_format(style='sci', axis='x', scilimits=(0,0), useMathText=True)
+            ax[0].xaxis.major.formatter._useMathText = True
+            pyplot.rc('font', **{'size': 8,'family':'sans-serif'})
+            # Uncomment to inspect visually
+            #f.savefig("commemi1_"+mode.lower()+".png", dpi=1200)
+
+        # Now let's see if the points match
+        # First, we need to find correspondence between xs and x
+        indices=[]
+        for i in range(len(xs)):
+            mindiff=40000
+            mindex=0
+            for j in range(len(x)):
+                if abs(xs[i]-x[j]) < mindiff:
+                    mindiff=abs(xs[i]-x[j])
+                    mindex=j
+            indices.append(mindex)
+
+        # The following are very simple checks based on the visual shape of the correct result
+        maxdiff=0
+        for i in range(len(indices)):
+            if abs(y0[indices[i]]-ra[i])>maxdiff:
+                maxdiff=abs(y0[indices[i]]-ra[i])
+
+        if maxdiff>5:           #Threshold is pretty arbitrary
+            raise RuntimeError("Mismatch with reference data")
+
+        c=0
+        for y in y1:
+            if y<46:
+                c+=1
+
+        if not (74 < escript.Lsup(y1) < 81):
+            raise RuntimeError("Peak of bottom plot is off.")
+
+        if not (0.78 < c/len(y1) < 0.80):
+            raise RuntimeError("Bottom plot has too many high points")
+
+        #
+        print (datetime.datetime.now()-startTime)
+        print ("Done!")
+
+
+if __name__ == '__main__':
+    run_tests(__name__, exit_on_failure=True)
+    
diff --git a/downunder/test/python/run_comm4.py b/downunder/test/python/run_comm4.py
new file mode 100644
index 0000000..db34e0b
--- /dev/null
+++ b/downunder/test/python/run_comm4.py
@@ -0,0 +1,692 @@
+##############################################################################
+#
+# Copyright (c) 2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+from __future__ import print_function, division
+"""
+Test script to run test model COMMEMI-4
+"""
+
+try:
+  import matplotlib
+  # The following line is here to allow automated testing. Remove or comment if
+  # you would like to display the final plot in a window instead.
+  matplotlib.use('agg')
+  
+  from matplotlib import pyplot
+  HAVE_MPL=True
+except:
+  HAVE_MPL=False
+
+import esys.escriptcore.utestselect as unittest
+from esys.escriptcore.testing import *
+
+import numpy
+import datetime
+import esys.downunder.magtel2d as mt2d
+import esys.escript            as escript
+import esys.finley             as finley
+import esys.escript.pdetools   as pdetools
+
+# Matplotlib uses outdated code -- ignore the warnings until an update is available:
+import warnings
+warnings.filterwarnings("ignore")  #, category=DeprecationWarning
+import logging
+# this is mainly to avoid warning messages
+logging.basicConfig(format='%(name)s: %(message)s', level=logging.INFO)
+
+try:
+    from esys.finley import Rectangle as fRect, Brick as fBrick
+    HAVE_FINLEY = True
+except ImportError:
+    HAVE_FINLEY = False
+
+# ==========================================================
+# ==========================================================
+
+
+
+def setupMesh(mode, coord, elem_sizes):         
+    #---------------------------------------------------------------------------
+    # DESCRIPTION:
+    # -----------
+    # This is a utility function which sets up the COMMEMI-4 mesh.
+    # 
+    #
+    # ARGUMENTS:                                                              
+    # ----------
+    # mode       :: TE or TM mode.
+    # coord      :: dictionary with coordinate tuples.
+    # elem_sizes :: mesh element sizes, large, normal, small. 
+    #
+    # RETURNS:
+    # --------
+    # <Nothing> A mesh file is written to the output folder.
+    # 
+    #
+    # AUTHOR:
+    # -------
+    # Ralf Schaa, 
+    # University of Queensland
+    #
+    #---------------------------------------------------------------------------
+
+
+
+    #---------------------------------------------------------------------------
+    # Imports.
+    #---------------------------------------------------------------------------
+        
+    import esys.pycad              as pycad     # @UnresolvedImport   
+    import esys.finley             as finley    # @UnresolvedImport
+    import esys.escript            as escript   # @UnresolvedImport
+    import esys.weipa              as weipa     # @UnresolvedImport    
+    # <Note>: "@UnresolvedImport" ignores any warnings in Eclipse/PyDev (PyDev has trouble with external libraries).
+
+
+
+    model = "COMMEMI-4"
+
+    print("Preparing the mesh " + model + " ...")
+    print("")
+    
+    # Warn about magnetotelluric TM mode:
+    if mode.lower() == 'tm':
+        print("TM mode not yet supported")
+        return
+
+
+        
+    # Path to write the mesh:
+    outpath = "../out/commemi4"
+    
+    
+        
+     
+    # --------------------------------------------------------------------------
+    # Initialisations.
+    # --------------------------------------------------------------------------
+
+    # Get coordinates from dictionary as list of tuples  
+    a0 = coord["air"]   
+    l1 = coord["lyr1"]  
+    s1 = coord["slab"]  
+    b1 = coord["basin"] 
+    l2 = coord["lyr2"]  
+    l3 = coord["lyr3"]  
+    
+    # Mesh length from top-boundary.
+    x_extent = abs(a0[3][0]-a0[0][0])
+    
+    
+
+        
+    # --------------------------------------------------------------------------
+    # Point definitions.
+    # --------------------------------------------------------------------------
+    
+    #<Note>: define all points spanning the mesh, anomalies and layers; 
+    #        note also shared domain points must be defined only once.
+ 
+ 
+    # Mesh top boundary.    
+    air = []
+    air.append( pycad.Point( *a0[0] ) )    # 0: left  , top    (@ boundary)
+    air.append( pycad.Point( *a0[3] ) )    # 3: right , top    (@ boundary)
+    
+    
+    # First-layer.
+    ly1 = []
+    ly1.append( pycad.Point( *l1[0] ) )    # 0: left  , top    (@ air/earth interface)                       
+    ly1.append( pycad.Point( *l1[1] ) )    # 1: left  , bottom (@ boundary)                       
+    ly1.append( pycad.Point( *l1[2] ) )    # 2: right , bottom (@ slab/basin)   
+    ly1.append( pycad.Point( *l1[3] ) )    # 3: right , bottom (@ boundary)     
+    ly1.append( pycad.Point( *l1[4] ) )    # 4: right , top    (@ air/earth interface)                 
+
+   
+    # Slab.
+    sl1 = []
+    sl1.append( ly1[1]                )    # 0: left  , top    (@ boundary)                       
+    sl1.append( pycad.Point( *s1[1] ) )    # 1: left  , bottom (@ boundary)                       
+    sl1.append( pycad.Point( *s1[2] ) )    # 2: right , bottom (@ slab/basin)                         
+    sl1.append( ly1[2]                )    # 3: right , top    (@ slab/basin)                       
+    
+    
+    # Basin.
+    bs1 = []
+    bs1.append( ly1[2]                )    # 0: left  , top    (@ slab/basin)
+    bs1.append( sl1[2]                )    # 1: left  , centre (@ slab/basin) 
+    bs1.append( pycad.Point( *b1[2] ) )    # 2: left  , bottom (@ lyr1/basin)                       
+    bs1.append( pycad.Point( *b1[3] ) )    # 3: centre, bottom (@ lyr1/basin)                       
+    bs1.append( pycad.Point( *b1[4] ) )    # 4: edge  , bottom (@ lyr1/basin)                       
+    bs1.append( pycad.Point( *b1[5] ) )    # 5: right , bottom (@ boundary)
+    bs1.append( ly1[3]                )    # 6: right , top 
+    
+    
+    # Second-Layer.
+    ly2 = []
+    ly2.append( sl1[1]                )    # 0: left  , top    (@ lyr2/slab)
+    ly2.append( pycad.Point( *l2[1] ) )    # 1: left  , bottom (@ boundary) 
+    ly2.append( pycad.Point( *l2[2] ) )    # 2: right , bottom (@ boundary)                       
+    ly2.append( bs1[5]                )    # 3: right , top    (@ basin/boundary)                       
+    ly2.append( bs1[4]                )    # 4: edge  , top    (@ lyr2/basin)                      
+    ly2.append( bs1[3]                )    # 5: centre, top    (@ lyr2/basin)
+    ly2.append( bs1[2]                )    # 6: left  , top    (@ lyr2/basin)
+    ly2.append( sl1[2]                )    # 7: left  , centre (@ slab/basin) 
+    
+    
+    # Basement layer.       
+    ly3 = []    
+    ly3.append( ly2[1]                )    # 0: left  , top    (@ boundary)
+    ly3.append( pycad.Point( *l3[1] ) )    # 1: left  , bottom (@ boundary) 
+    ly3.append( pycad.Point( *l3[2] ) )    # 2: right , bottom (@ boundary) 
+    ly3.append( ly2[2]                )    # 3: right , top    (@ boundary)
+    #___________________________________________________________________________
+
+
+
+
+    # --------------------------------------------------------------------------
+    # Line definitions.
+    # --------------------------------------------------------------------------
+
+    #<Note>: connects the points to define lines counterclockwise;    
+    #        shared lines are re-used to ensure that all domains  
+    #        are recognised as parts of the same mesh. 
+        
+    # Air.
+    ln0 = []
+    ln0.append( pycad.Line(air[0], ly1[0]) ) # 0 left-top     to left-bottom.
+    ln0.append( pycad.Line(ly1[0], ly1[4]) ) # 1 left-bottom  to right-bottom (air-earth interface).
+    ln0.append( pycad.Line(ly1[4], air[1]) ) # 2 right-bottom to right-top.
+    ln0.append( pycad.Line(air[1], air[0]) ) # 3 right-top    to left-top.
+        
+    # Top Layer.
+    ln1 = []
+    ln1.append( pycad.Line(ly1[0], ly1[1]) ) # 0 left-top         to left-bottom.   
+    ln1.append( pycad.Line(ly1[1], ly1[2]) ) # 1 left-bottom      to start-slab/basin.  
+    ln1.append( pycad.Line(ly1[2], ly1[3]) ) # 2 start-slab/basin to basin-boundary 
+    ln1.append( pycad.Line(ly1[3], ly1[4]) ) # 3 basin-boundary   to right-top.     
+    ln1.append( -ln0[1]                    ) # 4 right-top        to left-top.
+
+ 
+    # Slab.
+    ln2 = []
+    ln2.append( pycad.Line(sl1[0], sl1[1]) ) # 0 left-top     to left-bottom.   
+    ln2.append( pycad.Line(sl1[1], sl1[2]) ) # 1 left-bottom  to right-bottom.         
+    ln2.append( pycad.Line(sl1[2], sl1[3]) ) # 2 right-bottom to right-top.            
+    ln2.append( -ln1[1]                    ) # 3 right-top    to left-top
+
+
+    # Basin.
+    ln3 = []
+    ln3.append( -ln2[2]                    ) # 0 left-top         to left-centre.         
+    ln3.append( pycad.Line(bs1[1], bs1[2]) ) # 1 left-centre      to left-bottom.         
+    ln3.append( pycad.Line(bs1[2], bs1[3]) ) # 2 left-bottom      to mid-bottom.          
+    ln3.append( pycad.Line(bs1[3], bs1[4]) ) # 3 mid-bottom       to right-mid-top.       
+    ln3.append( pycad.Line(bs1[4], bs1[5]) ) # 4 right-mid-top    to right-bottom.        
+    ln3.append( pycad.Line(bs1[5], bs1[6]) ) # 5 right-bottom     to right-top.           
+    ln3.append( -ln1[2]                    ) # 6 right-top        to right-slab/basin.    
+    
+    
+    # Layer below.
+    ln4 = []
+    ln4.append( pycad.Line(ly2[0], ly2[1]) ) # 0 left-top      to left-bottom.        
+    ln4.append( pycad.Line(ly2[1], ly2[2]) ) # 1 left-bottom   to right-bottom.        
+    ln4.append( pycad.Line(ly2[2], ly2[3]) ) # 2 right-bottom  to right-top.            
+    ln4.append( -ln3[4]                    ) # 3 right-top     to right-mid-top.       
+    ln4.append( -ln3[3]                    ) # 4 right-mid-top to mid-bottom.          
+    ln4.append( -ln3[2]                    ) # 5 mid-bottom    to left-bottom.         
+    ln4.append( -ln3[1]                    ) # 6 left-bottom   to left-centre.         
+    ln4.append( -ln2[1]                    ) # 7 left-centre   to left-top.            
+        
+    # Basement layer.
+    ln5 = []
+    ln5.append( pycad.Line(ly3[0], ly3[1]) ) # 0 left-top     to left-bottom.
+    ln5.append( pycad.Line(ly3[1], ly3[2]) ) # 1 left-bottom  to right-bottom.
+    ln5.append( pycad.Line(ly3[2], ly3[3]) ) # 2 right-bottom to right-top.
+    ln5.append( -ln4[1]                    ) # 3 right-top    to left-top.
+    #___________________________________________________________________________
+
+
+
+
+    # --------------------------------------------------------------------------
+    # Domain definitions.
+    # --------------------------------------------------------------------------
+    
+       
+    # First define all borders.       
+    borders = []   
+    borders.append( pycad.CurveLoop(*ln0) )   
+    borders.append( pycad.CurveLoop(*ln1) )   
+    borders.append( pycad.CurveLoop(*ln2) )   
+    borders.append( pycad.CurveLoop(*ln3) )    
+    borders.append( pycad.CurveLoop(*ln4) )    
+    borders.append( pycad.CurveLoop(*ln5) )    
+
+    # And next the domains.
+    domains = []
+    for i in range( len(borders) ):        
+        domains.append( pycad.PlaneSurface(borders[i]) ) 
+    #___________________________________________________________________________
+
+
+
+
+    # --------------------------------------------------------------------------
+    # Set element sizes in domains.
+    # --------------------------------------------------------------------------
+    
+    # Horizontal extents of segments along slab and basin:
+    x_extents = []
+    x_extents.append( l1[2][0] - l1[0][0] ) # 0
+    x_extents.append( l1[3][0] - l1[2][0] ) # 1
+
+    # Number of elements in the air-domain, first-layer as well as slab- and basin-domain.
+    domains[0].setElementDistribution(     x_extent / elem_sizes["large"]   )
+    domains[1].setElementDistribution(     x_extent / (elem_sizes["small"]) )
+    domains[2].setElementDistribution( 0.4*x_extent / (elem_sizes["small"]) )
+    domains[3].setElementDistribution( 0.5*x_extent / (elem_sizes["small"]) )
+    #<Note> slab and basin multiplied by approximate ratio of their x_extent.
+    #___________________________________________________________________________
+
+
+
+
+    #---------------------------------------------------------------------------
+    # Now define the gmsh 'design' object. 
+    #---------------------------------------------------------------------------
+
+    design2D = pycad.gmsh.Design(dim=2, element_size=elem_sizes['large'], keep_files=False)
+    
+    # Also specify the domains for tagging with property values later on:
+    design2D.addItems(   
+    pycad.PropertySet( "air"   , domains[0]) ,   
+    pycad.PropertySet( "lyr1"  , domains[1]) ,   
+    pycad.PropertySet( "slab"  , domains[2]) ,   
+    pycad.PropertySet( "basin" , domains[3]) ,
+    pycad.PropertySet( "lyr2"  , domains[4]) ,
+    pycad.PropertySet( "lyr3"  , domains[5]) )   
+    
+    # Now define the unstructured finley-mesh..
+    model2D = finley.MakeDomain(design2D)  
+    #___________________________________________________________________________
+
+
+    return model2D
+
+def generateCommemi4Mesh():
+    #---------------------------------------------------------------------------
+    # DESCRIPTION:
+    # ------------
+    # Script for preparing the COMMEMI-4 2D model.
+    #
+    # The COMMEMI-4 2D model consists of a 3-layered halfspace,
+    # hosting an anomalous horizontal slab and a basin-structure 
+    # in the first layer. 
+    #
+    # References:
+    # -----------
+    # See Franke A., p.89, 2003 (MSc. Thesis).
+    # 
+    # Antje Franke, "Zweidimensionale Finite-Elemente-Modellierung 
+    # niederfrequenter elektromagnetischer Felder in der Fernzone", 
+    # Diplomarbeit (MSc.), 2003, Technische Universitaet Freiberg.
+    #
+    # --------------------------------------------------------------------------
+
+
+    #---------------------------------------------------------------------------
+    # Geometric mesh parameters.
+    # --------------------------------------------------------------------------
+
+    # Horizontal extent and start point of mesh.
+    a_extent = 50000   # 50km - Vertical extent of air-layer in (m).
+    z_extent = 50000   # 50km - Vertical extent of subsurface in (m).
+    x_extent = 60000   # 60km - Horizontal extent of model in (m).
+
+    # Start point of mesh.
+    x_start  = 0 #-x_extent/2.0
+
+    # Mesh elements sizes.
+    elem_sizes = { 
+                'large' : 4.00 * x_extent/100.0, # 
+                'normal': 2.00 * x_extent/100.0, # 
+                'small' : 0.25 * x_extent/100.0  # 
+                }
+   #____________________________________________________________________________
+
+
+
+
+
+    #---------------------------------------------------------------------------
+    # Coordinate definitions.
+    # --------------------------------------------------------------------------
+
+    # X-coordinates of all domain corners (in order of appearance, left to right).
+    x0 = x_start                          # left         (@ boundary)
+    x1 = x_start + 24000                  # centre       (@ slab/basin)
+    x2 = x_start + 24000 + 8000           # edge-bottom  (@ slab/lyr1)
+    x3 = x_start + 24000 + 8000 + 3000    # edge-top     (@ slab/lyr1)
+    x4 = x_start + x_extent               # right        (@ boundary) 
+
+    # Y-coordinates of all domain corners (in order of appearance, top to bottom).
+    y0 = a_extent                         # top          
+    y1 = 0                                # centre       (@ air/earth)
+    y2 =-500                              # lyr1-bottom  (@ boundary-left) 
+    y3 =-1000                             # basin-bottom (@ boundary-right) 
+    y4 =-2000                             # slab-bottom  (@ boundary-left) 
+    y5 =-4000                             # basin-bottom (@ centre)  
+    y6 =-25000                            # lyr1-bottom 
+    y7 =-z_extent                         # bottom
+
+    # Save in dictionary as a list of tuples for each domain, from left-top corner, counterclockwise.
+    coord = {                                 
+            'air'  : ([x0, y0, 0],    # 0: left  , top
+                        [x0, y1, 0],    # 1: left  , bottom (@ air/earth)
+                        [x4, y1, 0],    # 2: right , bottom (@ air/earth)
+                        [x4, y0, 0]),   # 3: right , top
+                                        
+            'lyr1' : ([x0, y1, 0],    # 0: left  , top    
+                        [x0, y2, 0],    # 1: left  , bottom 
+                        [x1, y2, 0],    # 2: right , bottom (@ slab/basin)
+                        [x4, y2, 0],    # 3: right , bottom (@ boundary)
+                        [x4, y1, 0]),   # 4: right , top 
+                                            
+            'slab' : ([x0, y2, 0],    # 0: left  , top    
+                        [x0, y4, 0],    # 1: left  , bottom 
+                        [x1, y4, 0],    # 2: right , bottom (@ slab/basin)
+                        [x1, y2, 0]),   # 3: right , top    (@ slab/basin)
+                                    
+            'basin': ([x1, y2, 0],    # 0: left  , top    (@ slab/basin)
+                        [x1, y4, 0],    # 1: left  , centre (@ slab/basin) 
+                        [x1, y5, 0],    # 2: left  , bottom (@ lyr1/basin) 
+                        [x2, y5, 0],    # 3: centre, bottom (@ lyr1/basin)        
+                        [x3, y3, 0],    # 4: edge  , bottom (@ lyr1/basin)
+                        [x4, y3, 0],    # 5: right , bottom (@ boundary)
+                        [x4, y2, 0]),   # 6: right , top
+                                    
+            'lyr2' : ([x0, y4, 0],    # 0: left  , top    
+                        [x0, y6, 0],    # 1: left  , bottom 
+                        [x4, y6, 0],    # 2: right , bottom 
+                        [x4, y3, 0],    # 3: right , top    (@ basin/boundary)
+                        [x3, y3, 0],    # 4: edge  , top    (@ lyr2/basin)
+                        [x2, y5, 0],    # 5: centre, top    (@ lyr2/basin)
+                        [x1, y5, 0],    # 6: left  , top    (@ lyr2/basin)
+                        [x1, y4, 0]),   # 7: left  , centre (@ slab/basin)
+                                    
+            'lyr3' : ([x0, y6, 0],    # 0: left  , top    
+                        [x0, y7, 0],    # 1: left  , bottom 
+                        [x4, y7, 0],    # 2: right , bottom 
+                        [x4, y6, 0]),   # 3: right , top                   
+            }
+    #___________________________________________________________________________
+
+
+    #---------------------------------------------------------------------------
+    # Setup the COMMEMI-4 mesh.
+    #---------------------------------------------------------------------------
+
+    # This creates the mesh and saves it to the output folder.
+    return setupMesh("TE", coord, elem_sizes)
+    #___________________________________________________________________________
+
+
+
+# ==========================================================
+# ==========================================================
+
+class Test_COMMEMI4(unittest.TestCase):
+    @unittest.skipIf(not HAVE_FINLEY, "Test requires finley to be available")
+    @unittest.skipIf(not escript.getEscriptParamInt("PASO_DIRECT"), "Missing direct solvers")
+    @unittest.skipIf(escript.getMPISizeWorld() > 1, "Direct solvers and MPI are currently incompatible")
+    def test_comm4(self):
+        # ---
+        # Initialisations
+        # ---
+
+        # Get timing:
+        startTime = datetime.datetime.now()
+
+        # Mode (TE includes air-layer, whereas TM does not):
+        mode = 'TE'
+
+        # Read the mesh file and define the 'finley' domain: 
+        #mesh_file = "mesh/commemi-4/commemi4_tm.msh"
+        #domain = finley.ReadGmsh(mesh_file, numDim=2)
+        domain = generateCommemi4Mesh()
+
+        #mesh_file = "mesh/commemi-4/commemi4_tm.fly"
+        #domain = finley.ReadMesh(mesh_file)
+
+        # Sounding frequencies (in Hz):
+        freq_def = {"high":1.0e+0,"low":1.0e-0,"step":1}
+        # Frequencies will be mapped on a log-scale from
+        # 'high' to 'low' with 'step' points per decade.
+        # (also only one frequency must be passed via dict)
+
+        # Step sizes for sampling along vertical and horizontal axis (in m):
+        xstep=100
+        zstep=100
+
+
+
+        # ---
+        # Resistivity model
+        # ---
+
+        # Resistivity values assigned to tagged regions (in Ohm.m):
+        rho  = [
+                1.0e+14, # 0: air     1.0e-30
+                25.0   , # 1: lyr1    0.04
+                10.0   , # 2: slab    0.1
+                2.5    , # 3: basin   0.4
+                1000.0 , # 4: lyr2    0.001
+                5.0      # 5: lyr3    0.2
+            ]
+
+        # Tags must match those in the file:
+        tags = ["air", "lyr1", "slab", "basin", "lyr2", "lyr3"]
+
+        # Optional user defined map of resistivity:
+        def f4(x,z,r): return escript.sqrt(escript.sqrt(x*x+z*z))/r
+        maps = [None, None, None, None, f4, None]
+
+
+
+        # ---
+        # Layer definitions for 1D response at boundaries.
+        # ---
+
+        # List with resistivity values for left and right boundary.
+        rho_1d_left  = [ rho[0], rho[1], rho[2], rho[4], rho[5] ]
+        rho_1d_rght  = [ rho[0], rho[1], rho[3], rho[4], rho[5] ]
+
+        # Associated interfaces for 1D response left and right (must match the mesh file).
+        ifc_1d_left = [ 50000, 0, -500, -2000, -25000, -50000]
+        ifc_1d_rght = [ 50000, 0, -500, -1000, -25000, -50000]
+
+        # Save in dictionary with layer interfaces and resistivities left and right:
+        ifc_1d = {"left":ifc_1d_left , "right":ifc_1d_rght}
+        rho_1d = {"left":rho_1d_left , "right":rho_1d_rght}
+
+
+
+        # ---
+        # Adjust parameters here for TM mode
+        # ---
+
+        # Simply delete first element from lists:
+        if mode.upper() == 'TM':
+            tags.pop(0)
+            rho.pop(0)
+            rho_1d['left'].pop(0)
+            rho_1d['right'].pop(0)
+            ifc_1d['left'].pop(0)
+            ifc_1d['right'].pop(0)
+            if maps is not None:
+                maps.pop(0)
+
+
+
+        # ---
+        # Run MT_2D
+        # ---
+
+        # Class options:
+        mt2d.MT_2D._solver = "DIRECT" #"ITERATIVE" #"CHOLEVSKY" #"CGLS " #"BICGSTAB" #"DIRECT" "ITERATIVE"
+        mt2d.MT_2D._debug   = False
+
+        # Instantiate an MT_2D object with required & optional parameters:
+        obj_mt2d = mt2d.MT_2D(domain, mode, freq_def, tags, rho, rho_1d, ifc_1d,
+                xstep=xstep ,zstep=zstep, maps=None, plot=False)
+
+        # Solve for fields, apparent resistivity and phase:
+        mt2d_fields, arho_2d, aphi_2d = obj_mt2d.pdeSolve()
+        
+        
+        #import random
+
+        #mt2d_fields[0]['real']+=random.random()
+        #mt2d_fields[0]['imag']+=50*random.random()
+
+        #print(arho_2d[0][0])
+        #for i in range(len(aphi_2d[0])):
+            #aphi_2d[0][i]+=(50*random.random())
+
+        #for i in range(len(arho_2d[0])):
+            #arho_2d[0][i]-=7*(random.random())    
+        
+
+        # ---
+        # User defined plots
+        # ---
+
+        from scipy.interpolate import InterpolatedUnivariateSpline
+
+        # Setup abscissas/Ordinates for escript data:
+        x  = numpy.array( obj_mt2d.loc.getX() )[:,0]
+        y0 = numpy.array( obj_mt2d.loc.getValue(arho_2d[0]) )
+        y1 = numpy.array( obj_mt2d.loc.getValue(aphi_2d[0]) )
+
+        # Zhdanov et al, 1997, -- Model 2D-1 Table B.33. Model2D-4 (T=1.0, z=0), see 
+        # "Methods for modelling electromagnetic fields. Results from COMMEMI -- the
+        # international project on the comparison of modelling results for electromag-
+        # netic induction", Journal of Applied Geophysics, 133-271
+        rte = [12.70, 12.00, 8.80, 6.84, 6.67, 6.25] # TE rho_a (3 Canada)   
+        rtm = [11.40, 11.50, 9.03, 6.78, 6.80, 5.71] # TM rho_a (3 Canada) 
+        if mode.lower() == 'te':
+            ra = rte
+        else:
+            ra = rtm  
+        # Associated stations shifted to match escript coordinates:
+        xs = numpy.array( [-10, -7, -6, -5, 2, 5] )*1000 + x.max()/2.0
+
+        # Setup interpolation to get values at specified stations (for comparison):
+        fi = InterpolatedUnivariateSpline(x, y0) 
+        # Save escript values at comparison points in text file:
+        # uncomment to investigate
+        #numpy.savetxt("mesh/commemi-4/commemi4_"+mode.lower()+".dat", numpy.column_stack((xs,fi(xs))), fmt='%g')
+
+        # X plot-limits:
+        x0lim = [2000,38000]
+        #y1lim = [0,120]
+        #y2lim = [40,85]
+        
+        # Plot labels:
+        title = '    escript COMMEMI-4 MT-2D ' + '(' + mode.upper() + ')' + ' freq: ' + str(obj_mt2d.frequencies[0]) + ' Hz'
+        ylbl0 = r'resistivity $(\Omega m)$'
+        ylbl1 = r'phase $(\circ)$'
+        xlbl1 = 'X (m)'
+        if HAVE_MPL:
+            # Setup the plot window with app. res. on top and phase on bottom:
+            f, ax = pyplot.subplots(2, figsize=(3.33,3.33), dpi=1200,
+                    facecolor='w', edgecolor='k', sharex=True) # Mind shared axis
+            f.subplots_adjust(hspace=0.1, top=0.95, left=0.135, bottom=0.125, right=0.975)  
+            f.suptitle(title, y=0.99,fontsize=8) # 
+                
+            # Top: apparent resistivity and points from Weaver for comparison:
+            ax[0].plot(x, y0, color='red',  label = 'escript')
+            ax[0].plot(xs,ra, linestyle='', markersize=3, marker='o',
+                    color='blue', label = 'Weaver') 
+            ax[0].grid(b=True, which='both', color='grey',linestyle=':')
+            ax[0].set_ylabel( ylbl0)
+            ax[0].yaxis.set_label_coords(-0.082, 0.5)
+            # Plot limits:
+            ax[0].set_xlim(x0lim)      
+            #ax[0].set_ylim(y1lim)    
+
+            # Bottom: phase on linear plot
+            ax[1].plot(x,y1, color='blue')
+            ax[1].grid(b=True, which='both', color='grey',linestyle=':')
+            ax[1].set_xlabel( xlbl1 )
+            ax[1].set_ylabel( ylbl1 )
+            # Plot limits:
+            ax[1].set_xlim(x0lim)      
+            #ax[1].set_ylim(y2lim)     
+
+            # ask matplotlib for the plotted objects and their labels
+            lna, la = ax[0].get_legend_handles_labels()
+            ax[0].legend(lna, la, bbox_to_anchor=(0.02, 0.325), loc=2,
+                    borderaxespad=0.,prop={'size':8}, frameon=False)
+
+            pyplot.ticklabel_format(style='sci', axis='x', scilimits=(0,0),
+                    useMathText=True)
+            ax[0].xaxis.major.formatter._useMathText = True
+            pyplot.rc('font', **{'size': 8,'family':'sans-serif'})
+        #uncomment to allow visual inspection
+        #f.savefig("mesh/commemi-4/commemi4_"+mode.lower()+".png", dpi=1200)
+
+        # Now let's see if the points match
+        # First, we need to find correspondence between xs and x
+        indices=[]
+        for i in range(len(xs)):
+            mindiff=40000
+            mindex=0
+            for j in range(len(x)):
+                if abs(xs[i]-x[j]) < mindiff: 
+                    mindiff=abs(xs[i]-x[j])
+                    mindex=j
+            indices.append(mindex)
+            
+        # The following are very simple checks based on the visual shape of the correct result        
+        maxdiff=0
+        for i in range(len(indices)):
+            if abs(y0[indices[i]]-ra[i])>maxdiff:
+                maxdiff=abs(y0[indices[i]]-ra[i])
+            
+        if maxdiff>5:           #Threshold is pretty arbitrary
+            raise RuntimeError("Mismatch with reference data")
+
+        c=0
+        for y in y1:
+            if y<46:
+                c+=1
+
+        if not (62 < escript.Lsup(y1) < 64):
+            raise RuntimeError("Peak of bottom plot is off.")
+            
+        if not (0.62 < c/len(y1) < 0.65):
+            print(c/len(y1))
+            raise RuntimeError("Bottom plot has too many high points")
+
+        #
+        print("Runtime:", datetime.datetime.now()-startTime)
+        print("Done!")
+
+
+if __name__ == '__main__':
+    run_tests(__name__, exit_on_failure=True)
diff --git a/downunder/test/python/run_coordinates.py b/downunder/test/python/run_coordinates.py
index e30c401..1c97fb7 100644
--- a/downunder/test/python/run_coordinates.py
+++ b/downunder/test/python/run_coordinates.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/downunder/test/python/run_datasources.py b/downunder/test/python/run_datasources.py
index e209b32..c154082 100644
--- a/downunder/test/python/run_datasources.py
+++ b/downunder/test/python/run_datasources.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -14,6 +13,8 @@ from __future__ import print_function
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/downunder/test/python/run_dcforward.py b/downunder/test/python/run_dcforward.py
index e65cae4..119372e 100644
--- a/downunder/test/python/run_dcforward.py
+++ b/downunder/test/python/run_dcforward.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -14,6 +13,8 @@ from __future__ import print_function
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 from esys.downunder import *
 from esys.escript import *
 from esys.escriptcore.testing import *
@@ -31,7 +32,7 @@ except KeyError:
     WORKDIR='.'
 
 try:
-    from esys.finley import Rectangle, Brick
+    from esys.finley import Rectangle, Brick, ReadGmsh
     HAVE_FINLEY = True
 except ImportError:
     HAVE_FINLEY = False
@@ -59,34 +60,35 @@ class TestDCResistivityForward(unittest.TestCase):
                         secondaryConductivity, current, a, start,
                         directionVector, numElectrodes))
 
+    @unittest.skipIf(not HAVE_GMSH, "gmsh not available")
     def test_getpotential3dPolePole(self):
         structured=False
         if structured:
             extents=[1000,1000,1000]
             dom=Brick(50,50,50,l0=extents[0],l1=extents[1],l2=-extents[2])
         else:
-            if not HAVE_GMSH:
-                raise unittest.SkipTest("gmsh required for test")
-            lc=30.0
-            bufferThickness=3000
             extents=[1000,2000,2000]
-            electrodeLst=[]
-            lcDiv=10
-            electrodeLst.append(("e1",[-0.4*extents[0], 0, 0,lc/lcDiv]))
-            electrodeLst.append(("e2",[-0.2*extents[0], 0, 0,lc/lcDiv]))
-            electrodeLst.append(("e3",[0.2*extents[0], 0, 0,lc/lcDiv]))
-            electrodeLst.append(("e4",[0.4*extents[0], 0, 0,lc/lcDiv]))
-            runName=os.path.join(WORKDIR, "dcResPolePole%d-%d"%(lc,lc/lcDiv))
-            domGen=DCResDomGenerator(extents, electrodeLst,lc=lc,tmpDir=WORKDIR,bufferThickness=bufferThickness,prism=None)
-            dom = domGen.getDom(mshName=runName+".msh")
-            if mpirank==0: 
-                os.unlink(runName+".msh")
+            tags=[]
+            points=[]
+            tags.append("e1")
+            tags.append("e2")
+            tags.append("e3")
+            tags.append("e4")
+            points.append([-0.4*extents[0], 0, 0])
+            points.append([-0.2*extents[0], 0, 0])
+            points.append([0.2*extents[0], 0,  0])
+            points.append([0.4*extents[0], 0,  0])
+            verbosity = 3 
+            filename  = os.path.join(TEST_DATA_ROOT, "pole.geo")
+            meshname  = os.path.join(TEST_DATA_ROOT, "dcResPolePole50-5.msh")
+            gmshGeo2Msh(filename, meshname, 3, 1, verbosity)
+            dom = ReadGmsh(meshname, 3, diracTags=tags, diracPoints=points)
         totalApparentRes = 130.
         primaryConductivity=Scalar(1/100., ContinuousFunction(dom))
         secondaryConductivity=Scalar(1/130., ContinuousFunction(dom))
         current=1000.
         a=4*0.05*extents[0]
-        midPoint = [0,0]
+        midPoint = [0.5*extents[0],0.5*extents[1]]
         directionVector=[1.,0.]
         numElectrodes = 4
 
@@ -95,11 +97,10 @@ class TestDCResistivityForward(unittest.TestCase):
         totalApparentResList = pps.getApparentResistivityTotal()
         for i in totalApparentResList:
             res_a = abs(i-totalApparentRes)
-            res_b = 0.075 * totalApparentRes
+            res_b = 0.05 * totalApparentRes
             self.assertLess(res_a, res_b, "result of %g greater than tolerance of %g"%(res_a, res_b))
 
     def test_getPotential3dSchlumberger(self):
-        structured=False
         numElectrodes = 12
         directionVector=[1.,0.]
         midPoint=[]
@@ -107,37 +108,30 @@ class TestDCResistivityForward(unittest.TestCase):
         current=0.5
         interval_a = 5
         interval_n = 5
-
-
-        if structured:
-            #does not work because finley does not allow the specification of domain origin
-            extents=[200,200,200]
-            dom=Brick(25,25,25,l0=(-extents[0]/2,extents[0]/2),l1=(-extents[1]/2,extents[1]/2),l2=-extents[2])
-            midPoint = [0,0]
-        else:
-            if not HAVE_GMSH:
-                raise unittest.SkipTest("gmsh required for test")
-            lc=10.0
-            bufferThickness=100
-            extents=[200,200,200]
-            midPoint = [0,0]
-            lcDiv=10.0
-            electrodes=[]
-            start=[]
-            start.append(midPoint[0] - (((numElectrodes-1)*interval_a)/2. * directionVector[0]))
-            start.append(midPoint[1] - (((numElectrodes-1)*interval_a)/2. * directionVector[1]))
-            electrodeTags=[]
-            electrodeLst=[]
-            for i in range(numElectrodes):
-                electrodes.append([start[0]+(directionVector[0]*i*interval_a), start[1]+(directionVector[1]*i*interval_a),0])
-                electrodeTags.append("e%d"%i)
-                electrodeLst.append([electrodeTags[i]],[electrodes[i][0], electrodes[i][1], electrodes[i][2], lc/lcDiv])
-            runName=os.path.join(WORKDIR, "dcResSchlum%d-%d"%(lc,lc/lcDiv))
-            domGen=DCResDomGenerator(extents, electrodeLst,lc=lc,tmpDir=WORKDIR,bufferThickness=bufferThickness,prism=None)
-            dom = domGen.getDom(mshName=runName+".msh",fieldSize=[70,100])
-            fn = domGen.getFileName()
-            if mpirank==0: 
-                os.unlink(runName+".msh")
+        if not HAVE_GMSH:
+            raise unittest.SkipTest("gmsh required for test")
+        lc=10.0
+        bufferThickness=100
+        extents=[200,200,200]
+        midPoint = [0,0]
+        lcDiv=10.0
+        electrodes=[]
+        start=[]
+        start.append(midPoint[0] - (((numElectrodes-1)*interval_a)/2. * directionVector[0]))
+        start.append(midPoint[1] - (((numElectrodes-1)*interval_a)/2. * directionVector[1]))
+        electrodeTags=[]
+        electrodeLst=[]
+        for i in range(numElectrodes):
+            electrodes.append([start[0]+(directionVector[0]*i*interval_a), start[1]+(directionVector[1]*i*interval_a),0])
+            electrodeTags.append("e%d"%i)
+        runName=os.path.join(WORKDIR, "dcResSchlum%d-%d"%(lc,lc/lcDiv))
+        filename  = os.path.join(TEST_DATA_ROOT, "schlum.geo")
+        meshname  = os.path.join(TEST_DATA_ROOT, "dcResSchlum10-1.msh")
+        verbosity=3
+        gmshGeo2Msh(filename, meshname, 3, 1, verbosity)
+        dom = ReadGmsh(meshname, 3, diracTags=electrodeTags, diracPoints=electrodes)
+        if mpirank==0: 
+            os.unlink(meshname)
             
         primaryConductivity=Scalar(1/100., ContinuousFunction(dom))
         secondaryConductivity=Scalar(1/130., ContinuousFunction(dom))    
@@ -149,7 +143,7 @@ class TestDCResistivityForward(unittest.TestCase):
         for i in totalApparentRes:
             for j in i:
                 res_a = abs(j-totalApparentResVal)
-                res_b = 0.05 * totalApparentResVal
+                res_b = 0.1 * totalApparentResVal
                 self.assertLess(res_a, res_b, "result of %g greater than tolerance of %g"%(res_a, res_b))
 
     def test_getPotentialDipDip(self):
@@ -164,25 +158,40 @@ class TestDCResistivityForward(unittest.TestCase):
             lc=10.0
             bufferThickness=300
             extents=[100,100,100]
-            electrodeLst=[]
+            electrodes=[]
+            tags=[]
             lcDiv=10
-            electrodeLst.append(("e0" , [-22.0, 0.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e1" , [-18.0, 0.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e2" , [-14.0, 0.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e3" , [-10.0, 0.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e4" , [-6.0, 0.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e5" , [-2.0, 0.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e6" , [ 2.0, 0.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e7" , [ 6.0, 0.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e8" , [ 10.0, 0.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e9" , [ 14.0, 0.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e10", [ 18.0, 0.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e11", [ 22.0, 0.0, 0, lc/lcDiv]))
-            runName=os.path.join(WORKDIR, "dcResdipdip%d-%d"%(lc,lc/lcDiv))
-            domGen=DCResDomGenerator(extents, electrodeLst,lc=lc,tmpDir=WORKDIR,bufferThickness=bufferThickness,prism=None)
-            dom = domGen.getDom(mshName=runName+".msh",reUse=False)
+            tags.append("e0" )
+            tags.append("e1" )
+            tags.append("e2" )
+            tags.append("e3" )
+            tags.append("e4" )
+            tags.append("e5" )
+            tags.append("e6" )
+            tags.append("e7" )
+            tags.append("e8" )
+            tags.append("e9" )
+            tags.append("e10")
+            tags.append("e11")
+            electrodes.append([-22.0, 0.0, 0])
+            electrodes.append([-18.0, 0.0, 0])
+            electrodes.append([-14.0, 0.0, 0])
+            electrodes.append([-10.0, 0.0, 0])
+            electrodes.append([-6.0, 0.0, 0])
+            electrodes.append([-2.0, 0.0, 0])
+            electrodes.append([ 2.0, 0.0, 0])
+            electrodes.append([ 6.0, 0.0, 0])
+            electrodes.append([ 10.0, 0.0, 0])
+            electrodes.append([ 14.0, 0.0, 0])
+            electrodes.append([ 18.0, 0.0, 0])
+            electrodes.append([ 22.0, 0.0, 0])
+            filename  = os.path.join(TEST_DATA_ROOT, "dip.geo")
+            meshname  = os.path.join(TEST_DATA_ROOT, "dcResdipdip-1.msh")
+            verbosity=3
+            gmshGeo2Msh(filename, meshname, 3, 1, verbosity)
+            dom = ReadGmsh(meshname, 3, diracTags=tags, diracPoints=electrodes)
             if mpirank==0: 
-                os.unlink(runName+".msh")
+                os.unlink(meshname)
         n=5
         totalApparentResVal = 130.
         primaryConductivity=Scalar(1/100., ContinuousFunction(dom))
@@ -201,48 +210,17 @@ class TestDCResistivityForward(unittest.TestCase):
         for i in totalApparentRes:
             for j in i:
                 res_a = abs(j-totalApparentResVal)
-                res_b = 0.075 * totalApparentResVal
+                res_b = 0.1 * totalApparentResVal
                 self.assertLess(res_a, res_b, "result of %g greater than tolerance of %g"%(res_a, res_b))
 
     def test_getPotentialWenner(self):
-        structured=True
         totalApparentResVal = 130.
-        if structured:
-            extents=[100,100,100]
-            dom=Brick(50,50,50,l0=extents[0],l1=extents[1],l2=-extents[2])
-        else:
-            if not HAVE_GMSH:
-                raise unittest.SkipTest("gmsh required for test")
-            lc=50.0
-            bufferThickness=3000
-            extents=[1000,2000,2000]
-            electrodeLst=[]
-            lcDiv=10
-
-            electrodeLst.append(("e0"  , [28.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e1"  , [32.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e2"  , [36.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e3"  , [40.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e4"  , [44.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e5"  , [48.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e6"  , [52.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e7"  , [56.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e8"  , [60.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e9"  , [64.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e10" , [68.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e11" , [72.0, 48.0, 0, lc/lcDiv]))
-
-            domGen=DCResDomGenerator(extents, electrodeLst,lc=lc,tmpDir=WORKDIR,bufferThickness=bufferThickness,prism=None)
-            runName=os.path.join(WORKDIR, "wenner%d-%d"%(lc,lc/lcDiv))
-            dom = domGen.getDom(mshName=runName+".msh")
-            if mpirank==0: 
-                os.unlink(runName+".msh")
-        totalApparentRes = 130.
+        extents=[100,100,100]
+        dom=Brick(50,50,50,l0=extents[0],l1=extents[1],l2=-extents[2])
         primaryConductivity=Scalar(1/100., ContinuousFunction(dom))
         secondaryConductivity=Scalar(1/130., ContinuousFunction(dom))
         current=1000.
         numElectrodes = 8
-        # a=(.8*extents[0])/numElectrodes
         a=2
         midPoint = [0.5*extents[0]+1,0.5*extents[1]]
         directionVector=[1.,0.]
@@ -254,7 +232,7 @@ class TestDCResistivityForward(unittest.TestCase):
         totalApparentRes=wenSurv.getApparentResistivityTotal()
         for i in totalApparentRes:
             res_a = abs(i-totalApparentResVal)
-            res_b = 0.05 * totalApparentResVal
+            res_b = 0.1 * totalApparentResVal
             self.assertLess(res_a, res_b, "result of %g greater than tolerance of %g"%(res_a, res_b))
 
     def test_getPotentialPolDip(self):
@@ -266,28 +244,42 @@ class TestDCResistivityForward(unittest.TestCase):
         else:
             if not HAVE_GMSH:
                 raise unittest.SkipTest("gmsh required for test")
-            lc=10.0
-            bufferThickness=300
+
             extents=[100,100,100]
-            electrodeLst=[]
-            lcDiv=10
-            electrodeLst.append(("e0"  , [28.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e1"  , [32.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e2"  , [36.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e3"  , [40.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e4"  , [44.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e5"  , [48.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e6"  , [52.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e7"  , [56.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e8"  , [60.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e9"  , [64.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e10" , [68.0, 48.0, 0, lc/lcDiv]))
-            electrodeLst.append(("e11" , [72.0, 48.0, 0, lc/lcDiv]))
-            runName=os.path.join(WORKDIR, "dcRespoldip%d-%d"%(lc,lc/lcDiv))
-            domGen=DCResDomGenerator(extents, electrodeLst,lc=lc,tmpDir=WORKDIR,bufferThickness=bufferThickness,prism=None)
-            dom = domGen.getDom(mshName=runName+".msh")
+            electrodes=[]
+            tags=[]
+            tags.append("e0" )
+            tags.append("e1" )
+            tags.append("e2" )
+            tags.append("e3" )
+            tags.append("e4" )
+            tags.append("e5" )
+            tags.append("e6" )
+            tags.append("e7" )
+            tags.append("e8" )
+            tags.append("e9" )
+            tags.append("e10")
+            tags.append("e11")
+            electrodes.append([-22.0, 0.0, 0])
+            electrodes.append([-18.0, 0.0, 0])
+            electrodes.append([-14.0, 0.0, 0])
+            electrodes.append([-10.0, 0.0, 0])
+            electrodes.append([-6.0, 0.0, 0])
+            electrodes.append([-2.0, 0.0, 0])
+            electrodes.append([ 2.0, 0.0, 0])
+            electrodes.append([ 6.0, 0.0, 0])
+            electrodes.append([ 10.0, 0.0, 0])
+            electrodes.append([ 14.0, 0.0, 0])
+            electrodes.append([ 18.0, 0.0, 0])
+            electrodes.append([ 22.0, 0.0, 0])
+            filename  = os.path.join(TEST_DATA_ROOT, "dip.geo")
+            meshname  = os.path.join(TEST_DATA_ROOT, "dcRespoldip10-1.msh")
+            verbosity=3
+            gmshGeo2Msh(filename, meshname, 3, 1, verbosity)
+            dom = ReadGmsh(meshname, 3, diracTags=tags, diracPoints=electrodes)
+
             if mpirank==0: 
-                os.unlink(runName+".msh")
+                os.unlink(meshname)
         n=5
         totalApparentResVal = 130.
         primaryConductivity   =  Scalar(1/100., ContinuousFunction(dom))
@@ -296,7 +288,7 @@ class TestDCResistivityForward(unittest.TestCase):
         numElectrodes = 12
         # a=(.8*extents[0])/numElectrodes
         a=4
-        midPoint = [0.5*extents[0],0.5*extents[1] - 2]
+        midPoint = [0,0]
         directionVector=[1.,0.]
         poldips=PoleDipoleSurvey(dom, primaryConductivity,
                 secondaryConductivity, current, a,n, midPoint,
@@ -308,7 +300,7 @@ class TestDCResistivityForward(unittest.TestCase):
         for i in totalApparentRes:
             for j in i:
                 res_a = abs(j-totalApparentResVal)
-                res_b = 0.075 * totalApparentResVal
+                res_b = 0.1 * totalApparentResVal
                 self.assertLess(res_a, res_b, "result of %g greater than tolerance of %g"%(res_a, res_b))
 
 ################################
diff --git a/downunder/test/python/run_domainbuilder.py b/downunder/test/python/run_domainbuilder.py
index 0d69068..f941bb2 100644
--- a/downunder/test/python/run_domainbuilder.py
+++ b/downunder/test/python/run_domainbuilder.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -14,6 +13,8 @@ from __future__ import print_function
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/downunder/test/python/run_forward.py b/downunder/test/python/run_forward.py
index b1a1ee3..18e6f32 100644
--- a/downunder/test/python/run_forward.py
+++ b/downunder/test/python/run_forward.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -14,6 +13,8 @@ from __future__ import print_function
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/downunder/test/python/run_gravity.py b/downunder/test/python/run_gravity.py
index 911ff25..ff80ac2 100644
--- a/downunder/test/python/run_gravity.py
+++ b/downunder/test/python/run_gravity.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -14,6 +13,8 @@ from __future__ import print_function
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 """2D gravity inversion example using synthetic data"""
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
diff --git a/downunder/test/python/run_inversion_gravmag_2d.py b/downunder/test/python/run_inversion_gravmag_2d.py
index 89d6391..74f0b85 100644
--- a/downunder/test/python/run_inversion_gravmag_2d.py
+++ b/downunder/test/python/run_inversion_gravmag_2d.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -14,6 +13,8 @@ from __future__ import print_function
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 """2D magnetic/gravity joint inversion example using synthetic data"""
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
diff --git a/downunder/test/python/run_inversioncostfunction.py b/downunder/test/python/run_inversioncostfunction.py
index 0ddc267..a875cbd 100644
--- a/downunder/test/python/run_inversioncostfunction.py
+++ b/downunder/test/python/run_inversioncostfunction.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2012-2015 by The University of Queensland
@@ -14,6 +13,8 @@ from __future__ import print_function
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2012-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/downunder/test/python/run_magnetic.py b/downunder/test/python/run_magnetic.py
index ff1ec72..fe2d2e5 100644
--- a/downunder/test/python/run_magnetic.py
+++ b/downunder/test/python/run_magnetic.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -16,6 +15,8 @@ from __future__ import print_function
 
 """2D magnetic inversion example using synthetic data"""
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/downunder/test/python/run_mappings.py b/downunder/test/python/run_mappings.py
index 22b89f9..466aa0f 100644
--- a/downunder/test/python/run_mappings.py
+++ b/downunder/test/python/run_mappings.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2012-2015 by The University of Queensland
@@ -14,6 +13,8 @@ from __future__ import print_function
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2012-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/downunder/test/python/run_minimizers.py b/downunder/test/python/run_minimizers.py
index aee9a2d..f592af8 100644
--- a/downunder/test/python/run_minimizers.py
+++ b/downunder/test/python/run_minimizers.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2012-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -79,7 +81,7 @@ class TestMinimizerLBFGS(unittest.TestCase):
 
     def test_callback(self):
         n=[0]
-        def callback(k, x, fg, gf):
+        def callback(**args):
             n[0]=n[0]+1
         self.minimizer.setCallback(callback)
         self.minimizer.setTolerance(1e-8)
@@ -115,7 +117,7 @@ class TestMinimizerBFGS(unittest.TestCase):
 
     def test_callback(self):
         n=[0]
-        def callback(k, x, fg, gf):
+        def callback(**args):
             n[0]=n[0]+1
         self.minimizer.setCallback(callback)
         self.minimizer.setTolerance(1e-10)
@@ -150,7 +152,7 @@ class TestMinimizerNLCG(unittest.TestCase):
 
     def test_callback(self):
         n=[0]
-        def callback(k, x, fg, gf):
+        def callback(**args):
             n[0]=n[0]+1
         self.minimizer.setCallback(callback)
         self.minimizer.setTolerance(1e-10)
diff --git a/downunder/test/python/run_regularization.py b/downunder/test/python/run_regularization.py
index 0da1694..811f047 100644
--- a/downunder/test/python/run_regularization.py
+++ b/downunder/test/python/run_regularization.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2012-2015 by The University of Queensland
@@ -14,6 +13,8 @@ from __future__ import print_function
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2012-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/downunder/test/python/run_seismic.py b/downunder/test/python/run_seismic.py
index 5785ed6..1d6493c 100644
--- a/downunder/test/python/run_seismic.py
+++ b/downunder/test/python/run_seismic.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -14,6 +13,8 @@ from __future__ import print_function
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/benchmarks/dudleybench.py b/dudley/benchmarks/dudleybench.py
index cebff36..2998c59 100644
--- a/dudley/benchmarks/dudleybench.py
+++ b/dudley/benchmarks/dudleybench.py
@@ -13,7 +13,8 @@
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
-from __future__ import print_function
+
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/dudley/benchmarks/runbenchmark.py b/dudley/benchmarks/runbenchmark.py
index 112403c..e136676 100755
--- a/dudley/benchmarks/runbenchmark.py
+++ b/dudley/benchmarks/runbenchmark.py
@@ -13,7 +13,8 @@
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
-from __future__ import print_function
+
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/dudley/py_src/__init__.py b/dudley/py_src/__init__.py
index 5f5152b..fb38930 100644
--- a/dudley/py_src/__init__.py
+++ b/dudley/py_src/__init__.py
@@ -17,6 +17,8 @@
 """A domain meshed with triangles or tetrahedra only. Imports submodules into its namespace
 """
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -24,10 +26,10 @@ __license__="""Licensed under the Open Software License version 3.0
 http://www.opensource.org/licenses/osl-3.0.php"""
 __url__="https://launchpad.net/escript-finley"
 
-import esys.escript		# This is just to ensure required libraries are loaded
-import esys.pasowrap	#if you don't import this, you won't be able to see methods not in AbstractSystemmatrix
+import esys.escript     # This is just to ensure required libraries are loaded
+import esys.pasowrap    #if you don't import this, you won't be able to see methods not in AbstractSystemmatrix
 from .dudleycpp import *
 from .factorywrappers import *
 from .readers import *
 
-__nodocorecursion=['dudleycpp', 'readers', 'factorywrappers']
\ No newline at end of file
+__nodocorecursion=['dudleycpp', 'readers', 'factorywrappers']
diff --git a/dudley/py_src/factorywrappers.py b/dudley/py_src/factorywrappers.py
index 0ec2adc..47a7cde 100644
--- a/dudley/py_src/factorywrappers.py
+++ b/dudley/py_src/factorywrappers.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2014-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -35,7 +37,6 @@ def Rectangle(n0=1, n1=1, order=1, l0=1.0, l1=1.0, periodic0=False, periodic1=Fa
     args=[n0, n1, order, l0, l1, periodic0, periodic1, integrationOrder, 
       reducedIntegrationOrder, faceon, useFullElementOrder, optimize];
     if 'escriptworld' in kwargs:
-      print (kwargs)
       args+=[kwargs['escriptworld']]
     else:
       args+=[None]
diff --git a/dudley/py_src/readers.py b/dudley/py_src/readers.py
index eb3f772..76f18a7 100644
--- a/dudley/py_src/readers.py
+++ b/dudley/py_src/readers.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/src/CPPAdapter/MeshAdapter.cpp b/dudley/src/CPPAdapter/MeshAdapter.cpp
index 3d858dd..60c943f 100644
--- a/dudley/src/CPPAdapter/MeshAdapter.cpp
+++ b/dudley/src/CPPAdapter/MeshAdapter.cpp
@@ -1701,7 +1701,7 @@ bool MeshAdapter::operator!=(const AbstractDomain& other) const
    return !(operator==(other));
 }
 
-int MeshAdapter::getSystemMatrixTypeId(const bp::object& options) const
+int MeshAdapter::getSystemMatrixTypeId(const boost::python::object& options) const
 {
     const escript::SolverBuddy& sb = bp::extract<escript::SolverBuddy>(options);
 
@@ -2027,7 +2027,8 @@ bool MeshAdapter::supportsContactElements() const
 }
 
 escript::Data MeshAdapter::randomFill(const escript::DataTypes::ShapeType& shape,
-       const escript::FunctionSpace& what, long seed, const bp::tuple& filter) const
+       const escript::FunctionSpace& what, long seed,
+       const boost::python::tuple& filter) const
 {
     Data towipe(0, shape, what, true);
     // since we just made this object, no sharing is possible and we don't need to check for
diff --git a/dudley/src/CPPAdapter/MeshAdapter.h b/dudley/src/CPPAdapter/MeshAdapter.h
index b662922..399ec3f 100644
--- a/dudley/src/CPPAdapter/MeshAdapter.h
+++ b/dudley/src/CPPAdapter/MeshAdapter.h
@@ -477,12 +477,11 @@ class MeshAdapter : public escript::AbstractContinuousDomain
 
   /**
      \brief
-     return the identifier of the matrix type to be used for the global stiffness matrix when a particular solver, package, perconditioner,
+     return the identifier of the matrix type to be used for the global
+     stiffness matrix when a particular solver, package, preconditioner,
      and symmetric matrix is used.
-     \param solver 
-     \param preconditioner
-     \param package
-     \param symmetry 
+     
+     \param options a SolverBuddy instance with the desired options set
   */
   DUDLEY_DLL_API
   virtual int getSystemMatrixTypeId(const boost::python::object& options) const;
diff --git a/dudley/src/CPPAdapter/dudleycpp.cpp b/dudley/src/CPPAdapter/dudleycpp.cpp
index bfa2962..9bf9f9c 100644
--- a/dudley/src/CPPAdapter/dudleycpp.cpp
+++ b/dudley/src/CPPAdapter/dudleycpp.cpp
@@ -106,16 +106,24 @@ BOOST_PYTHON_MODULE(dudleycpp)
 */      
 "Creates a tetrahedral mesh by subdividing n0 x n1 x n2 rectangular elements over the brick [0,l0] x [0,l1] x [0,l2]."
 "We accept floating point values for n0, n1 only to ease transition of scripts to python3 when the time comes."
-"\n\n:param n0:\n:type n0:\n:param n1:\n:type n1:\n:param n2:\n:type n2:\n"
+,"Creates a rectangular mesh with n0 x n1 x n2 elements over the brick [0,l0] x [0,l1] x [0,l2]."
+"\n\n:param n0: number of elements in direction 0\n:type n0: ``int``\n:param n1: number of elements in direction 1\n:type n1: ``int``\n"
+":param n2:number of elements in direction 2\n:type n2: ``int``\n"
 ":param order: =1, =-1 or =2 gives the order of shape function. If -1 macro elements of order 1 are used.\n"
-":param l0: length of side 0\n:param l1:\n:param l2:\n"
+":param l0: length of side 0\n"
+":type  l0: ``float``\n"
+":param l1: length of side 1\n"
+":type  l1: ``float``\n"
+":param l2: length of side 2\n"
+":type  l2: ``float``\n"
+":param periodic0: whether or not boundary conditions are periodic in direction 0\n:type periodic0: ``bool``\n"
+":param periodic1: whether or not boundary conditions are periodic in direction 1\n:type periodic1: ``bool``\n"
+":param periodic2: whether or not boundary conditions are periodic in direction 2\n:type periodic2: ``bool``\n"
 ":param integrationOrder: order of the quadrature scheme. If integrationOrder<0 the integration order is selected independently.\n"
 ":param reducedIntegrationOrder: order of the quadrature scheme. If reducedIntegrationOrder<0 the integration order is selected independently.\n"
-":param useElementsOnFace:  whether or not to use elements on face\n"
-":type useElementsOnFace: ``int``"
-":param periodic0:  whether or not boundary conditions are periodic\n"
-":param periodic1:\n:param periodic2:\n"
-":param useFullElementOrder:\n:param optimize:\n"
+":param useElementsOnFace:  Not used\n"
+":type useElementsOnFace: ``int``\n"
+":param useFullElementOrder: Whether or not to use Hex27 elements\n"":type useFullElementOrder: ``bool``\n"
 ":param optimize: Enable optimisation of node labels\n:type optimize: ``bool``"
 );
 
@@ -141,7 +149,7 @@ BOOST_PYTHON_MODULE(dudleycpp)
 ":type useElementsOnFace: ``int``"
 ":param periodic0:  whether or not boundary conditions are periodic\n"
 ":param periodic1:\n"
-":param useFullElementOrder:\n:param optimize:\n"
+":param useFullElementOrder: Not used: ``bool``\n"
 ":param useMacroElements: Enable the usage of first order macro elements.\n:type useMacroElements: ``bool``\n"
 ":param optimize: Enable optimisation of node labels\n:type optimize: ``bool``"
 );
diff --git a/dudley/src/generateReferenceElementList.py b/dudley/src/generateReferenceElementList.py
index a8232c7..5d1fe2d 100644
--- a/dudley/src/generateReferenceElementList.py
+++ b/dudley/src/generateReferenceElementList.py
@@ -11,7 +11,12 @@
 # Development 2012-2013 by School of Earth Sciences
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
-#******************************************************/
+#******************************************************
+
+
+from __future__ import print_function, division
+
+
 #
 #  this code generates the  Dudley_ReferenceElement_InfoList in ReferenceElements.c
 #
diff --git a/dudley/test/python/FCT_benchmark.py b/dudley/test/python/FCT_benchmark.py
index 2ed02fe..5d73a20 100755
--- a/dudley/test/python/FCT_benchmark.py
+++ b/dudley/test/python/FCT_benchmark.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/FCT_test1.py b/dudley/test/python/FCT_test1.py
index 3ce968b..ea1be79 100644
--- a/dudley/test/python/FCT_test1.py
+++ b/dudley/test/python/FCT_test1.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -97,7 +99,7 @@ fc.setInitialSolution(u0)
 t=T0
 print("QUALITY FCT: time = %s pi"%(t/pi),inf(u0),sup(u0),integrate(u0))
 while t<T_END:
-    print("time step t=",t+dt)	
+    print("time step t=",t+dt)
     u=fc.solve(dt, verbose=True)
     print("QUALITY FCT: time = %s pi"%(t+dt/pi),inf(u),sup(u),integrate(u))
     if TEST_SUPG:
diff --git a/dudley/test/python/FCT_test2.py b/dudley/test/python/FCT_test2.py
index e2a4914..f48ba30 100644
--- a/dudley/test/python/FCT_test2.py
+++ b/dudley/test/python/FCT_test2.py
@@ -13,7 +13,9 @@
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
-from __future__ import print_function
+
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -122,8 +124,8 @@ saveVTK("u.%s.vtu"%c,u=u0)
 fc.setInitialSolution(u0)
 t=T0
 while t<T_END:
-    print("time step t=",t+dt)	
-    u=fc.solve(dt)	
+    print("time step t=",t+dt)
+    u=fc.solve(dt)
     if TEST_SUPG:
         #========== supg tests ================
         nn=max(ceil(dt/dt_supg),1.)
diff --git a/dudley/test/python/OutTest.py b/dudley/test/python/OutTest.py
index 344d685..97759ac 100644
--- a/dudley/test/python/OutTest.py
+++ b/dudley/test/python/OutTest.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/PoissonSolverTest.py b/dudley/test/python/PoissonSolverTest.py
index 535e4a9..39d72f4 100644
--- a/dudley/test/python/PoissonSolverTest.py
+++ b/dudley/test/python/PoissonSolverTest.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/RT2D.py b/dudley/test/python/RT2D.py
index f803c97..45fb39f 100644
--- a/dudley/test/python/RT2D.py
+++ b/dudley/test/python/RT2D.py
@@ -1,3 +1,28 @@
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+from __future__ import print_function, division
+
+__copyright__="""Copyright (c) 2003-2015 by The University of Queensland
+http://www.uq.edu.au
+Primary Business: Queensland, Australia"""
+__license__="""Licensed under the Open Software License version 3.0
+http://www.opensource.org/licenses/osl-3.0.php"""
+__url__="https://launchpad.net/escript-finley"
+
 from esys.escript import *
 from esys.escript.models import StokesProblemCartesian
 from esys.dudley import Rectangle
@@ -5,10 +30,10 @@ from esys.weipa import saveVTK
 
 
 #physical properties
-rho1 = 1000		#fluid density on bottom
-rho2 = 1010		#fluid density on top
-eta1 = 100.0		#fluid viscosity on bottom
-eta2 = 100.0		#fluid viscosity on top
+rho1 = 1000             #fluid density on bottom
+rho2 = 1010             #fluid density on top
+eta1 = 100.0            #fluid viscosity on bottom
+eta2 = 100.0            #fluid viscosity on top
 g=10.0
 
 #solver settings
diff --git a/dudley/test/python/RecTest.py b/dudley/test/python/RecTest.py
index 5833aa7..389fe00 100644
--- a/dudley/test/python/RecTest.py
+++ b/dudley/test/python/RecTest.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/axisymm-splitB.py b/dudley/test/python/axisymm-splitB.py
index 5108133..4cb4615 100755
--- a/dudley/test/python/axisymm-splitB.py
+++ b/dudley/test/python/axisymm-splitB.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -90,7 +92,7 @@ while istep < nstep:
     print("time step :",istep," t = ",t)
     r=Function(dom).getX()[0]
     r_b=FunctionOnBoundary(dom).getX()[0]
-    print("	volume : ",integrate(r))
+    print("volume : ",integrate(r))
     #
     #  step 1:
     #
@@ -98,7 +100,7 @@ while istep < nstep:
     n_d=dom.getNormal()
     t_d=matrixmult(numpy.array([[0.,-1.],[1.,0]]),n_d)
     sigma_d=(sign(inner(t_d,U))*alpha_w*t_d-n_d)*Pen*clip(inner(n_d,U),0.)
-    print("	sigma_d =",inf(sigma_d),sup(sigma_d))
+    print("sigma_d =",inf(sigma_d),sup(sigma_d))
 
     momentumStep1.setValue(D=r*ro*kronecker(dom),
                            Y=r*ro*U+dt*r*[0.,-ro*g], 
@@ -134,8 +136,8 @@ while istep < nstep:
     #
     p+=dp         
     U=U_new
-    print("	U:",inf(U),sup(U))
-    print("	P:",inf(p),sup(p)) 
+    print("U:",inf(U),sup(U))
+    print("P:",inf(p),sup(p)) 
 
 
     p_pos=clip(p,small)
@@ -144,7 +146,7 @@ while istep < nstep:
     gamma=sqrt(2*((gg[0,0]-vol/3)**2+(gg[1,1]-vol/3)**2+(U[0]/r-vol/3)**2+(gg[1,0]+gg[0,1])**2/2))
     m=whereNegative(eta*gamma-alpha*p_pos) 
     eta_d=m*eta+(1.-m)*alpha*p_pos/(gamma+small)  
-    print("	viscosity =",inf(eta_d),sup(eta_d)) 
+    print("viscosity =",inf(eta_d),sup(eta_d)) 
     dev_stress=eta_d*(symmetric(gg)-2./3.*vol*kronecker(dom))
     #
     # step size control:
@@ -153,7 +155,7 @@ while istep < nstep:
     dt1=inf(dom.getSize()/(length(U)+small))
     dt2=inf(0.5*ro*(len**2)/eta_d)
     dt=dt1*dt2/(dt1+dt2)
-    print("	new step size = ",dt)
+    print("new step size = ",dt)
     #
     #  update geometry
     #
diff --git a/dudley/test/python/blocktest.py b/dudley/test/python/blocktest.py
index 92652bb..ea56272 100755
--- a/dudley/test/python/blocktest.py
+++ b/dudley/test/python/blocktest.py
@@ -1,15 +1,4 @@
-#
-# this script is testing block solvers for PDEs
-#
-#
-#    - u_{j,ii} + b*u_j+ a*sum_{k<>j}  (u_j-u_k) = F_j
-#
-#  where a controls the degree of coupling and b the degree of diagonal dominance.
-#  a and b may have any value.
-#  
-#  The domain needs to be a unit square or cube with any type of mesh
-#
-#
+
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -24,6 +13,20 @@
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+#
+# this script is testing block solvers for PDEs
+#
+#
+#    - u_{j,ii} + b*u_j+ a*sum_{k<>j}  (u_j-u_k) = F_j
+#
+#  where a controls the degree of coupling and b the degree of diagonal dominance.
+#  a and b may have any value.
+#  
+#  The domain needs to be a unit square or cube with any type of mesh
+#
+##############################################################################
+
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/dudley/test/python/brick.py b/dudley/test/python/brick.py
index 7430017..717e9d8 100644
--- a/dudley/test/python/brick.py
+++ b/dudley/test/python/brick.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/convection.py b/dudley/test/python/convection.py
index 729d736..92a6112 100644
--- a/dudley/test/python/convection.py
+++ b/dudley/test/python/convection.py
@@ -18,6 +18,9 @@ this is a convection simulation over a domain [0,L] X [0,L] x [0,H]
 It is solved in dimensionless form
 
 """
+
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/fixme_run_generators.py b/dudley/test/python/fixme_run_generators.py
index e91571a..fca2667 100644
--- a/dudley/test/python/fixme_run_generators.py
+++ b/dudley/test/python/fixme_run_generators.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/generate_dumps.py b/dudley/test/python/generate_dumps.py
index e425f70..08189f3 100644
--- a/dudley/test/python/generate_dumps.py
+++ b/dudley/test/python/generate_dumps.py
@@ -13,6 +13,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/generate_meshes.py b/dudley/test/python/generate_meshes.py
index 837a054..54ef273 100644
--- a/dudley/test/python/generate_meshes.py
+++ b/dudley/test/python/generate_meshes.py
@@ -13,6 +13,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/linearElastic.py b/dudley/test/python/linearElastic.py
index 4adba87..b9347df 100755
--- a/dudley/test/python/linearElastic.py
+++ b/dudley/test/python/linearElastic.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/rayleigh_taylor_instabilty.py b/dudley/test/python/rayleigh_taylor_instabilty.py
index 2ec823a..c3b33d4 100644
--- a/dudley/test/python/rayleigh_taylor_instabilty.py
+++ b/dudley/test/python/rayleigh_taylor_instabilty.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/rectangle.py b/dudley/test/python/rectangle.py
index 296aa02..c1ee5c2 100644
--- a/dudley/test/python/rectangle.py
+++ b/dudley/test/python/rectangle.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/run_escriptOnDudley.py b/dudley/test/python/run_escriptOnDudley.py
index 98d19f7..924a6da 100644
--- a/dudley/test/python/run_escriptOnDudley.py
+++ b/dudley/test/python/run_escriptOnDudley.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/run_inputOutput.py b/dudley/test/python/run_inputOutput.py
index b6db161..7bd6763 100644
--- a/dudley/test/python/run_inputOutput.py
+++ b/dudley/test/python/run_inputOutput.py
@@ -14,7 +14,7 @@
 #
 ##############################################################################
 
-from __future__ import print_function
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/dudley/test/python/run_linearPDEsOnDudley1.py b/dudley/test/python/run_linearPDEsOnDudley1.py
index fb4da76..d30e760 100644
--- a/dudley/test/python/run_linearPDEsOnDudley1.py
+++ b/dudley/test/python/run_linearPDEsOnDudley1.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
diff --git a/dudley/test/python/run_linearPDEsOnDudley2.py b/dudley/test/python/run_linearPDEsOnDudley2.py
index 059ed19..1ad322e 100644
--- a/dudley/test/python/run_linearPDEsOnDudley2.py
+++ b/dudley/test/python/run_linearPDEsOnDudley2.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
diff --git a/dudley/test/python/run_models.py b/dudley/test/python/run_models.py
index e7fd194..5456a0b 100644
--- a/dudley/test/python/run_models.py
+++ b/dudley/test/python/run_models.py
@@ -14,7 +14,7 @@
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
-from __future__ import print_function
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
@@ -29,377 +29,18 @@ from esys.escriptcore.testing import *
 VERBOSE = False
 
 from esys.escript import *
-from esys.escript.models import StokesProblemCartesian, PowerLaw, IncompressibleIsotropicFlowCartesian, FaultSystem, DarcyFlow
-from esys.escript.models import Mountains
+from esys.escript.models import PowerLaw, FaultSystem, DarcyFlow
 from esys.dudley import Rectangle, Brick
 
 from math import pi
 import numpy, os, sys, tempfile
-#==============================================================================
+#======================================================================
 try:
      DUDLEY_WORKDIR=os.environ['DUDLEY_WORKDIR']
 except KeyError:
      DUDLEY_WORKDIR='.'
 
-#==============================================================================
- at unittest.skip("Test not previously tested")
-class Test_StokesProblemCartesian2D(unittest.TestCase):
-   def setUp(self):
-       NE=6
-       self.TOL=1e-3
-       self.domain=Rectangle(NE,NE,order=-1)
-   def tearDown(self):
-       del self.domain
-   def test_PCG_P_0(self):
-       ETA=1.
-       P1=0.
-
-       x=self.domain.getX()
-       F=-P1*x[1]*[1.,0]+(2*ETA-P1*x[0])*[0.,1.]
-       mask=whereZero(x[0])    * [1.,1.] \
-              +whereZero(x[0]-1)  * [1.,1.] \
-              +whereZero(x[1])    * [1.,0.] \
-              +whereZero(x[1]-1)  * [1.,1.]
-       
-       sp=StokesProblemCartesian(self.domain)
-       
-       sp.initialize(f=F,fixed_u_mask=mask,eta=ETA)
-       u0=(1-x[0])*x[0]*[0.,1.]
-       p0=Scalar(-P1,ReducedSolution(self.domain))
-       sp.setTolerance(self.TOL)
-       u,p=sp.solve(u0*mask,p0,verbose=VERBOSE,max_iter=100,usePCG=True)
-       
-       error_v0=Lsup(u[0]-u0[0])
-       error_v1=Lsup(u[1]-u0[1])/0.25
-       error_p=Lsup(p+P1*x[0]*x[1])
-       self.assertTrue(error_p<10*self.TOL, "pressure error too large.")
-       self.assertTrue(error_v0<10*self.TOL, "0-velocity error too large.")
-       self.assertTrue(error_v1<10*self.TOL, "1-velocity error too large.")
-
-   def test_PCG_P_small(self):
-       ETA=1.
-       P1=1.
-
-       x=self.domain.getX()
-       F=-P1*x[1]*[1.,0]+(2*ETA-P1*x[0])*[0.,1.]
-       mask=whereZero(x[0])    * [1.,1.] \
-              +whereZero(x[0]-1)  * [1.,1.] \
-              +whereZero(x[1])    * [1.,0.] \
-              +whereZero(x[1]-1)  * [1.,1.]
-       
-       sp=StokesProblemCartesian(self.domain)
-       
-       sp.initialize(f=F,fixed_u_mask=mask,eta=ETA)
-       u0=(1-x[0])*x[0]*[0.,1.]
-       p0=Scalar(-P1,ReducedSolution(self.domain))
-       sp.setTolerance(self.TOL)
-       u,p=sp.solve(u0,p0, verbose=VERBOSE,max_iter=100,usePCG=True)
-       error_v0=Lsup(u[0]-u0[0])
-       error_v1=Lsup(u[1]-u0[1])/0.25
-       error_p=Lsup(P1*x[0]*x[1]+p)
-       self.assertTrue(error_v0<10*self.TOL, "0-velocity error too large.")
-       self.assertTrue(error_v1<10*self.TOL, "1-velocity error too large.")
-       self.assertTrue(error_p<10*self.TOL, "pressure error too large.")
-
-   def test_PCG_P_large(self):
-       ETA=1.
-       P1=1000.
-
-       x=self.domain.getX()
-       F=-P1*x[1]*[1.,0]+(2*ETA-P1*x[0])*[0.,1.]
-       mask=whereZero(x[0])    * [1.,1.] \
-              +whereZero(x[0]-1)  * [1.,1.] \
-              +whereZero(x[1])    * [1.,0.] \
-              +whereZero(x[1]-1)  * [1.,1.]
-       
-       sp=StokesProblemCartesian(self.domain)
-       
-       sp.initialize(f=F,fixed_u_mask=mask,eta=ETA)
-       u0=(1-x[0])*x[0]*[0.,1.]
-       p0=Scalar(-P1,ReducedSolution(self.domain))
-       sp.setTolerance(self.TOL)
-       u,p=sp.solve(u0,p0, verbose=VERBOSE,max_iter=100,usePCG=True)
-       # u,p=sp.solve(u0,p0, verbose=VERBOSE,max_iter=100,usePCG=True)
-       
-       error_v0=Lsup(u[0]-u0[0])
-       error_v1=Lsup(u[1]-u0[1])/0.25
-       error_p=Lsup(P1*x[0]*x[1]+p)/P1
-       self.assertTrue(error_p<10*self.TOL, "pressure error too large.")
-       self.assertTrue(error_v0<10*self.TOL, "0-velocity error too large.")
-       self.assertTrue(error_v1<10*self.TOL, "1-velocity error too large.")
-
-   def test_GMRES_P_0(self):
-       ETA=1.
-       P1=0.
-
-       x=self.domain.getX()
-       F=-P1*x[1]*[1.,0]+(2*ETA-P1*x[0])*[0.,1.]
-       mask=whereZero(x[0])    * [1.,1.] \
-              +whereZero(x[0]-1)  * [1.,1.] \
-              +whereZero(x[1])    * [1.,0.] \
-              +whereZero(x[1]-1)  * [1.,1.]
-       
-       sp=StokesProblemCartesian(self.domain)
-       
-       sp.initialize(f=F,fixed_u_mask=mask,eta=ETA)
-       u0=(1-x[0])*x[0]*[0.,1.]
-       p0=Scalar(-P1,ReducedSolution(self.domain))
-       sp.setTolerance(self.TOL)
-       u,p=sp.solve(u0,p0, verbose=VERBOSE,max_iter=50,usePCG=False,iter_restart=18)
-       
-       error_v0=Lsup(u[0]-u0[0])
-       error_v1=Lsup(u[1]-u0[1])/0.25
-       error_p=Lsup(P1*x[0]*x[1]+p)
-       self.assertTrue(error_p<10*self.TOL, "pressure error too large.")
-       self.assertTrue(error_v0<10*self.TOL, "0-velocity error too large.")
-       self.assertTrue(error_v1<10*self.TOL, "1-velocity error too large.")
-
-   def test_GMRES_P_small(self):
-       ETA=1.
-       P1=1.
-
-       x=self.domain.getX()
-       F=-P1*x[1]*[1.,0]+(2*ETA-P1*x[0])*[0.,1.]
-       mask=whereZero(x[0])    * [1.,1.] \
-              +whereZero(x[0]-1)  * [1.,1.] \
-              +whereZero(x[1])    * [1.,0.] \
-              +whereZero(x[1]-1)  * [1.,1.]
-       
-       sp=StokesProblemCartesian(self.domain)
-       
-       sp.initialize(f=F,fixed_u_mask=mask,eta=ETA)
-       u0=(1-x[0])*x[0]*[0.,1.]
-       p0=Scalar(-P1,ReducedSolution(self.domain))
-       sp.setTolerance(self.TOL)
-       u,p=sp.solve(u0,p0, verbose=VERBOSE,max_iter=20,usePCG=False)
-       
-       error_v0=Lsup(u[0]-u0[0])
-       error_v1=Lsup(u[1]-u0[1])/0.25
-       error_p=Lsup(P1*x[0]*x[1]+p)
-
-       self.assertTrue(error_p<10*self.TOL, "pressure error too large.")
-       self.assertTrue(error_v0<10*self.TOL, "0-velocity error too large.")
-       self.assertTrue(error_v1<10*self.TOL, "1-velocity error too large.")
-
-   def test_GMRES_P_large(self):
-       ETA=1.
-       P1=1000.
-
-       x=self.domain.getX()
-       F=-P1*x[1]*[1.,0]+(2*ETA-P1*x[0])*[0.,1.]
-       mask=whereZero(x[0])    * [1.,1.] \
-              +whereZero(x[0]-1)  * [1.,1.] \
-              +whereZero(x[1])    * [1.,0.] \
-              +whereZero(x[1]-1)  * [1.,1.]
-       
-       sp=StokesProblemCartesian(self.domain)
-       
-       sp.initialize(f=F,fixed_u_mask=mask,eta=ETA)
-       u0=(1-x[0])*x[0]*[0.,1.]
-       p0=Scalar(-P1,ReducedSolution(self.domain))
-       sp.setTolerance(self.TOL)
-       u,p=sp.solve(u0,p0, verbose=VERBOSE,max_iter=100,usePCG=False)
-       
-       error_v0=Lsup(u[0]-u0[0])
-       error_v1=Lsup(u[1]-u0[1])/0.25
-       error_p=Lsup(P1*x[0]*x[1]+p)/P1
-       self.assertTrue(error_p<10*self.TOL, "pressure error too large.")
-       self.assertTrue(error_v0<10*self.TOL, "0-velocity error too large.")
-       self.assertTrue(error_v1<10*self.TOL, "1-velocity error too large.")
-#==============================================================================
- at unittest.skip("Test not previously tested")
-class Test_StokesProblemCartesian3D(unittest.TestCase):
-   def setUp(self):
-       NE=6
-       self.TOL=1e-4
-       self.domain=Brick(NE,NE,NE,order=-1)
-   def tearDown(self):
-       del self.domain
-   def test_PCG_P_0(self):
-       ETA=1.
-       P1=0.
-
-       x=self.domain.getX()
-       F=-P1*x[1]*x[2]*[1.,0.,0.]-P1*x[0]*x[2]*[0.,1.,0.]+(2*ETA*((1-x[0])*x[0]+(1-x[1])*x[1])-P1*x[0]*x[1])*[0.,0.,1.]
-       x=self.domain.getX()
-       mask=whereZero(x[0])    * [1.,1.,1.] \
-              +whereZero(x[0]-1)  * [1.,1.,1.] \
-              +whereZero(x[1])    * [1.,0.,1.] \
-              +whereZero(x[1]-1)  * [1.,1.,1.] \
-              +whereZero(x[2])    * [1.,1.,0.] \
-              +whereZero(x[2]-1)  * [1.,1.,1.]
-       
-       
-       sp=StokesProblemCartesian(self.domain)
-       
-       sp.initialize(f=F,fixed_u_mask=mask,eta=ETA)
-       u0=(1-x[0])*x[0]*(1-x[1])*x[1]*[0.,0.,1.]
-       p0=Scalar(-P1,ReducedSolution(self.domain))
-       sp.setTolerance(self.TOL)
-       u,p=sp.solve(u0,p0, verbose=VERBOSE ,max_iter=100,usePCG=True)
-       
-       error_v0=Lsup(u[0]-u0[0])
-       error_v1=Lsup(u[1]-u0[1])
-       error_v2=Lsup(u[2]-u0[2])/0.25**2
-       error_p=Lsup(P1*x[0]*x[1]*x[2]+p)
-       self.assertTrue(error_p<10*self.TOL, "pressure error too large.")
-       self.assertTrue(error_v0<10*self.TOL, "0-velocity error too large.")
-       self.assertTrue(error_v1<10*self.TOL, "1-velocity error too large.")
-       self.assertTrue(error_v2<10*self.TOL, "2-velocity error too large.")
-
-   def test_PCG_P_small(self):
-       ETA=1.
-       P1=1.
-
-       x=self.domain.getX()
-       F=-P1*x[1]*x[2]*[1.,0.,0.]-P1*x[0]*x[2]*[0.,1.,0.]+(2*ETA*((1-x[0])*x[0]+(1-x[1])*x[1])-P1*x[0]*x[1])*[0.,0.,1.]
-       mask=whereZero(x[0])    * [1.,1.,1.] \
-              +whereZero(x[0]-1)  * [1.,1.,1.] \
-              +whereZero(x[1])    * [1.,0.,1.] \
-              +whereZero(x[1]-1)  * [1.,1.,1.] \
-              +whereZero(x[2])    * [1.,1.,0.] \
-              +whereZero(x[2]-1)  * [1.,1.,1.]
-       
-       
-       sp=StokesProblemCartesian(self.domain)
-       
-       sp.initialize(f=F,fixed_u_mask=mask,eta=ETA)
-       u0=(1-x[0])*x[0]*(1-x[1])*x[1]*[0.,0.,1.]
-       p0=Scalar(-P1,ReducedSolution(self.domain))
-       sp.setTolerance(self.TOL)
-       u,p=sp.solve(u0,p0, verbose=VERBOSE ,max_iter=100,usePCG=True)
-       error_v0=Lsup(u[0]-u0[0])
-       error_v1=Lsup(u[1]-u0[1])
-       error_v2=Lsup(u[2]-u0[2])/0.25**2
-       error_p=Lsup(P1*x[0]*x[1]*x[2]+p)
-       self.assertTrue(error_p<10*self.TOL, "pressure error too large.")
-       self.assertTrue(error_v0<10*self.TOL, "0-velocity error too large.")
-       self.assertTrue(error_v1<10*self.TOL, "1-velocity error too large.")
-       self.assertTrue(error_v2<10*self.TOL, "2-velocity error too large.")
-
-   def test_PCG_P_large(self):
-       ETA=1.
-       P1=1000.
-
-       x=self.domain.getX()
-       F=-P1*x[1]*x[2]*[1.,0.,0.]-P1*x[0]*x[2]*[0.,1.,0.]+(2*ETA*((1-x[0])*x[0]+(1-x[1])*x[1])-P1*x[0]*x[1])*[0.,0.,1.]
-       mask=whereZero(x[0])    * [1.,1.,1.] \
-              +whereZero(x[0]-1)  * [1.,1.,1.] \
-              +whereZero(x[1])    * [1.,0.,1.] \
-              +whereZero(x[1]-1)  * [1.,1.,1.] \
-              +whereZero(x[2])    * [1.,1.,0.] \
-              +whereZero(x[2]-1)  * [1.,1.,1.]
-       
-       
-       sp=StokesProblemCartesian(self.domain)
-       
-       sp.initialize(f=F,fixed_u_mask=mask,eta=ETA)
-       u0=(1-x[0])*x[0]*(1-x[1])*x[1]*[0.,0.,1.]
-       p0=Scalar(-P1,ReducedSolution(self.domain))
-       sp.setTolerance(self.TOL)
-       u,p=sp.solve(u0,p0, verbose=VERBOSE ,max_iter=100,usePCG=True)
-       
-       error_v0=Lsup(u[0]-u0[0])
-       error_v1=Lsup(u[1]-u0[1])
-       error_v2=Lsup(u[2]-u0[2])/0.25**2
-       error_p=Lsup(P1*x[0]*x[1]*x[2]+p)/P1 
-       self.assertTrue(error_p<10*self.TOL, "pressure error too large.")
-       self.assertTrue(error_v0<10*self.TOL, "0-velocity error too large.")
-       self.assertTrue(error_v1<10*self.TOL, "1-velocity error too large.")
-       self.assertTrue(error_v2<10*self.TOL, "2-velocity error too large.")
-
-   def test_GMRES_P_0(self):
-       ETA=1.
-       P1=0.
-
-       x=self.domain.getX()
-       F=-P1*x[1]*x[2]*[1.,0.,0.]-P1*x[0]*x[2]*[0.,1.,0.]+(2*ETA*((1-x[0])*x[0]+(1-x[1])*x[1])-P1*x[0]*x[1])*[0.,0.,1.]
-       x=self.domain.getX()
-       mask=whereZero(x[0])    * [1.,1.,1.] \
-              +whereZero(x[0]-1)  * [1.,1.,1.] \
-              +whereZero(x[1])    * [1.,1.,1.] \
-              +whereZero(x[1]-1)  * [1.,1.,1.] \
-              +whereZero(x[2])    * [1.,1.,0.] \
-              +whereZero(x[2]-1)  * [1.,1.,1.]
-       
-       
-       sp=StokesProblemCartesian(self.domain)
-       
-       sp.initialize(f=F,fixed_u_mask=mask,eta=ETA)
-       u0=(1-x[0])*x[0]*(1-x[1])*x[1]*[0.,0.,1.]
-       p0=Scalar(-P1,ReducedSolution(self.domain))
-       sp.setTolerance(self.TOL)
-       u,p=sp.solve(u0,p0, verbose=VERBOSE,max_iter=100,usePCG=False,iter_restart=20)
-       
-       error_v0=Lsup(u[0]-u0[0])
-       error_v1=Lsup(u[1]-u0[1])
-       error_v2=Lsup(u[2]-u0[2])/0.25**2
-       error_p=Lsup(P1*x[0]*x[1]*x[2]+p)
-       self.assertTrue(error_p<10*self.TOL, "pressure error too large.")
-       self.assertTrue(error_v0<10*self.TOL, "0-velocity error too large.")
-       self.assertTrue(error_v1<10*self.TOL, "1-velocity error too large.")
-       self.assertTrue(error_v2<10*self.TOL, "2-velocity error too large.")
-   def test_GMRES_P_small(self):
-       ETA=1.
-       P1=1.
-
-       x=self.domain.getX()
-       F=-P1*x[1]*x[2]*[1.,0.,0.]-P1*x[0]*x[2]*[0.,1.,0.]+(2*ETA*((1-x[0])*x[0]+(1-x[1])*x[1])-P1*x[0]*x[1])*[0.,0.,1.]
-       mask=whereZero(x[0])    * [1.,1.,1.] \
-              +whereZero(x[0]-1)  * [1.,1.,1.] \
-              +whereZero(x[1])    * [1.,1.,1.] \
-              +whereZero(x[1]-1)  * [1.,1.,1.] \
-              +whereZero(x[2])    * [1.,1.,0.] \
-              +whereZero(x[2]-1)  * [1.,1.,1.]
-       
-       
-       sp=StokesProblemCartesian(self.domain)
-       
-       sp.initialize(f=F,fixed_u_mask=mask,eta=ETA)
-       u0=(1-x[0])*x[0]*(1-x[1])*x[1]*[0.,0.,1.]
-       p0=Scalar(-P1,ReducedSolution(self.domain))
-       sp.setTolerance(self.TOL/10)
-       u,p=sp.solve(u0,p0, verbose=VERBOSE,max_iter=100,usePCG=False)
-       
-       error_v0=Lsup(u[0]-u0[0])
-       error_v1=Lsup(u[1]-u0[1])
-       error_v2=Lsup(u[2]-u0[2])/0.25**2
-       error_p=Lsup(P1*x[0]*x[1]*x[2]+p)/P1
-       self.assertTrue(error_v0<10*self.TOL, "0-velocity error too large.")
-       self.assertTrue(error_v1<10*self.TOL, "1-velocity error too large.")
-       self.assertTrue(error_v2<10*self.TOL, "2-velocity error too large.")
-       self.assertTrue(error_p<10*self.TOL, "pressure error too large.")
-   def test_GMRES_P_large(self):
-       ETA=1.
-       P1=1000.
-
-       x=self.domain.getX()
-       F=-P1*x[1]*x[2]*[1.,0.,0.]-P1*x[0]*x[2]*[0.,1.,0.]+(2*ETA*((1-x[0])*x[0]+(1-x[1])*x[1])-P1*x[0]*x[1])*[0.,0.,1.]
-       mask=whereZero(x[0])    * [1.,1.,1.] \
-              +whereZero(x[0]-1)  * [1.,1.,1.] \
-              +whereZero(x[1])    * [1.,0.,1.] \
-              +whereZero(x[1]-1)  * [1.,1.,1.] \
-              +whereZero(x[2])    * [1.,1.,0.] \
-              +whereZero(x[2]-1)  * [1.,1.,1.]
-       
-       
-       sp=StokesProblemCartesian(self.domain)
-       
-       sp.initialize(f=F,fixed_u_mask=mask,eta=ETA)
-       u0=(1-x[0])*x[0]*(1-x[1])*x[1]*[0.,0.,1.]
-       p0=Scalar(-P1,ReducedSolution(self.domain))
-       sp.setTolerance(self.TOL)
-       u,p=sp.solve(u0,p0, verbose=VERBOSE ,max_iter=100,usePCG=False)
-       
-       error_v0=Lsup(u[0]-u0[0])
-       error_v1=Lsup(u[1]-u0[1])
-       error_v2=Lsup(u[2]-u0[2])/0.25**2
-       error_p=Lsup(P1*x[0]*x[1]*x[2]+p)/P1
-       self.assertTrue(error_p<10*self.TOL, "pressure error too large.")
-       self.assertTrue(error_v0<10*self.TOL, "0-velocity error too large.")
-       self.assertTrue(error_v1<10*self.TOL, "1-velocity error too large.")
-       self.assertTrue(error_v2<10*self.TOL, "2-velocity error too large.")
-#==============================================================================
+#======================================================================
 class Darcy(unittest.TestCase): #subclassing required
     # this is a simple test for the darcy flux problem
     #
diff --git a/dudley/test/python/run_nlpde2dOnDudley.py b/dudley/test/python/run_nlpde2dOnDudley.py
index a0bb44b..977056a 100644
--- a/dudley/test/python/run_nlpde2dOnDudley.py
+++ b/dudley/test/python/run_nlpde2dOnDudley.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
diff --git a/dudley/test/python/run_nlpde3dOnDudley.py b/dudley/test/python/run_nlpde3dOnDudley.py
index ab790fd..8815877 100644
--- a/dudley/test/python/run_nlpde3dOnDudley.py
+++ b/dudley/test/python/run_nlpde3dOnDudley.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
diff --git a/dudley/test/python/run_simplesolve.py b/dudley/test/python/run_simplesolve.py
index b806bbe..e8dbf1e 100644
--- a/dudley/test/python/run_simplesolve.py
+++ b/dudley/test/python/run_simplesolve.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/run_splitworldOnFinley.py b/dudley/test/python/run_splitworldOnDudley.py
similarity index 62%
copy from finley/test/python/run_splitworldOnFinley.py
copy to dudley/test/python/run_splitworldOnDudley.py
index 095f581..05c11a9 100644
--- a/finley/test/python/run_splitworldOnFinley.py
+++ b/dudley/test/python/run_splitworldOnDudley.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -24,22 +26,45 @@ __url__="https://launchpad.net/escript-finley"
 import esys.escriptcore.utestselect as unittest
 from esys.escriptcore.testing import *
 from esys.escript import *
-from esys.finley import Rectangle, Brick, ReadMesh, ReadGmsh
-from test_splitworld import Test_SplitWorld
+from esys.dudley import Rectangle, Brick, ReadMesh, ReadGmsh
+from test_splitworld import Test_SplitWorld, sw_testing
 
 
 mpisize=getMPISizeWorld()
 NE=4 # number elements, must be even
 
-class Test_SplitOnFinley(Test_SplitWorld):
+class Test_SplitOnDudley(Test_SplitWorld):
   def setUp(self):
     self.domainpars=[Rectangle, NE, NE]
     
   def tearDown(self):
     del self.domainpars
+    
+class Test_dudley_sw_2D(sw_testing):
+    def setUp(self):
+        from esys.dudley import Rectangle
+        self.domain_ctr=Rectangle
+        self.domain_vec=(6,6)
+        self.domain_dict={}
+
+    def tearDown(self):
+        del self.domain_ctr
+        del self.domain_vec
+
+
+class Test_dudley_sw_3D(sw_testing):
+    def setUp(self):
+        from esys.dudley import Brick
+        self.domain_ctr=Brick
+        self.domain_vec=(6,6,6)
+        self.domain_dict={}
+        
+    def tearDown(self):
+        del self.domain_ctr
+        del self.domain_vec
+    
 
 
 
 if __name__ == '__main__':
     run_tests(__name__, exit_on_failure=True)
-
diff --git a/dudley/test/python/run_utilOnDudley.py b/dudley/test/python/run_utilOnDudley.py
index 6a22e6e..6a66381 100644
--- a/dudley/test/python/run_utilOnDudley.py
+++ b/dudley/test/python/run_utilOnDudley.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/seismic_wave.py b/dudley/test/python/seismic_wave.py
index 7030d15..3b0f9e9 100644
--- a/dudley/test/python/seismic_wave.py
+++ b/dudley/test/python/seismic_wave.py
@@ -14,7 +14,7 @@
 #
 ##############################################################################
 
-from __future__ import print_function
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
@@ -156,7 +156,7 @@ def getDomain():
     global netotal
     
     v_p={}
-    for tag in list(rho_tab.keys()):
+    for tag in sorted(rho_tab.keys()):
        v_p[tag]=sqrt((2*mu_tab[tag]+lmbd_tab[tag])/rho_tab[tag])
     v_p_ref=min(v_p.values())
     print("velocities: bedrock = %s, sand = %s, water =%s, absorber =%s, reference =%s"%(v_p[bedrock],v_p[sand],v_p[water],v_p[absorber],v_p_ref))
@@ -284,7 +284,7 @@ def getMaterialProperties(dom):
    lmbd=Scalar(lmbd_tab[bedrock],Function(dom))
    tags=Scalar(bedrock,Function(dom))
    
-   for tag in list(rho_tab.keys()):
+   for tag in sorted(rho_tab.keys()):
       rho.setTaggedValue(tag,rho_tab[tag])
       eta.setTaggedValue(tag,eta_tab[tag])
       mu.setTaggedValue(tag,mu_tab[tag])
diff --git a/dudley/test/python/slip_stress_mesh_old.py b/dudley/test/python/slip_stress_mesh_old.py
index 6d4ec85..991cde3 100644
--- a/dudley/test/python/slip_stress_mesh_old.py
+++ b/dudley/test/python/slip_stress_mesh_old.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/slip_stress_old.py b/dudley/test/python/slip_stress_old.py
index cdf3ec2..ea97a2b 100644
--- a/dudley/test/python/slip_stress_old.py
+++ b/dudley/test/python/slip_stress_old.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/stokes_problems.py b/dudley/test/python/stokes_problems.py
index ef2cd98..733e4e8 100644
--- a/dudley/test/python/stokes_problems.py
+++ b/dudley/test/python/stokes_problems.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/subduction1.py b/dudley/test/python/subduction1.py
index 95373f4..7d401b7 100644
--- a/dudley/test/python/subduction1.py
+++ b/dudley/test/python/subduction1.py
@@ -13,6 +13,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/subduction1_gen.py b/dudley/test/python/subduction1_gen.py
index 8b5d791..5275f2f 100644
--- a/dudley/test/python/subduction1_gen.py
+++ b/dudley/test/python/subduction1_gen.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/dudley/test/python/time_chunks.py b/dudley/test/python/time_chunks.py
index ae57818..205209b 100644
--- a/dudley/test/python/time_chunks.py
+++ b/dudley/test/python/time_chunks.py
@@ -14,7 +14,7 @@
 #
 ##############################################################################
 
-from __future__ import print_function
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/dudley/test/python/tp.py b/dudley/test/python/tp.py
index da49c4c..6d48842 100644
--- a/dudley/test/python/tp.py
+++ b/dudley/test/python/tp.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escript/py_src/__init__.py b/escript/py_src/__init__.py
index 97437e7..896096e 100644
--- a/escript/py_src/__init__.py
+++ b/escript/py_src/__init__.py
@@ -13,6 +13,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2013-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -26,3 +28,6 @@ from esys.escriptcore.nonlinearPDE import NonlinearPDE
 from esys.escriptcore.datamanager import DataManager
 from esys.escriptcore.symbolic import *
 from esys.escriptcore.splitworld import *
+
+__all__=[x for x in dir() if not x.startswith('internal_') and not x.startswith('Internal_') and not x.startswith('__')]
+
diff --git a/escript/py_src/datamanager.py b/escript/py_src/datamanager.py
index 4b10f1b..3ca7653 100644
--- a/escript/py_src/datamanager.py
+++ b/escript/py_src/datamanager.py
@@ -1,3 +1,19 @@
 
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+from __future__ import print_function, division
 
 from esys.escriptcore.datamanager import *
diff --git a/escript/py_src/linearPDEs.py b/escript/py_src/linearPDEs.py
index a45ec4f..b6048cb 100644
--- a/escript/py_src/linearPDEs.py
+++ b/escript/py_src/linearPDEs.py
@@ -1,4 +1,19 @@
 
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
 
+from __future__ import print_function, division
 
-from esys.escriptcore.linearPDEs import *
\ No newline at end of file
+from esys.escriptcore.linearPDEs import *
diff --git a/escript/py_src/modelframe.py b/escript/py_src/modelframe.py
index 1baf9bf..20d518b 100644
--- a/escript/py_src/modelframe.py
+++ b/escript/py_src/modelframe.py
@@ -1,4 +1,19 @@
 
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
 
+from __future__ import print_function, division
 
-from esys.escriptcore.modelframe import *
\ No newline at end of file
+from esys.escriptcore.modelframe import *
diff --git a/escript/py_src/models.py b/escript/py_src/models.py
index f756b04..a3cfad8 100644
--- a/escript/py_src/models.py
+++ b/escript/py_src/models.py
@@ -1,4 +1,19 @@
 
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
 
+from __future__ import print_function, division
 
-from esys.escriptcore.models import *
\ No newline at end of file
+from esys.escriptcore.models import *
diff --git a/escript/py_src/pdetools.py b/escript/py_src/pdetools.py
index 8e21c68..6af4f2b 100644
--- a/escript/py_src/pdetools.py
+++ b/escript/py_src/pdetools.py
@@ -1,3 +1,20 @@
 
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
 
-from esys.escriptcore.pdetools import *
\ No newline at end of file
+from __future__ import print_function, division
+
+
+from esys.escriptcore.pdetools import *
diff --git a/escript/py_src/symbolic.py b/escript/py_src/symbolic.py
index 3f6948b..9464d57 100644
--- a/escript/py_src/symbolic.py
+++ b/escript/py_src/symbolic.py
@@ -1,5 +1,19 @@
 
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
 
-
+from __future__ import print_function, division
 
 from esys.escriptcore.symbolic import *
diff --git a/escript/py_src/unitsSI.py b/escript/py_src/unitsSI.py
index 8b4258e..dd664fc 100644
--- a/escript/py_src/unitsSI.py
+++ b/escript/py_src/unitsSI.py
@@ -1,5 +1,19 @@
 
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
 
+from __future__ import print_function, division
 
-
-from esys.escriptcore.unitsSI import *
\ No newline at end of file
+from esys.escriptcore.unitsSI import *
diff --git a/escript/py_src/util.py b/escript/py_src/util.py
index 5e1f9c4..f9361a9 100644
--- a/escript/py_src/util.py
+++ b/escript/py_src/util.py
@@ -1,4 +1,19 @@
 
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
 
+from __future__ import print_function, division
 
-from esys.escriptcore.util import *
\ No newline at end of file
+from esys.escriptcore.util import *
diff --git a/escriptcore/py_src/__init__.py b/escriptcore/py_src/__init__.py
index b45e86c..bad7755 100644
--- a/escriptcore/py_src/__init__.py
+++ b/escriptcore/py_src/__init__.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/benchmark.py b/escriptcore/py_src/benchmark.py
index b448218..16a213d 100644
--- a/escriptcore/py_src/benchmark.py
+++ b/escriptcore/py_src/benchmark.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/datamanager.py b/escriptcore/py_src/datamanager.py
index c909434..67df185 100644
--- a/escriptcore/py_src/datamanager.py
+++ b/escriptcore/py_src/datamanager.py
@@ -13,6 +13,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -126,7 +128,7 @@ class DataManager(object):
         if self._restartdir != None:
             self.__clearData()
 
-        for name,var in list(data.items()):
+        for name,var in sorted(data.items(), key=lambda x: x[0]):
             if hasattr(var, "getDomain"):
                 if self._domain is None:
                     self._domain=var.getDomain()
@@ -221,7 +223,7 @@ class DataManager(object):
         """
         self._metadata=metadata
         ss=""
-        for i,p in list(schema.items()):
+        for i,p in sorted(list(schema.items()), key=lambda x: x[0]):
             ss="%s xmlns:%s=\"%s\""%(ss, i, p)
         self._md_schema=ss.strip()
 
@@ -325,7 +327,7 @@ class DataManager(object):
         pickle.dump(self._stamp, open(stamp_file, "wb"))
         ff=self.__getDumpFilename("_domain", restartdir)
         self._domain.dump(ff)
-        for name, var in list(self._data.items()):
+        for name, var in sorted(self._data.items(), key=lambda x: x[0]):
             ff=self.__getDumpFilename(name, restartdir)
             var.dump(ff)
         print(("Restart files saved in "+os.path.join(self._workdir, restartdir)))
diff --git a/escriptcore/py_src/domainCouplers.py b/escriptcore/py_src/domainCouplers.py
index 2eb0a15..132f3cf 100644
--- a/escriptcore/py_src/domainCouplers.py
+++ b/escriptcore/py_src/domainCouplers.py
@@ -21,6 +21,8 @@ must already support interpolation in at least one direction.
 
 """
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2014-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/faultsystems.py b/escriptcore/py_src/faultsystems.py
index 9847683..3bf1d1b 100644
--- a/escriptcore/py_src/faultsystems.py
+++ b/escriptcore/py_src/faultsystems.py
@@ -13,6 +13,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/flows.py b/escriptcore/py_src/flows.py
index 26b2d8d..7fb84ba 100644
--- a/escriptcore/py_src/flows.py
+++ b/escriptcore/py_src/flows.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/gmshrunner.py b/escriptcore/py_src/gmshrunner.py
index 3b26c95..f2be953 100644
--- a/escriptcore/py_src/gmshrunner.py
+++ b/escriptcore/py_src/gmshrunner.py
@@ -15,6 +15,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/heat.py b/escriptcore/py_src/heat.py
index 0efed1b..868911a 100644
--- a/escriptcore/py_src/heat.py
+++ b/escriptcore/py_src/heat.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/levelset.py b/escriptcore/py_src/levelset.py
index 0ce35d3..67838d2 100644
--- a/escriptcore/py_src/levelset.py
+++ b/escriptcore/py_src/levelset.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/linearPDEs.py b/escriptcore/py_src/linearPDEs.py
index 679bcf6..232f4f2 100644
--- a/escriptcore/py_src/linearPDEs.py
+++ b/escriptcore/py_src/linearPDEs.py
@@ -15,6 +15,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -29,7 +31,7 @@ differential equations (PDEs) and Transport problems within `escript`.
 PDE over to the PDE solver library defined through the `Domain`
 of the PDE. The general interface is provided through the `LinearPDE` class.
 `TransportProblem` provides an interface to initial value problems dominated
-by its advective terms.
+by their advective terms.
 
 :var __author__: name of author
 :var __copyright__: copyrights
@@ -48,9 +50,8 @@ __author__="Lutz Gross, l.gross at uq.edu.au"
 
 SolverOptions = escore.SolverOptions
 SolverBuddy = escore.SolverBuddy
-        
+
 class IllegalCoefficient(ValueError):
-  
    """
    Exception that is raised if an illegal coefficient of the general or
    particular PDE is requested.
@@ -243,7 +244,7 @@ class PDECoef(object):
                 try:
                   newValue=escore.Data(newValue,self.getFunctionSpace(domain,reducedEquationOrder,reducedSolutionOrder))
                 except RuntimeError as er:
-                 msg="Attempting to interpolate coefficient to function space %s encountered the followin error: %s"%(self.getFunctionSpace(domain),str(er))
+                 msg="Attempting to interpolate coefficient to function space %s encountered the following error: %s"%(self.getFunctionSpace(domain),str(er))
                  raise IllegalCoefficientFunctionSpace(msg)
                 except:
                   raise IllegalCoefficientFunctionSpace("Unable to interpolate coefficient to function space %s"%self.getFunctionSpace(domain))
@@ -508,7 +509,7 @@ class LinearProblem(object):
 
        to introduce the coefficients *A* and *B*.
        """
-       for name, type in list(coeff.items()):
+       for name, type in sorted(coeff.items(), key=lambda x: x[0]):
            if not isinstance(type,PDECoef):
               raise ValueError("coefficient %s has no type."%name)
            self.__COEFFICIENTS[name]=type
@@ -544,12 +545,12 @@ class LinearProblem(object):
      return self.__system_status
    def setSystemStatus(self,status=None):
      """
-     Sets the system status to ``status`` if ``status`` is not present the 
+     Sets the system status to ``status`` if ``status`` is not present the
      current status of the domain is used.
      """
      if status is None:
          self.__system_status=self.getDomainStatus()
-     else: 
+     else:
          self.__system_status=status
 
    def getDim(self):
@@ -655,16 +656,16 @@ class LinearProblem(object):
        else:
           raise ValueError("options must be a SolverOptions object.")
        self.__solver_options.setSymmetry(self.__sym)
-     
+
    def getSolverOptions(self):
        """
        Returns the solver options
-   
+
        :rtype: `SolverOptions`
        """
        self.__solver_options.setSymmetry(self.__sym)
        return self.__solver_options
-       
+
    def isUsingLumping(self):
       """
       Checks if matrix lumping is the current solver method.
@@ -688,7 +689,7 @@ class LinearProblem(object):
 
    def setSymmetryOn(self):
       """
-      Sets the symmetry flag. 
+      Sets the symmetry flag.
       :note: The method overwrites the symmetry flag set by the solver options
       """
       self.__sym=True
@@ -1026,7 +1027,7 @@ class LinearProblem(object):
      """
      Resets all coefficients to their default values.
      """
-     for i in list(self.__COEFFICIENTS.keys()):
+     for i in sorted(self.__COEFFICIENTS.keys()):
          self.__COEFFICIENTS[i].resetValue()
 
    def alteredCoefficient(self,name):
@@ -1067,7 +1068,7 @@ class LinearProblem(object):
        if not self.getDomainStatus()==self.getSystemStatus(): self.invalidateSolution()
        if self.__solution_rtol>self.getSolverOptions().getTolerance() or \
           self.__solution_atol>self.getSolverOptions().getAbsoluteTolerance():
-            self.invalidateSolution()  
+            self.invalidateSolution()
        return self.__is_solution_valid
 
    def validOperator(self):
@@ -1255,12 +1256,12 @@ class LinearProblem(object):
       :raise IllegalCoefficient: if an unknown coefficient keyword is used
       """
       # check if the coefficients are  legal:
-      for i in list(coefficients.keys()):
+      for i in sorted(coefficients.keys()):
          if not self.hasCoefficient(i):
             raise IllegalCoefficient("Attempt to set unknown coefficient %s"%i)
       # if the number of unknowns or equations is still unknown we try to estimate them:
       if self.__numEquations is None or self.__numSolutions is None:
-         for i,d in list(coefficients.items()):
+         for i,d in sorted(coefficients.items(), key=lambda x: x[0]):
             if hasattr(d,"shape"):
                 s=d.shape
             elif isinstance(d, escore.Data) and not d.isEmpty():
@@ -1278,7 +1279,7 @@ class LinearProblem(object):
       if self.__numEquations is None: raise UndefinedPDEError("unidentified number of equations")
       if self.__numSolutions is None: raise UndefinedPDEError("unidentified number of solutions")
       # now we check the shape of the coefficient if numEquations and numSolutions are set:
-      for i,d in list(coefficients.items()):
+      for i,d in sorted(coefficients.items(), key=lambda x: x[0]):
         try:
            self.__COEFFICIENTS[i].setValue(self.getDomain(),
                      self.getNumEquations(),self.getNumSolutions(),
@@ -1369,7 +1370,7 @@ class LinearProblem(object):
             d, y, d_contact, y_contact, d_dirac, y_dirac):
         """
         adds a PDE to the system, results depend on domain
-        
+
         :param mat:
         :type mat: `OperatorAdapter`
         :param rhs:
@@ -1408,11 +1409,11 @@ class LinearProblem(object):
         else:
             self.getDomain().addPDEToSystem(operator,righthandside, A, B, C, D,
                     X, Y, d, y, d_contact, y_contact, d_dirac, y_dirac)
-            
+
    def addToSystem(self, op, rhs, data):
         """
         adds a PDE to the system, results depend on domain
-        
+
         :param mat:
         :type mat: `OperatorAdapter`
         :param rhs:
@@ -1425,7 +1426,7 @@ class LinearProblem(object):
    def addPDEToLumpedSystem(self, operator, a, b, c, hrz_lumping):
         """
         adds a PDE to the lumped system, results depend on domain
-        
+
         :param mat:
         :type mat: `OperatorAdapter`
         :param rhs:
@@ -1443,11 +1444,11 @@ class LinearProblem(object):
             self.getDomain().addPDEToLumpedSystem(operator, a, b, c, hrz_lumping, self.assembler)
         else:
             self.getDomain().addPDEToLumpedSystem(operator, a, b, c, hrz_lumping)
-   
+
    def addPDEToRHS(self, righthandside, X, Y, y, y_contact, y_dirac):
         """
         adds a PDE to the right hand side, results depend on domain
-        
+
         :param mat:
         :type mat: `OperatorAdapter`
         :param righthandside:
@@ -1468,13 +1469,13 @@ class LinearProblem(object):
                     ("y_dirac", y_dirac)]
             self.addToRHS(righthandside, data)
         else:
-            self.getDomain().addPDEToRHS(righthandside, X, Y, y, y_contact, 
+            self.getDomain().addPDEToRHS(righthandside, X, Y, y, y_contact,
                     y_dirac)
-   
+
    def addToRHS(self, rhs, data):
         """
         adds a PDE to the right hand side, results depend on domain
-        
+
         :param mat:
         :type mat: `OperatorAdapter`
         :param righthandside:
@@ -1794,7 +1795,7 @@ class LinearPDE(LinearProblem):
                         D_reduced_times_e=D_reduced
                  else:
                     D_reduced_times_e=escore.Data()
-                    
+
                  if not d_reduced.isEmpty():
                      if self.getNumSolutions()>1:
                         d_reduced_times_e=util.matrix_mult(d_reduced,numpy.ones((self.getNumSolutions(),)))
@@ -1802,7 +1803,7 @@ class LinearPDE(LinearProblem):
                         d_reduced_times_e=d_reduced
                  else:
                     d_reduced_times_e=escore.Data()
-                    
+
                  if not d_dirac.isEmpty():
                      if self.getNumSolutions()>1:
                         d_dirac_times_e=util.matrix_mult(d_dirac,numpy.ones((self.getNumSolutions(),)))
@@ -2104,7 +2105,7 @@ class LinearPDE(LinearProblem):
      A=self.getCoefficient("A")
      if not A.isEmpty():
            out+=util.tensormult(A,util.grad(u,self.getFunctionSpaceForCoefficient("A")))
-      
+
      B=self.getCoefficient("B")
      if not B.isEmpty():
            if B.getRank() == 1:
@@ -2112,7 +2113,7 @@ class LinearPDE(LinearProblem):
            else:
                out+=util.generalTensorProduct(B,u,axis_offset=1)
 
-     X=self.getCoefficient("X") 
+     X=self.getCoefficient("X")
      if not X.isEmpty():
            out-=X
 
@@ -2342,7 +2343,7 @@ class WavePDE(LinearPDE):
            q=PDECoef(PDECoef.SOLUTION,(PDECoef.BY_SOLUTION,),PDECoef.BOTH))
         self.assembler = self.getDomain().createAssembler("WaveAssembler", c)
 
-    
+
     def getSystem(self):
         """
         Returns the operator and right hand side of the PDE.
@@ -2366,7 +2367,7 @@ class WavePDE(LinearPDE):
                  D=self.getCoefficient("D")
                  d=self.getCoefficient("d")
                  d_dirac=self.getCoefficient("d_dirac")
-                 
+
                  if not D.isEmpty():
                      if self.getNumSolutions()>1:
                         D_times_e=util.matrix_mult(D,numpy.ones((self.getNumSolutions(),)))
@@ -2381,7 +2382,7 @@ class WavePDE(LinearPDE):
                         d_times_e=d
                  else:
                     d_times_e=escore.Data()
-                    
+
                  if not d_dirac.isEmpty():
                      if self.getNumSolutions()>1:
                         d_dirac_times_e=util.matrix_mult(d_dirac,numpy.ones((self.getNumSolutions(),)))
@@ -2394,7 +2395,7 @@ class WavePDE(LinearPDE):
                     hrz_lumping=( self.getSolverOptions().getSolverMethod() ==  SolverOptions.HRZ_LUMPING )
                     self.addPDEToLumpedSystem(operator, D_times_e, d_times_e, d_dirac_times_e,  hrz_lumping )
                  else:
-                    self.addToRHS(operator, 
+                    self.addToRHS(operator,
                         [("Y", D_times_e), ("y", d_times_e),
                          ("y_dirac", d_dirac_times_e)])
                  self.trace("New lumped operator has been built.")
@@ -2402,7 +2403,7 @@ class WavePDE(LinearPDE):
                  self.resetRightHandSide()
                  righthandside=self.getCurrentRightHandSide()
                  self.addToRHS(righthandside,
-                                [(i, self.getCoefficient(i)) for i in 
+                                [(i, self.getCoefficient(i)) for i in
                                     ["du", "Y", "y", "y_dirac"]
                                 ])
                  self.trace("New right hand side has been built.")
@@ -2428,7 +2429,7 @@ class WavePDE(LinearPDE):
                  self.resetRightHandSide()
                  righthandside=self.getCurrentRightHandSide()
                  self.addToRHS(righthandside,
-                                [(i, self.getCoefficient(i)) for i in 
+                                [(i, self.getCoefficient(i)) for i in
                                     ["du", "Y", "y", "y_contact", "y_dirac"]
                                 ])
                  self.insertConstraint(rhs_only=True)
@@ -2533,7 +2534,7 @@ class LameEquation(LinearPDE):
 
         if name == "A" :
             out = self.createCoefficient("A")
-            if self.getCoefficient("lame_lambda").isEmpty(): 
+            if self.getCoefficient("lame_lambda").isEmpty():
                 if self.getCoefficient("lame_mu").isEmpty():
                     pass
                 else:
@@ -2541,7 +2542,7 @@ class LameEquation(LinearPDE):
                         for j in range(self.getDim()):
                             out[i,j,j,i] += self.getCoefficient("lame_mu")
                             out[i,j,i,j] += self.getCoefficient("lame_mu")
-            else: 
+            else:
                 if self.getCoefficient("lame_mu").isEmpty():
                     for i in range(self.getDim()):
                         for j in range(self.getDim()):
@@ -2622,7 +2623,7 @@ class LameEquation(LinearPDE):
                         D_reduced_times_e=D_reduced
                  else:
                     D_reduced_times_e=escore.Data()
-                    
+
                  if not d_reduced.isEmpty():
                      if self.getNumSolutions()>1:
                         d_reduced_times_e=util.matrix_mult(d_reduced,numpy.ones((self.getNumSolutions(),)))
@@ -2630,7 +2631,7 @@ class LameEquation(LinearPDE):
                         d_reduced_times_e=d_reduced
                  else:
                     d_reduced_times_e=escore.Data()
-                    
+
                  if not d_dirac.isEmpty():
                      if self.getNumSolutions()>1:
                         d_dirac_times_e=util.matrix_mult(d_dirac,numpy.ones((self.getNumSolutions(),)))
@@ -3043,7 +3044,7 @@ class TransportPDE(LinearProblem):
                        object on `FunctionOnContactOne` or `FunctionOnContactZero`
       :keyword y_contact_reduced: value for coefficient ``y_contact_reduced``
       :type y_contact_reduced: any type that can be cast to a `Data` object on `ReducedFunctionOnContactOne` or `ReducedFunctionOnContactZero`
-      
+
       :keyword d_dirac: value for coefficient ``d_dirac``
       :type d_dirac: any type that can be cast to a `Data` object on `DiracDeltaFunctions`
       :keyword y_dirac: value for coefficient ``y_dirac``
@@ -3067,7 +3068,7 @@ class TransportPDE(LinearProblem):
        Returns an instance of a new transport operator.
        """
        optype=self.getRequiredOperatorType()
-       self.trace("New Transport problem pf type %s is allocated."%optype)
+       self.trace("New Transport problem of type %s is allocated."%optype)
        return self.getDomain().newTransportProblem( \
                                self.getNumEquations(), \
                                self.getFunctionSpaceForSolution(), \
@@ -3110,8 +3111,9 @@ class TransportPDE(LinearProblem):
    #====================================================================
    def getSolution(self, dt=None, u0=None):
        """
-       Returns the solution by marching forward by time step dt. if ''u0'' is present,
-       ''u0'' is used as the initial value otherwise the solution from the last call is used.
+       Returns the solution by marching forward by time step dt.
+       If ''u0'' is present, ''u0'' is used as the initial value otherwise
+       the solution from the last call is used.
 
        :param dt: time step size. If ``None`` the last solution is returned.
        :type dt: positive ``float`` or ``None``
@@ -3276,13 +3278,13 @@ class TransportPDE(LinearProblem):
      Switches debug output on.
      """
      super(TransportPDE,self).setDebugOn()
-     
+
    def setDebugOff(self):
      """
      Switches debug output off.
      """
      super(TransportPDE,self).setDebugOff()
-     
+
 def SingleTransportPDE(domain, debug=False):
    """
    Defines a single transport problem
diff --git a/escriptcore/py_src/modelframe.py b/escriptcore/py_src/modelframe.py
index 4c8d431..eed3547 100644
--- a/escriptcore/py_src/modelframe.py
+++ b/escriptcore/py_src/modelframe.py
@@ -14,7 +14,8 @@
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
-from __future__ import print_function
+
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
@@ -85,7 +86,7 @@ class ESySXMLParser(object):
     """
     def __init__(self,xml, debug=False):
         if sys.version_info[0]<3:
-            xml=str(xml)	# xml might be unicode 
+            xml=str(xml)        # xml might be unicode 
         #print("\n")
         #print(type(xml))
         #print(xml)
@@ -191,7 +192,7 @@ class ESySXMLCreator(object):
         return n
 
     def getLinkableObjectId(self, obj):
-        for id, o in list(self.__linkable_object_registry.items()):
+        for id, o in sorted(self.__linkable_object_registry.items(), key=lambda x: x[0]):
             if o == obj: return id
         id =next(self.__number_sequence)
         self.__linkable_object_registry[id]=obj
@@ -424,7 +425,7 @@ class _ParameterIterator(object):
         o=next(self.__iter)
         return (o,self.__set.getAttributeObject(o))
         
-    def next(self):	#Still needed by py2.6
+    def next(self):     #Still needed by py2.6
         return self.__next__()
 
     def __iter__(self):
@@ -490,7 +491,7 @@ class ParameterSet(LinkableObject):
         if isinstance(parameters,type([])):
             parameters = list(zip(parameters, itertools.repeat(None)))
         if isinstance(parameters,type(dict())):
-            parameters = iter(list(parameters.items()))
+            parameters = iter(sorted(parameters.items()))
 
         for prm, value in parameters:
             setattr(self,prm,value)
@@ -622,10 +623,10 @@ class ParameterSet(LinkableObject):
                 param.appendChild(esysxml.createDataNode('Value', str(value)))
             elif isinstance(value, dict):
                  dic = esysxml.createElement('dictionary')
-                 if len(list(value.keys()))>0:
-                     dic.setAttribute('key_type', list(value.keys())[0].__class__.__name__)
-                     dic.setAttribute('value_type', value[list(value.keys())[0]].__class__.__name__)
-                 for k,v in list(value.items()):
+                 if len(value.keys())>0:
+                     dic.setAttribute('key_type', sorted(value.keys())[0].__class__.__name__)
+                     dic.setAttribute('value_type', value[sorted(value.keys())[0]].__class__.__name__)
+                 for k,v in sorted(value.items(), key=lambda x: x[0]):
                     i=esysxml.createElement('item')
                     i.appendChild(esysxml.createDataNode('key', k))
                     i.appendChild(esysxml.createDataNode('value', v))
@@ -980,7 +981,7 @@ class Simulation(Model):
                out+=m.getAllModels()
             else:
                out.append(m)
-        return list(set(out))
+        return sorted(list(set(out)), key=lambda x: str(x))
 
     def checkModels(self, models, hash):
         """
diff --git a/escriptcore/py_src/models.py b/escriptcore/py_src/models.py
index 3699f14..7fa4506 100644
--- a/escriptcore/py_src/models.py
+++ b/escriptcore/py_src/models.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/mountains.py b/escriptcore/py_src/mountains.py
index b09550b..a185c36 100644
--- a/escriptcore/py_src/mountains.py
+++ b/escriptcore/py_src/mountains.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/nonlinearPDE.py b/escriptcore/py_src/nonlinearPDE.py
index 19b1eeb..94e2949 100644
--- a/escriptcore/py_src/nonlinearPDE.py
+++ b/escriptcore/py_src/nonlinearPDE.py
@@ -15,6 +15,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -334,7 +336,7 @@ class NonlinearPDE(object):
         # separate symbolic expressions from other coefficients
         constants={}
         expressions={}
-        for n, e in self._set_coeffs.items():
+        for n, e in sorted(self._set_coeffs.items(), key=lambda x: x[0]):
             if symb.isSymbol(e):
                 expressions[n]=e
             else:
@@ -651,7 +653,7 @@ class NonlinearPDE(object):
         """
 
         u=self._unknown
-        for name,val in coefficients.items():
+        for name,val in sorted(coefficients.items(), key=lambda x: x[0]):
             shape=util.getShape(val)
             if not shape == self.getShapeOfCoefficient(name):
                 raise lpe.IllegalCoefficientValue("%s has shape %s but must have shape %s"%(name, shape, self.getShapeOfCoefficient(name)))
@@ -757,7 +759,7 @@ class NonlinearPDE(object):
         # if the solution etc are cached this could be omitted:
         constants={}
         expressions={}
-        for n, e in self._set_coeffs.items():
+        for n, e in sorted(self._set_coeffs.items(), key=lambda x: x[0]):
             if n not in self.__COEFFICIENTS:
                 if symb.isSymbol(e):
                     expressions[n]=e
@@ -779,7 +781,7 @@ class NonlinearPDE(object):
              if symb.isSymbol(self._r):
                  names.append('r')
                  ev.addExpression(self._r.diff(f))
-        for n in self._set_coeffs.keys():
+        for n in sorted(self._set_coeffs.keys()):
             if n in self.__COEFFICIENTS and symb.isSymbol(self._set_coeffs[n]):
                    if n=="X" or n=="X_reduced":
                       T0=time()
diff --git a/escriptcore/py_src/pdetools.py b/escriptcore/py_src/pdetools.py
index 4aeb3da..83ea8a2 100644
--- a/escriptcore/py_src/pdetools.py
+++ b/escriptcore/py_src/pdetools.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -616,7 +618,7 @@ class Defect(object):
         """
         Returns the inner product of x0 and x1
         
-        NOTE: MUST BE OVERRIDDEN
+        NOTE: MUST BE OVERRIDDEN BY A SUBCLASS
 
         :param x0: value for x0
         :param x1: value for x1
@@ -642,7 +644,7 @@ class Defect(object):
         """
         Returns the value F of a given ``x``.
 
-        NOTE: MUST BE OVERRIDDEN
+        NOTE: MUST BE OVERRIDDEN BY A SUBCLASS
 
         :param x: value for which the defect ``F`` is evaluated
         :return: value of the defect at ``x``
diff --git a/escriptcore/py_src/rheologies.py b/escriptcore/py_src/rheologies.py
index 130c4a7..5f22384 100644
--- a/escriptcore/py_src/rheologies.py
+++ b/escriptcore/py_src/rheologies.py
@@ -13,6 +13,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/runmodel.py b/escriptcore/py_src/runmodel.py
index 2744341..366d509 100644
--- a/escriptcore/py_src/runmodel.py
+++ b/escriptcore/py_src/runmodel.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/splitworld.py b/escriptcore/py_src/splitworld.py
index 6627f8d..ca4c918 100644
--- a/escriptcore/py_src/splitworld.py
+++ b/escriptcore/py_src/splitworld.py
@@ -13,6 +13,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2014-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -24,13 +26,6 @@ __author__="Joel Fenwick"
 
 """
 This module contains the Python side of the SplitWorld functionality.
-
-:var __author__: name of author
-:var __copyright__: copyrights
-:var __license__: licence agreement
-:var __url__: url entry point on documentation
-:var __version__: version
-:var __date__: date of the version
 """
 
 import warnings
@@ -38,6 +33,125 @@ warnings.simplefilter('default', category=DeprecationWarning)
 
 from . import escriptcpp as escore
 
+
+class SplitWorld(object):
+  """
+  Wrapper for the C++ class exposed as __SplitWorld.
+  This is a namespace consideration: it allows us to make
+  boost::python::raw_functions into members of a class.
+  """
+  
+  def __init__(self, count):
+    """
+    :var count: How many equally sized subworlds should our compute resources be partitioned into?
+    :var type: `int`
+    """
+    self.cpp_obj=escore.Internal_SplitWorld(count)
+    
+  def buildDomains(self, fn, *vec, **kwargs):
+    """
+    Instruct subworlds how to build the domain.
+    :var fn: The function/class to call to create a domain.
+    :type fn: `callable`
+    The remaining parameters are for the arguments of the function.
+    """
+    escore.internal_buildDomains(self.cpp_obj, fn, *vec, **kwargs)
+
+    
+  def addJob(self, jobctr, *vec, **kwargs):
+    """
+    Submit a job to be run later on an available subworld.
+    :var jobctr: class or function to be called to create a job
+    :type jobctr: `callable`
+    The remaining parameters are for the arguments of the function.
+    """
+    escore.internal_addJob(self.cpp_obj, jobctr, *vec, **kwargs)
+    
+  def addJobPerWorld(self, jobctr, *vec, **kwargs):
+    """
+    Submit one job per subworld to run later.
+    :var jobctr: class or function to be called to create a job
+    :type jobctr: `callable`
+    The remaining parameters are for the arguments of the function.    
+    """
+    escore.internal_addJobPerWorld(self.cpp_obj, jobctr,  *vec, **kwargs)
+    
+    
+  def addVariable(self, vname, vartype, *vec, **kwargs):
+    """
+    Create a variable on all subworlds.
+    :var vartype: the type of variable to be created
+    :type vartype: `str`
+    The remaining parameters are for optional arguments depending on the variable type.
+    """
+    if vartype=="local":
+        escore.internal_addVariable(self.cpp_obj, vname, escore.internal_makeLocalOnly);
+    elif vartype=="Data":
+        escore.internal_addVariable(self.cpp_obj, vname, escore.internal_makeDataReducer, *vec)
+    elif vartype=="float":
+        escore.internal_addVariable(self.cpp_obj, vname, escore.internal_makeScalarReducer, *vec)
+    else:
+        raise ValueError("Unknown variable type (%s)"%str(vartype))
+
+  def runJobs(self):
+    """
+    Executes pending jobs.
+    """
+    self.cpp_obj.runJobs()
+
+  def removeVariable(self, name):
+    """
+    Removes the named variable from all subworlds.
+    :var name: name of the variable to remove
+    :type name: `str`
+    """
+    self.cpp_obj.removeVariable(name)
+    
+    
+  def clearVariable(self, name):
+    """
+    Clears the value of the named variable.  The variable itself still exists.
+    :var name: variable to clear
+    :type name: `str`
+    """
+    self.cpp_obj.clearVariable(name)
+    
+  def getVarList(self):
+    """
+    Returns the names of all declared variables and a boolean for each indicating whether they have values.
+    """
+    return self.cpp_obj.getVarList()
+    
+    
+  def getFloatVariable(self, vname):
+    """
+    Return the value of a floating point variable
+    """
+    return self.cpp_obj.getDoubleVariable(vname)
+    
+  def getSubWorldCount(self):
+    """
+    Return the number of subworlds in this splitworld
+    """
+    return self.cpp_obj.getSubWorldCount()
+
+  def getSubWorldID(self):
+    """
+    Return the id of the subworld which _this_ MPI process belongs to.
+    """
+    return self.cpp_obj.getSubWorldID()
+    
+    
+  def copyVariable(self, src, dest):
+    """
+    copy the contents of one splitworld variable into another
+    :var src: name of variable to copy from
+    :type src: `str`
+    :var dest: name of variable to copy to 
+    :type dest: `str`
+    """
+    self.cpp_obj.copyVariable(src, dest)
+    
 class Job(object):
   """
   Describes a sequence of work to be carried out in a subworld.
@@ -48,7 +162,7 @@ class Job(object):
   To do specific work, this class should be subclassed and the work() 
   (and possibly __init__ methods overloaded).
   The majority of the work done by the job will be in the *overloaded* work() method.
-  The work() method should retreive values from the outside using importValue() and pass values to
+  The work() method should retrieve values from the outside using importValue() and pass values to
   the rest of the system using exportValue().
   The rest of the methods should be considered off limits.
   """
@@ -69,11 +183,6 @@ class Job(object):
     self.swid=kwargs["swid"]    # which subworld are we running in?
     
     
-  def wantValue(self, name):
-    """
-    Register your interest in importing a variable with the given name
-    """
-    self.wantedvalues.append(name)
     
   def setImportValue(self, name, v):
     """
@@ -81,19 +190,9 @@ class Job(object):
     :var name: label used to identify this import
     :type name: ``str``
     :var v: value to be imported
-    :type v: ?
+    :type v: python object
     """
     self.importedvalues[name]=v
-  
-  
-  def getExportValue(self, name):
-    """
-    get value exported by work()  [called from outside the job]
-    """
-    if name in self.exportedvalues:
-        return self.exportedvalues[name]
-    else:
-        return None
         
   def exportValue(self, name, v):
     """
@@ -102,7 +201,15 @@ class Job(object):
     For use inside the work() method.
     :var name: registered label for exported value
     :type name: ``str``
+    :var v: value to be exported
+    :type v: python object
     """
+    if type(name)==type([]):
+        for x in name:
+          if type(x)!=type(""):
+            raise RuntimeError("Variable name must be a string or list of strings- instead got [%s]"%(str(type(x))))
+    elif type(name)!=type(""):
+      raise RuntimeError("Variable name must be a string or list of strings- instead got %s"%(str(type(name))))
     self.exportedvalues[name]=v
     
   def importValue(self, name):
@@ -129,7 +236,7 @@ class Job(object):
     """
     self.importedvalues.clear()
     
-  def requestImport(self, name):
+  def declareImport(self, name):
     """
     Adds name to the list of imports
     """
@@ -137,32 +244,34 @@ class Job(object):
       raise ValueError("Imports must be identified with non-empty strings")
     if not name in self.wantedvalues:
       self.wantedvalues+=name
-    
+
   def work(self):
     """
     Need to be overloaded for the job to actually do anthing.
-    A return value of True, indicates this job thinks it is done.
+    A return value of True indicates this job thinks it is done.
     A return value of False indicates work still to be done
     """
-    return True
+    raise RuntimeError("work() function not overridden as required")
 
 class FunctionJob(Job):
   """
-  Takes a python function (with only keyword params) to be called as the work method
+  Takes a python function (with only self and keyword params) to be called as the work method
   """
   def __init__(self, fn, *args, **kwargs):
     super(FunctionJob, self).__init__(*args, **kwargs)
     self.__fn__ = fn
+    if fn is None:
+      raise ValueError("Attempt to create a Function Job with no function to run (fn argument missing).")
     self.__calldict__ = kwargs
     if "imports" in kwargs:
       if isinstance(kwargs["imports"], str):
-        self.requestImport(kwargs["imports"])
+        self.declareImport(kwargs["imports"])
       else:
         for n in kwargs["imports"]:
-          self.requestImport(n)
+          self.declareImport(n)
 
   def work(self):
     self.__fn__(self, **self.__calldict__)
     return True
 
-    
\ No newline at end of file
+    
diff --git a/escriptcore/py_src/start.py b/escriptcore/py_src/start.py
index 937fe85..1521a83 100644
--- a/escriptcore/py_src/start.py
+++ b/escriptcore/py_src/start.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/symbolic/__init__.py b/escriptcore/py_src/symbolic/__init__.py
index f89776d..b9b378e 100644
--- a/escriptcore/py_src/symbolic/__init__.py
+++ b/escriptcore/py_src/symbolic/__init__.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/symbolic/evaluator.py b/escriptcore/py_src/symbolic/evaluator.py
index 057618a..7c69581 100644
--- a/escriptcore/py_src/symbolic/evaluator.py
+++ b/escriptcore/py_src/symbolic/evaluator.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -162,7 +164,7 @@ class Evaluator(object):
 
     def __str__(self):
         ret="\n".join([str(e) for e in self.expressions])+"\n"
-        for k in self._subsdict:
+        for k in sorted(self._subsdict.keys()):
             v=self._subsdict[k]
             if v.__class__.__name__=="Data":
                 ret+="%s=<Data object>"%k
diff --git a/escriptcore/py_src/symbolic/functions.py b/escriptcore/py_src/symbolic/functions.py
index cec5ec3..b5e6707 100644
--- a/escriptcore/py_src/symbolic/functions.py
+++ b/escriptcore/py_src/symbolic/functions.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/symbolic/pretty.py b/escriptcore/py_src/symbolic/pretty.py
index 0b731e8..c757bfd 100644
--- a/escriptcore/py_src/symbolic/pretty.py
+++ b/escriptcore/py_src/symbolic/pretty.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/symbolic/symbol.py b/escriptcore/py_src/symbolic/symbol.py
index f68cd37..8b7600a 100644
--- a/escriptcore/py_src/symbolic/symbol.py
+++ b/escriptcore/py_src/symbolic/symbol.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/symbolic/symconstants.py b/escriptcore/py_src/symbolic/symconstants.py
index 5620cbe..301a4ff 100644
--- a/escriptcore/py_src/symbolic/symconstants.py
+++ b/escriptcore/py_src/symbolic/symconstants.py
@@ -1,3 +1,21 @@
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+from __future__ import print_function, division
+
 from .symbol import Symbol
 import sympy
 pi=Symbol(sympy.pi)
diff --git a/escriptcore/py_src/symbolic/utils.py b/escriptcore/py_src/symbolic/utils.py
index 5f67207..1de9cc6 100644
--- a/escriptcore/py_src/symbolic/utils.py
+++ b/escriptcore/py_src/symbolic/utils.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/testing.py b/escriptcore/py_src/testing.py
index ff36169..7cd72a4 100644
--- a/escriptcore/py_src/testing.py
+++ b/escriptcore/py_src/testing.py
@@ -50,6 +50,8 @@ Printing the list of skipped tests::
 
 """
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2014-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/unitsSI.py b/escriptcore/py_src/unitsSI.py
index 8143ccc..d426b8b 100644
--- a/escriptcore/py_src/unitsSI.py
+++ b/escriptcore/py_src/unitsSI.py
@@ -13,6 +13,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 """
 
     :synopsis: some tools supporting physical units and conversion
diff --git a/escriptcore/py_src/utestselect.py b/escriptcore/py_src/utestselect.py
index a4f4d6b..9985696 100644
--- a/escriptcore/py_src/utestselect.py
+++ b/escriptcore/py_src/utestselect.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2014-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/py_src/util.py b/escriptcore/py_src/util.py
index 03dbd70..ac23865 100644
--- a/escriptcore/py_src/util.py
+++ b/escriptcore/py_src/util.py
@@ -15,6 +15,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -179,13 +181,13 @@ def saveDataCSV(filename, append=False, sep=", ", csep="_", **data):
     """
     # find a function space:
     fs = None
-    for n,d in list(data.items()):
+    for n,d in sorted(data.items(), key=lambda x: x[0]):
         if isinstance(d, Data): fs=d.getFunctionSpace()
     if fs is None:
         raise ValueError("saveDataCSV: there must be at least one Data object in the argument list.")
     
     new_data={}
-    for n,d in list(data.items()):
+    for n,d in sorted(data.items(), key=lambda x: x[0]):
         if isinstance(d, Data):
             new_data[n]=d
         else:
@@ -250,7 +252,7 @@ def saveESD(datasetName, dataDir=".", domain=None, timeStep=0, deltaT=1, dynamic
            file is updated in each iteration.
     """
     new_data = {}
-    for n,d in list(data.items()):
+    for n,d in sorted(data.items(), key=lambda x: x[0]):
           if not d.isEmpty(): 
             fs = d.getFunctionSpace() 
             domain2 = fs.getDomain()
@@ -292,7 +294,7 @@ def saveESD(datasetName, dataDir=".", domain=None, timeStep=0, deltaT=1, dynamic
         outputString += "N=%d\n" % domain.getMPISize()
 
     # now add the variables
-    for varName, d in list(new_data.items()):
+    for varName,d in sorted(new_data.items(), key=lambda x: x[0]):
         varFile = datasetName+"_"+varName+".%s"%timeStepFormat
         d.dump(os.path.join(dataDir, (varFile + ".nc") % fileNumber))
         if domain.onMasterProcessor():
diff --git a/escriptcore/src/AbstractReducer.cpp b/escriptcore/src/AbstractReducer.cpp
index ea7b5bd..88fa2f6 100644
--- a/escriptcore/src/AbstractReducer.cpp
+++ b/escriptcore/src/AbstractReducer.cpp
@@ -46,3 +46,13 @@ void AbstractReducer::clear()
     valueadded=false;
 }
 
+void AbstractReducer::newRunJobs()
+{
+    had_an_export_this_round=false;
+}
+
+bool AbstractReducer::canClash()
+{
+    return false;
+}
+
diff --git a/escriptcore/src/AbstractReducer.h b/escriptcore/src/AbstractReducer.h
index e61c390..627631c 100644
--- a/escriptcore/src/AbstractReducer.h
+++ b/escriptcore/src/AbstractReducer.h
@@ -57,12 +57,15 @@ public:
 	// same communicator requirements for reduceRemoteValues
 	// Must give the same answer when called on any process in the subworlds
 	// Must only be called on 
-    virtual bool checkRemoteCompatibility(esysUtils::JMPI& mpi_info, std::string& errstring)=0; 
+    virtual bool checkRemoteCompatibility(esysUtils::JMPI& mpi_info, std::string& errstring)=0;
+	// Some reducers need to know what domain they are operating in
+    virtual void setDomain(Domain_ptr dom){} 
     
 
 #ifdef ESYS_MPI  
 	// send from proc 0 in the communicator to all others
-    virtual bool groupSend(MPI_Comm& com)=0;
+	// second param is true if we have rank 0
+    virtual bool groupSend(MPI_Comm& com, bool imsending)=0;
     
 	// reduction with some procs submitting identity values
     virtual bool groupReduce(MPI_Comm& com, char mystate)=0;  
@@ -74,12 +77,16 @@ public:
 	// Must only be called on participating SubWorlds
 	// the mpi_info holds a communicator linking corresponding processes
 	// in every participating subworld
-    virtual bool reduceRemoteValues(esysUtils::JMPI& mpi_info, bool active)=0;
+    virtual bool reduceRemoteValues(MPI_Comm& comm)=0;
     
 	// true if at least one localValue has been added
 	// used to check if this subworld should participate in remote merges
     bool hasValue();
     
+	// true if reductions could fail for some reason other than MPI failure
+	// for example SET type variables 
+    virtual bool canClash();
+    
 	// Get a value for this variable from another process
 	// This is not a reduction and will replace any existing value
     virtual bool recvFrom(Esys_MPI_rank localid, Esys_MPI_rank source, esysUtils::JMPI& mpiinfo)=0;
@@ -92,10 +99,17 @@ public:
    
     virtual boost::python::object getPyObj()=0; 
     
+	// notify the reducer that a new runJobs() call 
+	// is being executed
+    virtual void newRunJobs();
+
     virtual void clear();
+
+    virtual void copyValueFrom(boost::shared_ptr<AbstractReducer>& src)=0;
 protected:
 
     bool valueadded;
+    bool had_an_export_this_round;
     static const int PARAMTAG;    
 };
 
diff --git a/escriptcore/src/Data.cpp b/escriptcore/src/Data.cpp
index efc7c95..1118f1c 100644
--- a/escriptcore/src/Data.cpp
+++ b/escriptcore/src/Data.cpp
@@ -249,7 +249,7 @@ Data::Data()
 }
 
 Data::Data(double value,
-           const bp::tuple& shape,
+           const boost::python::tuple& shape,
            const FunctionSpace& what,
            bool expanded)
     : m_shared(false), m_lazy(false)
@@ -371,7 +371,7 @@ Data::Data(const DataTypes::ValueType& value,
 }
 
 
-Data::Data(const bp::object& value,
+Data::Data(const boost::python::object& value,
            const FunctionSpace& what,
            bool expanded)
         : m_shared(false), m_lazy(false)
@@ -391,7 +391,7 @@ Data::Data(const WrappedArray& w, const FunctionSpace& what,
 }
 
 
-Data::Data(const bp::object& value,
+Data::Data(const boost::python::object& value,
            const Data& other)
         : m_shared(false), m_lazy(false)
 {
@@ -581,7 +581,7 @@ Data::setToZero()
 {
     if (isEmpty())
     {
-        throw DataException("Error - Operations not permitted on instances of DataEmpty.");
+        throw DataException("Error - Operations (setToZero) not permitted on instances of DataEmpty.");
     }
     if (isLazy())
     {
@@ -2326,7 +2326,7 @@ Data::operator+=(const Data& right)
 }
 
 Data&
-Data::operator+=(const bp::object& right)
+Data::operator+=(const boost::python::object& right)
 {
     if (isProtected()) {
         throw DataException("Error - attempt to update protected Data object.");
@@ -2358,7 +2358,7 @@ Data::operator-=(const Data& right)
 }
 
 Data&
-Data::operator-=(const bp::object& right)
+Data::operator-=(const boost::python::object& right)
 {
     if (isProtected()) {
         throw DataException("Error - attempt to update protected Data object.");
@@ -2381,7 +2381,7 @@ Data::operator*=(const Data& right)
 }
 
 Data&
-Data::operator*=(const bp::object& right)
+Data::operator*=(const boost::python::object& right)
 {  
     if (isProtected()) {
         throw DataException("Error - attempt to update protected Data object.");
@@ -2404,7 +2404,7 @@ Data::operator/=(const Data& right)
 }
 
 Data&
-Data::operator/=(const bp::object& right)
+Data::operator/=(const boost::python::object& right)
 {
     if (isProtected()) {
         throw DataException("Error - attempt to update protected Data object.");
@@ -2504,7 +2504,7 @@ escript::operator/(const Data& left, const Data& right)
 //
 // NOTE: It is essential to specify the namespace this operator belongs to
 Data
-escript::operator+(const Data& left, const bp::object& right)
+escript::operator+(const Data& left, const boost::python::object& right)
 {
     Data tmp(right,left.getFunctionSpace(),false);
     MAKELAZYBIN2(left,tmp,ADD);
@@ -2514,7 +2514,7 @@ escript::operator+(const Data& left, const bp::object& right)
 //
 // NOTE: It is essential to specify the namespace this operator belongs to
 Data
-escript::operator-(const Data& left, const bp::object& right)
+escript::operator-(const Data& left, const boost::python::object& right)
 {
     Data tmp(right,left.getFunctionSpace(),false);
     MAKELAZYBIN2(left,tmp,SUB);
@@ -2524,7 +2524,7 @@ escript::operator-(const Data& left, const bp::object& right)
 //
 // NOTE: It is essential to specify the namespace this operator belongs to
 Data
-escript::operator*(const Data& left, const bp::object& right)
+escript::operator*(const Data& left, const boost::python::object& right)
 {
     Data tmp(right,left.getFunctionSpace(),false);
     MAKELAZYBIN2(left,tmp,MUL);
@@ -2534,7 +2534,7 @@ escript::operator*(const Data& left, const bp::object& right)
 //
 // NOTE: It is essential to specify the namespace this operator belongs to
 Data
-escript::operator/(const Data& left, const bp::object& right)
+escript::operator/(const Data& left, const boost::python::object& right)
 {
     Data tmp(right,left.getFunctionSpace(),false);
     MAKELAZYBIN2(left,tmp,DIV);
@@ -2544,7 +2544,7 @@ escript::operator/(const Data& left, const bp::object& right)
 //
 // NOTE: It is essential to specify the namespace this operator belongs to
 Data
-escript::operator+(const bp::object& left, const Data& right)
+escript::operator+(const boost::python::object& left, const Data& right)
 {
     Data tmp(left,right.getFunctionSpace(),false);
     MAKELAZYBIN2(tmp,right,ADD);
@@ -2554,7 +2554,7 @@ escript::operator+(const bp::object& left, const Data& right)
 //
 // NOTE: It is essential to specify the namespace this operator belongs to
 Data
-escript::operator-(const bp::object& left, const Data& right)
+escript::operator-(const boost::python::object& left, const Data& right)
 {
     Data tmp(left,right.getFunctionSpace(),false);
     MAKELAZYBIN2(tmp,right,SUB);
@@ -2564,7 +2564,7 @@ escript::operator-(const bp::object& left, const Data& right)
 //
 // NOTE: It is essential to specify the namespace this operator belongs to
 Data
-escript::operator*(const bp::object& left, const Data& right)
+escript::operator*(const boost::python::object& left, const Data& right)
 {
     Data tmp(left,right.getFunctionSpace(),false);
     MAKELAZYBIN2(tmp,right,MUL);
@@ -2574,7 +2574,7 @@ escript::operator*(const bp::object& left, const Data& right)
 //
 // NOTE: It is essential to specify the namespace this operator belongs to
 Data
-escript::operator/(const bp::object& left, const Data& right)
+escript::operator/(const boost::python::object& left, const Data& right)
 {
     Data tmp(left,right.getFunctionSpace(),false);
     MAKELAZYBIN2(tmp,right,DIV);
@@ -4374,7 +4374,7 @@ size_t Data::getNumberOfTaggedValues() const
 }
 
 
-Data escript::randomData(const bp::tuple& shape,
+Data escript::randomData(const boost::python::tuple& shape,
        const FunctionSpace& what,
        long seed, const boost::python::tuple& filter)
 {
diff --git a/escriptcore/src/Data.h b/escriptcore/src/Data.h
index 35763d8..93df06e 100644
--- a/escriptcore/src/Data.h
+++ b/escriptcore/src/Data.h
@@ -1858,6 +1858,9 @@ template <class BinaryOp>
 		DataAbstract* t=m_data->deepCopy();
    		set_m_data(DataAbstract_ptr(t));
 	}
+#ifdef EXWRITECHK		
+	m_data->exclusivewritecalled=true;
+#endif	
   }
 
   /**
@@ -1972,6 +1975,12 @@ Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
    {
 	throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
    }
+#ifdef EXWRITECHK
+   if (!getReady()->exclusivewritecalled)
+   {
+        throw DataException("Error, call to Data::getSampleDataRW without a preceeding call to requireWrite/exclusiveWrite.");
+   }
+#endif
    return getReady()->getSampleDataRW(sampleNo);
 }
 
@@ -2279,7 +2288,7 @@ Data::algorithm(BinaryFunction operation, double initial_value) const
     EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
     return escript::algorithm(*leftC,operation,initial_value);
   } else if (isEmpty()) {
-    throw DataException("Error - Operations not permitted on instances of DataEmpty.");
+    throw DataException("Error - Operations (algorithm) not permitted on instances of DataEmpty.");
   } else if (isLazy()) {
     throw DataException("Error - Operations not permitted on instances of DataLazy.");
   } else {
@@ -2301,7 +2310,7 @@ Data
 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
 {
   if (isEmpty()) {
-    throw DataException("Error - Operations not permitted on instances of DataEmpty.");
+    throw DataException("Error - Operations (dp_algorithm) not permitted on instances of DataEmpty.");
   } 
   else if (isExpanded()) {
     Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
@@ -2352,7 +2361,7 @@ C_TensorBinaryOperation(Data const &arg_0,
 {
   if (arg_0.isEmpty() || arg_1.isEmpty())
   {
-     throw DataException("Error - Operations not permitted on instances of DataEmpty.");
+     throw DataException("Error - Operations (C_TensorBinaryOperation) not permitted on instances of DataEmpty.");
   }
   if (arg_0.isLazy() || arg_1.isLazy())
   {
@@ -3160,7 +3169,7 @@ C_TensorUnaryOperation(Data const &arg_0,
 {
   if (arg_0.isEmpty())	// do this before we attempt to interpolate
   {
-     throw DataException("Error - Operations not permitted on instances of DataEmpty.");
+     throw DataException("Error - Operations (C_TensorUnaryOperation) not permitted on instances of DataEmpty.");
   }
   if (arg_0.isLazy())
   {
diff --git a/escriptcore/src/DataAbstract.cpp b/escriptcore/src/DataAbstract.cpp
index b16b939..c7fcd83 100644
--- a/escriptcore/src/DataAbstract.cpp
+++ b/escriptcore/src/DataAbstract.cpp
@@ -103,6 +103,10 @@ DataAbstract::DataAbstract(const FunctionSpace& what, const ShapeType& shape, bo
     m_rank(DataTypes::getRank(shape))
 
 {
+#ifdef EXWRITECHK
+    exclusivewritecalled=false;
+#endif  
+  
     m_isempty=isDataEmpty;
     if (m_rank>ESCRIPT_MAX_DATA_RANK)
     {
diff --git a/escriptcore/src/DataAbstract.h b/escriptcore/src/DataAbstract.h
index 96bcfec..a7a566c 100644
--- a/escriptcore/src/DataAbstract.h
+++ b/escriptcore/src/DataAbstract.h
@@ -475,6 +475,15 @@ class ESCRIPT_DLL_API DataAbstract : public REFCOUNT_BASE_CLASS(DataAbstract)
 	return m_lazyshared || (m_owners.size()>1);
   }
 
+#ifdef EXWRITECHK
+  bool exclusivewritecalled;	// used to check for some potential programming faults 
+				// involving shared data.
+				// This flag only asserts that exclusive write has been called
+				// on this object, it does not definitively guarantee that
+				// sharing has not occurred since that call
+				// This flag is for internal use only may be removed without warning
+#endif
+  
 protected:
     /**
     \brief Returns true if this object is not shared.
@@ -554,7 +563,7 @@ DataAbstract::getNumDPPSample() const
 {
   if (isEmpty())
   {
-     	throw DataException("Error - Operations not permitted on instances of DataEmpty.");
+     	throw DataException("Error - Operations (getNumDPPSample) not permitted on instances of DataEmpty.");
   }
   return m_noDataPointsPerSample;
 }
@@ -565,7 +574,7 @@ DataAbstract::getNumSamples() const
 {
   if (isEmpty())
   {
-     	throw DataException("Error - Operations not permitted on instances of DataEmpty.");
+     	throw DataException("Error - Operations (getNumSamples) not permitted on instances of DataEmpty.");
   }
   return m_noSamples;
 }
@@ -584,7 +593,7 @@ DataAbstract::getShape() const
 {
 	if (isEmpty())
 	{
-		throw DataException("Error - Operations not permitted on instances of DataEmpty.");
+		throw DataException("Error - Operations (getShape) not permitted on instances of DataEmpty.");
 	}
 	return m_shape;
 }
@@ -595,7 +604,7 @@ DataAbstract::getRank() const
 {
 	if (isEmpty())
 	{
-		throw DataException("Error - Operations not permitted on instances of DataEmpty.");
+		throw DataException("Error - Operations (getRank) not permitted on instances of DataEmpty.");
 	}
 	return m_rank;
 }
@@ -606,7 +615,7 @@ DataAbstract::getNoValues() const
 {	
 	if (isEmpty())
 	{
-		throw DataException("Error - Operations not permitted on instances of DataEmpty.");
+		throw DataException("Error - Operations (getNoValues) not permitted on instances of DataEmpty.");
 	}
 	return m_novalues;
 }
diff --git a/escriptcore/src/DataExpanded.h b/escriptcore/src/DataExpanded.h
index 0dfaadf..ff6bf26 100644
--- a/escriptcore/src/DataExpanded.h
+++ b/escriptcore/src/DataExpanded.h
@@ -381,23 +381,15 @@ TODO Note that this constructor will also copy data to all points if it only con
   eigenvalues_and_eigenvectors(DataAbstract* ev,DataAbstract* V,const double tol=1.e-13);
 
 /**
- *      \brief
- *           reorders data sample ordered by reference_ids to the ordering of the functions space
- *
- *                \param reference_ids - Input - reference_ids used for current ordering
- *                  */
+    \brief
+    reorders data sample ordered by reference_ids to the ordering of the functions space
+
+    \param reference_ids - Input - reference_ids used for current ordering
+*/
   ESCRIPT_DLL_API
   virtual void
   reorderByReferenceIDs(int *reference_ids);
 
-//   /**
-//   \brief Fill the object with random values
-// 
-//   \param seed - random seed
-//   */
-//   ESCRIPT_DLL_API
-//   void randomFill(long seed);
-
  protected:
 
  private:
diff --git a/escriptcore/src/LocalOps.h b/escriptcore/src/LocalOps.h
index baa2d02..3a871cd 100644
--- a/escriptcore/src/LocalOps.h
+++ b/escriptcore/src/LocalOps.h
@@ -417,14 +417,14 @@ void  eigenvalues_and_eigenvectors3(const double A00, const double A01, const do
         }
       } else {
          eigenvalues3(A00,A01,A02,A11,A12,A22,ev0,ev1,ev2);
-         const register double absev0=fabs(*ev0);
-         const register double absev1=fabs(*ev1);
-         const register double absev2=fabs(*ev2);
-         register double max_ev=absev0>absev1 ? absev0 : absev1;
+         const double absev0=fabs(*ev0);
+         const double absev1=fabs(*ev1);
+         const double absev2=fabs(*ev2);
+         double max_ev=absev0>absev1 ? absev0 : absev1;
          max_ev=max_ev>absev2 ? max_ev : absev2;
-         const register double d_01=fabs((*ev0)-(*ev1));
-         const register double d_12=fabs((*ev1)-(*ev2));
-         const register double max_d=d_01>d_12 ? d_01 : d_12;
+         const double d_01=fabs((*ev0)-(*ev1));
+         const double d_12=fabs((*ev1)-(*ev2));
+         const double max_d=d_01>d_12 ? d_01 : d_12;
          if (max_d<=tol*max_ev) {
              *V00=1.;
              *V10=0;
@@ -436,8 +436,8 @@ void  eigenvalues_and_eigenvectors3(const double A00, const double A01, const do
              *V12=0;
              *V22=1.;
          } else {
-            const register double S00=A00-(*ev0);
-            const register double absS00=fabs(S00);
+            const double S00=A00-(*ev0);
+            const double absS00=fabs(S00);
             if (absS00>m) {
                 vectorInKernel3__nonZeroA00(S00,A01,A02,A01,A11-(*ev0),A12,A02,A12,A22-(*ev0),V00,V10,V20);
             } else if (absA02<m) {
@@ -446,8 +446,8 @@ void  eigenvalues_and_eigenvectors3(const double A00, const double A01, const do
                 vectorInKernel3__nonZeroA00(A02,A12,A22-(*ev0),S00,A01,A02,A01,A11-(*ev0),A12,V00,V10,V20);
             }
             normalizeVector3(V00,V10,V20);;
-            const register double T00=A00-(*ev2);
-            const register double absT00=fabs(T00);
+            const double T00=A00-(*ev2);
+            const double absT00=fabs(T00);
             if (absT00>m) {
                  vectorInKernel3__nonZeroA00(T00,A01,A02,A01,A11-(*ev2),A12,A02,A12,A22-(*ev2),V02,V12,V22);
             } else if (absA02<m) {
@@ -455,7 +455,7 @@ void  eigenvalues_and_eigenvectors3(const double A00, const double A01, const do
             } else {
                  vectorInKernel3__nonZeroA00(A02,A12,A22-(*ev2),T00,A01,A02,A01,A11-(*ev2),A12,V02,V12,V22);
             }
-            const register double dot=(*V02)*(*V00)+(*V12)*(*V10)+(*V22)*(*V20);
+            const double dot=(*V02)*(*V00)+(*V12)*(*V10)+(*V22)*(*V20);
             *V02-=dot*(*V00);
             *V12-=dot*(*V10);
             *V22-=dot*(*V20);
diff --git a/escriptcore/src/MPIDataReducer.cpp b/escriptcore/src/MPIDataReducer.cpp
index 480d7dc..adc8d66 100644
--- a/escriptcore/src/MPIDataReducer.cpp
+++ b/escriptcore/src/MPIDataReducer.cpp
@@ -38,6 +38,10 @@ Reducer_ptr makeDataReducer(std::string type)
     {
 	op=MPI_SUM;
     }
+    else if (type=="SET")
+    {
+	op=MPI_OP_NULL;
+    }
     else
     {
 	throw SplitWorldException("Unsupported operation for makeDataReducer.");
@@ -56,19 +60,22 @@ void combineData(Data& d1, const Data& d2, MPI_Op op)
     if (op==MPI_SUM)
     {
 	d1+=d2;
+    } 
+    else if (op==MPI_OP_NULL) 
+    {
+	throw SplitWorldException("Multiple 'simultaneous' attempts to export a 'SET' variable.");
     }
 }
 
 }
 
 MPIDataReducer::MPIDataReducer(MPI_Op op)
-  : reduceop(op)
+  : reduceop(op), had_an_export_this_round(false)
 {
     valueadded=false;
-    if (op==MPI_SUM)
+    if ((op==MPI_SUM) || (op==MPI_OP_NULL))
     {
 	// deliberately left blank
-	throw SplitWorldException("Unsupported MPI_Op");
     }
     else
     {
@@ -76,6 +83,10 @@ MPIDataReducer::MPIDataReducer(MPI_Op op)
     }
 }
 
+void MPIDataReducer::newRunJobs()
+{
+    had_an_export_this_round=false;
+}
 
 void MPIDataReducer::setDomain(escript::Domain_ptr d)
 {
@@ -85,6 +96,10 @@ void MPIDataReducer::setDomain(escript::Domain_ptr d)
 std::string MPIDataReducer::description()
 {
     std::string op="SUM";
+    if (reduceop==MPI_OP_NULL)
+    {
+	op="SET";
+    }
     return "Reducer("+op+") for Data objects"; 
 }
 
@@ -116,25 +131,48 @@ bool MPIDataReducer::reduceLocalValue(boost::python::object v, std::string& errs
 	return false;
     }
     Data& d=ex();
+    if (d.isEmpty())
+    {
+	errstring="reduceLocalValue: Got an empty Data object. Not allowed to reduce those.";
+	return false;
+    }
     if ((d.getDomain()!=dom) && (dom.get()!=0))
     {
 	errstring="reduceLocalValue: Got a Data object, but it was not using the SubWorld's domain.";
 	return false;
     }
     d.expand();		// because I don't want to mess about with types of Data
-    if (!valueadded)	// first value so answer becomes this one
+    if (!valueadded || !had_an_export_this_round)	// first value so answer becomes this one
     {
 	value=d;
 	dom=d.getDomain();
+        had_an_export_this_round=true;
+	valueadded=true;
     }
     else
     {
-	if (d.getFunctionSpace()!=value.getFunctionSpace())
+	if (reduceop==MPI_OP_NULL)
 	{
-	    errstring="reduceLocalValue: FunctionSpaces for Data objects being combined must match.";
-	    return false;
+	    if (had_an_export_this_round) 
+	    {
+		reset();
+		errstring="reduceLocalValue: Multiple 'simultaneous' attempts to export a 'SET' variable.";
+		return false;
+	    }
+	    value=d;
+	    dom=d.getDomain();
+	    had_an_export_this_round=true;
 	}
-	combineData(value, d, reduceop);
+        else
+        { 
+	    had_an_export_this_round=true;
+	    if (d.getFunctionSpace()!=value.getFunctionSpace())
+	    {
+	        errstring="reduceLocalValue: FunctionSpaces for Data objects being combined must match.";
+	        return false;
+	    }
+	    combineData(value, d, reduceop);
+        }
     }
     return true;
 }
@@ -167,9 +205,13 @@ bool MPIDataReducer::checkRemoteCompatibility(esysUtils::JMPI& mpi_info, std::st
 	errstring="MPI failure in checkRemoteCompatibility.";
 	return false;
     }
-    for (int i=0;i<mpi_info->size-1;++i)
+    for (int i=0;i<(mpi_info->size-1);++i)
     {
-	for (int j=0;j<compat.size();++i)
+	if ((rbuff[i*compat.size()]==1) || (rbuff[(i+1)*compat.size()]==1))	// one of them doesn't have a value
+	{
+	    continue;
+	}
+	for (int j=0;j<compat.size();++j)
 	{
 	    if (rbuff[i*compat.size()+j]!=rbuff[(i+1)*compat.size()+j])
 	    {
@@ -188,20 +230,22 @@ bool MPIDataReducer::checkRemoteCompatibility(esysUtils::JMPI& mpi_info, std::st
 
 // By the time this function is called, we know that all the values 
 // are compatible
-bool MPIDataReducer::reduceRemoteValues(esysUtils::JMPI& mpi_info, bool active)
+bool MPIDataReducer::reduceRemoteValues(MPI_Comm& comm)
 {
-    if (!active)
-    {
-	return false;	// shutting down this option until I implement it
-    }
 #ifdef ESYS_MPI
     DataTypes::ValueType& vr=value.getExpandedVectorReference();
     Data result(0, value.getDataPointShape(), value.getFunctionSpace(), true);
-    DataTypes::ValueType& rr=value.getExpandedVectorReference();
-    if (MPI_Allreduce(&(vr[0]), &(rr[0]), vr.size(), MPI_DOUBLE, reduceop, mpi_info->comm)!=MPI_SUCCESS)
+    DataTypes::ValueType& rr=result.getExpandedVectorReference();
+    if (reduceop==MPI_OP_NULL)
+    {
+	reset();	// we can't be sure what the value should be
+	return false;		// this will stop bad things happening but won't give an informative error message
+    }
+    if (MPI_Allreduce(&(vr[0]), &(rr[0]), vr.size(), MPI_DOUBLE, reduceop, comm)!=MPI_SUCCESS)
     {
 	return false;
     }
+    value=result;
     return true;
 #else
     return true;
@@ -211,7 +255,7 @@ bool MPIDataReducer::reduceRemoteValues(esysUtils::JMPI& mpi_info, bool active)
 // populate a vector of ints with enough information to ensure two values are compatible
 // or to construct a container for incomming data
 // Format for this:
-//  [0]    Type of Data:  {0 : error,  1: DataEmpty, 10: constant, 11:tagged, 12:expanded}
+//  [0]    Type of Data:  {0 : error,  1:no value, 10: constant, 11:tagged, 12:expanded}
 //  [1]    Functionspace type code
 //  [2]    Only used for tagged --- gives the number of tags (which exist in the data object)
 //  [3..6] Components of the shape  
@@ -222,6 +266,11 @@ void MPIDataReducer::getCompatibilityInfo(std::vector<unsigned>& params)
     {
 	params[0]=0;
     }
+    if (!valueadded)
+    {
+	params[0]=1;
+	return;
+    }
     if (value.isConstant())
     {
 	params[0]=10;
@@ -237,6 +286,7 @@ void MPIDataReducer::getCompatibilityInfo(std::vector<unsigned>& params)
     else	// This could be DataEmpty or some other weirdness but we won't allow that
     {
 	params[0]=0;	// invalid type to send
+	return;
     }    
     params[1]=value.getFunctionSpace().getTypeCode();
     params[2]=static_cast<unsigned>(value.getNumberOfTaggedValues());    
@@ -252,7 +302,7 @@ void MPIDataReducer::getCompatibilityInfo(std::vector<unsigned>& params)
 	// This is not a reduction and will replace any existing value
 bool MPIDataReducer::recvFrom(Esys_MPI_rank localid, Esys_MPI_rank source, esysUtils::JMPI& mpiinfo)
 {
-#ifdef ESYS_MPI  
+#ifdef ESYS_MPI 
       // first we need to find out what we are expecting
     unsigned params[7];
     MPI_Status stat;
@@ -301,6 +351,10 @@ bool MPIDataReducer::recvFrom(Esys_MPI_rank localid, Esys_MPI_rank source, esysU
 	// This is not a reduction and will replace any existing value    
 bool MPIDataReducer::sendTo(Esys_MPI_rank localid, Esys_MPI_rank target, esysUtils::JMPI& mpiinfo)
 {
+      if (!valueadded)
+      {
+	  return false;		// May be misinterpreted as an MPI failure
+      }
 #ifdef ESYS_MPI  
       // first step is to let the other world know what sort of thing it needs to make
       if (value.isLazy())
@@ -338,18 +392,132 @@ bool MPIDataReducer::sendTo(Esys_MPI_rank localid, Esys_MPI_rank target, esysUti
 
 boost::python::object MPIDataReducer::getPyObj()
 {
-    throw SplitWorldException("getPyObj Not implemented yet.");
+    boost::python::object o(value);
+    return o;
 }
 
 
 	// send from proc 0 in the communicator to all others
-bool MPIDataReducer::groupSend(MPI_Comm& com)
+	// second argument is true if this rank is sending
+bool MPIDataReducer::groupSend(MPI_Comm& comm, bool imsending)
 {
-    throw SplitWorldException("groupSend Not implemented yet.");
+      if (dom.get()==0)
+      {
+	  return 0;	// trying to avoid throwing here
+			// this will still cause a lockup if it happens
+      }
+#ifdef ESYS_MPI
+      if (imsending)
+      {
+	  // first step is to let the other world know what sort of thing it needs to make
+	  if (value.isLazy())
+	  {
+	      value.resolve();
+	  }
+	  std::vector<unsigned> params;
+	  getCompatibilityInfo(params);
+	  if (MPI_Bcast(&params[0], params.size(), MPI_UNSIGNED, 0,comm)!=MPI_SUCCESS)
+	  {
+	      return false;
+	  }
+	    // now we have informed the other end of what happened
+	    // are we done or is there actually data to send
+	  if (params[0]<10)
+	  {
+	      return false;
+	  }
+	    // at this point, we know there is data to send
+	  const DataAbstract::ValueType::value_type* vect=value.getDataRO();
+	    // now the receiver knows how much data it should be receive
+	    // need to make sure that we aren't trying to send data with no local samples
+	  if (vect!=0)
+	  {
+	      if (MPI_Bcast(const_cast<DataAbstract::ValueType::value_type*>(vect), value.getLength(), MPI_DOUBLE, 0, comm)!=MPI_SUCCESS)
+	      {
+		  return false;
+	      }
+	  }
+      }
+      else	// we are receiving
+      {
+	
+	    // first we need to find out what we are expecting
+	  unsigned params[7];
+	  if (MPI_Bcast(params, 7, MPI_UNSIGNED, 0, comm)!=MPI_SUCCESS)
+	  {
+	      return false;
+	  }
+	  if (params[0]<10)	// the sender somehow tried to send something invalid
+	  {
+	      return false;
+	  }
+	    // now we put the shape object together
+	  escript::DataTypes::ShapeType s;
+	  for (int i=0;i<4;++i)
+	  {
+	      if (params[3+i]>0)
+	      {
+		  s.push_back(params[3+i]);
+	      }
+	      else
+	      {
+		  break;
+	      }
+	  }
+	    // Now we need the FunctionSpace
+	  FunctionSpace fs=FunctionSpace(dom, static_cast<int>(params[1]));
+	  value=Data(0, s, fs, params[0]==12);
+	  if (params[0]==11)	// The Data is tagged so we need to work out what tags we need
+	  {
+	      // TODO:  Need to ship the tags and names over but for now just make sure there
+	      // are the same number of tags
+	      value.tag();
+	      
+	      DataVector dv(DataTypes::noValues(s), 0, 1);
+	      for (unsigned i=0;i<params[2];++i)
+	      {
+		  value.setTaggedValueFromCPP(static_cast<int>(i)+1, s, dv, 0);
+	      }
+	      return false;	// because I don't trust this yet
+	  }
+	  DataAbstract::ValueType::value_type* vect=&(value.getExpandedVectorReference()[0]);
+	  if (MPI_Bcast(const_cast<DataAbstract::ValueType::value_type*>(vect), value.getLength(), MPI_DOUBLE, 0, comm)!=MPI_SUCCESS)
+	  {
+	      return false;
+	  }
+	  valueadded=true;
+      }
+#endif        
+    return true;
 }
 
+	// We assume compatible values at this point
 bool MPIDataReducer::groupReduce(MPI_Comm& com, char mystate)
 {
     throw SplitWorldException("groupReduce Not implemented yet.");
 }
 
+void MPIDataReducer::copyValueFrom(boost::shared_ptr<AbstractReducer>& src)
+{
+    MPIDataReducer* sr=dynamic_cast<MPIDataReducer*>(src.get());
+    if (sr==0)
+    {
+	throw SplitWorldException("Source and destination need to be the same reducer types.");
+    }
+    if (sr->value.isEmpty())
+    {
+	throw SplitWorldException("Attempt to copy DataEmpty.");
+    }
+    if (sr==this)
+    {
+	throw SplitWorldException("Source and destination can not be the same variable.");
+    }
+    value.copy(sr->value);    
+    valueadded=true;
+}
+
+bool MPIDataReducer::canClash()
+{
+    return (reduceop==MPI_OP_NULL);
+}
+
diff --git a/escriptcore/src/MPIDataReducer.h b/escriptcore/src/MPIDataReducer.h
index 9487a3e..d19dc39 100644
--- a/escriptcore/src/MPIDataReducer.h
+++ b/escriptcore/src/MPIDataReducer.h
@@ -43,7 +43,7 @@ public:
     void getCompatibilityInfo(std::vector<unsigned>& params);
     
       // talk to corresponding processes in other subworlds
-    bool reduceRemoteValues(esysUtils::JMPI& mpi_info, bool active);
+    bool reduceRemoteValues(MPI_Comm& com);
     
       // human readable description
     std::string description();
@@ -58,15 +58,21 @@ public:
     virtual boost::python::object getPyObj();
 
 	// send from proc 0 in the communicator to all others
-    bool groupSend(MPI_Comm& com);
+    bool groupSend(MPI_Comm& com, bool imsending);
+
+    bool canClash();
     
 	// reduction with some procs submitting identity values
     bool groupReduce(MPI_Comm& com, char mystate);    
-    
+   
+
+    void newRunJobs();
+    void copyValueFrom(boost::shared_ptr<AbstractReducer>& src);
 private:    
     escript::Data value;
     escript::const_Domain_ptr dom;
     MPI_Op reduceop;
+    bool had_an_export_this_round;
 };
 
 Reducer_ptr makeDataReducer(std::string type);
diff --git a/escriptcore/src/MPIScalarReducer.cpp b/escriptcore/src/MPIScalarReducer.cpp
index f3840da..0456dfe 100644
--- a/escriptcore/src/MPIScalarReducer.cpp
+++ b/escriptcore/src/MPIScalarReducer.cpp
@@ -47,6 +47,10 @@ Reducer_ptr makeScalarReducer(std::string type)
     {
 	op=MPI_MIN;
     }
+    else if (type=="SET")
+    {
+	op=MPI_OP_NULL;
+    }
     else
     {
 	throw SplitWorldException("Unsupported operation for makeScalarReducer.");
@@ -74,16 +78,20 @@ void combineDouble(double& d1, const double d2, MPI_Op op)
     else if (op==MPI_MIN)
     {
 	d1=(d2<d1)?d2:d1;      
+    }
+    else if (op==MPI_OP_NULL) 
+    {
+	throw SplitWorldException("Multiple 'simultaneous' attempts to export a 'SET' variable.");
     }    
 }
 }
 
 
 MPIScalarReducer::MPIScalarReducer(MPI_Op op)
-  : reduceop(op)
+  : reduceop(op), had_an_export_this_round(false)
 {
     valueadded=false;
-    if (op==MPI_SUM)	// why not switch? because we don't know MPI_Op is scalar
+    if ((op==MPI_SUM) || (op==MPI_OP_NULL))	// why not switch? because we don't know MPI_Op is scalar
     {
 	identity=0;
     }
@@ -121,6 +129,10 @@ std::string MPIScalarReducer::description()
     {
 	op="MIN";
     }
+    else if (reduceop==MPI_OP_NULL)
+    {
+	op="SET";
+    }    
     else
     {
 	throw SplitWorldException("Unsupported MPI reduction operation");
@@ -128,6 +140,11 @@ std::string MPIScalarReducer::description()
     return "Reducer("+op+") for double scalars"; 
 }
 
+void MPIScalarReducer::newRunJobs()
+{
+    had_an_export_this_round=false;
+}
+
 bool MPIScalarReducer::valueCompatible(boost::python::object v)
 {
     extract<double> ex(v);
@@ -147,14 +164,30 @@ bool MPIScalarReducer::reduceLocalValue(boost::python::object v, std::string& er
 	errstring="reduceLocalValue: expected double value. Got something else.";
 	return false;
     }
-    if (!valueadded)	// first value so answer becomes this one
+    if (!valueadded || !had_an_export_this_round)	// first value so answer becomes this one
     {
 	value=ex();
 	valueadded=true;
+        had_an_export_this_round=true;
     }
     else
     {
-	combineDouble(value, ex(), reduceop);
+	if (reduceop==MPI_OP_NULL)
+	{
+	    if (had_an_export_this_round) 
+	    {
+		reset();
+		errstring="reduceLocalValue: Multiple 'simultaneous' attempts to export a 'SET' variable.";
+		return false;
+	    }
+	    value=ex();
+	}
+        else
+        { 
+	    combineDouble(value, ex(), reduceop);
+        }      
+        had_an_export_this_round=true;
+	
     }
     return true;
 }
@@ -172,19 +205,20 @@ bool MPIScalarReducer::checkRemoteCompatibility(esysUtils::JMPI& mpi_info, std::
 
 // By the time this function is called, we know that all the values 
 // are compatible
-bool MPIScalarReducer::reduceRemoteValues(esysUtils::JMPI& mpi_info, bool active)
+bool MPIScalarReducer::reduceRemoteValues(MPI_Comm& com)
 {
 #ifdef ESYS_MPI
-    if (!active)
-    {
-        value=identity;
+    if (reduceop==MPI_OP_NULL)
+    {	
+	reset();
+	return false;		// this will stop bad things happening but won't give an informative error message
     }
-std::cout << "Value in " << value << std::endl;    
-    if (MPI_Allreduce(&value, &value, 1, MPI_DOUBLE, reduceop, mpi_info->comm)!=MPI_SUCCESS)
+    double rvalue;
+    if (MPI_Allreduce(&value, &rvalue, 1, MPI_DOUBLE, reduceop, com)!=MPI_SUCCESS)
     {
 	return false;
     }
-std::cout << "Value out " << value << std::endl;    
+    value=rvalue;
     return true;
 #else
     return true;
@@ -246,7 +280,7 @@ boost::python::object MPIScalarReducer::getPyObj()
 #ifdef ESYS_MPI
 
 	// send from proc 0 in the communicator to all others
-bool MPIScalarReducer::groupSend(MPI_Comm& com)
+bool MPIScalarReducer::groupSend(MPI_Comm& com, bool imsending)
 {
     if (MPI_Bcast(&value, 1, MPI_DOUBLE, 0, com)==MPI_SUCCESS)
     {
@@ -259,6 +293,10 @@ bool MPIScalarReducer::groupSend(MPI_Comm& com)
 bool MPIScalarReducer::groupReduce(MPI_Comm& com, char mystate)
 {
     double answer=0;
+    if (reduceop==MPI_OP_NULL)
+    {
+	return false;
+    }
     if (MPI_Allreduce((mystate==reducerstatus::NEW)?&value:&identity, &answer, 1, MPI_DOUBLE, reduceop, com)==MPI_SUCCESS)
     {
 	value=answer;
@@ -269,3 +307,20 @@ bool MPIScalarReducer::groupReduce(MPI_Comm& com, char mystate)
 }
 
 #endif
+
+void MPIScalarReducer::copyValueFrom(boost::shared_ptr<AbstractReducer>& src)
+{
+    MPIScalarReducer* sr=dynamic_cast<MPIScalarReducer*>(src.get());
+    if (sr==0)
+    {
+	throw SplitWorldException("Source and destination need to be the same reducer types.");
+    }
+    value=sr->value;
+    valueadded=true;
+}
+
+bool MPIScalarReducer::canClash()
+{
+    return (reduceop==MPI_OP_NULL);
+}
+
diff --git a/escriptcore/src/MPIScalarReducer.h b/escriptcore/src/MPIScalarReducer.h
index d63c5a0..3e5f315 100644
--- a/escriptcore/src/MPIScalarReducer.h
+++ b/escriptcore/src/MPIScalarReducer.h
@@ -43,7 +43,7 @@ public:
     void getCompatibilityInfo(std::vector<unsigned>& params);
     
       // talk to corresponding processes in other subworlds
-    bool reduceRemoteValues(esysUtils::JMPI& mpi_info, bool active);
+    bool reduceRemoteValues(MPI_Comm& com);
     
       // human readable description
     std::string description();
@@ -59,15 +59,23 @@ public:
     virtual boost::python::object getPyObj(); 
     
     	// send from proc 0 in the communicator to all others
-    bool groupSend(MPI_Comm& com);
+    bool groupSend(MPI_Comm& com, bool imsending);
+
+    bool canClash();    
     
 	// reduction with some procs submitting identity values
     bool groupReduce(MPI_Comm& com, char mystate);
     
+    void copyValueFrom(boost::shared_ptr<AbstractReducer>& src);    
+    
+    void newRunJobs();
+    
 private:    
     double value;
     MPI_Op reduceop;
     double identity;
+    bool had_an_export_this_round;
+    
 };
 
 
diff --git a/escriptcore/src/NonReducedVariable.cpp b/escriptcore/src/NonReducedVariable.cpp
index 80a2dc0..b71cffe 100644
--- a/escriptcore/src/NonReducedVariable.cpp
+++ b/escriptcore/src/NonReducedVariable.cpp
@@ -69,7 +69,7 @@ void NonReducedVariable::getCompatibilityInfo(std::vector<unsigned>& params)
     // empty
 }
 
-bool NonReducedVariable::reduceRemoteValues(esysUtils::JMPI& mpi_info, bool active)
+bool NonReducedVariable::reduceRemoteValues(MPI_Comm& mpi_info)
 {
     return true;
 }
@@ -99,7 +99,7 @@ boost::python::object NonReducedVariable::getPyObj()
     return value;
 }
 
-bool NonReducedVariable::groupSend(MPI_Comm& com)
+bool NonReducedVariable::groupSend(MPI_Comm& com, bool imsending)
 {
     return true;
 }
@@ -109,6 +109,18 @@ bool NonReducedVariable::groupReduce(MPI_Comm& com, char mystate)
     return true;
 }
 
+void NonReducedVariable::copyValueFrom(boost::shared_ptr<AbstractReducer>& src)
+{
+    NonReducedVariable* sr=dynamic_cast<NonReducedVariable*>(src.get());
+    if (sr==0)
+    {
+	throw SplitWorldException("Source and destination need to be the same reducer types.");
+    }
+    value=sr->value;
+    valueadded=true;
+}
+
+
 namespace escript
 {
 Reducer_ptr makeNonReducedVariable()
diff --git a/escriptcore/src/NonReducedVariable.h b/escriptcore/src/NonReducedVariable.h
index b6a8fd7..6281c8f 100644
--- a/escriptcore/src/NonReducedVariable.h
+++ b/escriptcore/src/NonReducedVariable.h
@@ -44,7 +44,7 @@ public:
     void getCompatibilityInfo(std::vector<unsigned>& params);
     
       // talk to corresponding processes in other subworlds
-    bool reduceRemoteValues(esysUtils::JMPI& mpi_info, bool active);
+    bool reduceRemoteValues(MPI_Comm& mpi_info);
     
       // human readable description
     std::string description();
@@ -60,11 +60,13 @@ public:
     virtual boost::python::object getPyObj(); 
     
     	// send from proc 0 in the communicator to all others
-    bool groupSend(MPI_Comm& com);
+    bool groupSend(MPI_Comm& com, bool imsending);
     
 	// reduction with some procs submitting identity values
     bool groupReduce(MPI_Comm& com, char mystate);
     
+    void copyValueFrom(boost::shared_ptr<AbstractReducer>& src);
+    
 private:    
     boost::python::object value;
     boost::python::object identity;
diff --git a/escriptcore/src/SolverOptions.cpp b/escriptcore/src/SolverOptions.cpp
index 7561065..87a9848 100644
--- a/escriptcore/src/SolverOptions.cpp
+++ b/escriptcore/src/SolverOptions.cpp
@@ -457,7 +457,6 @@ void SolverBuddy::setSolverMethod(int method)
         case SO_METHOD_CGS:
         case SO_METHOD_CHOLEVSKY:
         case SO_METHOD_CR:
-        case SO_METHOD_DIRECT:
         case SO_METHOD_GMRES:
         case SO_METHOD_HRZ_LUMPING:
         case SO_METHOD_ITERATIVE:
@@ -470,6 +469,19 @@ void SolverBuddy::setSolverMethod(int method)
         case SO_METHOD_TFQMR:
             this->method = meth;
             break;
+        case SO_METHOD_DIRECT:
+#ifdef USE_UMFPACK
+            this->method = meth;
+            break;
+#elif defined MKL
+            this->method = meth;
+            break;
+#elif defined PASTIX
+            this->method = meth;
+            break;
+#else
+            throw SolverOptionsException("Cannot use DIRECT solver method, the running escript was not compiled with a direct solver enabled");
+#endif
         default:
             throw SolverOptionsException("unknown solver method");
     }
diff --git a/escriptcore/src/SplitWorld.cpp b/escriptcore/src/SplitWorld.cpp
index 4b292de..26a59d0 100644
--- a/escriptcore/src/SplitWorld.cpp
+++ b/escriptcore/src/SplitWorld.cpp
@@ -73,10 +73,15 @@ SplitWorld::SplitWorld(unsigned int numgroups, MPI_Comm global)
 	corrcom=esysUtils::makeInfo(corrsub,true);
 	
     #else
+	if (numgroups!=1)
+	{
+	    throw SplitWorldException("SplitWorld error: non-MPI builds can only create 1 subworld.");
+	  
+	}
 	subcom=esysUtils::makeInfo(0);
 	corrcom=esysUtils::makeInfo(0);
     #endif
-    localworld=SubWorld_ptr(new SubWorld(globalcom, subcom,corrcom, swcount, grank%wsize,manualimport));
+    localworld=SubWorld_ptr(new SubWorld(globalcom, subcom,corrcom, swcount, grank/wsize,manualimport));
     localid=grank/wsize;
 }
 
@@ -116,7 +121,8 @@ object SplitWorld::buildDomains(tuple t, dict kwargs)
 void SplitWorld::runJobs()
 {
     esysUtils::NoCOMM_WORLD ncw;	// it's destructor will unset the flag
-    localworld->resetInterest();    
+    localworld->resetInterest();  
+    localworld->newRunJobs();
     try 
     {
 	distributeJobs();
@@ -155,7 +161,6 @@ void SplitWorld::runJobs()
 		    }
 	        }
 	    }
-
 	} while (false);
         int res=mres;
         // now we find out about the other worlds
@@ -163,14 +168,13 @@ void SplitWorld::runJobs()
         {
 	    throw SplitWorldException("MPI appears to have failed.");
         }
-
 	localworld->clearJobs();
 	  // at this point, the remote world has all the reductions done
 	  // now we need to do the global merges
 	if (!localworld->checkRemoteCompatibility(err))
 	{
 	    mres=4;
-	    err="Error in checkRemoteCompatibility.";
+	    err=std::string("Error in checkRemoteCompatibility. ")+err;
 	}
 	if (mres==0)	
 	{  	
@@ -401,6 +405,30 @@ void SplitWorld::distributeJobs()
     }
 }
 
+int SplitWorld::getSubWorldCount()
+{
+    return swcount;
+}
+
+int SplitWorld::getSubWorldID()
+{
+    return localid;
+}
+
+void SplitWorld::copyVariable(const std::string& src, const std::string& dest)
+{
+    if (manualimport)
+    {
+	throw SplitWorldException("copyVariable is not yet supported for manualimport.");
+	  // we would need to make sure that the value is on this world
+	  // so we need to do a value transport here
+    }
+    else
+    {
+        localworld->copyVariable(src, dest);
+    }
+}
+
 
 namespace escript
 {
@@ -467,7 +495,7 @@ boost::python::object raw_addVariable(boost::python::tuple t, boost::python::dic
     int l=len(t);
     if (l<3)
     {
-	throw SplitWorldException("Insufficient parameters to addReducer.");
+	throw SplitWorldException("Insufficient parameters to addVariable.");
     }
     extract<SplitWorld&> exw(t[0]);
     if (!exw.check())
diff --git a/escriptcore/src/SplitWorld.h b/escriptcore/src/SplitWorld.h
index ca595bc..c0317e3 100644
--- a/escriptcore/src/SplitWorld.h
+++ b/escriptcore/src/SplitWorld.h
@@ -55,6 +55,10 @@ public:
 
     double getScalarVariable(const std::string& name);
     
+    int getSubWorldCount();
+    int getSubWorldID();
+
+    void copyVariable(const std::string& src, const std::string& dest);     
     
     
 private:    
diff --git a/escriptcore/src/SubWorld.cpp b/escriptcore/src/SubWorld.cpp
index 97dd05d..b6b138f 100644
--- a/escriptcore/src/SubWorld.cpp
+++ b/escriptcore/src/SubWorld.cpp
@@ -24,6 +24,7 @@
 
 #include "MPIDataReducer.h"
 #include "MPIScalarReducer.h"
+#include "NonReducedVariable.h"
 
 #include <boost/python/import.hpp>
 #include <boost/python/dict.hpp>
@@ -44,6 +45,7 @@ SubWorld::SubWorld(JMPI& global, JMPI& comm, JMPI& corr, unsigned int subworldco
     ,globalinfoinvalid(true)
 #endif    
 {
+	swcount=subworldcount;	// redundant to keep clang happy
 }
 
 SubWorld::~SubWorld()
@@ -85,10 +87,10 @@ void SubWorld::setMyVarState(const std::string& vname, char state)
     setVarState(vname, state, localid);
 }
 
-void SubWorld::setAllVarsState(std::string& vname, char state)
+void SubWorld::setAllVarsState(const std::string& vname, char state)
 {
 #ifdef ESYS_MPI  
-      // we need to know where the variable is in thbe sequence
+      // we need to know where the variable is in the sequence
     str2char::iterator it=varstate.find(vname);
     size_t c=0;
     for (;it!=varstate.end();++it,++c)
@@ -124,7 +126,7 @@ void SubWorld::setAllVarsState(std::string& vname, char state)
 }
 
 
-void SubWorld::setVarState(const std::string& vname, char state, int rank)
+void SubWorld::setVarState(const std::string& vname, char state, int swid)
 {
 #ifdef ESYS_MPI  
       // we need to know where the variable is in thbe sequence
@@ -141,16 +143,15 @@ void SubWorld::setVarState(const std::string& vname, char state, int rank)
     {
 	return;
     }
-    
 	// we now have the sequence position of the variable
     if (!globalinfoinvalid)	// it will be updated in the next synch
     {
-	unsigned char ostate=globalvarinfo[c+getNumVars()*rank];
-	globalvarinfo[c+getNumVars()*rank]=state;
+	unsigned char ostate=globalvarinfo[c+getNumVars()*swid];
+	globalvarinfo[c+getNumVars()*swid]=state;
 	globalvarcounts[vname][ostate]--;
 	globalvarcounts[vname][state]++;
     }
-    if (rank==localid)	// we are updating our own state so we need to change "varstate"
+    if (swid==localid)	// we are updating our own state so we need to change "varstate"
     {
 	it->second=state;
     }
@@ -365,12 +366,20 @@ double SubWorld::getScalarVariable(const std::string& name)
 	throw SplitWorldException(std::string("(Getting scalar --- Variable value) ")+errmsg);
     }
 #endif
-	
-    if (dynamic_cast<MPIScalarReducer*>(it->second.get())==0)
+    if (dynamic_cast<MPIScalarReducer*>(it->second.get()))
     {
-	throw SplitWorldException("Variable is not scalar.");
+	return dynamic_cast<MPIScalarReducer*>(it->second.get())->getDouble();
+    }
+    if (dynamic_cast<NonReducedVariable*>(it->second.get()))
+    {
+	boost::python::extract<double> ex(it->second->getPyObj());
+	if (!ex.check())
+	{
+	    throw SplitWorldException("Variable is not scalar.");
+	}
+	return ex();
     }
-    return dynamic_cast<MPIScalarReducer*>(it->second.get())->getDouble();
+    throw SplitWorldException("Variable is not scalar.");
 }
 
 bool SubWorld::checkRemoteCompatibility(std::string& errmsg)
@@ -387,23 +396,99 @@ bool SubWorld::checkRemoteCompatibility(std::string& errmsg)
 
 #ifdef ESYS_MPI  
   
-bool SubWorld::makeComm(MPI_Comm& sourcecom, MPI_Comm& subcom,std::vector<int>& members)
+bool SubWorld::makeComm(MPI_Comm& sourcecom, JMPI& ncom,std::vector<int>& members)
 {
+      MPI_Comm subcom;
       MPI_Group sourceg, g;
       if (MPI_Comm_group(sourcecom, &sourceg)!=MPI_SUCCESS) {return false;}
-      if (MPI_Group_incl(sourceg, members.size(), &members[0], &g)!=MPI_SUCCESS) {return false;}      
+      if (MPI_Group_incl(sourceg, members.size(), &members[0], &g)!=MPI_SUCCESS) {return false;}    
       // then create a communicator with that group
       if (MPI_Comm_create(sourcecom, g, &subcom)!=MPI_SUCCESS) 
       {
-	return false;
-	
+	  return false;	
       }
+      ncom=makeInfo(subcom, true);
       return true;
 }
 
+
+// The mystate, could be computed from vnum, this is just to shortcut
+// creates two groups, the first contains procs which need to reduce
+// the second group contains a single process with the new value and
+// all other interested parties
+bool SubWorld::makeGroupReduceGroups(MPI_Comm& srccom, int vnum, char mystate, JMPI& red, JMPI& cop, bool& incopy)
+{
+    incopy=false;
+    if ((mystate==rs::NEW)
+            || (mystate==rs::INTERESTED)
+            || (mystate==rs::OLDINTERESTED))
+    {
+	// first create a group with all the updates in it
+	std::vector<int> redmembers;
+	std::vector<int> copmembers;
+        for (int i=0+vnum;i<globalvarinfo.size();i+=getNumVars())
+        {
+	    bool havesrc=false;
+	    int world=i/getNumVars();
+            // make a vector of the involved procs with New at the front
+            switch (globalvarinfo[i])
+            {
+                case rs::NEW:
+		    if (!havesrc)
+		    {
+		        copmembers.insert(copmembers.begin(), world);
+			havesrc=true;
+			if (world==localid)
+			{
+			    incopy=true;			
+			}
+		    }
+		    redmembers.push_back(world);
+		    break;
+                case rs::INTERESTED:
+                case rs::OLDINTERESTED:
+                          copmembers.push_back(world);
+                          if (world==localid)
+                          {
+                              incopy=true;
+                          }
+                          break;
+            }
+        }
+	if (!makeComm(srccom, red, redmembers))
+	{
+	    return false;
+	}
+	if (!makeComm(srccom, cop, copmembers))
+	{
+	    return false;
+	}
+        return true;
+
+    }
+    else  // for people not in involved in the value shipping
+    {     // This would be a nice time to use MPI_Comm_create_group
+          // but it does not exist in MPI2.1
+        MPI_Comm temp;
+	if (MPI_Comm_create(srccom, MPI_GROUP_EMPTY, &temp)!=MPI_SUCCESS)
+	{
+	    return false;
+	}
+	red=makeInfo(temp, true);
+        if (MPI_Comm_create(srccom, MPI_GROUP_EMPTY, &temp)!=MPI_SUCCESS)
+	{
+	    return false;
+	}
+	cop=makeInfo(temp, true);
+	return true;
+    }
+
+}
+
+
 // a group with NEW nodes at the front and INT and OLDINT at the back
 // NONE worlds get an empty communicator
-bool SubWorld::makeGroupComm1(MPI_Comm& srccom, int vnum, char mystate, MPI_Comm& com)
+bool SubWorld::makeGroupComm1(MPI_Comm& srccom, int vnum, char mystate, JMPI& com)
 {
       if ((mystate==rs::NEW)
 	    || (mystate==rs::INTERESTED)
@@ -428,14 +513,18 @@ bool SubWorld::makeGroupComm1(MPI_Comm& srccom, int vnum, char mystate, MPI_Comm
       else	// for people not in involved in the value shipping
       {		// This would be a nice time to use MPI_Comm_create_group
 		// but it does not exist in MPI2.1
-	  return MPI_Comm_create(srccom, MPI_GROUP_EMPTY, &com);
+          MPI_Comm temp;
+	  MPI_Comm_create(srccom, MPI_GROUP_EMPTY, &temp);
+	  com=makeInfo(temp, true);
+	  return true;
       }
 }
 
 // A group with a single OLD or OLDINT at the front and all the INT worlds 
 // following it
-bool SubWorld::makeGroupComm2(MPI_Comm& srccom, int vnum, char mystate, MPI_Comm& com)
+bool SubWorld::makeGroupComm2(MPI_Comm& srccom, int vnum, char mystate, JMPI& com, bool& ingroup)
 {
+      ingroup=false;
       if ((mystate==rs::OLD)
 	    || (mystate==rs::INTERESTED)
 	    || (mystate==rs::OLDINTERESTED))
@@ -445,17 +534,27 @@ bool SubWorld::makeGroupComm2(MPI_Comm& srccom, int vnum, char mystate, MPI_Comm
 	  bool havesrc=false;
 	  for (int i=0+vnum;i<globalvarinfo.size();i+=getNumVars())
 	  {
-	      // make a vector of the involved procs with New at the front
+	      int world=i/getNumVars(); 
+	      // make a vector of the involved procs with OLD/OLDINTERESTED at the front
 	      switch (globalvarinfo[i])
 	      {
 		case rs::NEW:   return false;  break;
-		case rs::INTERESTED: members.push_back(i/getNumVars());  break;     
+		case rs::INTERESTED: members.push_back(world);
+			  if (world==localid)
+			  {
+			      ingroup=true;
+			  }
+			  break;     
 		case rs::OLD: 
 		case rs::OLDINTERESTED:
 			  if (!havesrc)
 			  {
-			      members.insert(members.begin(), i/getNumVars());
+			      members.insert(members.begin(), world);
 			      havesrc=true;
+			      if (world==localid)
+			      {
+				ingroup=true;
+			      }
 			  }
 			  break;
 	      }
@@ -465,7 +564,10 @@ bool SubWorld::makeGroupComm2(MPI_Comm& srccom, int vnum, char mystate, MPI_Comm
       else	// for people not in involved in the value shipping
       {		// This would be a nice time to use MPI_Comm_create_group
 		// but it does not exist in MPI2.1	
-	  return MPI_Comm_create(srccom, MPI_GROUP_EMPTY, &com);
+          MPI_Comm temp;
+	  MPI_Comm_create(srccom, MPI_GROUP_EMPTY, &temp);
+	  com=makeInfo(temp, true);
+	  return true;
       }
 }
 
@@ -483,7 +585,7 @@ bool SubWorld::synchVariableValues(std::string& err)
     
     // need to keep track of which vars have updates
     std::vector<std::string> varswithupdates;
-    
+
     int vnum=0;    
     for (str2reduce::iterator it=reducemap.begin();it!=reducemap.end();++it, ++vnum)
     {
@@ -523,7 +625,7 @@ bool SubWorld::synchVariableValues(std::string& err)
 	    // first deal updates as source(s)
 	if (newcount==1)	// only one update so send from that
 	{
-	    MPI_Comm com;
+	    JMPI com;
 	    if (!makeGroupComm1(corrmpi->comm, vnum, varstate[it->first],com))
 	    {
 		err="Error creating group for sharing values,";
@@ -531,28 +633,93 @@ bool SubWorld::synchVariableValues(std::string& err)
 	    }
 	    if (varstate[it->first]!=rs::NONE && varstate[it->first]!=rs::OLD)
 	    {
-		it->second->groupSend(com);
+		it->second->groupSend(com->comm, (varstate[it->first]==rs::NEW));
 		  // Now record the fact that we have the variable now
 		if (varstate[it->first]==rs::INTERESTED)
 		{
 		    setMyVarState(it->first, rs::OLDINTERESTED); 
 		}
 	    }
-	    // disolve the group
-	    MPI_Comm_free(&com);
+	    continue;
+	}
+	if (newcount==swcount)		// everybody is in on this
+	{
+	    if (!it->second->reduceRemoteValues(corrmpi->comm))
+	    {
+		it->second->reset();
+		setAllVarsState(it->first, rs::NONE);
+		//setMyVarState(it->first, rs::NONE);
+		err=it->first+"Either MPI failed, or there were multiple simultaneous updates to a variable with the SET operation.";
+		return false;
+	    }
+	        // Now record the fact that we have the variable now
+	    if (varstate[it->first]==rs::INTERESTED)
+	    {
+		setMyVarState(it->first, rs::OLDINTERESTED); 
+	    }	    
 	    continue;
 	}
 	if (newcount>1)
 	{
-	    // form a group to send to [updates and interested and oldinterested]
-	    MPI_Comm com;
-	    if (!makeGroupComm1(corrmpi->comm, vnum, varstate[it->first],com))
+	    // make groups to reduce and then copy
+	    JMPI red;
+	    JMPI cop;
+	    bool incopy;
+	    if (!makeGroupReduceGroups(corrmpi->comm, vnum, varstate[it->first], red, cop, incopy))
 	    {
-		err="Error creating group for sharing values,";
+		err="Error creating groups for sharing values,";
 		return false;
 	    }
-	    it->second->groupReduce(com,varstate[it->first]);
-	    MPI_Comm_free(&com);	    
+	    char reduceresult=0;
+		// only new values get reduced
+	    if (varstate[it->first]==rs::NEW)
+	    {
+	        if (!it->second->reduceRemoteValues(red->comm))
+		{
+		    char s=1;
+		    MPI_Allreduce(&s, &reduceresult, 1, MPI_CHAR, MPI_MAX, corrmpi->comm);
+		    reduceresult=1;
+
+		}
+		else
+		{
+		    if (it->second->canClash())
+		    {
+			char s=0;
+			MPI_Allreduce(&s, &reduceresult, 1, MPI_CHAR, MPI_MAX, corrmpi->comm);		    
+		    }
+		}
+	    }
+	    else
+	    {
+		if (it->second->canClash())
+		{
+		    char s=0;
+		    MPI_Allreduce(&s, &reduceresult, 1, MPI_CHAR, MPI_MAX, corrmpi->comm);		    
+		}
+	    }
+		// if there was a clash somewhere
+	    if (reduceresult!=0)
+	    {
+		it->second->reset();
+		setAllVarsState(it->first, rs::NONE);
+		err="Either MPI failed, or there were multiple simultaneous updates to a variable with the SET operation.";
+		return false;	      
+	    }
+	      
+		// if we are involved in copying the new value around
+	    if (incopy)
+	    {
+                it->second->groupSend(cop->comm, (varstate[it->first]==rs::NEW));
+	        if (varstate[it->first]==rs::INTERESTED) 
+	        {
+		    setMyVarState(it->first, rs::OLDINTERESTED); 
+		}
+	    }
+            if (varstate[it->first]==rs::NEW)
+            {
+		setMyVarState(it->first, rs::OLDINTERESTED);
+            }
 	    continue;
 	}
 	    // at this point, we need to ship info around but there are no updates
@@ -565,16 +732,20 @@ bool SubWorld::synchVariableValues(std::string& err)
 	{
 	    continue;
 	}
-	MPI_Comm com;
-	if (!makeGroupComm2(corrmpi->comm, vnum, varstate[it->first],com))
+	JMPI com;
+	bool ingroup=false;
+	if (!makeGroupComm2(corrmpi->comm, vnum, varstate[it->first],com, ingroup))
 	{
 	    err="Error creating group for sharing values";
 	    return false;
 	}
 	// form group to send to [latestsource and interested]
-	it->second->groupSend(com);
-	// dissolve the group	
-	MPI_Comm_free(&com);
+	
+	if (ingroup)		// since only one holder needs to send
+	{
+	    bool imsending=(varstate[it->first]==rs::NEW);
+	    it->second->groupSend(com->comm, imsending);
+	}
     }
 	// now we need to age any out of date copies of vars
     for (size_t i=0;i<varswithupdates.size();++i)
@@ -603,6 +774,10 @@ bool SubWorld::amLeader()
 // share that info around
 bool SubWorld::synchVariableInfo(std::string& err)
 {
+    if (getNumVars()==0)
+    {
+	return true;
+    }
     if (manualimports)		// manual control over imports
     {
 	for (size_t i=0;i<jobvec.size();++i)
@@ -742,12 +917,6 @@ bool SubWorld::synchVariableInfo(std::string& err)
     return true;
 }
 
-// // merge / ship values as required
-// bool SubWorld::synchVariableValues(std::string& err)
-// {
-//     return reduceRemoteValues(err);
-// }
-
 // if 4, a Job performed an invalid export
 // if 3, a Job threw an exception 
 // if 2, a Job did not return a bool
@@ -796,13 +965,18 @@ void SubWorld::addVariable(std::string& name, Reducer_ptr& rp)
 	std::ostringstream oss;
 	throw SplitWorldException(oss.str());    
     }
+    if (domain.get()==0)
+    {
+	throw SplitWorldException("No domain has been set yet.");
+    }
+    rp->setDomain(domain);
     reducemap[name]=rp;
     varstate[name]=reducerstatus::NONE;
     if (!manualimports)
     {
 	for (size_t i=0;i<jobvec.size();++i)
 	{
-	    jobvec[i].attr("requestImport")(name);
+	    jobvec[i].attr("declareImport")(name);
 	}
     }
 #ifdef ESYS_MPI
@@ -848,6 +1022,14 @@ void SubWorld::resetInterest()
     }
 }
 
+void SubWorld::newRunJobs()
+{
+    for (str2reduce::iterator it=reducemap.begin();it!=reducemap.end();++it)
+    {
+	it->second->newRunJobs();
+    }    
+}
+
 std::list<std::pair<std::string, bool> > SubWorld::getVarList()
 {
     std::list<std::pair<std::string,bool> > res;
@@ -857,3 +1039,23 @@ std::list<std::pair<std::string, bool> > SubWorld::getVarList()
     }
     return res;
 }
+
+void SubWorld::copyVariable(const std::string& src, const std::string& dest)
+{
+	if (reducemap.find(src)==reducemap.end())
+	{
+	    throw SplitWorldException("Source variable name is not known");
+	}
+	if (reducemap.find(dest)==reducemap.end())
+	{
+	    throw SplitWorldException("Destination variable name is not known");
+	}
+        Reducer_ptr sptr=reducemap[src];
+	Reducer_ptr dptr=reducemap[dest];
+	dptr->copyValueFrom(sptr);
+}
+
+
+
+
+
diff --git a/escriptcore/src/SubWorld.h b/escriptcore/src/SubWorld.h
index 8ce5df5..5ce86c4 100644
--- a/escriptcore/src/SubWorld.h
+++ b/escriptcore/src/SubWorld.h
@@ -79,6 +79,10 @@ public:
     bool synchVariableInfo(std::string& err);
     bool synchVariableValues(std::string& err);    
     void resetInterest();    
+
+    void copyVariable(const std::string& src, const std::string& dest);
+    
+    void newRunJobs();
     
 private:
     esysUtils::JMPI everyone;	// communicator linking all procs in all subworlds
@@ -92,40 +96,45 @@ private:
     
     
     unsigned int swcount;		// number of subwords
-    unsigned int localid;    	// my position within the sequence
+    unsigned int localid;    	// position of this subworld in that sequence
     
 typedef std::map<std::string, Reducer_ptr> str2reduce;  
 typedef std::map<std::string, unsigned char> str2char;
     str2reduce reducemap;		// map: name ->reducer for that variable
-    str2char varstate;		// using the state values from Reducer.h
+    str2char varstate;		// using the state values from AbstractReducer.h
 
     bool manualimports;
     
 #ifdef ESYS_MPI    
     std::vector<unsigned char> globalvarinfo;	// info about which worlds want which vars
+				  // [vars on process0][vars on process 1][vars on ...]
 typedef std::map<unsigned char, int> countmap;
 typedef std::map<std::string, countmap> str2countmap;
     str2countmap globalvarcounts;
     bool globalinfoinvalid;
     
     
-    bool makeComm(MPI_Comm& sourcecom, MPI_Comm& subcom,std::vector<int>& members);
+    bool makeComm(MPI_Comm& sourcecom, esysUtils::JMPI& sub,std::vector<int>& members);
 
 
     // a group with NEW nodes at the front and INT and OLDINT at the back
     // NONE worlds get an empty communicator
-    bool makeGroupComm1(MPI_Comm& srccom, int vnum, char mystate, MPI_Comm& com);
+    bool makeGroupComm1(MPI_Comm& srccom, int vnum, char mystate, esysUtils::JMPI& com);
+
+    // reduce on the first group and copy from cop[0] to others in cop
+    bool makeGroupReduceGroups(MPI_Comm& srccom, int vnum, char mystate, esysUtils::JMPI& red, esysUtils::JMPI& cop, bool& incopy);
+
 
     // A group with a single OLD or OLDINT at the front and all the INT worlds 
     // following it
-    bool makeGroupComm2(MPI_Comm& srccom, int vnum, char mystate, MPI_Comm& com);    
+    bool makeGroupComm2(MPI_Comm& srccom, int vnum, char mystate, esysUtils::JMPI& com, bool& ingroup);    
     
 #endif
     
       // change the various views of a variable's state
     void setMyVarState(const std::string& vname, char state);
-    void setVarState(const std::string& vname, char state, int rank);
-    void setAllVarsState(std::string& name, char state);
+    void setVarState(const std::string& vname, char state, int swid);
+    void setAllVarsState(const std::string& name, char state);
 };
 
 typedef boost::shared_ptr<SubWorld> SubWorld_ptr;
diff --git a/escriptcore/src/escriptcpp.cpp b/escriptcore/src/escriptcpp.cpp
index f0a1ad2..ec42868 100644
--- a/escriptcore/src/escriptcpp.cpp
+++ b/escriptcore/src/escriptcpp.cpp
@@ -58,7 +58,7 @@ using namespace boost::python;
 
 /*! \mainpage Esys Documentation
  *
- * \version 4.0
+ * \version 4.1
  *
  * Main modules/namespaces:
  *
@@ -117,8 +117,7 @@ bool block_cmp_domains(const escript::AbstractDomain&, boost::python::object o)
     boost::python::throw_error_already_set();   
     return false;
 }
-
-
+  
 }
 
 BOOST_PYTHON_MODULE(escriptcpp)
@@ -132,28 +131,30 @@ BOOST_PYTHON_MODULE(escriptcpp)
   scope().attr("__doc__") = "To use this module, please import esys.escript";      
 
 /* begin SubWorld things */
-  
 
   class_<escript::AbstractReducer, escript::Reducer_ptr, boost::noncopyable>("Reducer", "", no_init);
   
   // Why doesn't this have a doc-string?   Because it doesn't compile if you try to add one
   // These functions take a SplitWorld instance as their first parameter
-  def("buildDomains", raw_function(escript::raw_buildDomains,2));
-  def("addJob", raw_function(escript::raw_addJob,2));
-  def("addJobPerWorld", raw_function(escript::raw_addJobPerWorld,2));
-  def("addVariable", raw_function(escript::raw_addVariable,3));
+  def("internal_buildDomains", raw_function(escript::raw_buildDomains,2));
+  def("internal_addJob", raw_function(escript::raw_addJob,2));
+  def("internal_addJobPerWorld", raw_function(escript::raw_addJobPerWorld,2));
+  def("internal_addVariable", raw_function(escript::raw_addVariable,3));
   
   
-  def("makeDataReducer", escript::makeDataReducer, arg("op"), "Create a reducer to work with Data and the specified operation.");
-  def("makeScalarReducer", escript::makeScalarReducer, arg("op"), "Create a reducer to work with doubles and the specified operation.");
-  def("makeLocalOnly", escript::makeNonReducedVariable, "Create a variable which is not connected to copies in other worlds.");
+  def("internal_makeDataReducer", escript::makeDataReducer, arg("op"), "Create a reducer to work with Data and the specified operation.");
+  def("internal_makeScalarReducer", escript::makeScalarReducer, arg("op"), "Create a reducer to work with doubles and the specified operation.");
+  def("internal_makeLocalOnly", escript::makeNonReducedVariable, "Create a variable which is not connected to copies in other worlds.");
       
-  class_<escript::SplitWorld, boost::noncopyable>("SplitWorld", "Manages a group of sub worlds", init<unsigned int>(args("num_worlds")))
+  class_<escript::SplitWorld, boost::noncopyable>("Internal_SplitWorld", "Manages a group of sub worlds. For internal use only.", init<unsigned int>(args("num_worlds")))
     .def("runJobs", &escript::SplitWorld::runJobs, "Execute pending jobs.")
     .def("removeVariable", &escript::SplitWorld::removeVariable, arg("name"), "Remove the named variable from the SplitWorld")
     .def("clearVariable", &escript::SplitWorld::clearVariable, arg("name"), "Remove the value from the named variable")
     .def("getVarList", &escript::SplitWorld::getVarPyList, "Lists variables known to the system")
-    .def("getDoubleVariable", &escript::SplitWorld::getScalarVariable);
+    .def("getDoubleVariable", &escript::SplitWorld::getScalarVariable)
+    .def("getSubWorldCount",&escript::SplitWorld::getSubWorldCount)
+    .def("getSubWorldID", &escript::SplitWorld::getSubWorldID)
+    .def("copyVariable", &escript::SplitWorld::copyVariable, args("source","destination"), "Copy the contents of one variable to another");
     
   // This class has no methods. This is deliberate - at this stage, I would like this to be an opaque type  
   class_ <escript::SubWorld, escript::SubWorld_ptr, boost::noncopyable>("SubWorld", "Information about a group of workers.", no_init);
diff --git a/escriptcore/test/DataTaggedTestCase.cpp b/escriptcore/test/DataTaggedTestCase.cpp
index 2e2be5c..81a0001 100644
--- a/escriptcore/test/DataTaggedTestCase.cpp
+++ b/escriptcore/test/DataTaggedTestCase.cpp
@@ -145,6 +145,9 @@ void DataTaggedTestCase::testOperations() {
     CPPUNIT_ASSERT(myData.getRank()==0);
     CPPUNIT_ASSERT(myData.getNoValues()==1);
     CPPUNIT_ASSERT(myData.getShape().size()==0);
+#ifdef EXWRITECHK		
+		myData.exclusivewritecalled=true;
+#endif	    
     CPPUNIT_ASSERT(myData.getDataAtOffsetRW(0)==0.0);
 
     // Test non-existent tag returns the default value.
@@ -312,6 +315,10 @@ void DataTaggedTestCase::testOperations() {
     // be used for missing tags in each object
 //     myData.getDefaultValue()()=1.0;
 //     right.getDefaultValue()()=2.0;
+#ifdef EXWRITECHK		
+    myData.exclusivewritecalled=true;
+#endif	    
+    
     myData.getVectorRW()[myData.getDefaultOffset()]=1.0;
     right.getVectorRW()[right.getDefaultOffset()]=2.0;
 
@@ -397,6 +404,9 @@ void DataTaggedTestCase::testOperations() {
     // be used for missing tags in each object
 /*    myData.getDefaultValue()()=2.0;
     right.getDefaultValue()()=3.0;*/
+#ifdef EXWRITECHK		
+    myData.exclusivewritecalled=true;
+#endif	
     myData.getVectorRW()[myData.getDefaultOffset()]=2.0;
     right.getVectorRW()[right.getDefaultOffset()]=3.0;
 
@@ -633,6 +643,9 @@ void DataTaggedTestCase::testOperations() {
     // be used for missing tags in each object
 //     myData.getDefaultValue()()=2.0;
 //     right.getDefaultValue()()=3.0;
+#ifdef EXWRITECHK		
+    myData.exclusivewritecalled=true;
+#endif	    
     myData.getVectorRW()[myData.getDefaultOffset()]=2.0;
     right.getVectorRW()[right.getDefaultOffset()]=3.0;
 
diff --git a/escriptcore/test/DataTestCase.cpp b/escriptcore/test/DataTestCase.cpp
index 08ce98b..a20bf87 100644
--- a/escriptcore/test/DataTestCase.cpp
+++ b/escriptcore/test/DataTestCase.cpp
@@ -596,6 +596,9 @@ void DataTestCase::testDataTagged()
     CPPUNIT_ASSERT(myData.getDataAtOffsetRO(1)==1.0);
     CPPUNIT_ASSERT(myData.getDataAtOffsetRO(2)==2.0);
 
+#ifdef EXWRITECHK		
+    myData.requireWrite();
+#endif	    
     double* sampleData=myData.getSampleDataRW(0);
     for (int i=0; i<myData.getNoValues(); i++) {
       CPPUNIT_ASSERT(sampleData[i]==i);
diff --git a/escriptcore/test/python/run_data_access.py b/escriptcore/test/python/run_data_access.py
index b4f7730..062801c 100644
--- a/escriptcore/test/python/run_data_access.py
+++ b/escriptcore/test/python/run_data_access.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/run_symbolic.py b/escriptcore/test/python/run_symbolic.py
index 9df0686..72f9182 100644
--- a/escriptcore/test/python/run_symbolic.py
+++ b/escriptcore/test/python/run_symbolic.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/run_testdomain.py b/escriptcore/test/python/run_testdomain.py
index 899229c..76f388b 100644
--- a/escriptcore/test/python/run_testdomain.py
+++ b/escriptcore/test/python/run_testdomain.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2012-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/run_units.py b/escriptcore/test/python/run_units.py
index ca3b95b..576387d 100644
--- a/escriptcore/test/python/run_units.py
+++ b/escriptcore/test/python/run_units.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/run_xml.py b/escriptcore/test/python/run_xml.py
index 98f6487..ef9452a 100644
--- a/escriptcore/test/python/run_xml.py
+++ b/escriptcore/test/python/run_xml.py
@@ -15,6 +15,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_assemblage.py b/escriptcore/test/python/test_assemblage.py
index ff792a8..9ee0b84 100644
--- a/escriptcore/test/python/test_assemblage.py
+++ b/escriptcore/test/python/test_assemblage.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_assemblage_2Do1.py b/escriptcore/test/python/test_assemblage_2Do1.py
index 8556c12..e11eb9a 100644
--- a/escriptcore/test/python/test_assemblage_2Do1.py
+++ b/escriptcore/test/python/test_assemblage_2Do1.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_assemblage_2Do2.py b/escriptcore/test/python/test_assemblage_2Do2.py
index 7f11091..be7a8ae 100644
--- a/escriptcore/test/python/test_assemblage_2Do2.py
+++ b/escriptcore/test/python/test_assemblage_2Do2.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_assemblage_3Do1.py b/escriptcore/test/python/test_assemblage_3Do1.py
index 055119b..07aa41e 100644
--- a/escriptcore/test/python/test_assemblage_3Do1.py
+++ b/escriptcore/test/python/test_assemblage_3Do1.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_assemblage_3Do2.py b/escriptcore/test/python/test_assemblage_3Do2.py
index ab38a81..5b66ea9 100644
--- a/escriptcore/test/python/test_assemblage_3Do2.py
+++ b/escriptcore/test/python/test_assemblage_3Do2.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_condEval.py b/escriptcore/test/python/test_condEval.py
index 5146b5b..94fd2e7 100644
--- a/escriptcore/test/python/test_condEval.py
+++ b/escriptcore/test/python/test_condEval.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2010-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_linearPDEs.py b/escriptcore/test/python/test_linearPDEs.py
index 190b359..90efbde 100644
--- a/escriptcore/test/python/test_linearPDEs.py
+++ b/escriptcore/test/python/test_linearPDEs.py
@@ -15,6 +15,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -30,7 +32,7 @@ Test suite for linearPDEs class
 __author__="Lutz Gross, l.gross at uq.edu.au"
 
 from esys.escript.util import Lsup,kronecker,interpolate,whereZero, outer, swap_axes
-from esys.escript import Function,FunctionOnBoundary,FunctionOnContactZero,Solution,ReducedSolution,Vector,ContinuousFunction,Scalar, ReducedFunction,ReducedFunctionOnBoundary,ReducedFunctionOnContactZero,Data, Tensor4, Tensor, getEscriptParamInt, canInterpolate
+from esys.escript import Function,FunctionOnBoundary,FunctionOnContactZero,Solution,ReducedSolution,Vector,ContinuousFunction,Scalar, ReducedFunction,ReducedFunctionOnBoundary,ReducedFunctionOnContactZero,Data, Tensor4, Tensor, getEscriptParamInt, canInterpolate, getMPISizeWorld
 from esys.escript.linearPDEs import SolverBuddy, LinearPDE,IllegalCoefficientValue,Poisson, IllegalCoefficientFunctionSpace, TransportPDE, IllegalCoefficient, Helmholtz, LameEquation, SolverOptions
 import numpy
 import esys.escriptcore.utestselect as unittest
@@ -551,8 +553,15 @@ class Test_LinearPDE_noLumping(Test_linearPDEs):
 
         self.assertTrue(sb.getSolverMethod() == so.DEFAULT, "initial SolverMethod is wrong.")
         self.assertRaises(ValueError,sb.setSolverMethod,-1)
-        sb.setSolverMethod(so.DIRECT)
-        self.assertTrue(sb.getSolverMethod() == so.DIRECT, "DIRECT is not set.")
+
+        if getMPISizeWorld() == 1 and not getEscriptParamInt('PASO_DIRECT'):
+            with self.assertRaises(ValueError) as package:
+                sb.setSolverMethod(so.DIRECT)
+            self.assertTrue('SolverOptionsException' in str(package.exception))
+        else:
+            sb.setSolverMethod(so.DIRECT)
+            self.assertTrue(sb.getSolverMethod() == so.DIRECT, "DIRECT is not set.")
+
         sb.setSolverMethod(so.CHOLEVSKY)
         self.assertTrue(sb.getSolverMethod() == so.CHOLEVSKY, "CHOLEVSKY is not set.")
         sb.setSolverMethod(so.PCG)
@@ -1673,10 +1682,21 @@ class Test_LinearPDE_noLumping(Test_linearPDEs):
     def test_symmetryOnDirect(self):
         mypde=LinearPDE(self.domain,debug=self.DEBUG)
         mypde.setValue(A=kronecker(self.domain),D=1.,Y=1.)
-        mypde.getSolverOptions().setSolverMethod(SolverOptions.DIRECT)
+        if getMPISizeWorld() == 1 and not getEscriptParamInt('PASO_DIRECT'):
+            with self.assertRaises(ValueError) as package:
+                mypde.getSolverOptions().setSolverMethod(SolverOptions.DIRECT)
+            self.assertTrue('SolverOptionsException' in str(package.exception))
+            return
+        else:
+            mypde.getSolverOptions().setSolverMethod(SolverOptions.DIRECT)
         mypde.getSolverOptions().setVerbosity(self.VERBOSE)
-        u=mypde.getSolution()
-        self.assertTrue(self.check(u,1.),'solution is wrong.')
+        if getMPISizeWorld() > 1:
+            with self.assertRaises(RuntimeError) as package:
+                u=mypde.getSolution()
+            self.assertTrue('PasoException' in str(package.exception))
+        else:
+            u=mypde.getSolution()
+            self.assertTrue(self.check(u,1.),'solution is wrong.')
     def test_PCG_JACOBI(self):
         mypde=LinearPDE(self.domain,debug=self.DEBUG)
         mypde.setValue(A=kronecker(self.domain),D=1.,Y=1.)
@@ -1732,10 +1752,22 @@ class Test_LinearPDE_noLumping(Test_linearPDEs):
     def test_DIRECT(self):
         mypde=LinearPDE(self.domain,debug=self.DEBUG)
         mypde.setValue(A=kronecker(self.domain),D=1.,Y=1.)
-        mypde.getSolverOptions().setSolverMethod(SolverOptions.DIRECT)
+        if getMPISizeWorld() == 1 and not getEscriptParamInt('PASO_DIRECT'):
+            with self.assertRaises(ValueError) as package:
+                mypde.getSolverOptions().setSolverMethod(SolverOptions.DIRECT)
+            self.assertTrue('SolverOptionsException' in str(package.exception))
+            return
+        else:
+            mypde.getSolverOptions().setSolverMethod(SolverOptions.DIRECT)
         mypde.getSolverOptions().setVerbosity(self.VERBOSE)
-        u=mypde.getSolution()
-        self.assertTrue(self.check(u,1.),'solution is wrong.')
+        if getMPISizeWorld() > 1:
+            with self.assertRaises(RuntimeError) as package:
+                u=mypde.getSolution()
+            self.assertTrue('PasoException' in str(package.exception))
+        else:
+            u=mypde.getSolution()
+            self.assertTrue(self.check(u,1.),'solution is wrong.')
+
     def test_BICGSTAB_JACOBI(self):
         mypde=LinearPDE(self.domain,debug=self.DEBUG)
         mypde.getSolverOptions().setSolverMethod(SolverOptions.BICGSTAB)
@@ -1756,7 +1788,7 @@ class Test_LinearPDE_noLumping(Test_linearPDEs):
         if self.order!=2:
             if getEscriptParamInt('DISABLE_AMG', 0):
                 print("AMG test disabled on MPI build")
-                return 	  
+                return
             mypde=LinearPDE(self.domain,debug=self.DEBUG)
             mypde.getSolverOptions().setSolverMethod(SolverOptions.BICGSTAB)
             mypde.getSolverOptions().setPreconditioner(SolverOptions.AMG)
@@ -1966,7 +1998,7 @@ class Test_LinearPDE_noLumping(Test_linearPDEs):
         if self.order!=2:
             if getEscriptParamInt('DISABLE_AMG', 0):
                 print("AMG test disabled on MPI build")
-                return 	  
+                return
             mypde=LinearPDE(self.domain,debug=self.DEBUG)
             mypde.setValue(A=kronecker(self.domain),D=1.,Y=1.)
             mypde.getSolverOptions().setSolverMethod(SolverOptions.GMRES)
@@ -2022,7 +2054,7 @@ class Test_LinearPDE_noLumping(Test_linearPDEs):
         if self.order!=2:
             if getEscriptParamInt('DISABLE_AMG', 0):
                 print("AMG test disabled on MPI build")
-                return 	  
+                return
             mypde=LinearPDE(self.domain,debug=self.DEBUG)
             mypde.setValue(A=kronecker(self.domain),D=1.,Y=1.)
             mypde.getSolverOptions().setSolverMethod(SolverOptions.GMRES)
@@ -2078,7 +2110,7 @@ class Test_LinearPDE_noLumping(Test_linearPDEs):
         if self.order!=2:
             if getEscriptParamInt('DISABLE_AMG', 0):
                 print("AMG test disabled on MPI build")
-                return 	  
+                return
             mypde=LinearPDE(self.domain,debug=self.DEBUG)
             mypde.setValue(A=kronecker(self.domain),D=1.,Y=1.)
             mypde.getSolverOptions().setSolverMethod(SolverOptions.GMRES)
@@ -2144,10 +2176,21 @@ class Test_LinearPDE_noLumping(Test_linearPDEs):
             Y[i]+=i
         mypde=LinearPDE(self.domain,debug=self.DEBUG)
         mypde.setValue(A=A,D=D,Y=Y)
-        mypde.getSolverOptions().setSolverMethod(SolverOptions.DIRECT)
+        if getMPISizeWorld() == 1 and not getEscriptParamInt('PASO_DIRECT'):
+            with self.assertRaises(ValueError) as package:
+                mypde.getSolverOptions().setSolverMethod(SolverOptions.DIRECT)
+            self.assertTrue('SolverOptionsException' in str(package.exception))
+            return
+        else:
+            mypde.getSolverOptions().setSolverMethod(SolverOptions.DIRECT)
         mypde.getSolverOptions().setVerbosity(self.VERBOSE)
-        u=mypde.getSolution()
-        self.assertTrue(self.check(u,1.),'solution is wrong.')
+        if getMPISizeWorld() > 1:
+            with self.assertRaises(RuntimeError) as package:
+                u=mypde.getSolution()
+            self.assertTrue('PasoException' in str(package.exception))
+        else:
+            u=mypde.getSolution()
+            self.assertTrue(self.check(u,1.),'solution is wrong.')
     def test_PCG_JACOBI_System(self):
         A=Tensor4(0.,Function(self.domain))
         D=Tensor(1.,Function(self.domain))
@@ -2222,10 +2265,21 @@ class Test_LinearPDE_noLumping(Test_linearPDEs):
             Y[i]+=i
         mypde=LinearPDE(self.domain,debug=self.DEBUG)
         mypde.setValue(A=A,D=D,Y=Y)
-        mypde.getSolverOptions().setSolverMethod(SolverOptions.DIRECT)
+        if getMPISizeWorld() == 1 and not getEscriptParamInt('PASO_DIRECT'):
+            with self.assertRaises(ValueError) as package:
+                mypde.getSolverOptions().setSolverMethod(SolverOptions.DIRECT)
+            self.assertTrue('SolverOptionsException' in str(package.exception))
+            return
+        else:
+            mypde.getSolverOptions().setSolverMethod(SolverOptions.DIRECT)
         mypde.getSolverOptions().setVerbosity(self.VERBOSE)
-        u=mypde.getSolution()
-        self.assertTrue(self.check(u,1.),'solution is wrong.')
+        if getMPISizeWorld() > 1:
+            with self.assertRaises(RuntimeError) as package:
+                u=mypde.getSolution()
+            self.assertTrue('PasoException' in str(package.exception))
+        else:
+            u=mypde.getSolution()
+            self.assertTrue(self.check(u,1.),'solution is wrong.')
     def test_BICGSTAB_JACOBI_System(self):
         A=Tensor4(0.,Function(self.domain))
         D=Tensor(1.,Function(self.domain))
@@ -2260,7 +2314,7 @@ class Test_LinearPDE_noLumping(Test_linearPDEs):
         if self.order!=2:
             if getEscriptParamInt('DISABLE_AMG', 0):
                 print("AMG test disabled on MPI build")
-                return 	  
+                return
             A=Tensor4(0.,Function(self.domain))
             D=Tensor(1.,Function(self.domain))
             Y=Vector(self.domain.getDim(),Function(self.domain))
@@ -2390,7 +2444,7 @@ class Test_LinearPDE_noLumping(Test_linearPDEs):
         if self.order!=2:
             if getEscriptParamInt('DISABLE_AMG',0):
                 print("AMG test disabled on MPI build")
-                return   	  
+                return
             A=Tensor4(0.,Function(self.domain))
             D=Tensor(1.,Function(self.domain))
             Y=Vector(self.domain.getDim(),Function(self.domain))
@@ -2456,7 +2510,7 @@ class Test_LinearPDE_noLumping(Test_linearPDEs):
         if self.order!=2:
             if getEscriptParamInt('DISABLE_AMG', 0):
                 print("AMG test disabled on MPI build")
-                return 	  
+                return
             A=Tensor4(0.,Function(self.domain))
             D=Tensor(1.,Function(self.domain))
             Y=Vector(self.domain.getDim(),Function(self.domain))
@@ -2524,7 +2578,7 @@ class Test_LinearPDE_noLumping(Test_linearPDEs):
         if self.order!=2:
             if getEscriptParamInt('DISABLE_AMG', 0):
                 print("AMG test disabled on MPI build")
-                return 	  
+                return
             A=Tensor4(0.,Function(self.domain))
             D=Tensor(1.,Function(self.domain))
             Y=Vector(self.domain.getDim(),Function(self.domain))
diff --git a/escriptcore/test/python/test_modulefns.py b/escriptcore/test/python/test_modulefns.py
index 606ab9e..57543ac 100644
--- a/escriptcore/test/python/test_modulefns.py
+++ b/escriptcore/test/python/test_modulefns.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2009-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_nonLinearPDE.py b/escriptcore/test/python/test_nonLinearPDE.py
index f514cb0..001605f 100644
--- a/escriptcore/test/python/test_nonLinearPDE.py
+++ b/escriptcore/test/python/test_nonLinearPDE.py
@@ -15,6 +15,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_objects.py b/escriptcore/test/python/test_objects.py
index 4ed5c59..712bc0a 100644
--- a/escriptcore/test/python/test_objects.py
+++ b/escriptcore/test/python/test_objects.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_pdetools.py b/escriptcore/test/python/test_pdetools.py
index b865e26..63e6975 100644
--- a/escriptcore/test/python/test_pdetools.py
+++ b/escriptcore/test/python/test_pdetools.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_shared.py b/escriptcore/test/python/test_shared.py
index c2cbe01..337dced 100644
--- a/escriptcore/test/python/test_shared.py
+++ b/escriptcore/test/python/test_shared.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -85,4 +87,4 @@ class Test_Shared(unittest.TestCase):
 
 
         
-        
\ No newline at end of file
+        
diff --git a/escriptcore/test/python/test_splitworld.py b/escriptcore/test/python/test_splitworld.py
index fb6d86e..0ed1f50 100644
--- a/escriptcore/test/python/test_splitworld.py
+++ b/escriptcore/test/python/test_splitworld.py
@@ -1,4 +1,3 @@
-
 ##############################################################################
 #
 # Copyright (c) 2015 by The University of Queensland
@@ -14,6 +13,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -30,30 +31,404 @@ from esys.escript import *
 
 from esys.escriptcore.splitworld import *
 from esys.escript.linearPDEs import Poisson, Helmholtz
+from esys.escriptcore.testing import *
+import sys
+
+mpisize=getMPISizeWorld()
+
+def f1(self, **args):
+    x=Data(1, Function(self.domain))
+    self.exportValue('v_data',x)
+    self.exportValue('v_scalar', 1)
+    
+def f2(self, **kwargs):
+    x=self.importValue('v_data')
+    z=self.importValue('v_scalar')
+    b=kwargs['expected']
+    if abs(z-b)>0.001:
+        raise RuntimeError("Scalar value did not match expected value")
+    if abs(Lsup(x)-b)>0.001:
+        print("x=",str(x)," b=",str(b), file=sys.stderr)
+        raise RuntimeError("Data value did not match expected value")
+
+def f3(self, **kwargs):
+    print(";;;;;;", file=sys.stderr)
+    x=self.importValue('v_data')
+    z=self.importValue('v_scalar')
+    print("Data=", str(x), file=sys.stderr)
+    print("Scal=", str(z), file=sys.stderr)
+    
+
+
+
+def set_var(self, **kwargs):
+    self.exportValue("v_scalar", 7)
+
+def inp_var(self, **kwargs):
+    self.importValue("v_scalar")    
+    
+def sum_work(self, **args):
+    x=self.domain.getX()
+    id=self.jobid
+    x=x*self.jobid
+    self.exportValue("v_scalar", id)
+    self.exportValue("v_data", x)
+    
+def sum_check(self, **args):
+    high=args["high"]
+    low=args["low"]
+    x=self.domain.getX()
+    dtot=0
+    stot=0
+    for i in range(low, high+1):
+        stot+=i
+        dtot+=x*i
+    sactual=self.importValue("v_scalar")
+    dactual=self.importValue("v_data")
+    if abs(sactual-stot)>0.001:
+        raise RuntimeError("Scalar total is not as expected %e vs %e"%(sactual, stot))
+    if Lsup(dactual-dtot)>0.001:
+        print("Actual:"+str(dactual))
+        print("calced:"+str(dtot))
+        raise RuntimeError("Data total is not as expected")
+
+        
+def var_setup(self, **kwargs):
+    z=1
+    x=Data(1, Function(self.domain))
+    self.exportValue("v_scalar", z)
+    self.exportValue("v_data", x)
+    self.exportValue("v_list", [1])
+
+def var_increment(self, **kwargs):
+    z=self.importValue("v_scalar")
+    x=self.importValue("v_data")
+    l=self.importValue("v_list")
+    z+=1
+    x+=1
+    l.append([2])
+    self.exportValue("v_scalar", z)
+    self.exportValue("v_data", x)
+    self.exportValue("v_list", l)
+    
+def var_check(self, **kwargs):
+    zc=self.importValue("v_scalar_copy")
+    xc=self.importValue("v_data_copy")
+    lc=self.importValue("v_list_copy")
+    z=self.importValue("v_scalar")
+    x=self.importValue("v_data")
+    l=self.importValue("v_list")
+    if abs(z-zc)<0.001:
+        raise RuntimeError("Scalar variables appear to be incorrectly linked.")
+    if Lsup(x-xc)<0.001:
+        raise RuntimeError("Data variables appear to be incorrectly linked.")
+    if l!=lc:
+        raise RuntimeError("Python list appears not to be linked.")
+
+        
+class sw_testmany(unittest.TestCase):
+    @staticmethod
+    def pde_work(self, **args):
+        x = self.domain.getX()
+        gammaD = whereZero(x[0])+whereZero(x[1])
+        mypde = Poisson(domain=self.domain)
+        mypde.setValue(f=1+self.swid,q=gammaD)
+        u = mypde.getSolution()
+        return True
+    def create_many_subworlds(self):
+        sw=SplitWorld(getMPISizeWorld())
+        sw.buildDomains(self.domain_ctr, *self.domain_vec, **self.domain_dict)
+        return sw
+        
+
+    def set_tester(self, sw):
+        import time
+        #time.sleep(20)
+        sw.addVariable("v_scalar", "float", "SET")
+        sw.addVariable("v_data", "Data", "SET")
+        sw.addVariable("v_list", "local")    # so we can use var_setup
+        sw.addVariable("ex", "float", "SUM")         # So we have something to read when the others are wiped out
+
+        def ex_set(self, **kwargs):
+            self.exportValue("ex",7)
+
+        sw.addJob(FunctionJob, ex_set)
+        sw.addJob(FunctionJob, var_setup)
+        sw.runJobs()
+
+        if sw.getSubWorldCount()>1:
+            sw.addJobPerWorld(FunctionJob, var_setup)
+            sw.runJobs()
+            self.assertRaises(RuntimeError, sw.getFloatVariable, "v_scalar")
+            self.assertRaises(RuntimeError, sw.getFloatVariable, "v_scalar")
+
+        sw.addJob(FunctionJob, var_setup)
+        sw.addJob(FunctionJob, var_setup)
+        if sw.getSubWorldCount()==1:
+            self.assertRaises(RuntimeError, sw.runJobs)
+        else:
+            sw.runJobs()
+            self.assertRaises(RuntimeError, sw.runJobs) # This gives up after the first dud resolve
+            self.assertRaises(RuntimeError, sw.runJobs) # since we have multiple vars we need to flush
+        print(sw.getVarList())
+        sw.getFloatVariable("ex")
+
+        sw.addJob(FunctionJob, var_setup)
+        sw.runJobs()
+        
+    def sum_vars_tester(self, sw):
+        sw.addVariable("v_scalar", "float", "SUM")
+        sw.addVariable("v_data", "Data", "SUM")
+        sw.addVariable("notused", "local")
+        flags1=[['notused', False], ['v_data', False], ['v_scalar', False]]
+        self.assertEqual(flags1, sw.getVarList())
+        lim=2*getMPISizeWorld()
+        for i in range(1,lim+1):
+          sw.addJob(FunctionJob, sum_work)
+        sw.runJobs()
+        flags2=[['notused', False], ['v_data', True], ['v_scalar', True]]
+
+        self.assertEqual(flags2, sw.getVarList())
+        sw.addJob(FunctionJob, sum_check, imports=['v_scalar', 'v_data'], low=1, high=lim)
+        sw.runJobs()
+        total=0
+        for i in range(1, lim+1):
+            total+=i
+        act=sw.getFloatVariable("v_scalar")
+        self.assertEqual(total, act, "Extract of double variable failed")
+        sw.removeVariable("v_scalar")
+        self.assertEqual([['notused', False], ['v_data', True]], sw.getVarList())
+        self.assertRaises(RuntimeError, sw.getFloatVariable, "v_scalar")
+        
+        sw.addJobPerWorld(FunctionJob, set_var)
+        self.assertRaises(RuntimeError, sw. runJobs)
+        
+        sw.addJobPerWorld(FunctionJob, inp_var)
+        self.assertRaises(RuntimeError, sw. runJobs)
+        
+        sw.addVariable("v_scalar", "float", "SUM")
+        flags3=[['notused', False], ['v_data', True], ['v_scalar', False]]
+        self.assertEqual(flags3, sw.getVarList())
+        
+        sw.addJob(FunctionJob, set_var)        # note that this will only set the value in one world
+        sw.runJobs()                            # want to test if getDouble is transporting values        
+        self.assertEqual(7, sw.getFloatVariable("v_scalar"))    
+        
+        
+    def copy_vars_tester(self, sw):
+        sw.addVariable("v_scalar", "float", "SUM")
+        sw.addVariable("v_data", "Data", "SUM")
+        sw.addVariable("v_list", "local")
+        sw.addVariable("v_scalar_copy", "float", "SUM")
+        sw.addVariable("v_data_copy", "Data", "SUM")
+        sw.addVariable("v_list_copy", "local")
+        sw.addJobPerWorld(FunctionJob, var_setup)
+        sw.runJobs()
+        sw.copyVariable("v_scalar", "v_scalar_copy")
+        sw.copyVariable("v_data", "v_data_copy")
+        sw.copyVariable("v_list", "v_list_copy")
+        self.assertRaises(RuntimeError, sw.copyVariable, "v_scalar", "v_data_copy")
+        self.assertRaises(RuntimeError, sw.copyVariable, "v_scalar", "v_list_copy")
+        self.assertRaises(RuntimeError, sw.copyVariable, "v_data", "v_scalar_copy")
+        self.assertRaises(RuntimeError, sw.copyVariable, "v_data", "v_list_copy")
+        self.assertRaises(RuntimeError, sw.copyVariable, "v_list", "v_data_copy")
+        self.assertRaises(RuntimeError, sw.copyVariable, "v_list", "v_scalar_copy")
+        self.assertRaises(RuntimeError, sw.copyVariable, "v_data", "v_data")
+       
+        sw.addJobPerWorld(FunctionJob, var_increment)
+        sw.runJobs()
+        sw.addJobPerWorld(FunctionJob, var_check)
+        sw.runJobs()
+        sw.runJobs()    # Just to make sure empty list doesn't break it
+
+ 
+    @unittest.skipIf(mpisize<3, "test is redundant on fewer than three processes")
+    def testmanyworld_singleround(self):
+        sw=self.create_many_subworlds()
+        sw.addJob(FunctionJob, self.pde_work) 
+        sw.runJobs()
+        
+        sw=self.create_many_subworlds()
+        sw.addJobPerWorld(FunctionJob, self.pde_work)
+        sw.runJobs()
+        
+        sw=self.create_many_subworlds()
+        for i in range(4):
+            sw.addJob(FunctionJob, self.pde_work)
+        sw.runJobs()        
+        
+    @unittest.skipIf(mpisize<3, "test is redundant on fewer than three processes")
+    def testmanyworld_multiround(self):        
+        sw=self.create_many_subworlds()
+        for i in range(4*getMPISizeWorld()):
+            sw.addJob(FunctionJob, self.pde_work)
+        sw.runJobs()
+        for i in range(2):
+            sw.addJob(FunctionJob, self.pde_work)
+        sw.runJobs()
+        for i in range(3*getMPISizeWorld()+1):
+            sw.addJob(FunctionJob, self.pde_work)
+        sw.runJobs()
+
+    @unittest.skipIf(mpisize<3, "test is redundant on fewer than three processes")        
+    def testmanyworld_sum_vars(self):
+        sw=self.create_many_subworlds()
+        self.sum_vars_tester(sw) 
+        
+        
+    @unittest.skipIf(mpisize<3, "test is redundant on fewer than three processes")        
+    def testmanyworld_copy_vars(self):
+        sw=self.create_many_subworlds()
+        self.copy_vars_tester(sw)         
+        
+    @unittest.skipIf(mpisize<3, "test is redundant on fewer than three processes")        
+    def testmanyworld_partial_reduce(self):
+        sw=self.create_many_subworlds()
+        sw.addVariable('v_scalar', "float", "SUM")
+        sw.addVariable('v_data', "Data", "SUM")
+        sw.addJob(FunctionJob, f1)
+        sw.runJobs()    # only one world has the value
+                # value=1
+        sw.addJobPerWorld(FunctionJob, f2, expected=1, imports=['v_data']) # can everyone get the correct value
+        sw.runJobs()
+            # now we change some of the values (we know we have at least 3 worlds)
+        sw.addJob(FunctionJob, f1)
+        sw.addJob(FunctionJob, f1)
+        sw.runJobs()
+        sw.addJobPerWorld(FunctionJob, f2, expected=2) # can everyone get the correct value
+        sw.runJobs()
+            # Now we try the same with a clean start
+        sw.clearVariable('v_data')
+        sw.clearVariable('v_scalar')
+        sw.addJob(FunctionJob, f1)
+        sw.addJob(FunctionJob, f1)
+        sw.runJobs()
+        sw.addJobPerWorld(FunctionJob, f2, expected=2) # can everyone get the correct value
+        sw.runJobs()
+        
+        
+    @unittest.skipIf(mpisize<3, "test is redundant on fewer than three processes")        
+    def testmanyworld_set_vars(self):
+        sw=self.create_many_subworlds()
+        self.set_tester(sw)
+#------------------------------------------------         
+
+    def test_illegal_ws(self):
+        self.assertRaises(RuntimeError, SplitWorld, getMPISizeWorld()+1)
+   
+
+class sw_testing(sw_testmany):
+    def create_singleworld(self):
+        sw=SplitWorld(1)    
+        sw.buildDomains(self.domain_ctr, *self.domain_vec, **self.domain_dict)
+        return sw
+    
+    # This is to test multiple subworlds
+    def create_twoworlds(self):
+        sw=SplitWorld(2)
+        sw.buildDomains(self.domain_ctr, *self.domain_vec, **self.domain_dict)
+        return sw
+
+      
+
+#-------------------------------------------  
+
+    def testbigworld_singleround(self):
+        sw=self.create_singleworld()
+        sw.addJob(FunctionJob, self.pde_work) 
+        sw.runJobs()
+        
+        sw=self.create_singleworld()
+        sw.addJobPerWorld(FunctionJob, self.pde_work)
+        sw.runJobs()
+        
+        sw=self.create_singleworld()
+        for i in range(4):
+            sw.addJob(FunctionJob, self.pde_work)
+        sw.runJobs()
+        
+    def testbigworld_multiround(self):        
+        sw=self.create_singleworld()
+        for i in range(4*getMPISizeWorld()):
+            sw.addJob(FunctionJob, self.pde_work)
+        sw.runJobs()
+        for i in range(2):
+            sw.addJob(FunctionJob, self.pde_work)
+        sw.runJobs()
+        for i in range(3*getMPISizeWorld()+1):
+            sw.addJob(FunctionJob, self.pde_work)
+        sw.runJobs()
+        
+    def testbigworld_sum_vars(self):
+        sw=self.create_singleworld()
+        self.sum_vars_tester(sw) 
+        
+    def testbigworld_copy_vars(self):
+        sw=self.create_singleworld()
+        self.copy_vars_tester(sw)
+
+        
+    def testbigworld_set_vars(self):
+        sw=self.create_singleworld()
+        self.set_tester(sw)
+
+#--------------------------------------------------
+    @unittest.skipIf(mpisize%2!=0, "test only fires for even numbers of processes")
+    def test2world_singleround(self):
+        sw=self.create_twoworlds()
+        sw.addJob(FunctionJob, self.pde_work) 
+        sw.runJobs()
+        
+        sw=self.create_twoworlds()
+        sw.addJobPerWorld(FunctionJob, self.pde_work)
+        sw.runJobs()
+        
+        sw=self.create_twoworlds()
+        for i in range(4):
+            sw.addJob(FunctionJob, self.pde_work)
+        sw.runJobs()
+
+       
+    @unittest.skipIf(mpisize%2!=0, "test only fires for even numbers of processes")   
+    def test2world_multiround(self):        
+        sw=self.create_twoworlds()
+        for i in range(4*getMPISizeWorld()):
+            sw.addJob(FunctionJob, self.pde_work)
+        sw.runJobs()
+        for i in range(2):
+            sw.addJob(FunctionJob, self.pde_work)
+        sw.runJobs()
+        for i in range(3*getMPISizeWorld()+1):
+            sw.addJob(FunctionJob, self.pde_work)
+        sw.runJobs()
+
+    @unittest.skipIf(mpisize%2!=0, "test only fires for even numbers of processes")
+    def test2world_sum_vars(self):
+        sw=self.create_twoworlds()
+        self.sum_vars_tester(sw) 
+
+    @unittest.skipIf(mpisize%2!=0, "test only fires for even numbers of processes")    
+    def test2world_copy_vars(self):
+        sw=self.create_twoworlds()
+        self.copy_vars_tester(sw)        
+
+    @unittest.skipIf(mpisize%2!=0, "test only fires for even numbers of processes")
+    def test2world_set_vars(self):
+        sw=self.create_twoworlds()
+        self.set_tester(sw)
+#------------------------------------------------         
+
+
+
 
 class Test_SplitWorld(unittest.TestCase):
   """
   Class to test splitworld functions.
-  Requires subclasses to supply self.domainpars which is a list of constructor function followed
-  by arguments.
+  Requires subclasses to supply self.domainpars which is a list of constructor function followed
+  by arguments [also - self.domain.kwargs]
   eg:  if your domain is created with Rectangle(3,4), then your domainpars would be [Rectangle,3,4]
   """
   
-  
-  class PoissonJob1(Job):
-    def __init__(self, **kwargs):
-      super(PoissonJob1, self).__init__(**kwargs)
-    
-    def work(self):
-      x = self.domain.getX()
-      gammaD = whereZero(x[0])+whereZero(x[1])
-      # define PDE and get its solution u
-      mypde = Poisson(domain=self.domain)
-      mypde.setValue(f=1, q=gammaD)
-      u = mypde.getSolution()
-      self.exportValue("answer", Lsup(u))
-      return True
-
   class PoissonJob(Job):
     def __init__(self, **kwargs):
       super(Test_SplitWorld.PoissonJob, self).__init__(**kwargs)
@@ -64,7 +439,7 @@ class Test_SplitWorld(unittest.TestCase):
       # define PDE and get its solution u
       mypde = Poisson(domain=self.domain)
       mypde.setValue(f=self.jobid, q=gammaD)
-      u = Lsup(mypde.getSolution())	   # we won't actually export the value to make
+      u = Lsup(mypde.getSolution())   # we won't actually export the value to make
       self.exportValue("answer", self.jobid) # testing easier
       return True
       
@@ -80,7 +455,10 @@ class Test_SplitWorld(unittest.TestCase):
       self.exportValue("hanswer", 2*self.jobid)
       self.exportValue("v", self.jobid)
       return True
-      
+
+  eqnJob2=PoissonJob
+  eqnJob3=HelmholtzJob
+  
   class InjectJob(Job):
     """
     Tests jobs taking parameters
@@ -150,9 +528,9 @@ class Test_SplitWorld(unittest.TestCase):
   def test_faults(self):
       for x in range(1,5):
         sw=SplitWorld(getMPISizeWorld())
-        buildDomains(sw,*self.domainpars)
-        addVariable(sw, "answer", makeScalarReducer, "MAX") 
-        addJob(sw, Test_SplitWorld.ThrowJob, fault=x)
+        sw.buildDomains(*self.domainpars)
+        sw.addVariable("answer", "float", "MAX") 
+        sw.addJob(Test_SplitWorld.ThrowJob, fault=x)
         self.assertRaises(RuntimeError, sw.runJobs)
 
   @unittest.skipIf(getMPISizeWorld()>97, "Too many ranks for this test")
@@ -161,104 +539,104 @@ class Test_SplitWorld(unittest.TestCase):
       test importing, multiple phases, max as a flag
       """
       sw=SplitWorld(getMPISizeWorld())
-      buildDomains(sw,*self.domainpars)
-      addVariable(sw, "value", makeScalarReducer, "MAX")
-      addVariable(sw, "boolean", makeScalarReducer, "MAX")
+      sw.buildDomains(*self.domainpars)
+      sw.addVariable("value", "float", "MAX")
+      sw.addVariable("boolean", "float", "MAX")
          # first we will load in a value to factorise
          # Don't run this test with 99 or more processes
-      addJob(sw, Test_SplitWorld.InjectJob, name='value', val=101)      # Feed it a prime  
-      addJob(sw, Test_SplitWorld.InjectJob, name='boolean', val=0)              # so we have a value
+      sw.addJob(Test_SplitWorld.InjectJob, name='value', val=101)      # Feed it a prime  
+      sw.addJob(Test_SplitWorld.InjectJob, name='boolean', val=0)              # so we have a value
       sw.runJobs()
       for x in range(2,getMPISizeWorld()+2):
-        addJob(sw, Test_SplitWorld.FactorJob, fact=x)
+        sw.addJob(Test_SplitWorld.FactorJob, fact=x)
       sw.runJobs()
-      self.assertEquals(sw.getDoubleVariable('boolean'),0)
+      self.assertEquals(sw.getFloatVariable('boolean'),0)
       sw.clearVariable('value')
       sw.clearVariable('boolean')
-      addJob(sw, Test_SplitWorld.InjectJob, name='value', val=101)      # Feed it a prime  
-      addJob(sw, Test_SplitWorld.InjectJob, name='boolean', val=0)              # so we have a value
+      sw.addJob(Test_SplitWorld.InjectJob, name='value', val=101)      # Feed it a prime  
+      sw.addJob(Test_SplitWorld.InjectJob, name='boolean', val=0)              # so we have a value
       sw.runJobs()
       sw.clearVariable("value")
       
         # Now test with a value which has a factor
-      addJob(sw, Test_SplitWorld.InjectJob, name='value', val=100)       # Feed it a prime  
-      addJob(sw, Test_SplitWorld.InjectJob, name='boolean', val=0)               # so we have a value
+      sw.addJob(Test_SplitWorld.InjectJob, name='value', val=100)       # Feed it a prime  
+      sw.addJob(Test_SplitWorld.InjectJob, name='boolean', val=0)               # so we have a value
       sw.runJobs()
       m=0
       for x in range(2,getMPISizeWorld()+2):
-        addJob(sw, Test_SplitWorld.FactorJob, fact=x)
+        sw.addJob(Test_SplitWorld.FactorJob, fact=x)
         if 100%x==0:
           m=x
       sw.runJobs()
-      self.assertEquals(sw.getDoubleVariable('boolean'),m)      
+      self.assertEquals(sw.getFloatVariable('boolean'),m)      
       
   def test_split_simple_solve(self):
     """
     Solve a single equation
     """
     sw=SplitWorld(getMPISizeWorld())
-    buildDomains(sw,*self.domainpars)
-    addVariable(sw, "answer", makeScalarReducer, "SUM")
-    addJob(sw, Test_SplitWorld.PoissonJob)
+    sw.buildDomains(*self.domainpars)
+    sw.addVariable("answer", "float", "SUM")
+    sw.addJob(self.eqnJob2)
     sw.runJobs()
-    self.assertEquals(sw.getDoubleVariable("answer"),1)
+    self.assertEquals(sw.getFloatVariable("answer"),1)
     
   def test_split_simple_solve_multiple(self):
     """
     Solve a number of the same equation in one batch
     """
     sw=SplitWorld(getMPISizeWorld())
-    buildDomains(sw,*self.domainpars)
-    addVariable(sw, "answer", makeScalarReducer, "SUM")
+    sw.buildDomains(*self.domainpars)
+    sw.addVariable("answer", "float", "SUM")
         # this gives us 1 job per world
     total=0
     jobid=1
     for x in range(0,getMPISizeWorld()):
-        addJob(sw, Test_SplitWorld.PoissonJob)
+        sw.addJob(self.eqnJob2)
         total+=jobid
         jobid+=1
     sw.runJobs()
-    self.assertEquals(sw.getDoubleVariable("answer"), total)
+    self.assertEquals(sw.getFloatVariable("answer"), total)
     
   def test_split_simple_and_dummy(self):
     """
     Solve a number of the same equation with some worlds doing dummy Jobs
     """
     sw=SplitWorld(getMPISizeWorld())
-    buildDomains(sw,*self.domainpars)
-    addVariable(sw, "answer", makeScalarReducer, "SUM")
+    sw.buildDomains(*self.domainpars)
+    sw.addVariable("answer", "float", "SUM")
         # this gives us 1 job per world
     total=0
     mid=getMPISizeWorld()//2
     if getMPISizeWorld()%2==1:
       mid=mid+1
     for x in range(0,mid):
-        addJob(sw, Test_SplitWorld.PoissonJob)
+        sw.addJob(self.eqnJob2)
         total=total+(x+1)
     for x in range(0,mid):
-        addJob(sw, Test_SplitWorld.DummyJob)
+        sw.addJob(Test_SplitWorld.DummyJob)
     sw.runJobs()
       # expecting this to fail until I work out the answer
-    self.assertEqual(sw.getDoubleVariable("answer"), total)
+    self.assertEqual(sw.getFloatVariable("answer"), total)
     
   def test_split_simple_and_empty(self):
     """
     Solve a number of the same equation with some worlds doing nothing
     """
     sw=SplitWorld(getMPISizeWorld())
-    buildDomains(sw, *self.domainpars)
-    addVariable(sw, "answer", makeScalarReducer, "SUM")
+    sw.buildDomains( *self.domainpars)
+    sw.addVariable("answer", "float", "SUM")
         # this gives us at most 1 job per world
     total=0    
     mid=getMPISizeWorld()//2
     if getMPISizeWorld()%2==1:
       mid=mid+1    
     for x in range(0,mid):
-        addJob(sw, Test_SplitWorld.PoissonJob)
+        sw.addJob(self.eqnJob2)
         total=total+(x+1)
     sw.runJobs()
       # expecting this to fail until I work out the answer
-    self.assertEquals(sw.getDoubleVariable("answer"),total)    
+    self.assertEquals(sw.getFloatVariable("answer"),total)    
     
     
   def test_split_multiple_batches(self):
@@ -266,24 +644,24 @@ class Test_SplitWorld(unittest.TestCase):
     Solve a number of the same equation in multiple batches
     """
     sw=SplitWorld(getMPISizeWorld())
-    buildDomains(sw,*self.domainpars)
-    addVariable(sw, "answer", makeScalarReducer, "SUM")
+    sw.buildDomains(*self.domainpars)
+    sw.addVariable("answer", "float", "SUM")
         # this gives us 1 job per world
     total=0
     sw.runJobs()
     for x in range(0,getMPISizeWorld()):
-        addJob(sw, Test_SplitWorld.PoissonJob)
+        sw.addJob(self.eqnJob2)
         total=total+x
     sw.runJobs()
     sw.runJobs()
     sw.clearVariable("answer")
     total=0
     for x in range(0,getMPISizeWorld()):
-        addJob(sw, Test_SplitWorld.PoissonJob)
+        sw.addJob(self.eqnJob2)
         total=total+(x+1+getMPISizeWorld())
     sw.runJobs()
       # expecting this to fail until I work out the answer
-    self.assertEquals(sw.getDoubleVariable("answer"),total)    
+    self.assertEquals(sw.getFloatVariable("answer"),total)    
   
   @unittest.skipIf(getMPISizeWorld()%2!=0, "Test requires even number of processes")
   def test_multiple_equations_size2world(self):
@@ -295,26 +673,26 @@ class Test_SplitWorld(unittest.TestCase):
     """
     wc=getMPISizeWorld()//2
     sw=SplitWorld(wc)
-    buildDomains(sw, *self.domainpars)
-    addVariable(sw, "answer", makeScalarReducer, "SUM")   
-    addVariable(sw, "hanswer", makeScalarReducer, "SUM")  
-    addVariable(sw, "v", makeScalarReducer, "MAX")
+    sw.buildDomains( *self.domainpars)
+    sw.addVariable("answer", "float", "SUM")   
+    sw.addVariable("hanswer", "float", "SUM")  
+    sw.addVariable("v", "float", "MAX")
     
     tot=0
     jobid=1
        #first put jobs of the same type close.
     for x in range(0, max(wc//3,1)):
-      addJob(sw, Test_SplitWorld.PoissonJob)
+      sw.addJob(self.eqnJob2)
       jobid+=1
     for x in range(0, max(wc//3,1)):
-      addJob(sw, Test_SplitWorld.HelmholtzJob)
+      sw.addJob(self.eqnJob3)
       tot+=2*(jobid)
       jobid+=1
     for x in range(0, max(wc//3,1)):
-      addJob(sw, Test_SplitWorld.DummyJob)
+      sw.addJob(Test_SplitWorld.DummyJob)
       jobid+=1
     sw.runJobs()
-    ha=sw.getDoubleVariable("hanswer")
+    ha=sw.getFloatVariable("hanswer")
     self.assertEquals(ha, tot)
     sw.clearVariable("answer")
     sw.clearVariable("hanswer")
@@ -322,17 +700,17 @@ class Test_SplitWorld(unittest.TestCase):
     tot=0
       # similar but separated by dummy Jobs
     for x in range(0, max(wc//3,1)):
-      addJob(sw, Test_SplitWorld.PoissonJob)
+      sw.addJob(self.eqnJob2)
       jobid+=1
     for x in range(0, max(wc//3,1)):
-      addJob(sw, Test_SplitWorld.DummyJob)      
+      sw.addJob(Test_SplitWorld.DummyJob)      
       jobid+=1
     for x in range(0, max(wc//3,1)):
-      addJob(sw, Test_SplitWorld.HelmholtzJob)
+      sw.addJob(self.eqnJob3)
       tot+=2*jobid
       jobid+=1
     sw.runJobs()
-    ha=sw.getDoubleVariable("hanswer")
+    ha=sw.getFloatVariable("hanswer")
     self.assertEquals(ha, tot)
     sw.clearVariable("answer")
     sw.clearVariable("hanswer")
@@ -340,14 +718,14 @@ class Test_SplitWorld(unittest.TestCase):
       # mixed
     tot=0
     for x in range(0, max(wc//2,1)):
-      addJob(sw, Test_SplitWorld.HelmholtzJob)
+      sw.addJob(self.eqnJob3)
       tot+=2*jobid
-      addJob(sw, Test_SplitWorld.DummyJob)
+      sw.addJob(Test_SplitWorld.DummyJob)
       jobid+=2
-      addJob(sw, Test_SplitWorld.PoissonJob)
+      sw.addJob(self.eqnJob2)
       jobid+=1
     sw.runJobs()
-    ha=sw.getDoubleVariable("hanswer")
+    ha=sw.getFloatVariable("hanswer")
     self.assertEquals(ha, tot)    
 
   @unittest.skipIf(getMPISizeWorld()%4!=0, "Test requires number of processes divisible by 4")
@@ -360,26 +738,26 @@ class Test_SplitWorld(unittest.TestCase):
     """
     wc=getMPISizeWorld()//4
     sw=SplitWorld(wc)
-    buildDomains(sw,*self.domainpars)
-    addVariable(sw, "answer", makeScalarReducer, "SUM")   
-    addVariable(sw, "hanswer", makeScalarReducer, "SUM")  
-    addVariable(sw, "v", makeScalarReducer, "MAX")
+    sw.buildDomains(*self.domainpars)
+    sw.addVariable("answer", "float", "SUM")   
+    sw.addVariable("hanswer", "float", "SUM")  
+    sw.addVariable("v", "float", "MAX")
     
     jobid=1
     tot=0
        #first put jobs of the same type close.
     for x in range(0, max(wc//2,1)):
-      addJob(sw, Test_SplitWorld.PoissonJob)
+      sw.addJob(self.eqnJob2)
       jobid+=1
     for x in range(0, max(wc//2,1)):
-      addJob(sw, Test_SplitWorld.HelmholtzJob)
+      sw.addJob(self.eqnJob3)
       tot+=2*jobid
       jobid+=1      
     for x in range(0, max(wc//2,1)):
-      addJob(sw, Test_SplitWorld.DummyJob)
+      sw.addJob(Test_SplitWorld.DummyJob)
       jobid+=1
     sw.runJobs()
-    ha=sw.getDoubleVariable("hanswer")
+    ha=sw.getFloatVariable("hanswer")
     self.assertEquals(ha, tot)
     sw.clearVariable("answer")
     sw.clearVariable("hanswer")
@@ -387,17 +765,17 @@ class Test_SplitWorld(unittest.TestCase):
     tot=0
       # similar but separated by dummy Jobs
     for x in range(0, max(wc//2,1)):
-      addJob(sw, Test_SplitWorld.PoissonJob)
+      sw.addJob(self.eqnJob2)
       jobid+=1      
     for x in range(0, max(wc//2,1)):
-      addJob(sw, Test_SplitWorld.DummyJob) 
+      sw.addJob(Test_SplitWorld.DummyJob) 
       jobid+=1     
     for x in range(0, max(wc//2,1)):
-      addJob(sw, Test_SplitWorld.HelmholtzJob)
+      sw.addJob(self.eqnJob3)
       tot+=2*jobid
       jobid+=1
     sw.runJobs()
-    ha=sw.getDoubleVariable("hanswer")
+    ha=sw.getFloatVariable("hanswer")
     self.assertEquals(ha, tot)
     sw.clearVariable("answer")
     sw.clearVariable("hanswer")
@@ -405,14 +783,14 @@ class Test_SplitWorld(unittest.TestCase):
     tot=0
       # mixed
     for x in range(0, max(wc//2,1)):
-      addJob(sw, Test_SplitWorld.HelmholtzJob)
+      sw.addJob(self.eqnJob3)
       tot+=2*jobid
       jobid+=1
-      addJob(sw, Test_SplitWorld.DummyJob)
-      addJob(sw, Test_SplitWorld.PoissonJob)
+      sw.addJob(Test_SplitWorld.DummyJob)
+      sw.addJob(self.eqnJob2)
       jobid+=2
     sw.runJobs()
-    ha=sw.getDoubleVariable("hanswer")
+    ha=sw.getFloatVariable("hanswer")
     self.assertEquals(ha, tot)        
     
     
@@ -423,26 +801,26 @@ class Test_SplitWorld(unittest.TestCase):
     worlds in different patterns
     """
     sw=SplitWorld(getMPISizeWorld())
-    buildDomains(sw,*self.domainpars)
-    addVariable(sw, "answer", makeScalarReducer, "SUM")   
-    addVariable(sw, "hanswer", makeScalarReducer, "SUM")  
-    addVariable(sw, "v", makeScalarReducer, "MAX")
+    sw.buildDomains(*self.domainpars)
+    sw.addVariable("answer", "float", "SUM")   
+    sw.addVariable("hanswer", "float", "SUM")  
+    sw.addVariable("v", "float", "MAX")
     
     tot=0
     jobid=1
        #first put jobs of the same type close together.
     for x in range(0,max(getMPISizeWorld()//3,1)):
-      addJob(sw, Test_SplitWorld.PoissonJob)
+      sw.addJob(self.eqnJob2)
       jobid+=1
     for x in range(0,max(getMPISizeWorld()//3,1)):
-      addJob(sw, Test_SplitWorld.HelmholtzJob)
+      sw.addJob(self.eqnJob3)
       tot+=2*jobid
       jobid+=1
     for x in range(0,getMPISizeWorld()//3):
-      addJob(sw, Test_SplitWorld.DummyJob)
+      sw.addJob(Test_SplitWorld.DummyJob)
       jobid+=1
     sw.runJobs()
-    ha=sw.getDoubleVariable("hanswer")
+    ha=sw.getFloatVariable("hanswer")
     self.assertEquals(ha, tot)
     sw.clearVariable("answer")
     sw.clearVariable("hanswer")
@@ -450,17 +828,17 @@ class Test_SplitWorld(unittest.TestCase):
     tot=0
       # similar but separated by dummy Jobs
     for x in range(0,max(getMPISizeWorld()//3,1)):
-      addJob(sw, Test_SplitWorld.PoissonJob)
+      sw.addJob(self.eqnJob2)
       jobid+=1
     for x in range(0,getMPISizeWorld()//3):
-      addJob(sw, Test_SplitWorld.DummyJob)      
+      sw.addJob(Test_SplitWorld.DummyJob)      
       jobid+=1      
     for x in range(0,max(getMPISizeWorld()//3,1)):
-      addJob(sw, Test_SplitWorld.HelmholtzJob)
+      sw.addJob(self.eqnJob3)
       tot+=2*jobid
       jobid+=1      
     sw.runJobs()
-    ha=sw.getDoubleVariable("hanswer")
+    ha=sw.getFloatVariable("hanswer")
     self.assertEquals(ha, tot)
     sw.clearVariable("answer")
     sw.clearVariable("hanswer")
@@ -468,11 +846,11 @@ class Test_SplitWorld(unittest.TestCase):
     tot=0
       # mixed
     for x in range(0, max(getMPISizeWorld()//2,1)):
-      addJob(sw, Test_SplitWorld.HelmholtzJob)
+      sw.addJob(self.eqnJob3)
       tot+=jobid*2
-      addJob(sw, Test_SplitWorld.DummyJob)
-      addJob(sw, Test_SplitWorld.PoissonJob)
+      sw.addJob(Test_SplitWorld.DummyJob)
+      sw.addJob(self.eqnJob2)
       jobid+=3
     sw.runJobs()
-    ha=sw.getDoubleVariable("hanswer")
+    ha=sw.getFloatVariable("hanswer")
     self.assertEquals(ha, tot)     
diff --git a/escriptcore/test/python/test_symfuncs.py b/escriptcore/test/python/test_symfuncs.py
index 8dcf080..81520fb 100644
--- a/escriptcore/test/python/test_symfuncs.py
+++ b/escriptcore/test/python/test_symfuncs.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_util.py b/escriptcore/test/python/test_util.py
index 946d1d2..034361e 100644
--- a/escriptcore/test/python/test_util.py
+++ b/escriptcore/test/python/test_util.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_util_NaN_funcs.py b/escriptcore/test/python/test_util_NaN_funcs.py
index 1d103c5..9e89701 100644
--- a/escriptcore/test/python/test_util_NaN_funcs.py
+++ b/escriptcore/test/python/test_util_NaN_funcs.py
@@ -1,3 +1,21 @@
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+from __future__ import print_function, division
+
 #here be nan tests
 import esys.escript as es
 import esys.escriptcore.utestselect as unittest
@@ -29,4 +47,4 @@ class Test_util_NaN_funcs(unittest.TestCase):
         dat=(sigma*0)/0
         sigma.setTaggedValue(1 , es.Lsup(dat))
         sigma.replaceNaN(10)
-        self.assertEqual(es.Lsup(sigma), 10)
\ No newline at end of file
+        self.assertEqual(es.Lsup(sigma), 10)
diff --git a/escriptcore/test/python/test_util_base.py b/escriptcore/test/python/test_util_base.py
index ec45540..186e12f 100644
--- a/escriptcore/test/python/test_util_base.py
+++ b/escriptcore/test/python/test_util_base.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_util_binary_no_tagged_data.py b/escriptcore/test/python/test_util_binary_no_tagged_data.py
index d1bde59..842bf04 100644
--- a/escriptcore/test/python/test_util_binary_no_tagged_data.py
+++ b/escriptcore/test/python/test_util_binary_no_tagged_data.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_util_binary_with_tagged_data.py b/escriptcore/test/python/test_util_binary_with_tagged_data.py
index 9e93484..9c93244 100644
--- a/escriptcore/test/python/test_util_binary_with_tagged_data.py
+++ b/escriptcore/test/python/test_util_binary_with_tagged_data.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_util_overloaded_binary_no_tagged_data.py b/escriptcore/test/python/test_util_overloaded_binary_no_tagged_data.py
index 79c8288..7b903ee 100644
--- a/escriptcore/test/python/test_util_overloaded_binary_no_tagged_data.py
+++ b/escriptcore/test/python/test_util_overloaded_binary_no_tagged_data.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_util_overloaded_binary_with_tagged_data.py b/escriptcore/test/python/test_util_overloaded_binary_with_tagged_data.py
index 4a8c28e..309e959 100644
--- a/escriptcore/test/python/test_util_overloaded_binary_with_tagged_data.py
+++ b/escriptcore/test/python/test_util_overloaded_binary_with_tagged_data.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_util_reduction_no_tagged_data.py b/escriptcore/test/python/test_util_reduction_no_tagged_data.py
index 9550e9a..cbb47fd 100644
--- a/escriptcore/test/python/test_util_reduction_no_tagged_data.py
+++ b/escriptcore/test/python/test_util_reduction_no_tagged_data.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_util_reduction_with_tagged_data.py b/escriptcore/test/python/test_util_reduction_with_tagged_data.py
index 78e1a52..34741d4 100644
--- a/escriptcore/test/python/test_util_reduction_with_tagged_data.py
+++ b/escriptcore/test/python/test_util_reduction_with_tagged_data.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -353,4 +355,4 @@ class Test_util_reduction_with_tagged_data(Test_util_base):
                 arg=arg/arg     #will give a NaN in the last position, yes we could have just sqrt(arg) but I wanted last pos
                 self.assertTrue(numpy.isnan(sup(arg)),"wrong result")
                 self.assertTrue(numpy.isnan(inf(arg)),"wrong result")
-                self.assertTrue(numpy.isnan(Lsup(arg)),"wrong result")  
\ No newline at end of file
+                self.assertTrue(numpy.isnan(Lsup(arg)),"wrong result")  
diff --git a/escriptcore/test/python/test_util_slicing_no_tagged_data.py b/escriptcore/test/python/test_util_slicing_no_tagged_data.py
index e5b3eb5..d9ceff1 100644
--- a/escriptcore/test/python/test_util_slicing_no_tagged_data.py
+++ b/escriptcore/test/python/test_util_slicing_no_tagged_data.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_util_slicing_with_tagged_data.py b/escriptcore/test/python/test_util_slicing_with_tagged_data.py
index ccac09d..c294b41 100644
--- a/escriptcore/test/python/test_util_slicing_with_tagged_data.py
+++ b/escriptcore/test/python/test_util_slicing_with_tagged_data.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_util_spatial_functions1.py b/escriptcore/test/python/test_util_spatial_functions1.py
index c423e4b..b1e472c 100644
--- a/escriptcore/test/python/test_util_spatial_functions1.py
+++ b/escriptcore/test/python/test_util_spatial_functions1.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_util_spatial_functions2.py b/escriptcore/test/python/test_util_spatial_functions2.py
index 723bb99..209995e 100644
--- a/escriptcore/test/python/test_util_spatial_functions2.py
+++ b/escriptcore/test/python/test_util_spatial_functions2.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_util_spatial_functions3.py b/escriptcore/test/python/test_util_spatial_functions3.py
index 809442a..9de3c9e 100644
--- a/escriptcore/test/python/test_util_spatial_functions3.py
+++ b/escriptcore/test/python/test_util_spatial_functions3.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_util_unary_no_tagged_data.py b/escriptcore/test/python/test_util_unary_no_tagged_data.py
index a488497..040020a 100644
--- a/escriptcore/test/python/test_util_unary_no_tagged_data.py
+++ b/escriptcore/test/python/test_util_unary_no_tagged_data.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/escriptcore/test/python/test_util_unary_with_tagged_data.py b/escriptcore/test/python/test_util_unary_with_tagged_data.py
index e18a7ec..4a0eefe 100644
--- a/escriptcore/test/python/test_util_unary_with_tagged_data.py
+++ b/escriptcore/test/python/test_util_unary_with_tagged_data.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/esysUtils/src/Esys_MPI.cpp b/esysUtils/src/Esys_MPI.cpp
index bf18999..d5b662d 100644
--- a/esysUtils/src/Esys_MPI.cpp
+++ b/esysUtils/src/Esys_MPI.cpp
@@ -48,10 +48,18 @@ JMPI_::JMPI_(MPI_Comm mpicomm, bool owncom)
 {
         msg_tag_counter = 0;
 #ifdef ESYS_MPI
+    if (mpicomm!=MPI_COMM_NULL)
+    {
         if (MPI_Comm_rank(comm, &rank)!=MPI_SUCCESS || MPI_Comm_size(comm, &size)!=MPI_SUCCESS)
         {
             Esys_setError( ESYS_MPI_ERROR, "Esys_MPIInfo_alloc : error finding comm rank/size" );
         }
+    }
+    else
+    {
+	rank=0;
+	size=0;
+    }
 #else
         rank=0;
         size=1;        
@@ -61,7 +69,7 @@ JMPI_::JMPI_(MPI_Comm mpicomm, bool owncom)
 JMPI_::~JMPI_()
 {
 #ifdef ESYS_MPI
-    if (ownscomm)
+    if (ownscomm && (comm!=MPI_COMM_NULL))
     {
         MPI_Comm_free(&comm);
     }
diff --git a/esysUtils/src/Esys_MPI.h b/esysUtils/src/Esys_MPI.h
index d3840b5..58ed11e 100644
--- a/esysUtils/src/Esys_MPI.h
+++ b/esysUtils/src/Esys_MPI.h
@@ -50,6 +50,7 @@
    #define MPI_MIN 101
    #define MPI_MAX 102
 
+   #define MPI_OP_NULL 17
 // end MPI_op
 
    
@@ -102,6 +103,11 @@ public:
     {
 	msg_tag_counter%=1010201;
     }
+
+    bool isValid()
+    {
+	return comm!=MPI_COMM_NULL;
+    }
 private:
     JMPI_(MPI_Comm comm, bool ocomm);
     friend JMPI makeInfo(MPI_Comm comm, bool owncom);
@@ -145,7 +151,7 @@ ESYSUTILS_DLL_API
 bool checkResult(int input, int& output, const JMPI& comm);
 
 
-// Do not cope with nested calls
+// Does not cope with nested calls
 class NoCOMM_WORLD
 {
 public:
diff --git a/esysUtils/src/pyerr.cpp b/esysUtils/src/pyerr.cpp
index be8ea2a..ae66066 100644
--- a/esysUtils/src/pyerr.cpp
+++ b/esysUtils/src/pyerr.cpp
@@ -14,6 +14,11 @@
 *****************************************************************************/
 
 #define ESNEEDPYTHON
+#include <boost/python/object.hpp>
+#include <boost/python/import.hpp>
+#include <boost/python/list.hpp>
+#include <boost/python/extract.hpp>
+
 #include "first.h"
 #include "pyerr.h"
 
@@ -22,20 +27,48 @@
 void getStringFromPyException(boost::python::error_already_set e, std::string& errormsg)
 {
 	using namespace boost::python;
+
   	PyObject* ptype=0;
  	PyObject* pvalue=0;
  	PyObject* ptraceback=0;
  	PyErr_Fetch(&ptype, &pvalue, &ptraceback);
 	PyErr_NormalizeException(&ptype, &pvalue, &ptraceback);
- 
-	PyObject* errobj=PyObject_Str(pvalue);
+	object tb = import("traceback"); 
+	object trace(handle<>(borrowed(ptraceback)));
+	object li=tb.attr("extract_tb")(trace);
+	object li2=tb.attr("format_list")(li);
+	list l=extract<list>(li2)();
+	
+
 
 #ifdef ESPYTHON3	
+	std::string ss;
+	for (int i=0;i<len(l);++i) {
+	    object o=l[i];
+	    PyObject* rr=PyUnicode_AsASCIIString(o.ptr());
+	    ss+=PyBytes_AsString(rr);
+	    Py_XDECREF(rr);
+	}
+	
+	PyObject* errobj=PyObject_Str(pvalue);	
+	
 	PyObject* rr=PyUnicode_AsASCIIString(errobj);
 	errormsg=PyBytes_AsString(rr);
+	errormsg+="\n";
 	Py_XDECREF(rr);
+	errormsg+=ss;
 #else
+	
+	std::string ss;
+	for (int i=0;i<len(l);++i) {
+	    ss+=extract<std::string>(l[i])();
+	}
+	
+	PyObject* errobj=PyObject_Str(pvalue);	
+	
 	errormsg=PyString_AsString(errobj);
+	errormsg+="\n";
+	errormsg+=ss;
 #endif
 	Py_XDECREF(errobj);
 
diff --git a/esysUtils/src/pyerr.h b/esysUtils/src/pyerr.h
index 64ce9c9..ab9c1c2 100644
--- a/esysUtils/src/pyerr.h
+++ b/esysUtils/src/pyerr.h
@@ -25,4 +25,5 @@
 ESYSUTILS_DLL_API
 void getStringFromPyException(boost::python::error_already_set e, std::string& errormsg);
 
-#endif
\ No newline at end of file
+#endif
+
diff --git a/finley/benchmarks/finleybench.py b/finley/benchmarks/finleybench.py
index 2a8be13..f3f84d0 100644
--- a/finley/benchmarks/finleybench.py
+++ b/finley/benchmarks/finleybench.py
@@ -13,7 +13,8 @@
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
-from __future__ import print_function
+
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/finley/benchmarks/runbenchmark.py b/finley/benchmarks/runbenchmark.py
index 82b7cc2..8489d8c 100755
--- a/finley/benchmarks/runbenchmark.py
+++ b/finley/benchmarks/runbenchmark.py
@@ -13,7 +13,8 @@
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
-from __future__ import print_function
+
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/finley/py_src/__init__.py b/finley/py_src/__init__.py
index 2f30c7f..be066a0 100644
--- a/finley/py_src/__init__.py
+++ b/finley/py_src/__init__.py
@@ -17,6 +17,8 @@
 """Our most general domain representation. Imports submodules into its namespace
 """
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -26,10 +28,10 @@ __url__="https://launchpad.net/escript-finley"
 
 
 import esys.escript
-import esys.pasowrap	#if you don't import this, you won't be able to see methods not in AbstractSystemmatrix
+import esys.pasowrap    #if you don't import this, you won't be able to see methods not in AbstractSystemmatrix
 #from esys.escript import *
 from .finleycpp import *
 from .factorywrappers import *
 from .readers import *
 
-__nodocorecursion=['finleycpp', 'factorywrappers', 'readers']
\ No newline at end of file
+__nodocorecursion=['finleycpp', 'factorywrappers', 'readers']
diff --git a/finley/py_src/factorywrappers.py b/finley/py_src/factorywrappers.py
index 6b5a798..d0da25a 100644
--- a/finley/py_src/factorywrappers.py
+++ b/finley/py_src/factorywrappers.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2011-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -69,9 +71,9 @@ def Rectangle(n0=1, n1=1, order=1, l0=1.0, l1=1.0, periodic0=False, periodic1=Fa
     if 'diracTags' in kwargs:
         tags=kwargs['diracTags']
     faceon=useElementsOnFace
-    if useElementsOnFace is None:	#We want to use 1 as the default, but only where it makes sense
+    if useElementsOnFace is None:       #We want to use 1 as the default, but only where it makes sense
         if useFullElementOrder or order==-1:
-            faceon=0	#Don't use it
+            faceon=0    #Don't use it
         else:
             faceon=1
     args=[n0, n1, order, l0, l1, periodic0, periodic1, integrationOrder, 
@@ -94,9 +96,9 @@ def Brick(n0=1, n1=1, n2=1, order=1, l0=1.0, l1=1.0, l2=1.0, periodic0=0, period
     if 'diracTags' in kwargs:
         tags=kwargs['diracTags']
     faceon=useElementsOnFace
-    if useElementsOnFace is None:	#We want to use 1 as the default, but only where it makes sense
+    if useElementsOnFace is None:       #We want to use 1 as the default, but only where it makes sense
         if useFullElementOrder or order==-1:
-            faceon=0	#Don't use it
+            faceon=0    #Don't use it
         else:
             faceon=1
     args=[n0, n1, n2, order, l0, l1, l2, periodic0,  periodic1, periodic2,
diff --git a/finley/py_src/readers.py b/finley/py_src/readers.py
index 6531a88..9579902 100644
--- a/finley/py_src/readers.py
+++ b/finley/py_src/readers.py
@@ -15,6 +15,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/src/Assemble.h b/finley/src/Assemble.h
index 4a1e4e8..1ed93c8 100644
--- a/finley/src/Assemble.h
+++ b/finley/src/Assemble.h
@@ -55,18 +55,18 @@ struct AssembleParameters {
     /// leading dimension of element node table
     int NN;
     /// number of elements
-    int numElements;
+    dim_t numElements;
 
     int numEqu;
-    const int* row_DOF;
-    int row_DOF_UpperBound;
+    const index_t* row_DOF;
+    index_t row_DOF_UpperBound;
     ElementFile_Jacobians* row_jac;
     const int* row_node;
     int row_numShapesTotal;
     int row_numShapes;
     int numComp;
-    const int* col_DOF;
-    int col_DOF_UpperBound;
+    const index_t* col_DOF;
+    index_t col_DOF_UpperBound;
     ElementFile_Jacobians* col_jac;
     const int* col_node;
     int col_numShapesTotal;
@@ -123,8 +123,8 @@ void Assemble_PDE_System_C(const AssembleParameters& p, const escript::Data& D,
                            const escript::Data& Y);
 
 void Assemble_addToSystemMatrix(paso::SystemMatrix_ptr S, const int NN_Equa,
-        const int* Nodes_Equa, const int num_Equa, const int NN_Sol,
-        const int* Nodes_Sol, const int num_Sol, const double* array);
+        const index_t* Nodes_Equa, const int num_Equa, const int NN_Sol,
+        const index_t* Nodes_Sol, const int num_Sol, const double* array);
 
 void Assemble_LumpedSystem(const NodeFile* nodes, const ElementFile* elements,
                            escript::Data& lumpedMat, const escript::Data& D,
@@ -158,59 +158,59 @@ void Assemble_interpolate(const NodeFile* nodes, const ElementFile* elements,
 
 void Assemble_jacobians_1D(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId);
+                           double* dTdX, double* volume, const index_t* elementId);
 void Assemble_jacobians_2D(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId);
+                           double* dTdX, double* volume, const index_t* elementId);
 void Assemble_jacobians_2D_M1D_E1D(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId);
+                           double* dTdX, double* volume, const index_t* elementId);
 void Assemble_jacobians_2D_M1D_E1D_C(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId);
+                           double* dTdX, double* volume, const index_t* elementId);
 void Assemble_jacobians_2D_M1D_E2D(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId);
+                           double* dTdX, double* volume, const index_t* elementId);
 void Assemble_jacobians_2D_M1D_E2D_C(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId);
+                           double* dTdX, double* volume, const index_t* elementId);
 void Assemble_jacobians_3D(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId);
+                           double* dTdX, double* volume, const index_t* elementId);
 void Assemble_jacobians_3D_M2D_E2D(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId);
+                           double* dTdX, double* volume, const index_t* elementId);
 void Assemble_jacobians_3D_M2D_E2D_C(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId);
+                           double* dTdX, double* volume, const index_t* elementId);
 void Assemble_jacobians_3D_M2D_E3D(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId);
+                           double* dTdX, double* volume, const index_t* elementId);
 void Assemble_jacobians_3D_M2D_E3D_C(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId);
+                           double* dTdX, double* volume, const index_t* elementId);
 
 } // namespace finley
 
diff --git a/finley/src/Assemble_CopyNodalData.cpp b/finley/src/Assemble_CopyNodalData.cpp
index 4136628..895f149 100644
--- a/finley/src/Assemble_CopyNodalData.cpp
+++ b/finley/src/Assemble_CopyNodalData.cpp
@@ -77,7 +77,7 @@ void Assemble_CopyNodalData(const NodeFile* nodes, escript::Data& out,
         setError(TYPE_ERROR, "Assemble_CopyNodalData: illegal function space type for target object");
     }
 
-    int numOut=0;
+    dim_t numOut=0;
     switch (out_data_type) {
         case FINLEY_NODES:
             numOut=nodes->getNumNodes();
@@ -113,28 +113,28 @@ void Assemble_CopyNodalData(const NodeFile* nodes, escript::Data& out,
         out.requireWrite();
         if (out_data_type == FINLEY_NODES) {
 #pragma omp parallel for
-            for (int n=0; n<numOut; n++) {
+            for (index_t n=0; n<numOut; n++) {
                 memcpy(out.getSampleDataRW(n), in.getSampleDataRO(n), numComps_size);
             }
         } else if (out_data_type == FINLEY_REDUCED_NODES) {
-            const std::vector<int>& map = nodes->borrowReducedNodesTarget();
-            const int mapSize = map.size();
+            const std::vector<index_t>& map = nodes->borrowReducedNodesTarget();
+            const dim_t mapSize = map.size();
 #pragma omp parallel for
-            for (int n=0; n<mapSize; n++) {
+            for (index_t n=0; n<mapSize; n++) {
                 memcpy(out.getSampleDataRW(n), in.getSampleDataRO(map[n]),
                        numComps_size);
             }
         } else if (out_data_type == FINLEY_DEGREES_OF_FREEDOM) {
-            const std::vector<int>& map = nodes->borrowDegreesOfFreedomTarget();
+            const std::vector<index_t>& map = nodes->borrowDegreesOfFreedomTarget();
 #pragma omp parallel for
-            for (int n=0; n<numOut; n++) {
+            for (index_t n=0; n<numOut; n++) {
                 memcpy(out.getSampleDataRW(n), in.getSampleDataRO(map[n]),
                        numComps_size);
             }
         } else if (out_data_type == FINLEY_REDUCED_DEGREES_OF_FREEDOM) {
-            const std::vector<int>& map = nodes->borrowReducedDegreesOfFreedomTarget();
+            const std::vector<index_t>& map = nodes->borrowReducedDegreesOfFreedomTarget();
 #pragma omp parallel for
-            for (int n=0; n<numOut; n++) {
+            for (index_t n=0; n<numOut; n++) {
                 memcpy(out.getSampleDataRW(n), in.getSampleDataRO(map[n]),
                        numComps_size);
             }
@@ -146,19 +146,19 @@ void Assemble_CopyNodalData(const NodeFile* nodes, escript::Data& out,
             setError(TYPE_ERROR,"Assemble_CopyNodalData: cannot copy from reduced nodes to nodes.");
         } else if (out_data_type == FINLEY_REDUCED_NODES) {
             out.requireWrite();
-            const int nNodes = nodes->getNumNodes();
+            const dim_t nNodes = nodes->getNumNodes();
 #pragma omp parallel for
-            for (int n=0; n < nNodes; n++) {
+            for (index_t n=0; n < nNodes; n++) {
                 memcpy(out.getSampleDataRW(n), in.getSampleDataRO(n), numComps_size);
             }
        } else if (out_data_type == FINLEY_DEGREES_OF_FREEDOM) {
             setError(TYPE_ERROR,"Assemble_CopyNodalData: cannot copy from reduced nodes to degrees of freedom.");
        } else if (out_data_type == FINLEY_REDUCED_DEGREES_OF_FREEDOM) {
             out.requireWrite();
-            const int* target = nodes->borrowTargetReducedNodes();
-            const std::vector<int>& map = nodes->borrowReducedDegreesOfFreedomTarget();
+            const index_t* target = nodes->borrowTargetReducedNodes();
+            const std::vector<index_t>& map = nodes->borrowReducedDegreesOfFreedomTarget();
 #pragma omp parallel for
-            for (int n=0; n<numOut; n++) {
+            for (index_t n=0; n<numOut; n++) {
                memcpy(out.getSampleDataRW(n),
                       in.getSampleDataRO(target[map[n]]), numComps_size);
             }
@@ -175,12 +175,12 @@ void Assemble_CopyNodalData(const NodeFile* nodes, escript::Data& out,
                 const_cast<escript::Data*>(&in)->resolve();
                 coupler->startCollect(in.getDataRO());
                 const double *recv_buffer=coupler->finishCollect();
-                const int upperBound=nodes->getNumDegreesOfFreedom();
-                const int* target = nodes->borrowTargetDegreesOfFreedom();
-                const int nNodes = nodes->numNodes;
+                const index_t upperBound=nodes->getNumDegreesOfFreedom();
+                const index_t* target = nodes->borrowTargetDegreesOfFreedom();
+                const dim_t nNodes = nodes->numNodes;
 #pragma omp parallel for
-                for (int n=0; n < nNodes; n++) {
-                    const int k=target[n];
+                for (index_t n=0; n < nNodes; n++) {
+                    const index_t k=target[n];
                     if (k < upperBound) {
                         memcpy(out.getSampleDataRW(n), in.getSampleDataRO(k),
                                numComps_size);
@@ -197,14 +197,14 @@ void Assemble_CopyNodalData(const NodeFile* nodes, escript::Data& out,
                 const_cast<escript::Data*>(&in)->resolve();
                 coupler->startCollect(in.getDataRO());
                 const double *recv_buffer=coupler->finishCollect();
-                const int upperBound=nodes->getNumDegreesOfFreedom();
-                const std::vector<int>& map = nodes->borrowReducedNodesTarget();
-                const int* target = nodes->borrowTargetDegreesOfFreedom();
-                const int mapSize = map.size();
+                const index_t upperBound=nodes->getNumDegreesOfFreedom();
+                const std::vector<index_t>& map = nodes->borrowReducedNodesTarget();
+                const index_t* target = nodes->borrowTargetDegreesOfFreedom();
+                const dim_t mapSize = map.size();
 
 #pragma omp parallel for
-                for (int n=0; n < mapSize; n++) {
-                    const int k=target[map[n]];
+                for (index_t n=0; n < mapSize; n++) {
+                    const index_t k=target[map[n]];
                     if (k < upperBound) {
                         memcpy(out.getSampleDataRW(n), in.getSampleDataRO(k),
                                numComps_size);
@@ -217,15 +217,15 @@ void Assemble_CopyNodalData(const NodeFile* nodes, escript::Data& out,
             }
         } else if (out_data_type == FINLEY_DEGREES_OF_FREEDOM) {
 #pragma omp parallel for
-            for (int n=0; n<numOut; n++) {
+            for (index_t n=0; n<numOut; n++) {
                 memcpy(out.getSampleDataRW(n), in.getSampleDataRO(n),
                        numComps_size);
             }
         } else if (out_data_type == FINLEY_REDUCED_DEGREES_OF_FREEDOM) {
-            const std::vector<int>& map = nodes->borrowReducedDegreesOfFreedomTarget();
-            const int* target = nodes->borrowTargetDegreesOfFreedom();
+            const std::vector<index_t>& map = nodes->borrowReducedDegreesOfFreedomTarget();
+            const index_t* target = nodes->borrowTargetDegreesOfFreedom();
 #pragma omp parallel for
-            for (int n=0; n<numOut; n++) {
+            for (index_t n=0; n<numOut; n++) {
                 memcpy(out.getSampleDataRW(n),
                        in.getSampleDataRO(target[map[n]]), numComps_size);
             }
@@ -241,14 +241,14 @@ void Assemble_CopyNodalData(const NodeFile* nodes, escript::Data& out,
                 const_cast<escript::Data*>(&in)->resolve();
                 coupler->startCollect(in.getDataRO());
                 out.requireWrite();
-                const int upperBound=nodes->getNumReducedDegreesOfFreedom();
-                const std::vector<int>& map=nodes->borrowReducedNodesTarget();
-                const int mapSize = map.size();
-                const int* target=nodes->borrowTargetReducedDegreesOfFreedom();
+                const index_t upperBound=nodes->getNumReducedDegreesOfFreedom();
+                const std::vector<index_t>& map=nodes->borrowReducedNodesTarget();
+                const dim_t mapSize = map.size();
+                const index_t* target=nodes->borrowTargetReducedDegreesOfFreedom();
                 const double *recv_buffer=coupler->finishCollect();
 #pragma omp parallel for
-                for (int n=0; n < mapSize; n++) {
-                    const int k=target[map[n]];
+                for (index_t n=0; n < mapSize; n++) {
+                    const index_t k=target[map[n]];
                     if (k < upperBound) {
                         memcpy(out.getSampleDataRW(n), in.getSampleDataRO(k),
                                numComps_size);
@@ -262,7 +262,7 @@ void Assemble_CopyNodalData(const NodeFile* nodes, escript::Data& out,
         } else if (out_data_type == FINLEY_REDUCED_DEGREES_OF_FREEDOM) {
             out.requireWrite();
 #pragma omp parallel for
-            for (int n=0; n<numOut; n++) {
+            for (index_t n=0; n<numOut; n++) {
                 memcpy(out.getSampleDataRW(n), in.getSampleDataRO(n), numComps_size);
             }
         } else if (out_data_type == FINLEY_DEGREES_OF_FREEDOM ) {
diff --git a/finley/src/Assemble_LumpedSystem.cpp b/finley/src/Assemble_LumpedSystem.cpp
index cb02f6f..6fe7ccc 100644
--- a/finley/src/Assemble_LumpedSystem.cpp
+++ b/finley/src/Assemble_LumpedSystem.cpp
@@ -106,7 +106,7 @@ void Assemble_LumpedSystem(const NodeFile* nodes, const ElementFile* elements,
             for (int color=elements->minColor; color<=elements->maxColor; color++) {
                 // loop over all elements:
 #pragma omp for
-                for (int e=0; e<elements->numElements; e++) {
+                for (index_t e=0; e<elements->numElements; e++) {
                     if (elements->Color[e]==color) {
                         const double *D_p=D.getSampleDataRO(e);
                         util::addScatter(1,
@@ -124,13 +124,13 @@ void Assemble_LumpedSystem(const NodeFile* nodes, const ElementFile* elements,
 #pragma omp parallel
         {
             std::vector<double> EM_lumpedMat(p.row_numShapesTotal*p.numEqu);
-            std::vector<int> row_index(p.row_numShapesTotal);
+            std::vector<index_t> row_index(p.row_numShapesTotal);
             if (p.numEqu == 1) { // single equation
                 if (expandedD) { // with expanded D
                     for (int color=elements->minColor; color<=elements->maxColor; color++) {
                         // loop over all elements:
 #pragma omp for
-                        for (int e=0; e<elements->numElements; e++) {
+                        for (index_t e=0; e<elements->numElements; e++) {
                             if (elements->Color[e]==color) {
                                 for (int isub=0; isub<p.numSub; isub++) {
                                     const double *Vol=&(p.row_jac->volume[INDEX3(0,isub,e, p.numQuadSub,p.numSub)]);
@@ -183,7 +183,7 @@ void Assemble_LumpedSystem(const NodeFile* nodes, const ElementFile* elements,
                     for (int color=elements->minColor; color<=elements->maxColor; color++) {
                         // loop over all elements:
 #pragma omp for
-                        for (int e=0; e<elements->numElements; e++) {
+                        for (index_t e=0; e<elements->numElements; e++) {
                             if (elements->Color[e]==color) {
                                 for (int isub=0; isub<p.numSub; isub++) {
                                     const double *Vol=&(p.row_jac->volume[INDEX3(0,isub,e, p.numQuadSub,p.numSub)]);
@@ -237,7 +237,7 @@ void Assemble_LumpedSystem(const NodeFile* nodes, const ElementFile* elements,
                     for (int color=elements->minColor; color<=elements->maxColor; color++) {
                         // loop over all elements:
 #pragma omp for
-                        for (int e=0; e<elements->numElements; e++) {
+                        for (index_t e=0; e<elements->numElements; e++) {
                             if (elements->Color[e]==color) {
                                 for (int isub=0; isub<p.numSub; isub++) {
                                     const double *Vol=&(p.row_jac->volume[INDEX3(0,isub,e,p.numQuadSub,p.numSub)]);
@@ -296,7 +296,7 @@ void Assemble_LumpedSystem(const NodeFile* nodes, const ElementFile* elements,
                     for (int color=elements->minColor; color<=elements->maxColor; color++) {
                         // loop over all elements:
 #pragma omp for
-                        for (int e=0; e<elements->numElements; e++) {
+                        for (index_t e=0; e<elements->numElements; e++) {
                             if (elements->Color[e]==color) {
                                 for (int isub=0; isub<p.numSub; isub++) {
                                     const double *Vol=&(p.row_jac->volume[INDEX3(0,isub,e, p.numQuadSub,p.numSub)]);
diff --git a/finley/src/Assemble_PDE_Points.cpp b/finley/src/Assemble_PDE_Points.cpp
index cb2ca2d..23f7291 100644
--- a/finley/src/Assemble_PDE_Points.cpp
+++ b/finley/src/Assemble_PDE_Points.cpp
@@ -56,9 +56,9 @@ void Assemble_PDE_Points(const AssembleParameters& p,
         for (int color=p.elements->minColor; color<=p.elements->maxColor; color++) {
             // loop over all elements
 #pragma omp for
-            for (int e=0; e<p.elements->numElements; e++) {
+            for (index_t e=0; e<p.elements->numElements; e++) {
                 if (p.elements->Color[e]==color) {
-                    int row_index=p.row_DOF[p.elements->Nodes[INDEX2(0,e,p.NN)]];
+                    index_t row_index=p.row_DOF[p.elements->Nodes[INDEX2(0,e,p.NN)]];
                     if (!y_dirac.isEmpty()) {
                         const double *y_dirac_p=y_dirac.getSampleDataRO(e);
                         util::addScatter(1, &row_index, p.numEqu,
diff --git a/finley/src/Assemble_PDE_Single_1D.cpp b/finley/src/Assemble_PDE_Single_1D.cpp
index 1dc3b5d..30f3485 100644
--- a/finley/src/Assemble_PDE_Single_1D.cpp
+++ b/finley/src/Assemble_PDE_Single_1D.cpp
@@ -69,7 +69,7 @@ void Assemble_PDE_Single_1D(const AssembleParameters& p,
         for (int color=p.elements->minColor; color<=p.elements->maxColor; color++) {
             // loop over all elements:
 #pragma omp for
-            for (int e=0; e<p.elements->numElements; e++) {
+            for (index_t e=0; e<p.elements->numElements; e++) {
                 if (p.elements->Color[e]==color) {
                     for (int isub=0; isub<p.numSub; isub++) {
                         const double *Vol=&(p.row_jac->volume[INDEX3(0,isub,e,p.numQuadSub,p.numSub)]);
@@ -238,7 +238,7 @@ void Assemble_PDE_Single_1D(const AssembleParameters& p,
                         }
                         // add the element matrices onto the matrix and
                         // right hand side
-                        std::vector<int> row_index(p.row_numShapesTotal);
+                        std::vector<index_t> row_index(p.row_numShapesTotal);
                         for (int q=0; q<p.row_numShapesTotal; q++)
                             row_index[q]=p.row_DOF[p.elements->Nodes[INDEX2(p.row_node[INDEX2(q,isub,p.row_numShapesTotal)],e,p.NN)]];
 
diff --git a/finley/src/Assemble_PDE_Single_2D.cpp b/finley/src/Assemble_PDE_Single_2D.cpp
index 10e00d9..51edfb9 100644
--- a/finley/src/Assemble_PDE_Single_2D.cpp
+++ b/finley/src/Assemble_PDE_Single_2D.cpp
@@ -66,14 +66,14 @@ void Assemble_PDE_Single_2D(const AssembleParameters& p,
 
 #pragma omp parallel
     {
-        std::vector<int> row_index(len_EM_F);
+        std::vector<index_t> row_index(len_EM_F);
         std::vector<double> EM_S(len_EM_S);
         std::vector<double> EM_F(len_EM_F);
 
         for (int color=p.elements->minColor; color<=p.elements->maxColor; color++) {
             // loop over all elements:
 #pragma omp for
-            for (int e=0; e<p.elements->numElements; e++) {
+            for (index_t e=0; e<p.elements->numElements; e++) {
                 if (p.elements->Color[e]==color) {
                     for (int isub=0; isub<p.numSub; isub++) {
                         const double *Vol=&(p.row_jac->volume[INDEX3(0,isub,e,p.numQuadSub,p.numSub)]);
diff --git a/finley/src/Assemble_PDE_Single_3D.cpp b/finley/src/Assemble_PDE_Single_3D.cpp
index 7158d52..d253384 100644
--- a/finley/src/Assemble_PDE_Single_3D.cpp
+++ b/finley/src/Assemble_PDE_Single_3D.cpp
@@ -66,14 +66,14 @@ void Assemble_PDE_Single_3D(const AssembleParameters& p,
 
 #pragma omp parallel
     {
-        std::vector<int> row_index(len_EM_F);
+        std::vector<index_t> row_index(len_EM_F);
         std::vector<double> EM_S(len_EM_S);
         std::vector<double> EM_F(len_EM_F);
 
         for (int color=p.elements->minColor; color<=p.elements->maxColor; color++) {
             // loop over all elements:
 #pragma omp for
-            for (int e=0; e<p.elements->numElements; e++) {
+            for (index_t e=0; e<p.elements->numElements; e++) {
                 if (p.elements->Color[e]==color) {
                     for (int isub=0; isub<p.numSub; isub++) {
                         const double *Vol=&(p.row_jac->volume[INDEX3(0,isub,e,p.numQuadSub,p.numSub)]);
diff --git a/finley/src/Assemble_PDE_Single_C.cpp b/finley/src/Assemble_PDE_Single_C.cpp
index 96bc07a..a24598f 100644
--- a/finley/src/Assemble_PDE_Single_C.cpp
+++ b/finley/src/Assemble_PDE_Single_C.cpp
@@ -53,14 +53,14 @@ void Assemble_PDE_Single_C(const AssembleParameters& p, const escript::Data& D,
 
 #pragma omp parallel
     {
-        std::vector<int> row_index(p.row_numShapesTotal);
+        std::vector<index_t> row_index(p.row_numShapesTotal);
         std::vector<double> EM_S(p.row_numShapesTotal*p.col_numShapesTotal);
         std::vector<double> EM_F(p.row_numShapesTotal);
 
         for (int color=p.elements->minColor; color<=p.elements->maxColor; color++) {
             // loop over all elements
 #pragma omp for
-            for (int e=0; e<p.elements->numElements; e++) {
+            for (index_t e=0; e<p.elements->numElements; e++) {
                 if (p.elements->Color[e]==color) {
                     for (int isub=0; isub<p.numSub; isub++) {
                         const double *Vol=&(p.row_jac->volume[INDEX3(0,isub,e,p.numQuadSub,p.numSub)]);
diff --git a/finley/src/Assemble_PDE_System_1D.cpp b/finley/src/Assemble_PDE_System_1D.cpp
index 243c64c..1b42afa 100644
--- a/finley/src/Assemble_PDE_System_1D.cpp
+++ b/finley/src/Assemble_PDE_System_1D.cpp
@@ -73,7 +73,7 @@ void Assemble_PDE_System_1D(const AssembleParameters& p,
         for (int color=p.elements->minColor; color<=p.elements->maxColor; color++) {
             // loop over all elements:
 #pragma omp for
-            for (int e=0; e<p.elements->numElements; e++) {
+            for (index_t e=0; e<p.elements->numElements; e++) {
                 if (p.elements->Color[e]==color) {
                     for (int isub=0; isub<p.numSub; isub++) {
                         const double *Vol=&(p.row_jac->volume[INDEX3(0,isub,e,p.numQuadSub,p.numSub)]);
@@ -287,7 +287,7 @@ void Assemble_PDE_System_1D(const AssembleParameters& p,
                         }
                         // add the element matrices onto the matrix and
                         // right hand side
-                        std::vector<int> row_index(p.row_numShapesTotal);
+                        std::vector<index_t> row_index(p.row_numShapesTotal);
                         for (int q=0; q<p.row_numShapesTotal; q++)
                             row_index[q]=p.row_DOF[p.elements->Nodes[INDEX2(p.row_node[INDEX2(q,isub,p.row_numShapesTotal)],e,p.NN)]];
 
diff --git a/finley/src/Assemble_PDE_System_2D.cpp b/finley/src/Assemble_PDE_System_2D.cpp
index 0e4bd5f..4949cd7 100644
--- a/finley/src/Assemble_PDE_System_2D.cpp
+++ b/finley/src/Assemble_PDE_System_2D.cpp
@@ -69,14 +69,14 @@ void Assemble_PDE_System_2D(const AssembleParameters& p,
 
 #pragma omp parallel
     {
-        std::vector<int> row_index(p.row_numShapesTotal);
+        std::vector<index_t> row_index(p.row_numShapesTotal);
         std::vector<double> EM_S(len_EM_S);
         std::vector<double> EM_F(len_EM_F);
 
         for (int color=p.elements->minColor; color<=p.elements->maxColor; color++) {
             // loop over all elements:
 #pragma omp for
-            for (int e=0; e<p.elements->numElements; e++) {
+            for (index_t e=0; e<p.elements->numElements; e++) {
                 if (p.elements->Color[e]==color) {
                     for (int isub=0; isub<p.numSub; isub++) {
                         const double *Vol=&(p.row_jac->volume[INDEX3(0,isub,e,p.numQuadSub,p.numSub)]);
diff --git a/finley/src/Assemble_PDE_System_3D.cpp b/finley/src/Assemble_PDE_System_3D.cpp
index 501f6d0..422b3bd 100644
--- a/finley/src/Assemble_PDE_System_3D.cpp
+++ b/finley/src/Assemble_PDE_System_3D.cpp
@@ -69,14 +69,14 @@ void Assemble_PDE_System_3D(const AssembleParameters& p,
 
 #pragma omp parallel
     {
-        std::vector<int> row_index(p.row_numShapesTotal);
+        std::vector<index_t> row_index(p.row_numShapesTotal);
         std::vector<double> EM_S(len_EM_S);
         std::vector<double> EM_F(len_EM_F);
 
         for (int color=p.elements->minColor; color<=p.elements->maxColor; color++) {
             // loop over all elements:
 #pragma omp for
-            for (int e=0; e<p.elements->numElements; e++) {
+            for (index_t e=0; e<p.elements->numElements; e++) {
                 if (p.elements->Color[e]==color) {
                     for (int isub=0; isub<p.numSub; isub++) {
                         const double *Vol=&(p.row_jac->volume[INDEX3(0,isub,e,p.numQuadSub,p.numSub)]);
diff --git a/finley/src/Assemble_PDE_System_C.cpp b/finley/src/Assemble_PDE_System_C.cpp
index f86af44..faa22ba 100644
--- a/finley/src/Assemble_PDE_System_C.cpp
+++ b/finley/src/Assemble_PDE_System_C.cpp
@@ -57,14 +57,14 @@ void Assemble_PDE_System_C(const AssembleParameters& p, const escript::Data& D,
 
 #pragma omp parallel
     {
-        std::vector<int> row_index(p.row_numShapesTotal);
+        std::vector<index_t> row_index(p.row_numShapesTotal);
         std::vector<double> EM_S(p.row_numShapesTotal*p.col_numShapesTotal*p.numEqu*p.numComp);
         std::vector<double> EM_F(p.row_numShapesTotal*p.numEqu);
 
         for (int color=p.elements->minColor; color<=p.elements->maxColor; color++) {
             // loop over all elements:
 #pragma omp for
-            for (int e=0; e<p.elements->numElements; e++) {
+            for (index_t e=0; e<p.elements->numElements; e++) {
                 if (p.elements->Color[e]==color) {
                     for (int isub=0; isub<p.numSub; isub++) {
                         const double *Vol=&(p.row_jac->volume[INDEX3(0,isub,e,p.numQuadSub,p.numSub)]);
diff --git a/finley/src/Assemble_addToSystemMatrix.cpp b/finley/src/Assemble_addToSystemMatrix.cpp
index ef17cf6..75217c6 100644
--- a/finley/src/Assemble_addToSystemMatrix.cpp
+++ b/finley/src/Assemble_addToSystemMatrix.cpp
@@ -38,25 +38,25 @@
 
 namespace finley {
 
-void Assemble_addToSystemMatrix_CSC(paso::SystemMatrix_ptr in,
-        const int NN_Equa, const int* Nodes_Equa, const int num_Equa,
-        const int NN_Sol, const int* Nodes_Sol, const int num_Sol,
-        const double* array);
+void Assemble_addToSystemMatrix_CSC(paso::SystemMatrix_ptr in, int NN_Equa,
+                                    const index_t* Nodes_Equa, int num_Equa,
+                                    int NN_Sol, const index_t* Nodes_Sol,
+                                    int num_Sol, const double* array);
 
 void Assemble_addToSystemMatrix_Trilinos(paso::SystemMatrix_ptr in,
-        const int NN_Equa, const int* Nodes_Equa, const int num_Equa,
-        const int NN_Sol, const int* Nodes_Sol, const int num_Sol,
-        const double* array);
+                        int NN_Equa, const index_t* Nodes_Equa, int num_Equa,
+                        int NN_Sol, const index_t* Nodes_Sol, int num_Sol,
+                        const double* array);
 
-void Assemble_addToSystemMatrix_CSR(paso::SystemMatrix_ptr in,
-        const int NN_Equa, const int* Nodes_Equa, const int num_Equa,
-        const int NN_Sol, const int* Nodes_Sol, const int num_Sol,
-        const double* array);
+void Assemble_addToSystemMatrix_CSR(paso::SystemMatrix_ptr in, int NN_Equa,
+                                    const index_t* Nodes_Equa, int num_Equa,
+                                    int NN_Sol, const index_t* Nodes_Sol,
+                                    int num_Sol, const double* array);
 
-void Assemble_addToSystemMatrix(paso::SystemMatrix_ptr in,
-        const int NN_Equa, const int* Nodes_Equa, const int num_Equa,
-        const int NN_Sol, const int* Nodes_Sol, const int num_Sol,
-        const double* array)
+void Assemble_addToSystemMatrix(paso::SystemMatrix_ptr in, int NN_Equa,
+                                const index_t* Nodes_Equa, int num_Equa,
+                                int NN_Sol, const index_t* Nodes_Sol,
+                                int num_Sol, const double* array)
 {
     // call the right function depending on storage type
     if (in->type & MATRIX_FORMAT_CSC) {
@@ -71,11 +71,10 @@ void Assemble_addToSystemMatrix(paso::SystemMatrix_ptr in,
     }
 }
 
-void Assemble_addToSystemMatrix_CSC(paso::SystemMatrix_ptr in,
-                                    const int NN_Equa, const int* Nodes_Equa,
-                                    const int num_Equa, const int NN_Sol,
-                                    const int* Nodes_Sol, const int num_Sol,
-                                    const double* array)
+void Assemble_addToSystemMatrix_CSC(paso::SystemMatrix_ptr in, int NN_Equa,
+                                    const index_t* Nodes_Equa, int num_Equa,
+                                    int NN_Sol, const index_t* Nodes_Sol,
+                                    int num_Sol, const double* array)
 {
     const int index_offset=(in->type & MATRIX_FORMAT_OFFSET1 ? 1:0);
     const int row_block_size=in->row_block_size;
@@ -83,37 +82,37 @@ void Assemble_addToSystemMatrix_CSC(paso::SystemMatrix_ptr in,
     const int block_size=in->block_size;
     const int num_subblocks_Equa=num_Equa/row_block_size;
     const int num_subblocks_Sol=num_Sol/col_block_size;
-    const int numMyCols=in->pattern->mainPattern->numInput;
-    const int numMyRows=in->pattern->mainPattern->numOutput;
-    const int *mainBlock_ptr=in->mainBlock->pattern->ptr;
-    const int *mainBlock_index=in->mainBlock->pattern->index;
+    const dim_t numMyCols=in->pattern->mainPattern->numInput;
+    const dim_t numMyRows=in->pattern->mainPattern->numOutput;
+    const index_t *mainBlock_ptr=in->mainBlock->pattern->ptr;
+    const index_t *mainBlock_index=in->mainBlock->pattern->index;
     double *mainBlock_val=in->mainBlock->val;
-    const int *col_coupleBlock_ptr=in->col_coupleBlock->pattern->ptr;
-    const int *col_coupleBlock_index=in->col_coupleBlock->pattern->index;
+    const index_t *col_coupleBlock_ptr=in->col_coupleBlock->pattern->ptr;
+    const index_t *col_coupleBlock_index=in->col_coupleBlock->pattern->index;
     double *col_coupleBlock_val=in->col_coupleBlock->val;
-    //const int *row_coupleBlock_ptr=in->row_coupleBlock->pattern->ptr;
-    const int *row_coupleBlock_index=in->row_coupleBlock->pattern->index;
+    //const index_t *row_coupleBlock_ptr=in->row_coupleBlock->pattern->ptr;
+    const index_t *row_coupleBlock_index=in->row_coupleBlock->pattern->index;
     double *row_coupleBlock_val=in->row_coupleBlock->val;
 
     for (int k_Sol=0; k_Sol<NN_Sol; ++k_Sol) {
         // Down columns of array
-        const int j_Sol=Nodes_Sol[k_Sol];
+        const index_t j_Sol=Nodes_Sol[k_Sol];
         for (int l_col=0; l_col<num_subblocks_Sol; ++l_col) {
-            const int i_col=j_Sol*num_subblocks_Sol+l_col;
+            const index_t i_col=j_Sol*num_subblocks_Sol+l_col;
             if (i_col < numMyCols) {
                 for (int k_Equa=0;k_Equa<NN_Equa;++k_Equa) {
                     // Across cols of array
-                    const int j_Equa=Nodes_Equa[k_Equa];
+                    const index_t j_Equa=Nodes_Equa[k_Equa];
                     for (int l_row=0; l_row<num_subblocks_Equa; ++l_row) {
-                        const int i_row=j_Equa*num_subblocks_Equa+index_offset+l_row;
+                        const index_t i_row=j_Equa*num_subblocks_Equa+index_offset+l_row;
                         if (i_row < numMyRows + index_offset ) {
-                            for (int k=mainBlock_ptr[i_col]-index_offset; k<mainBlock_ptr[i_col+1]-index_offset; ++k) {
+                            for (index_t k=mainBlock_ptr[i_col]-index_offset; k<mainBlock_ptr[i_col+1]-index_offset; ++k) {
                                 if (mainBlock_index[k]==i_row) {
                                     // Entry array(k_Equa, j_Sol) is a block (col_block_size x col_block_size)
                                     for (int ic=0; ic<col_block_size; ++ic) {
                                         const int i_Sol=ic+col_block_size*l_col;
                                         for (int ir=0; ir<row_block_size; ++ir) {
-                                            const int i_Equa=ir+row_block_size*l_row;
+                                            const index_t i_Equa=ir+row_block_size*l_row;
                                             mainBlock_val[k*block_size+ir+row_block_size*ic]+=
                                                     array[INDEX4(i_Equa,i_Sol,k_Equa,k_Sol,num_Equa,num_Sol,NN_Equa)];
                                         }
@@ -122,12 +121,12 @@ void Assemble_addToSystemMatrix_CSC(paso::SystemMatrix_ptr in,
                                 }
                             }
                         } else {
-                            for (int k=col_coupleBlock_ptr[i_col]-index_offset; k<col_coupleBlock_ptr[i_col+1]-index_offset; ++k) {
+                            for (index_t k=col_coupleBlock_ptr[i_col]-index_offset; k<col_coupleBlock_ptr[i_col+1]-index_offset; ++k) {
                                 if (row_coupleBlock_index[k] == i_row-numMyRows) {
                                     for (int ic=0; ic<col_block_size; ++ic) {
                                         const int i_Sol=ic+col_block_size*l_col;
                                         for (int ir=0; ir<row_block_size; ++ir) {
-                                            const int i_Equa=ir+row_block_size*l_row;
+                                            const index_t i_Equa=ir+row_block_size*l_row;
                                             row_coupleBlock_val[k*block_size+ir+row_block_size*ic]+=
                                                 array[INDEX4(i_Equa,i_Sol,k_Equa,k_Sol,num_Equa,num_Sol,NN_Equa)];
                                         }
@@ -141,11 +140,11 @@ void Assemble_addToSystemMatrix_CSC(paso::SystemMatrix_ptr in,
             } else { // i_col >= numMyCols
                 for (int k_Equa=0;k_Equa<NN_Equa;++k_Equa) {
                     // Across rows of array
-                    const int j_Equa=Nodes_Equa[k_Equa];
+                    const index_t j_Equa=Nodes_Equa[k_Equa];
                     for (int l_row=0; l_row<num_subblocks_Equa; ++l_row) {
-                        const int i_row=j_Equa*num_subblocks_Equa+index_offset+l_row;
+                        const index_t i_row=j_Equa*num_subblocks_Equa+index_offset+l_row;
                         if (i_row < numMyRows + index_offset ) {
-                            for (int k=col_coupleBlock_ptr[i_col-numMyCols]-index_offset; k<col_coupleBlock_ptr[i_col-numMyCols+1]-index_offset; ++k) {
+                            for (index_t k=col_coupleBlock_ptr[i_col-numMyCols]-index_offset; k<col_coupleBlock_ptr[i_col-numMyCols+1]-index_offset; ++k) {
                                 if (col_coupleBlock_index[k] == i_row) {
                                     for (int ic=0; ic<col_block_size; ++ic) {
                                         const int i_Sol=ic+col_block_size*l_col;
@@ -167,12 +166,9 @@ void Assemble_addToSystemMatrix_CSC(paso::SystemMatrix_ptr in,
 }
 
 void Assemble_addToSystemMatrix_Trilinos(paso::SystemMatrix_ptr in,
-                                         const int NN_Equa,
-                                         const int* Nodes_Equa,
-                                         const int num_Equa,
-                                         const int NN_Sol,
-                                         const int* Nodes_Sol,
-                                         const int num_Sol,
+                                         int NN_Equa, const index_t* Nodes_Equa,
+                                         int num_Equa, int NN_Sol,
+                                         const index_t* Nodes_Sol, int num_Sol,
                                          const double* array)
 {
     // FIXME: this needs to be modified
@@ -201,11 +197,10 @@ void Assemble_addToSystemMatrix_Trilinos(paso::SystemMatrix_ptr in,
 #endif
 }
 
-void Assemble_addToSystemMatrix_CSR(paso::SystemMatrix_ptr in,
-                                    const int NN_Equa, const int* Nodes_Equa,
-                                    const int num_Equa, const int NN_Sol,
-                                    const int* Nodes_Sol, const int num_Sol,
-                                    const double* array)
+void Assemble_addToSystemMatrix_CSR(paso::SystemMatrix_ptr in, int NN_Equa,
+                                    const index_t* Nodes_Equa, int num_Equa,
+                                    int NN_Sol, const index_t* Nodes_Sol,
+                                    int num_Sol, const double* array)
 {
     const int index_offset=(in->type & MATRIX_FORMAT_OFFSET1 ? 1:0);
     const int row_block_size=in->row_block_size;
@@ -213,40 +208,40 @@ void Assemble_addToSystemMatrix_CSR(paso::SystemMatrix_ptr in,
     const int block_size=in->block_size;
     const int num_subblocks_Equa=num_Equa/row_block_size;
     const int num_subblocks_Sol=num_Sol/col_block_size;
-    const int numMyCols=in->pattern->mainPattern->numInput;
-    const int numMyRows=in->pattern->mainPattern->numOutput;
-    const int *mainBlock_ptr=in->mainBlock->pattern->ptr;
-    const int *mainBlock_index=in->mainBlock->pattern->index;
+    const dim_t numMyCols=in->pattern->mainPattern->numInput;
+    const dim_t numMyRows=in->pattern->mainPattern->numOutput;
+    const index_t *mainBlock_ptr=in->mainBlock->pattern->ptr;
+    const index_t *mainBlock_index=in->mainBlock->pattern->index;
     double *mainBlock_val=in->mainBlock->val;
-    const int *col_coupleBlock_ptr=in->col_coupleBlock->pattern->ptr;
-    const int *col_coupleBlock_index=in->col_coupleBlock->pattern->index;
+    const index_t *col_coupleBlock_ptr=in->col_coupleBlock->pattern->ptr;
+    const index_t *col_coupleBlock_index=in->col_coupleBlock->pattern->index;
     double *col_coupleBlock_val=in->col_coupleBlock->val;
-    const int *row_coupleBlock_ptr=in->row_coupleBlock->pattern->ptr;
-    const int *row_coupleBlock_index=in->row_coupleBlock->pattern->index;
+    const index_t *row_coupleBlock_ptr=in->row_coupleBlock->pattern->ptr;
+    const index_t *row_coupleBlock_index=in->row_coupleBlock->pattern->index;
     double *row_coupleBlock_val=in->row_coupleBlock->val;
 
     for (int k_Equa=0; k_Equa<NN_Equa; ++k_Equa) {
         // Down columns of array
-        const int j_Equa=Nodes_Equa[k_Equa];
+        const index_t j_Equa=Nodes_Equa[k_Equa];
         for (int l_row=0; l_row<num_subblocks_Equa; ++l_row) {
-            const int i_row=j_Equa*num_subblocks_Equa+l_row;
+            const index_t i_row=j_Equa*num_subblocks_Equa+l_row;
             // only look at the matrix rows stored on this processor
             if (i_row < numMyRows) {
                 for (int k_Sol=0; k_Sol<NN_Sol; ++k_Sol) {
                     // Across rows of array
-                    const int j_Sol=Nodes_Sol[k_Sol];
+                    const index_t j_Sol=Nodes_Sol[k_Sol];
                     for (int l_col=0; l_col<num_subblocks_Sol; ++l_col) {
                         // only look at the matrix rows stored on this processor
-                        const int i_col=j_Sol*num_subblocks_Sol+index_offset+l_col;
+                        const index_t i_col=j_Sol*num_subblocks_Sol+index_offset+l_col;
                         if (i_col < numMyCols + index_offset ) {
-                            for (int k=mainBlock_ptr[i_row]-index_offset; k<mainBlock_ptr[i_row+1]-index_offset; ++k) {
+                            for (index_t k=mainBlock_ptr[i_row]-index_offset; k<mainBlock_ptr[i_row+1]-index_offset; ++k) {
                                 if (mainBlock_index[k]==i_col) {
                                     // Entry array(k_Sol, j_Equa) is a block
                                     // (row_block_size x col_block_size)
                                     for (int ic=0; ic<col_block_size; ++ic) {
                                         const int i_Sol=ic+col_block_size*l_col;
                                         for (int ir=0; ir<row_block_size; ++ir) {
-                                            const int i_Equa=ir+row_block_size*l_row;
+                                            const index_t i_Equa=ir+row_block_size*l_row;
                                             mainBlock_val[k*block_size+ir+row_block_size*ic]+=
                                                   array[INDEX4(i_Equa,i_Sol,k_Equa,k_Sol,num_Equa,num_Sol,NN_Equa)];
                                         }
@@ -255,14 +250,14 @@ void Assemble_addToSystemMatrix_CSR(paso::SystemMatrix_ptr in,
                                 }
                             }
                         } else {
-                            for (int k=col_coupleBlock_ptr[i_row]-index_offset; k<col_coupleBlock_ptr[i_row+1]-index_offset; ++k) {
+                            for (index_t k=col_coupleBlock_ptr[i_row]-index_offset; k<col_coupleBlock_ptr[i_row+1]-index_offset; ++k) {
                                 if (col_coupleBlock_index[k] == i_col-numMyCols) {
                                     // Entry array(k_Sol, j_Equa) is a block
                                     // (row_block_size x col_block_size)
                                     for (int ic=0; ic<col_block_size; ++ic) {
-                                        const int i_Sol=ic+col_block_size*l_col;
+                                        const index_t i_Sol=ic+col_block_size*l_col;
                                         for (int ir=0; ir<row_block_size; ++ir) {
-                                            const int i_Equa=ir+row_block_size*l_row;
+                                            const index_t i_Equa=ir+row_block_size*l_row;
                                             col_coupleBlock_val[k*block_size+ir+row_block_size*ic]+=
                                                   array[INDEX4(i_Equa,i_Sol,k_Equa,k_Sol,num_Equa,num_Sol,NN_Equa)];
                                         }
@@ -276,18 +271,18 @@ void Assemble_addToSystemMatrix_CSR(paso::SystemMatrix_ptr in,
             } else { // i_row >= numMyRows
                 for (int k_Sol=0; k_Sol<NN_Sol; ++k_Sol) {
                     // Across rows of array
-                    const int j_Sol=Nodes_Sol[k_Sol];
+                    const index_t j_Sol=Nodes_Sol[k_Sol];
                     for (int l_col=0; l_col<num_subblocks_Sol; ++l_col) {
-                        const int i_col=j_Sol*num_subblocks_Sol+index_offset+l_col;
+                        const index_t i_col=j_Sol*num_subblocks_Sol+index_offset+l_col;
                         if (i_col < numMyCols + index_offset ) {
-                            for (int k=row_coupleBlock_ptr[i_row-numMyRows]-index_offset; k<row_coupleBlock_ptr[i_row-numMyRows+1]-index_offset; ++k) {
+                            for (index_t k=row_coupleBlock_ptr[i_row-numMyRows]-index_offset; k<row_coupleBlock_ptr[i_row-numMyRows+1]-index_offset; ++k) {
                                 if (row_coupleBlock_index[k] == i_col) {
                                     // Entry array(k_Sol, j_Equa) is a block
                                     // (row_block_size x col_block_size)
                                     for (int ic=0; ic<col_block_size; ++ic) {
                                         const int i_Sol=ic+col_block_size*l_col;
                                         for (int ir=0; ir<row_block_size; ++ir) {
-                                            const int i_Equa=ir+row_block_size*l_row;
+                                            const index_t i_Equa=ir+row_block_size*l_row;
                                             row_coupleBlock_val[k*block_size+ir+row_block_size*ic]+=
                                                   array[INDEX4(i_Equa,i_Sol,k_Equa,k_Sol,num_Equa,num_Sol,NN_Equa)];
                                         }
diff --git a/finley/src/Assemble_getNormal.cpp b/finley/src/Assemble_getNormal.cpp
index a75f25c..a1d913c 100644
--- a/finley/src/Assemble_getNormal.cpp
+++ b/finley/src/Assemble_getNormal.cpp
@@ -73,7 +73,7 @@ void Assemble_getNormal(const NodeFile* nodes, const ElementFile* elements,
             std::vector<double> dVdv(numQuad*numDim*numDim_local);
             // open the element loop
 #pragma omp for
-            for (int e=0; e<elements->numElements; e++) {
+            for (index_t e=0; e<elements->numElements; e++) {
                 // gather local coordinates of nodes into local_X:
                 util::gather(NS, &(elements->Nodes[INDEX2(node_offset,e,NN)]),
                              numDim, nodes->Coordinates, &local_X[0]);
diff --git a/finley/src/Assemble_gradient.cpp b/finley/src/Assemble_gradient.cpp
index 12af3e3..916943c 100644
--- a/finley/src/Assemble_gradient.cpp
+++ b/finley/src/Assemble_gradient.cpp
@@ -121,12 +121,12 @@ void Assemble_gradient(const NodeFile* nodes, const ElementFile* elements,
         if (data_type==FINLEY_NODES) {
             if (numDim==1) {
 #pragma omp for
-                for (int e=0; e<elements->numElements; e++) {
+                for (index_t e=0; e<elements->numElements; e++) {
                     double *grad_data_e=grad_data.getSampleDataRW(e);
                     memset(grad_data_e, 0, localGradSize);
                     for (int isub=0; isub<numSub; isub++) {
                         for (int s=0; s<numShapes; s++) {
-                            const int n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
+                            const index_t n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
                             const double *data_array=data.getSampleDataRO(n);
                             for (int q=0; q<numQuad; q++) {
 #pragma ivdep
@@ -139,12 +139,12 @@ void Assemble_gradient(const NodeFile* nodes, const ElementFile* elements,
                 }
             } else if (numDim==2) {
 #pragma omp for
-                for (int e=0; e<elements->numElements; e++) {
+                for (index_t e=0; e<elements->numElements; e++) {
                     double *grad_data_e=grad_data.getSampleDataRW(e);
                     memset(grad_data_e, 0, localGradSize);
                     for (int isub=0; isub<numSub; isub++) {
                         for (int s=0; s<numShapes; s++) {
-                            const int n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
+                            const index_t n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
                             const double *data_array=data.getSampleDataRO(n);
                             for (int q=0; q<numQuad; q++) {
 #pragma ivdep
@@ -158,12 +158,12 @@ void Assemble_gradient(const NodeFile* nodes, const ElementFile* elements,
                 }
             } else if (numDim==3) {
 #pragma omp for
-                for (int e=0; e<elements->numElements; e++) {
+                for (index_t e=0; e<elements->numElements; e++) {
                     double *grad_data_e=grad_data.getSampleDataRW(e); 
                     memset(grad_data_e,0, localGradSize);
                     for (int isub=0; isub<numSub; isub++) {
                         for (int s=0; s<numShapes; s++) {
-                            const int n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
+                            const index_t n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
                             const double *data_array=data.getSampleDataRO(n);
                             for (int q=0; q<numQuad; q++) {
 #pragma ivdep
@@ -178,15 +178,15 @@ void Assemble_gradient(const NodeFile* nodes, const ElementFile* elements,
                 }
             }
         } else if (data_type==FINLEY_REDUCED_NODES) {
-            const int* target = nodes->borrowTargetReducedNodes();
+            const index_t* target = nodes->borrowTargetReducedNodes();
             if (numDim==1) {
 #pragma omp for
-                for (int e=0; e<elements->numElements; e++) {
+                for (index_t e=0; e<elements->numElements; e++) {
                     double *grad_data_e=grad_data.getSampleDataRW(e);
                     memset(grad_data_e, 0, localGradSize);
                     for (int isub=0; isub<numSub; isub++) {
                         for (int s=0;s<numShapes;s++) {
-                            const int n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
+                            const index_t n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
                             const double *data_array=data.getSampleDataRO(target[n]);            
                             for (int q=0; q<numQuad; q++) {
 #pragma ivdep
@@ -199,12 +199,12 @@ void Assemble_gradient(const NodeFile* nodes, const ElementFile* elements,
                 }
             } else if (numDim==2) {
 #pragma omp for
-                for (int e=0; e<elements->numElements; e++) {
+                for (index_t e=0; e<elements->numElements; e++) {
                     double *grad_data_e=grad_data.getSampleDataRW(e);
                     memset(grad_data_e, 0, localGradSize);
                     for (int isub=0; isub<numSub; isub++) {
                         for (int s=0; s<numShapes; s++) {
-                            const int n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
+                            const index_t n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
                             const double *data_array=data.getSampleDataRO(target[n]);
                             for (int q=0; q<numQuad; q++) {
 #pragma ivdep
@@ -218,12 +218,12 @@ void Assemble_gradient(const NodeFile* nodes, const ElementFile* elements,
                 }
             } else if (numDim==3) {
 #pragma omp for
-                for (int e=0;e<elements->numElements;e++) {
+                for (index_t e=0;e<elements->numElements;e++) {
                     double *grad_data_e=grad_data.getSampleDataRW(e);
                     memset(grad_data_e, 0, localGradSize);
                     for (int isub=0; isub<numSub; isub++) {
                         for (int s=0; s<numShapes; s++) {
-                            const int n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
+                            const index_t n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
                             const double *data_array=data.getSampleDataRO(target[n]);
                             for (int q=0; q<numQuad; q++) {   
 #pragma ivdep
@@ -238,15 +238,15 @@ void Assemble_gradient(const NodeFile* nodes, const ElementFile* elements,
                 }
             }
         } else if (data_type==FINLEY_DEGREES_OF_FREEDOM) {
-            const int* target = nodes->borrowTargetDegreesOfFreedom();
+            const index_t* target = nodes->borrowTargetDegreesOfFreedom();
             if (numDim==1) {
 #pragma omp for
-                for (int e=0; e<elements->numElements; e++) {
+                for (index_t e=0; e<elements->numElements; e++) {
                     double *grad_data_e=grad_data.getSampleDataRW(e);
                     memset(grad_data_e, 0, localGradSize);
                     for (int isub=0; isub<numSub; isub++) {
                         for (int s=0; s<numShapes; s++) {
-                            const int n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
+                            const index_t n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
                             const double *data_array=data.getSampleDataRO(target[n]);
                             for (int q=0; q<numQuad; q++) {
 #pragma ivdep
@@ -259,12 +259,12 @@ void Assemble_gradient(const NodeFile* nodes, const ElementFile* elements,
                 }
             } else if (numDim==2) {
 #pragma omp for
-                for (int e=0; e<elements->numElements; e++) {
+                for (index_t e=0; e<elements->numElements; e++) {
                     double *grad_data_e=grad_data.getSampleDataRW(e);
                     memset(grad_data_e, 0, localGradSize);
                     for (int isub=0; isub<numSub; isub++) {
                         for (int s=0; s<numShapes; s++) {
-                            const int n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
+                            const index_t n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
                             const double *data_array=data.getSampleDataRO(target[n]);
                             for (int q=0; q<numQuad; q++) {
 #pragma ivdep
@@ -278,12 +278,12 @@ void Assemble_gradient(const NodeFile* nodes, const ElementFile* elements,
                 }
             } else if (numDim==3) {
 #pragma omp for
-                for (int e=0; e<elements->numElements; e++) {
+                for (index_t e=0; e<elements->numElements; e++) {
                     double *grad_data_e=grad_data.getSampleDataRW(e);
                     memset(grad_data_e, 0, localGradSize);
                     for (int isub=0; isub<numSub; isub++) {
                         for (int s=0; s<numShapes; s++) {
-                            const int n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
+                            const index_t n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
                             const double *data_array=data.getSampleDataRO(target[n]);
                             for (int q=0; q<numQuad; q++) {
 #pragma ivdep
@@ -298,15 +298,15 @@ void Assemble_gradient(const NodeFile* nodes, const ElementFile* elements,
                 }
             }
         } else if (data_type==FINLEY_REDUCED_DEGREES_OF_FREEDOM) {
-            const int* target = nodes->borrowTargetReducedDegreesOfFreedom();
+            const index_t* target = nodes->borrowTargetReducedDegreesOfFreedom();
             if (numDim==1) {
 #pragma omp for
-                for (int e=0; e<elements->numElements; e++) {
+                for (index_t e=0; e<elements->numElements; e++) {
                     double *grad_data_e=grad_data.getSampleDataRW(e);
                     memset(grad_data_e,0, localGradSize);
                     for (int isub=0; isub<numSub; isub++) {
                         for (int s=0; s<numShapes; s++) {
-                            const int n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
+                            const index_t n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
                             const double *data_array=data.getSampleDataRO(target[n]);
                             for (int q=0; q<numQuad; q++) {
 #pragma ivdep
@@ -319,12 +319,12 @@ void Assemble_gradient(const NodeFile* nodes, const ElementFile* elements,
                 }
             } else if (numDim==2) {
 #pragma omp for
-                for (int e=0; e<elements->numElements; e++) {
+                for (index_t e=0; e<elements->numElements; e++) {
                     double *grad_data_e=grad_data.getSampleDataRW(e);
                     memset(grad_data_e, 0, localGradSize);
                     for (int isub=0; isub<numSub; isub++) {
                         for (int s=0; s<numShapes; s++) {
-                            const int n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
+                            const index_t n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
                             const double *data_array=data.getSampleDataRO(target[n]);
                             for (int q=0; q<numQuad; q++) {
 #pragma ivdep
@@ -339,12 +339,12 @@ void Assemble_gradient(const NodeFile* nodes, const ElementFile* elements,
 
             } else if (numDim==3) {
 #pragma omp for
-                for (int e=0; e<elements->numElements; e++) {
+                for (index_t e=0; e<elements->numElements; e++) {
                     double *grad_data_e=grad_data.getSampleDataRW(e);
                     memset(grad_data_e,0, localGradSize);
                     for (int isub=0; isub<numSub; isub++) {
                         for (int s=0; s<numShapes; s++) {
-                            const int n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
+                            const index_t n=elements->Nodes[INDEX2(nodes_selector[INDEX2(s_offset+s,isub,numShapesTotal2)],e, NN)];
                             const double *data_array=data.getSampleDataRO(target[n]);
                             for (int q=0; q<numQuad; q++) {
 #pragma ivdep
diff --git a/finley/src/Assemble_interpolate.cpp b/finley/src/Assemble_interpolate.cpp
index 96fd6fa..efdd378 100644
--- a/finley/src/Assemble_interpolate.cpp
+++ b/finley/src/Assemble_interpolate.cpp
@@ -44,8 +44,10 @@ void Assemble_interpolate(const NodeFile* nodes, const ElementFile* elements,
     const_ReferenceElement_ptr refElement(elements->referenceElementSet->
                                         borrowReferenceElement(reducedOrder));
 
-    const int *resort_nodes = NULL, *map = NULL;
-    int numSub = 0, numNodes = 0;
+    const int *resort_nodes = NULL;
+    const index_t* map = NULL;
+    int numSub = 0;
+    dim_t numNodes = 0;
     const_ShapeFunction_ptr basis;
     int dof_offset = 0;
 
@@ -132,10 +134,10 @@ void Assemble_interpolate(const NodeFile* nodes, const ElementFile* elements,
             const size_t numComps_size=numComps*sizeof(double);
             // open the element loop
 #pragma omp for
-            for (int e=0; e<elements->numElements; e++) {
+            for (index_t e=0; e<elements->numElements; e++) {
                 for (int isub=0; isub<numSub; isub++) {
                     for (int q=0; q<NS_DOF; q++) {
-                        const int i=elements->Nodes[INDEX2(resort_nodes[INDEX2(dof_offset+q,isub,numShapesTotal)],e,NN)];
+                        const index_t i=elements->Nodes[INDEX2(resort_nodes[INDEX2(dof_offset+q,isub,numShapesTotal)],e,NN)];
                         const double *data_array=data.getSampleDataRO(map[i]);
                         memcpy(&local_data[INDEX3(0,q,isub, numComps,NS_DOF)], data_array, numComps_size);
                     }
diff --git a/finley/src/Assemble_jacobians.cpp b/finley/src/Assemble_jacobians.cpp
index a1fa71d..369e0af 100644
--- a/finley/src/Assemble_jacobians.cpp
+++ b/finley/src/Assemble_jacobians.cpp
@@ -20,13 +20,13 @@
     int numQuad
     const double* QuadWeights[numQuad]
     int numShape
-    int numElements
-    int numNodes
-    const int* nodes[numNodes*numElements]  where NUMSIDES*numShape<=numNodes
+    dim_t numElements
+    dim_t numNodes
+    const index_t* nodes[numNodes*numElements]  where NUMSIDES*numShape<=numNodes
     const double* DSDv[numShape*DIM*numQuad]
     int numTest
     double* DTDv[LOCDIM*numTest*numQuad] 
-    const int* elementId[numElements]
+    const index_t* elementId[numElements]
 
   output:
     double* dTdX[DIM*numTest*NUMSIDES*numQuad*numElements]
@@ -50,14 +50,14 @@ namespace finley {
 //
 void Assemble_jacobians_1D(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId)
+                           double* dTdX, double* volume, const index_t* elementId)
 {
     const int DIM=1;
     const int LOCDIM=1;
 #pragma omp parallel for
-    for (int e=0; e<numElements; e++) {
+    for (index_t e=0; e<numElements; e++) {
         for (int q=0; q<numQuad; q++) {
             double D=0.;
             for (int s=0; s<numShape; s++) {
@@ -87,14 +87,14 @@ void Assemble_jacobians_1D(const double* coordinates, int numQuad,
 //
 void Assemble_jacobians_2D(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId)
+                           double* dTdX, double* volume, const index_t* elementId)
 {
     const int DIM=2;
     const int LOCDIM=2;
 #pragma omp parallel for
-    for (int e=0; e<numElements; e++) {
+    for (index_t e=0; e<numElements; e++) {
         for (int q=0; q<numQuad; q++) {
             double dXdv00=0.;
             double dXdv10=0.;
@@ -141,14 +141,14 @@ void Assemble_jacobians_2D(const double* coordinates, int numQuad,
 //
 void Assemble_jacobians_2D_M1D_E1D(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId)
+                           double* dTdX, double* volume, const index_t* elementId)
 {
     const int DIM=2;
     const int LOCDIM=1;
 #pragma omp parallel for
-    for (int e=0; e<numElements; e++) {
+    for (index_t e=0; e<numElements; e++) {
         for (int q=0; q<numQuad; q++) {
             double dXdv00=0.;
             double dXdv10=0.;
@@ -187,14 +187,14 @@ void Assemble_jacobians_2D_M1D_E1D(const double* coordinates, int numQuad,
 //
 void Assemble_jacobians_2D_M1D_E1D_C(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId)
+                           double* dTdX, double* volume, const index_t* elementId)
 {
     const int DIM=2;
     const int LOCDIM=1;
 #pragma omp parallel for
-    for (int e=0; e<numElements; e++) {
+    for (index_t e=0; e<numElements; e++) {
         for (int q=0; q<numQuad; q++) {
             double dXdv00_0=0;
             double dXdv10_0=0;
@@ -247,14 +247,14 @@ void Assemble_jacobians_2D_M1D_E1D_C(const double* coordinates, int numQuad,
 //
 void Assemble_jacobians_2D_M1D_E2D(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId)
+                           double* dTdX, double* volume, const index_t* elementId)
 {
     const int DIM=2;
     const int LOCDIM=2;
 #pragma omp parallel for
-    for (int e=0; e<numElements; e++) {
+    for (index_t e=0; e<numElements; e++) {
         for (int q=0; q<numQuad; q++) {
             double dXdv00=0;
             double dXdv10=0;
@@ -301,14 +301,14 @@ void Assemble_jacobians_2D_M1D_E2D(const double* coordinates, int numQuad,
 //
 void Assemble_jacobians_2D_M1D_E2D_C(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId)
+                           double* dTdX, double* volume, const index_t* elementId)
 {
     const int DIM=2;
     const int LOCDIM=2;
 #pragma omp parallel for
-    for (int e=0; e<numElements; e++) {
+    for (index_t e=0; e<numElements; e++) {
         for (int q=0; q<numQuad; q++) {
             double dXdv00_0=0;
             double dXdv10_0=0;
@@ -377,14 +377,14 @@ void Assemble_jacobians_2D_M1D_E2D_C(const double* coordinates, int numQuad,
 //
 void Assemble_jacobians_3D(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId)
+                           double* dTdX, double* volume, const index_t* elementId)
 {
     const int DIM=3;
     const int LOCDIM=3;
 #pragma omp parallel for
-    for (int e=0; e<numElements; e++) {
+    for (index_t e=0; e<numElements; e++) {
         for (int q=0; q<numQuad; q++) {
             double dXdv00=0;
             double dXdv10=0;
@@ -453,14 +453,14 @@ void Assemble_jacobians_3D(const double* coordinates, int numQuad,
 //
 void Assemble_jacobians_3D_M2D_E3D(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId)
+                           double* dTdX, double* volume, const index_t* elementId)
 {
     const int DIM=3;
     const int LOCDIM=3;
 #pragma omp parallel for
-    for (int e=0; e<numElements; e++) {
+    for (index_t e=0; e<numElements; e++) {
         for (int q=0; q<numQuad; q++) {
             double dXdv00=0;
             double dXdv10=0;
@@ -534,14 +534,14 @@ void Assemble_jacobians_3D_M2D_E3D(const double* coordinates, int numQuad,
 //
 void Assemble_jacobians_3D_M2D_E3D_C(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId)
+                           double* dTdX, double* volume, const index_t* elementId)
 {
     const int DIM=3;
     const int LOCDIM=3;
 #pragma omp parallel for
-    for (int e=0; e<numElements; e++) {
+    for (index_t e=0; e<numElements; e++) {
         for (int q=0; q<numQuad; q++) {
             double dXdv00_0=0;
             double dXdv10_0=0;
@@ -667,14 +667,14 @@ void Assemble_jacobians_3D_M2D_E3D_C(const double* coordinates, int numQuad,
 //
 void Assemble_jacobians_3D_M2D_E2D(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId)
+                           double* dTdX, double* volume, const index_t* elementId)
 {
     const int DIM=3;
     const int LOCDIM=2;
 #pragma omp parallel for
-    for (int e=0; e<numElements; e++) {
+    for (index_t e=0; e<numElements; e++) {
         for (int q=0; q<numQuad; q++) {
             double dXdv00=0;
             double dXdv10=0;
@@ -734,14 +734,14 @@ void Assemble_jacobians_3D_M2D_E2D(const double* coordinates, int numQuad,
 //
 void Assemble_jacobians_3D_M2D_E2D_C(const double* coordinates, int numQuad,
                            const double* QuadWeights, int numShape,
-                           int numElements, int numNodes, const int* nodes,
+                           dim_t numElements, dim_t numNodes, const index_t* nodes,
                            const double* DSDv, int numTest, const double* DTDv,
-                           double* dTdX, double* volume, const int* elementId)
+                           double* dTdX, double* volume, const index_t* elementId)
 {
     const int DIM=3;
     const int LOCDIM=2;
 #pragma omp parallel for
-    for (int e=0; e<numElements; e++) {
+    for (index_t e=0; e<numElements; e++) {
         for (int q=0; q<numQuad; q++) {
             double dXdv00_0=0;
             double dXdv10_0=0;
diff --git a/finley/src/CPPAdapter/MeshAdapter.cpp b/finley/src/CPPAdapter/MeshAdapter.cpp
index 46be4c8..3e03052 100644
--- a/finley/src/CPPAdapter/MeshAdapter.cpp
+++ b/finley/src/CPPAdapter/MeshAdapter.cpp
@@ -15,7 +15,7 @@
 *****************************************************************************/
 
 #define ESNEEDPYTHON
-#include "esysUtils/first.h"
+#include <esysUtils/first.h>
 
 #include <pasowrap/PasoException.h>
 #include <pasowrap/TransportProblemAdapter.h>
@@ -125,16 +125,22 @@ void MeshAdapter::dump(const string& fileName) const
     const NcDim* ncdims[12] = {NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL};
     NcVar *ids;
     int *int_ptr;
+    index_t *index_ptr;
+#ifdef ESYS_INDEXTYPE_LONG
+    NcType ncIdxType = ncLong;
+#else
+    NcType ncIdxType = ncInt;
+#endif
     Mesh *mesh = m_finleyMesh.get();
     int num_Tags = 0;
     int mpi_size                         = mesh->MPIInfo->size;
     int mpi_rank                         = mesh->MPIInfo->rank;
     int numDim                           = mesh->Nodes->numDim;
-    int numNodes                         = mesh->Nodes->numNodes;
-    int num_Elements                     = mesh->Elements->numElements;
-    int num_FaceElements                 = mesh->FaceElements->numElements;
-    int num_ContactElements              = mesh->ContactElements->numElements;
-    int num_Points                       = mesh->Points->numElements;
+    dim_t numNodes                       = mesh->Nodes->numNodes;
+    dim_t num_Elements                   = mesh->Elements->numElements;
+    dim_t num_FaceElements               = mesh->FaceElements->numElements;
+    dim_t num_ContactElements            = mesh->ContactElements->numElements;
+    dim_t num_Points                     = mesh->Points->numElements;
     int num_Elements_numNodes            = mesh->Elements->numNodes;
     int num_FaceElements_numNodes        = mesh->FaceElements->numNodes;
     int num_ContactElements_numNodes     = mesh->ContactElements->numNodes;
@@ -198,6 +204,8 @@ void MeshAdapter::dump(const string& fileName) const
             throw FinleyAdapterException(msgPrefix+"add_dim(dim_Tags)");
 
     // Attributes: MPI size, MPI rank, Name, order, reduced_order
+    if (!dataFile.add_att("index_size", (int)sizeof(index_t)))
+        throw FinleyAdapterException(msgPrefix+"add_att(index_size)");
     if (!dataFile.add_att("mpi_size", mpi_size))
         throw FinleyAdapterException(msgPrefix+"add_att(mpi_size)");
     if (!dataFile.add_att("mpi_rank", mpi_rank))
@@ -240,216 +248,194 @@ void MeshAdapter::dump(const string& fileName) const
     // // // // // Nodes // // // // //
 
     // Nodes nodeDistribution
-    if (! (ids = dataFile.add_var("Nodes_NodeDistribution", ncInt, ncdims[2])) )
+    if (! (ids = dataFile.add_var("Nodes_NodeDistribution", ncIdxType, ncdims[2])) )
         throw FinleyAdapterException(msgPrefix+"add_var(Nodes_NodeDistribution)");
-    int_ptr = &mesh->Nodes->nodesDistribution->first_component[0];
-    if (! (ids->put(int_ptr, mpi_size+1)) )
+    index_ptr = &mesh->Nodes->nodesDistribution->first_component[0];
+    if (! (ids->put(index_ptr, mpi_size+1)) )
         throw FinleyAdapterException(msgPrefix+"put(Nodes_NodeDistribution)");
 
     // Nodes degreesOfFreedomDistribution
-    if (! ( ids = dataFile.add_var("Nodes_DofDistribution", ncInt, ncdims[2])) )
+    if (! ( ids = dataFile.add_var("Nodes_DofDistribution", ncIdxType, ncdims[2])) )
         throw FinleyAdapterException(msgPrefix+"add_var(Nodes_DofDistribution)");
-    int_ptr = &mesh->Nodes->degreesOfFreedomDistribution->first_component[0];
-    if (! (ids->put(int_ptr, mpi_size+1)) )
+    index_ptr = &mesh->Nodes->degreesOfFreedomDistribution->first_component[0];
+    if (! (ids->put(index_ptr, mpi_size+1)) )
         throw FinleyAdapterException(msgPrefix+"put(Nodes_DofDistribution)");
 
     // Only write nodes if non-empty because NetCDF doesn't like empty arrays
     // (it treats them as NC_UNLIMITED)
-    if (numNodes>0) {
+    if (numNodes > 0) {
         // Nodes Id
-        if (! ( ids = dataFile.add_var("Nodes_Id", ncInt, ncdims[0])) )
+        if (! ( ids = dataFile.add_var("Nodes_Id", ncIdxType, ncdims[0])) )
             throw FinleyAdapterException(msgPrefix+"add_var(Nodes_Id)");
-        int_ptr = &mesh->Nodes->Id[0];
-        if (! (ids->put(int_ptr, numNodes)) )
+        if (! (ids->put(&mesh->Nodes->Id[0], numNodes)) )
             throw FinleyAdapterException(msgPrefix+"put(Nodes_Id)");
 
         // Nodes Tag
         if (! ( ids = dataFile.add_var("Nodes_Tag", ncInt, ncdims[0])) )
             throw FinleyAdapterException(msgPrefix+"add_var(Nodes_Tag)");
-        int_ptr = &mesh->Nodes->Tag[0];
-        if (! (ids->put(int_ptr, numNodes)) )
+        if (! (ids->put(&mesh->Nodes->Tag[0], numNodes)) )
             throw FinleyAdapterException(msgPrefix+"put(Nodes_Tag)");
 
         // Nodes gDOF
-        if (! ( ids = dataFile.add_var("Nodes_gDOF", ncInt, ncdims[0])) )
+        if (! ( ids = dataFile.add_var("Nodes_gDOF", ncIdxType, ncdims[0])) )
             throw FinleyAdapterException(msgPrefix+"add_var(Nodes_gDOF)");
-        int_ptr = &mesh->Nodes->globalDegreesOfFreedom[0];
-        if (! (ids->put(int_ptr, numNodes)) )
+        if (! (ids->put(&mesh->Nodes->globalDegreesOfFreedom[0], numNodes)) )
             throw FinleyAdapterException(msgPrefix+"put(Nodes_gDOF)");
 
         // Nodes global node index
-        if (! ( ids = dataFile.add_var("Nodes_gNI", ncInt, ncdims[0])) )
+        if (! ( ids = dataFile.add_var("Nodes_gNI", ncIdxType, ncdims[0])) )
             throw FinleyAdapterException(msgPrefix+"add_var(Nodes_gNI)");
-        int_ptr = &mesh->Nodes->globalNodesIndex[0];
-        if (! (ids->put(int_ptr, numNodes)) )
+        if (! (ids->put(&mesh->Nodes->globalNodesIndex[0], numNodes)) )
             throw FinleyAdapterException(msgPrefix+"put(Nodes_gNI)");
 
         // Nodes grDof
-        if (! ( ids = dataFile.add_var("Nodes_grDfI", ncInt, ncdims[0])) )
+        if (! ( ids = dataFile.add_var("Nodes_grDfI", ncIdxType, ncdims[0])) )
             throw FinleyAdapterException(msgPrefix+"add_var(Nodes_grDfI)");
-        int_ptr = &mesh->Nodes->globalReducedDOFIndex[0];
-        if (! (ids->put(int_ptr, numNodes)) )
+        if (! (ids->put(&mesh->Nodes->globalReducedDOFIndex[0], numNodes)) )
             throw FinleyAdapterException(msgPrefix+"put(Nodes_grDfI)");
 
         // Nodes grNI
-        if (! ( ids = dataFile.add_var("Nodes_grNI", ncInt, ncdims[0])) )
+        if (! ( ids = dataFile.add_var("Nodes_grNI", ncIdxType, ncdims[0])) )
             throw FinleyAdapterException(msgPrefix+"add_var(Nodes_grNI)");
-        int_ptr = &mesh->Nodes->globalReducedNodesIndex[0];
-        if (! (ids->put(int_ptr, numNodes)) )
+        if (! (ids->put(&mesh->Nodes->globalReducedNodesIndex[0], numNodes)) )
             throw FinleyAdapterException(msgPrefix+"put(Nodes_grNI)");
 
         // Nodes Coordinates
         if (! ( ids = dataFile.add_var("Nodes_Coordinates", ncDouble, ncdims[0], ncdims[1]) ) )
             throw FinleyAdapterException(msgPrefix+"add_var(Nodes_Coordinates)");
-        if (! (ids->put(&(mesh->Nodes->Coordinates[INDEX2(0,0,numDim)]), numNodes, numDim)) )
+        if (! (ids->put(&mesh->Nodes->Coordinates[INDEX2(0,0,numDim)], numNodes, numDim)) )
             throw FinleyAdapterException(msgPrefix+"put(Nodes_Coordinates)");
     }
 
     // // // // // Elements // // // // //
-    if (num_Elements>0) {
+    if (num_Elements > 0) {
         // Elements_Id
-        if (! ( ids = dataFile.add_var("Elements_Id", ncInt, ncdims[3])) )
+        if (! ( ids = dataFile.add_var("Elements_Id", ncIdxType, ncdims[3])) )
             throw FinleyAdapterException(msgPrefix+"add_var(Elements_Id)");
-        int_ptr = &mesh->Elements->Id[0];
-        if (! (ids->put(int_ptr, num_Elements)) )
+        if (! (ids->put(&mesh->Elements->Id[0], num_Elements)) )
             throw FinleyAdapterException(msgPrefix+"put(Elements_Id)");
 
         // Elements_Tag
         if (! ( ids = dataFile.add_var("Elements_Tag", ncInt, ncdims[3])) )
             throw FinleyAdapterException(msgPrefix+"add_var(Elements_Tag)");
-        int_ptr = &mesh->Elements->Tag[0];
-        if (! (ids->put(int_ptr, num_Elements)) )
+        if (! (ids->put(&mesh->Elements->Tag[0], num_Elements)) )
             throw FinleyAdapterException(msgPrefix+"put(Elements_Tag)");
 
         // Elements_Owner
         if (! ( ids = dataFile.add_var("Elements_Owner", ncInt, ncdims[3])) )
             throw FinleyAdapterException(msgPrefix+"add_var(Elements_Owner)");
-        int_ptr = &mesh->Elements->Owner[0];
-        if (! (ids->put(int_ptr, num_Elements)) )
+        if (! (ids->put(&mesh->Elements->Owner[0], num_Elements)) )
             throw FinleyAdapterException(msgPrefix+"put(Elements_Owner)");
 
         // Elements_Color
         if (! ( ids = dataFile.add_var("Elements_Color", ncInt, ncdims[3])) )
             throw FinleyAdapterException(msgPrefix+"add_var(Elements_Color)");
-        int_ptr = &mesh->Elements->Color[0];
-        if (! (ids->put(int_ptr, num_Elements)) )
+        if (! (ids->put(&mesh->Elements->Color[0], num_Elements)) )
             throw FinleyAdapterException(msgPrefix+"put(Elements_Color)");
 
         // Elements_Nodes
-        if (! ( ids = dataFile.add_var("Elements_Nodes", ncInt, ncdims[3], ncdims[7]) ) )
+        if (! ( ids = dataFile.add_var("Elements_Nodes", ncIdxType, ncdims[3], ncdims[7]) ) )
             throw FinleyAdapterException(msgPrefix+"add_var(Elements_Nodes)");
-        if (! (ids->put(&(mesh->Elements->Nodes[0]), num_Elements, num_Elements_numNodes)) )
+        if (! (ids->put(&mesh->Elements->Nodes[0], num_Elements, num_Elements_numNodes)) )
             throw FinleyAdapterException(msgPrefix+"put(Elements_Nodes)");
     }
 
     // // // // // Face_Elements // // // // //
-    if (num_FaceElements>0) {
+    if (num_FaceElements > 0) {
         // FaceElements_Id
-        if (! ( ids = dataFile.add_var("FaceElements_Id", ncInt, ncdims[4])) )
+        if (! ( ids = dataFile.add_var("FaceElements_Id", ncIdxType, ncdims[4])) )
             throw FinleyAdapterException(msgPrefix+"add_var(FaceElements_Id)");
-        int_ptr = &mesh->FaceElements->Id[0];
-        if (! (ids->put(int_ptr, num_FaceElements)) )
+        if (! (ids->put(&mesh->FaceElements->Id[0], num_FaceElements)) )
             throw FinleyAdapterException(msgPrefix+"put(FaceElements_Id)");
 
         // FaceElements_Tag
         if (! ( ids = dataFile.add_var("FaceElements_Tag", ncInt, ncdims[4])) )
             throw FinleyAdapterException(msgPrefix+"add_var(FaceElements_Tag)");
-        int_ptr = &mesh->FaceElements->Tag[0];
-        if (! (ids->put(int_ptr, num_FaceElements)) )
+        if (! (ids->put(&mesh->FaceElements->Tag[0], num_FaceElements)) )
             throw FinleyAdapterException(msgPrefix+"put(FaceElements_Tag)");
 
         // FaceElements_Owner
         if (! ( ids = dataFile.add_var("FaceElements_Owner", ncInt, ncdims[4])) )
             throw FinleyAdapterException(msgPrefix+"add_var(FaceElements_Owner)");
-        int_ptr = &mesh->FaceElements->Owner[0];
-        if (! (ids->put(int_ptr, num_FaceElements)) )
+        if (! (ids->put(&mesh->FaceElements->Owner[0], num_FaceElements)) )
             throw FinleyAdapterException(msgPrefix+"put(FaceElements_Owner)");
 
         // FaceElements_Color
         if (! ( ids = dataFile.add_var("FaceElements_Color", ncInt, ncdims[4])) )
             throw FinleyAdapterException(msgPrefix+"add_var(FaceElements_Color)");
-        int_ptr = &mesh->FaceElements->Color[0];
-        if (! (ids->put(int_ptr, num_FaceElements)) )
+        if (! (ids->put(&mesh->FaceElements->Color[0], num_FaceElements)) )
             throw FinleyAdapterException(msgPrefix+"put(FaceElements_Color)");
 
         // FaceElements_Nodes
-        if (! ( ids = dataFile.add_var("FaceElements_Nodes", ncInt, ncdims[4], ncdims[8]) ) )
+        if (! ( ids = dataFile.add_var("FaceElements_Nodes", ncIdxType, ncdims[4], ncdims[8]) ) )
             throw FinleyAdapterException(msgPrefix+"add_var(FaceElements_Nodes)");
-        if (! (ids->put(&(mesh->FaceElements->Nodes[0]), num_FaceElements, num_FaceElements_numNodes)) )
+        if (! (ids->put(&mesh->FaceElements->Nodes[0], num_FaceElements, num_FaceElements_numNodes)) )
             throw FinleyAdapterException(msgPrefix+"put(FaceElements_Nodes)");
     }
 
     // // // // // Contact_Elements // // // // //
-    if (num_ContactElements>0) {
+    if (num_ContactElements > 0) {
 
         // ContactElements_Id
-        if (! ( ids = dataFile.add_var("ContactElements_Id", ncInt, ncdims[5])) )
+        if (! ( ids = dataFile.add_var("ContactElements_Id", ncIdxType, ncdims[5])) )
             throw FinleyAdapterException(msgPrefix+"add_var(ContactElements_Id)");
-        int_ptr = &mesh->ContactElements->Id[0];
-        if (! (ids->put(int_ptr, num_ContactElements)) )
+        if (! (ids->put(&mesh->ContactElements->Id[0], num_ContactElements)) )
             throw FinleyAdapterException(msgPrefix+"put(ContactElements_Id)");
 
         // ContactElements_Tag
         if (! ( ids = dataFile.add_var("ContactElements_Tag", ncInt, ncdims[5])) )
             throw FinleyAdapterException(msgPrefix+"add_var(ContactElements_Tag)");
-        int_ptr = &mesh->ContactElements->Tag[0];
-        if (! (ids->put(int_ptr, num_ContactElements)) )
+        if (! (ids->put(&mesh->ContactElements->Tag[0], num_ContactElements)) )
             throw FinleyAdapterException(msgPrefix+"put(ContactElements_Tag)");
 
         // ContactElements_Owner
         if (! ( ids = dataFile.add_var("ContactElements_Owner", ncInt, ncdims[5])) )
             throw FinleyAdapterException(msgPrefix+"add_var(ContactElements_Owner)");
-        int_ptr = &mesh->ContactElements->Owner[0];
-        if (! (ids->put(int_ptr, num_ContactElements)) )
+        if (! (ids->put(&mesh->ContactElements->Owner[0], num_ContactElements)) )
             throw FinleyAdapterException(msgPrefix+"put(ContactElements_Owner)");
 
         // ContactElements_Color
         if (! ( ids = dataFile.add_var("ContactElements_Color", ncInt, ncdims[5])) )
             throw FinleyAdapterException(msgPrefix+"add_var(ContactElements_Color)");
-        int_ptr = &mesh->ContactElements->Color[0];
-        if (! (ids->put(int_ptr, num_ContactElements)) )
+        if (! (ids->put(&mesh->ContactElements->Color[0], num_ContactElements)) )
             throw FinleyAdapterException(msgPrefix+"put(ContactElements_Color)");
 
         // ContactElements_Nodes
-        if (! ( ids = dataFile.add_var("ContactElements_Nodes", ncInt, ncdims[5], ncdims[9]) ) )
+        if (! ( ids = dataFile.add_var("ContactElements_Nodes", ncIdxType, ncdims[5], ncdims[9]) ) )
             throw FinleyAdapterException(msgPrefix+"add_var(ContactElements_Nodes)");
-        if (! (ids->put(&(mesh->ContactElements->Nodes[0]), num_ContactElements, num_ContactElements_numNodes)) )
+        if (! (ids->put(&mesh->ContactElements->Nodes[0], num_ContactElements, num_ContactElements_numNodes)) )
             throw FinleyAdapterException(msgPrefix+"put(ContactElements_Nodes)");
     }
 
     // // // // // Points // // // // //
-    if (num_Points>0) {
+    if (num_Points > 0) {
         // Points_Id
-        if (! ( ids = dataFile.add_var("Points_Id", ncInt, ncdims[6])) )
+        if (! ( ids = dataFile.add_var("Points_Id", ncIdxType, ncdims[6])) )
             throw FinleyAdapterException(msgPrefix+"add_var(Points_Id)");
-        int_ptr = &mesh->Points->Id[0];
-        if (! (ids->put(int_ptr, num_Points)) )
+        if (! (ids->put(&mesh->Points->Id[0], num_Points)) )
             throw FinleyAdapterException(msgPrefix+"put(Points_Id)");
 
         // Points_Tag
         if (! ( ids = dataFile.add_var("Points_Tag", ncInt, ncdims[6])) )
             throw FinleyAdapterException(msgPrefix+"add_var(Points_Tag)");
-        int_ptr = &mesh->Points->Tag[0];
-        if (! (ids->put(int_ptr, num_Points)) )
+        if (! (ids->put(&mesh->Points->Tag[0], num_Points)) )
             throw FinleyAdapterException(msgPrefix+"put(Points_Tag)");
 
         // Points_Owner
         if (! ( ids = dataFile.add_var("Points_Owner", ncInt, ncdims[6])) )
             throw FinleyAdapterException(msgPrefix+"add_var(Points_Owner)");
-        int_ptr = &mesh->Points->Owner[0];
-        if (! (ids->put(int_ptr, num_Points)) )
+        if (! (ids->put(&mesh->Points->Owner[0], num_Points)) )
             throw FinleyAdapterException(msgPrefix+"put(Points_Owner)");
 
         // Points_Color
         if (! ( ids = dataFile.add_var("Points_Color", ncInt, ncdims[6])) )
             throw FinleyAdapterException(msgPrefix+"add_var(Points_Color)");
-        int_ptr = &mesh->Points->Color[0];
-        if (! (ids->put(int_ptr, num_Points)) )
+        if (! (ids->put(&mesh->Points->Color[0], num_Points)) )
             throw FinleyAdapterException(msgPrefix+"put(Points_Color)");
 
         // Points_Nodes
         // mesh->Nodes->Id[mesh->Points->Nodes[INDEX2(0,i,1)]]
-        if (! ( ids = dataFile.add_var("Points_Nodes", ncInt, ncdims[6]) ) )
+        if (! ( ids = dataFile.add_var("Points_Nodes", ncIdxType, ncdims[6]) ) )
             throw FinleyAdapterException(msgPrefix+"add_var(Points_Nodes)");
         if (! (ids->put(&(mesh->Points->Nodes[0]), num_Points)) )
             throw FinleyAdapterException(msgPrefix+"put(Points_Nodes)");
@@ -632,7 +618,7 @@ int MeshAdapter::getDim() const
 //
 // Return the number of data points summed across all MPI processes
 //
-int MeshAdapter::getNumDataPointsGlobal() const
+dim_t MeshAdapter::getNumDataPointsGlobal() const
 {
     return m_finleyMesh.get()->Nodes->getGlobalNumNodes();
 }
@@ -641,10 +627,10 @@ int MeshAdapter::getNumDataPointsGlobal() const
 // return the number of data points per sample and the number of samples
 // needed to represent data on a parts of the mesh.
 //
-pair<int,int> MeshAdapter::getDataShape(int functionSpaceCode) const
+pair<int,dim_t> MeshAdapter::getDataShape(int functionSpaceCode) const
 {
     int numDataPointsPerSample=0;
-    int numSamples=0;
+    dim_t numSamples=0;
     Mesh* mesh=m_finleyMesh.get();
     switch (functionSpaceCode) {
         case Nodes:
@@ -1390,8 +1376,8 @@ bool MeshAdapter::ownSample(int fs_code, index_t id) const
 {
     if (getMPISize() > 1) {
 #ifdef ESYS_MPI
-        int myFirstNode=0, myLastNode=0, k=0;
-        int* globalNodeIndex=0;
+        index_t myFirstNode=0, myLastNode=0;
+        index_t* globalNodeIndex = NULL;
         Mesh* mesh_p=m_finleyMesh.get();
         /*
          * this method is only used by saveDataCSV which would use the returned
@@ -1410,8 +1396,8 @@ bool MeshAdapter::ownSample(int fs_code, index_t id) const
             throw FinleyAdapterException("Unsupported function space type for ownSample()");
         }
 
-        k=globalNodeIndex[id];
-        return ((myFirstNode <= k) && (k < myLastNode));
+        const index_t k = globalNodeIndex[id];
+        return (myFirstNode <= k && k < myLastNode);
 #endif
     }
     return true;
@@ -1837,7 +1823,7 @@ bool MeshAdapter::operator!=(const escript::AbstractDomain& other) const
     return !(operator==(other));
 }
 
-int MeshAdapter::getSystemMatrixTypeId(const bp::object& options) const
+int MeshAdapter::getSystemMatrixTypeId(const boost::python::object& options) const
 {
     const escript::SolverBuddy& sb = bp::extract<escript::SolverBuddy>(options);
 
@@ -1868,9 +1854,9 @@ escript::Data MeshAdapter::getSize() const
     return escript::function(*this).getSize();
 }
 
-const int* MeshAdapter::borrowSampleReferenceIDs(int functionSpaceType) const
+const index_t* MeshAdapter::borrowSampleReferenceIDs(int functionSpaceType) const
 {
-    int *out = NULL;
+    index_t *out = NULL;
     Mesh* mesh=m_finleyMesh.get();
     switch (functionSpaceType) {
         case Nodes:
@@ -1910,7 +1896,7 @@ const int* MeshAdapter::borrowSampleReferenceIDs(int functionSpaceType) const
     }
     return out;
 }
-int MeshAdapter::getTagFromSampleNo(int functionSpaceType, int sampleNo) const
+int MeshAdapter::getTagFromSampleNo(int functionSpaceType, index_t sampleNo) const
 {
     int out=0;
     Mesh* mesh=m_finleyMesh.get();
@@ -2175,7 +2161,8 @@ bool MeshAdapter::supportsContactElements() const
 }
 
 escript::Data MeshAdapter::randomFill(const escript::DataTypes::ShapeType& shape,
-       const escript::FunctionSpace& what, long seed, const bp::tuple& filter) const
+       const escript::FunctionSpace& what, long seed,
+       const boost::python::tuple& filter) const
 {
     escript::Data towipe(0, shape, what, true);
     // since we just made this object, no sharing is possible and we don't need to check for
@@ -2198,7 +2185,8 @@ void MeshAdapter::addDiracPoints(const vector<double>& points,
 
     if ( points.size() % dim != 0 ) {
         char err[200];
-        sprintf(err,"Error - number of coords in diractags is %ld this should be a multiple of the specified dimension:%d.",points.size(),dim);
+        unsigned long size = points.size();
+        sprintf(err,"Error - number of coords in diractags is %lu this should be a multiple of the specified dimension:%d.",size,dim);
         throw FinleyAdapterException(err);
     }
 
diff --git a/finley/src/CPPAdapter/MeshAdapter.h b/finley/src/CPPAdapter/MeshAdapter.h
index fc99178..2664651 100644
--- a/finley/src/CPPAdapter/MeshAdapter.h
+++ b/finley/src/CPPAdapter/MeshAdapter.h
@@ -45,33 +45,28 @@ namespace finley {
 // They are only fwd declared here so that vis.studio will accept the friend
 // decls
 FINLEY_DLL_API
-escript::Domain_ptr brick(esysUtils::JMPI& p,int n0, int n1, int n2, int order,
-                    double l0, double l1, double l2,
-                    bool periodic0, bool periodic1, bool periodic2,
-                    int integrationOrder, int reducedIntegrationOrder,
-                    bool useElementsOnFace, bool useFullElementOrder,
-                    bool optimize, const std::vector<double>& points,
-                    const std::vector<int>& tags,
-                    const std::map<std::string, int>& tagnamestonums
-		    );
-                    
+escript::Domain_ptr brick(esysUtils::JMPI& p, dim_t n0, dim_t n1, dim_t n2,
+                          int order, double l0, double l1, double l2,
+                          bool periodic0, bool periodic1, bool periodic2,
+                          int integrationOrder, int reducedIntegrationOrder,
+                          bool useElementsOnFace, bool useFullElementOrder,
+                          bool optimize, const std::vector<double>& points,
+                          const std::vector<int>& tags,
+                          const std::map<std::string, int>& tagNamesToNums
+                    );
+
 FINLEY_DLL_API              
-escript::Domain_ptr rectangle(esysUtils::JMPI& p, int n0, int n1, int order,
-                        double l0, double l1,
-                        bool periodic0, bool periodic1,
-                        int integrationOrder, int reducedIntegrationOrder,
-                        bool useElementsOnFace, bool useFullElementOrder,
-                        bool optimize, const std::vector<double>& points,
-                        const std::vector<int>& tags,
-                        const std::map<std::string, int>& tagnamestonums
- 			    );        
+escript::Domain_ptr rectangle(esysUtils::JMPI& p, dim_t n0, dim_t n1,
+                              int order, double l0, double l1,
+                              bool periodic0, bool periodic1,
+                              int integrationOrder, int reducedIntegrationOrder,
+                              bool useElementsOnFace, bool useFullElementOrder,
+                              bool optimize, const std::vector<double>& points,
+                              const std::vector<int>& tags,
+                              const std::map<std::string, int>& tagNamesToNums
+                    );        
   
-struct null_deleter
-{
-  void operator()(void const *ptr) const
-  {
-  }
-};
+struct null_deleter { void operator()(void const *ptr) const {} };
 
 
 /**
@@ -115,7 +110,7 @@ public:
                                Finley_Mesh_free in the MeshAdapter 
                                destructor.
   */
-  MeshAdapter(Mesh* finleyMesh=0);
+  MeshAdapter(Mesh* finleyMesh=NULL);
 
   /**
      \brief
@@ -170,7 +165,7 @@ public:
      \brief
      \param full
   */
-  void Print_Mesh_Info(const bool full=false) const;
+  void Print_Mesh_Info(bool full=false) const;
 
   /**
      \brief
@@ -191,14 +186,14 @@ public:
      \param functionSpaceType Input - The function space type.
      \param sampleNo Input - The sample number.
   */
-  int getTagFromSampleNo(int functionSpaceType, int sampleNo) const;
+  int getTagFromSampleNo(int functionSpaceType, index_t sampleNo) const;
 
   /**
      \brief
      Return the reference number of  the given sample number.
      \param functionSpaceType Input - The function space type.
   */
-  const int* borrowSampleReferenceIDs(int functionSpaceType) const;
+  const index_t* borrowSampleReferenceIDs(int functionSpaceType) const;
 
   /**
      \brief
@@ -328,14 +323,14 @@ public:
      \brief
      Return the number of data points summed across all MPI processes
   */
-  virtual int getNumDataPointsGlobal() const;
+  virtual dim_t getNumDataPointsGlobal() const;
 
   /**
      \brief
      Return the number of data points per sample, and the number of samples as a pair.
      \param functionSpaceCode Input -
   */
-  virtual std::pair<int,int> getDataShape(int functionSpaceCode) const;
+  virtual std::pair<int,dim_t> getDataShape(int functionSpaceCode) const;
 
   /**
      \brief
@@ -382,21 +377,18 @@ public:
      \brief
      interpolates data given on source onto target where source and target have to be given on the same domain.
   */
-  virtual void interpolateOnDomain(escript::Data& target,const escript::Data& source) const;
+  virtual void interpolateOnDomain(escript::Data& target, const escript::Data& source) const;
 
-
-  virtual bool probeInterpolationOnDomain(int functionSpaceType_source,int functionSpaceType_target) const;
+  virtual bool probeInterpolationOnDomain(int functionSpaceType_source, int functionSpaceType_target) const;
   
-  virtual signed char preferredInterpolationOnDomain(int functionSpaceType_source,int functionSpaceType_target) const;
+  virtual signed char preferredInterpolationOnDomain(int functionSpaceType_source, int functionSpaceType_target) const;
   
   
-
   /**
     \brief given a vector of FunctionSpace typecodes, pass back a code which then can all be interpolated to.
     \return true is result is valid, false if not
   */
-  bool
-  commonFunctionSpace(const std::vector<int>& fs, int& resultcode) const;
+  bool commonFunctionSpace(const std::vector<int>& fs, int& resultcode) const;
 
   /**
      \brief
@@ -409,7 +401,7 @@ public:
   \brief determines whether interpolation from source to target is possible.
   Must be implemented by the actual Domain adapter
   */
-  virtual bool probeInterpolationAcross(int functionSpaceType_source,const escript::AbstractDomain& targetDomain, int functionSpaceType_target) const;
+  virtual bool probeInterpolationAcross(int functionSpaceType_source, const escript::AbstractDomain& targetDomain, int functionSpaceType_target) const;
 
   /**
      \brief
@@ -430,14 +422,14 @@ public:
      copies the gradient of arg into grad. The actual function space to be considered
      for the gradient is defined by grad. arg and grad have to be defined on this.
   */
-  virtual void setToGradient(escript::Data& grad,const escript::Data& arg) const;
+  virtual void setToGradient(escript::Data& grad, const escript::Data& arg) const;
 
   /**
      \brief
      copies the integrals of the function defined by arg into integrals.
      arg has to be defined on this.
   */
-  virtual void setToIntegrals(std::vector<double>& integrals,const escript::Data& arg) const;
+  virtual void setToIntegrals(std::vector<double>& integrals, const escript::Data& arg) const;
 
   /**
      \brief
@@ -467,7 +459,7 @@ public:
   virtual bool isCellOriented(int functionSpaceCode) const;
 
 
-  virtual bool ownSample(int fs_code, index_t id) const;
+  virtual bool ownSample(int fsCode, index_t id) const;
 
   /**
      \brief
@@ -505,11 +497,11 @@ public:
   virtual void addPDEToRHS(escript::Data& rhs,
                      const escript::Data& X, const escript::Data& Y,
                      const escript::Data& y, const escript::Data& y_contact, const escript::Data& y_dirac) const;
+
   /**
      \brief
      adds a PDE onto a transport problem
   */
-
   virtual void addPDEToTransportProblem(
                      escript::AbstractTransportProblem& tp, escript::Data& source, 
                      const escript::Data& M,
@@ -518,7 +510,6 @@ public:
                      const escript::Data& d, const escript::Data& y,
                      const escript::Data& d_contact,const escript::Data& y_contact, const escript::Data& d_dirac,const escript::Data& y_dirac) const;
 
-
   /**
      \brief
     creates a SystemMatrixAdapter stiffness matrix and initializes it with zeros:
@@ -529,12 +520,12 @@ public:
                       const int column_blocksize,
                       const escript::FunctionSpace& column_functionspace,
                       const int type) const;
+
   /**
    \brief 
     creates a TransportProblemAdapter 
 
   */
-
   escript::ATP_ptr newTransportProblem(
                       const int blocksize,
                       const escript::FunctionSpace& functionspace,
@@ -576,7 +567,6 @@ public:
 
   virtual const int* borrowListOfTagsInUse(int functionSpaceCode) const;
 
-
   /**
      \brief Checks if this domain allows tags for the specified functionSpaceCode.
   */
@@ -591,13 +581,12 @@ public:
   int getApproximationOrder(const int functionSpaceCode) const;
 
   bool supportsContactElements() const;
-  
 
   virtual escript::Data randomFill(const escript::DataTypes::ShapeType& shape,
        const escript::FunctionSpace& what, long seed, const boost::python::tuple& filter) const;       
-  
-  
-  private:
+
+
+private:
   
   /**
    \brief  adds points to support more Dirac delta function.
@@ -605,13 +594,10 @@ public:
    Do NOT call these at any time other than construction!
    Using them later creates consistency problems
    */
-  void addDiracPoints( const std::vector<double>& points, const std::vector<int>& tags) const;
+  void addDiracPoints(const std::vector<double>& points, const std::vector<int>& tags) const;
 //  void addDiracPoint( const boost::python::list& points, const int tag=-1) const;
 //   void addDiracPointWithTagName( const boost::python::list& points, const std::string& tag) const;
 
- protected:
-
- private:
   //
   // pointer to the externally created finley mesh
   boost::shared_ptr<Mesh> m_finleyMesh;
@@ -625,7 +611,8 @@ public:
  
   static FunctionSpaceNamesMapType m_functionSpaceTypeNames;
 
-  friend escript::Domain_ptr brick(esysUtils::JMPI& p, int n0, int n1, int n2, int order,
+  friend escript::Domain_ptr brick(esysUtils::JMPI& p,
+                    dim_t n0, dim_t n1, dim_t n2, int order,
                     double l0, double l1, double l2,
                     bool periodic0, bool periodic1, bool periodic2,
                     int integrationOrder,
@@ -635,10 +622,11 @@ public:
                     bool optimize, 
                     const std::vector<double>& points,
                     const std::vector<int>& tags,
-                    const std::map<std::string, int>& tagnamestonums);
+                    const std::map<std::string, int>& tagNamesToNums);
                     
                     
-  friend escript::Domain_ptr rectangle(esysUtils::JMPI& p,int n0, int n1, int order,
+  friend escript::Domain_ptr rectangle(esysUtils::JMPI& p,
+                        dim_t n0, dim_t n1, int order,
                         double l0, double l1,
                         bool periodic0, bool periodic1,
                         int integrationOrder,
@@ -648,30 +636,29 @@ public:
                         bool optimize,
                         const std::vector<double>& points,
                         const std::vector<int>& tags,
-                        const std::map<std::string, int>& tagnamestonums); 
+                        const std::map<std::string, int>& tagNamesToNums); 
 
    friend escript::Domain_ptr readMesh_driver(const boost::python::list& args);
 
-   friend escript::Domain_ptr readMesh(esysUtils::JMPI& p, const std::string& fileName,
+   friend escript::Domain_ptr readMesh(esysUtils::JMPI& p,
+                                     const std::string& fileName,
                                      int integrationOrder,
                                      int reducedIntegrationOrder,
                                      bool optimize,
-				   const std::vector<double>& points,
-				   const std::vector<int>& tags
- 			      );
+                                     const std::vector<double>& points,
+                                     const std::vector<int>& tags);
+
   friend escript::Domain_ptr readGmsh_driver(const boost::python::list& args);
 
-  friend escript::Domain_ptr readGmsh(esysUtils::JMPI& p, const std::string& fileName,
+  friend escript::Domain_ptr readGmsh(esysUtils::JMPI& p,
+                               const std::string& fileName,
                                int numDim, 
                                int integrationOrder,
                                int reducedIntegrationOrder, 
                                bool optimize,
                                bool useMacroElements,
-			      const std::vector<double>& points,
-			      const std::vector<int>& tags
-			      );
-  
-  
+                               const std::vector<double>& points,
+                               const std::vector<int>& tags);
 };
 
 
diff --git a/finley/src/CPPAdapter/MeshAdapterFactory.cpp b/finley/src/CPPAdapter/MeshAdapterFactory.cpp
index 1867ed0..0baeb42 100644
--- a/finley/src/CPPAdapter/MeshAdapterFactory.cpp
+++ b/finley/src/CPPAdapter/MeshAdapterFactory.cpp
@@ -15,13 +15,12 @@
 *****************************************************************************/
 
 #define ESNEEDPYTHON
-#include "esysUtils/first.h"
+#include <esysUtils/first.h>
 
 #include "MeshAdapterFactory.h"
-#include "esysUtils/blocktimer.h"
-#ifdef ESYS_MPI
-#include "esysUtils/Esys_MPI.h"
-#endif
+#include <esysUtils/blocktimer.h>
+#include <esysUtils/Esys_MPI.h>
+
 #ifdef USE_NETCDF
 #include <netcdfcpp.h>
 #endif
@@ -31,45 +30,43 @@
 
 #include <sstream>
 
-
 using namespace std;
 using namespace escript;
 
 namespace finley {
 
 #ifdef USE_NETCDF
-  // A convenience method to retrieve an integer attribute from a NetCDF file
-  int NetCDF_Get_Int_Attribute(NcFile *dataFile, const std::string &fName, char *attr_name)
-  {
-    NcAtt *attr;
-    char error_msg[LenErrorMsg_MAX];
-    if (! (attr=dataFile->get_att(attr_name)) ) {
-      sprintf(error_msg,"loadMesh: Error retrieving integer attribute '%s' from NetCDF file '%s'", attr_name, fName.c_str());
-      throw DataException(error_msg);
+// A convenience method to retrieve a numeric (int or long) attribute from a NetCDF file
+template<typename T>
+T ncReadAtt(NcFile *dataFile, const string &fName, const string& attrName)
+{
+    NcAtt *attr = dataFile->get_att(attrName.c_str());
+    if (!attr) {
+        stringstream msg;
+        msg << "loadMesh: Error retrieving integer attribute '" << attrName
+            << "' from NetCDF file '" << fName << "'";
+        throw FinleyAdapterException(msg.str());
     }
-    int temp = attr->as_int(0);
+    T value = (sizeof(T) > 4 ? attr->as_long(0) : attr->as_int(0));
     delete attr;
-    return(temp);
-  }
+    return value;
+}
 #endif
 
-  inline void cleanupAndThrow(Mesh* mesh, string msg)
-  {
-      delete mesh;
-      string msgPrefix("loadMesh: NetCDF operation failed - ");
-      throw DataException(msgPrefix+msg);
-  }
+inline void cleanupAndThrow(Mesh* mesh, string msg)
+{
+    delete mesh;
+    string msgPrefix("loadMesh: NetCDF operation failed - ");
+    throw FinleyAdapterException(msgPrefix+msg);
+}
 
-//   AbstractContinuousDomain* loadMesh(const std::string& fileName)
-  Domain_ptr loadMesh(const std::string& fileName)
-  {
+Domain_ptr loadMesh(const std::string& fileName)
+{
 #ifdef USE_NETCDF
     esysUtils::JMPI mpi_info = esysUtils::makeInfo( MPI_COMM_WORLD );
-    Mesh *mesh_p=NULL;
-    char error_msg[LenErrorMsg_MAX];
 
-    const std::string fName(esysUtils::appendRankToFileName(fileName,
-                                              mpi_info->size, mpi_info->rank));
+    const string fName(esysUtils::appendRankToFileName(fileName,
+                        mpi_info->size, mpi_info->rank));
 
     double blocktimer_start = blocktimer_time();
     resetError();
@@ -82,54 +79,76 @@ namespace finley {
     // Create the NetCDF file.
     NcFile dataFile(fName.c_str(), NcFile::ReadOnly);
     if (!dataFile.is_valid()) {
-      sprintf(error_msg,"loadMesh: Opening NetCDF file '%s' for reading failed.", fName.c_str());
-      setError(IO_ERROR,error_msg);
-      throw DataException(error_msg);
+        stringstream msg;
+        msg << "loadMesh: Opening NetCDF file '" << fName << "' for reading failed.";
+        throw FinleyAdapterException(msg.str());
     }
 
     // Read NetCDF integer attributes
-    int mpi_size                        = NetCDF_Get_Int_Attribute(&dataFile, fName, (char *)"mpi_size");
-    int mpi_rank                        = NetCDF_Get_Int_Attribute(&dataFile, fName, (char *)"mpi_rank");
-    int numDim                          = NetCDF_Get_Int_Attribute(&dataFile, fName, (char *)"numDim");
-    int order                           = NetCDF_Get_Int_Attribute(&dataFile, fName, (char *)"order");
-    int reduced_order                   = NetCDF_Get_Int_Attribute(&dataFile, fName, (char *)"reduced_order");
-    int numNodes                        = NetCDF_Get_Int_Attribute(&dataFile, fName, (char *)"numNodes");
-    int num_Elements                    = NetCDF_Get_Int_Attribute(&dataFile, fName, (char *)"num_Elements");
-    int num_FaceElements                = NetCDF_Get_Int_Attribute(&dataFile, fName, (char *)"num_FaceElements");
-    int num_ContactElements             = NetCDF_Get_Int_Attribute(&dataFile, fName, (char *)"num_ContactElements");
-    int num_Points                      = NetCDF_Get_Int_Attribute(&dataFile, fName, (char *)"num_Points");
-    int num_Elements_numNodes           = NetCDF_Get_Int_Attribute(&dataFile, fName, (char *)"num_Elements_numNodes");
-    int Elements_TypeId                 = NetCDF_Get_Int_Attribute(&dataFile, fName, (char *)"Elements_TypeId");
-    int num_FaceElements_numNodes       = NetCDF_Get_Int_Attribute(&dataFile, fName, (char *)"num_FaceElements_numNodes");
-    int FaceElements_TypeId             = NetCDF_Get_Int_Attribute(&dataFile, fName, (char *)"FaceElements_TypeId");
-    int num_ContactElements_numNodes    = NetCDF_Get_Int_Attribute(&dataFile, fName, (char *)"num_ContactElements_numNodes");
-    int ContactElements_TypeId          = NetCDF_Get_Int_Attribute(&dataFile, fName, (char *)"ContactElements_TypeId");
-    int Points_TypeId                   = NetCDF_Get_Int_Attribute(&dataFile, fName, (char *)"Points_TypeId");
-    int num_Tags                        = NetCDF_Get_Int_Attribute(&dataFile, fName, (char *)"num_Tags");
+
+    // index_size was only introduced with 64-bit index support so fall back
+    // to 32 bits if not found.
+    int index_size;
+    try {
+        index_size = ncReadAtt<int>(&dataFile, fName, "index_size");
+    } catch (FinleyAdapterException& e) {
+        index_size = 4;
+    }
+    // technically we could cast if reading 32-bit data on 64-bit escript
+    // but cost-benefit analysis clearly favours this implementation for now
+    if (sizeof(index_t) != index_size) {
+        throw FinleyAdapterException("loadMesh: size of index types at runtime differ from dump file");
+    }
+
+    int mpi_size = ncReadAtt<int>(&dataFile, fName, "mpi_size");
+    int mpi_rank = ncReadAtt<int>(&dataFile, fName, "mpi_rank");
+    int numDim = ncReadAtt<int>(&dataFile, fName, "numDim");
+    int order = ncReadAtt<int>(&dataFile, fName, "order");
+    int reduced_order = ncReadAtt<int>(&dataFile, fName, "reduced_order");
+    dim_t numNodes = ncReadAtt<dim_t>(&dataFile, fName, "numNodes");
+    dim_t num_Elements = ncReadAtt<dim_t>(&dataFile, fName, "num_Elements");
+    dim_t num_FaceElements = ncReadAtt<dim_t>(&dataFile, fName, "num_FaceElements");
+    dim_t num_ContactElements = ncReadAtt<dim_t>(&dataFile, fName, "num_ContactElements");
+    dim_t num_Points = ncReadAtt<dim_t>(&dataFile, fName, "num_Points");
+    int num_Elements_numNodes = ncReadAtt<int>(&dataFile, fName, "num_Elements_numNodes");
+    int Elements_TypeId = ncReadAtt<int>(&dataFile, fName, "Elements_TypeId");
+    int num_FaceElements_numNodes = ncReadAtt<int>(&dataFile, fName, "num_FaceElements_numNodes");
+    int FaceElements_TypeId = ncReadAtt<int>(&dataFile, fName, "FaceElements_TypeId");
+    int num_ContactElements_numNodes = ncReadAtt<int>(&dataFile, fName, "num_ContactElements_numNodes");
+    int ContactElements_TypeId = ncReadAtt<int>(&dataFile, fName, "ContactElements_TypeId");
+    int Points_TypeId = ncReadAtt<int>(&dataFile, fName, "Points_TypeId");
+    int num_Tags = ncReadAtt<int>(&dataFile, fName, "num_Tags");
 
     // Verify size and rank
     if (mpi_info->size != mpi_size) {
-      sprintf(error_msg, "loadMesh: The NetCDF file '%s' can only be read on %d CPUs instead of %d", fName.c_str(), mpi_size, mpi_info->size);
-      throw DataException(error_msg);
+        stringstream msg;
+        msg << "loadMesh: The NetCDF file '" << fName
+            << "' can only be read on " << mpi_size
+            << " CPUs. Currently running: " << mpi_info->size;
+        throw FinleyAdapterException(msg.str());
     }
     if (mpi_info->rank != mpi_rank) {
-      sprintf(error_msg, "loadMesh: The NetCDF file '%s' should be read on CPU #%d instead of %d", fName.c_str(), mpi_rank, mpi_info->rank);
-      throw DataException(error_msg);
+        stringstream msg;
+        msg << "loadMesh: The NetCDF file '" << fName
+            << "' should be read on CPU #" << mpi_rank
+            << " and NOT on #" << mpi_info->rank;
+        throw FinleyAdapterException(msg.str());
     }
 
     // Read mesh name
     if (! (attr=dataFile.get_att("Name")) ) {
-      sprintf(error_msg,"loadMesh: Error retrieving mesh name from NetCDF file '%s'", fName.c_str());
-      throw DataException(error_msg);
+        stringstream msg;
+        msg << "loadMesh: Error retrieving mesh name from NetCDF file '"
+            << fName << "'";
+        throw FinleyAdapterException(msg.str());
     }
     boost::scoped_array<char> name(attr->as_string(0));
     delete attr;
 
-    /* allocate mesh */
-    mesh_p = new Mesh(name.get(), numDim, mpi_info);
+    // allocate mesh
+    Mesh *mesh_p = new Mesh(name.get(), numDim, mpi_info);
     if (noError()) {
-
-        /* read nodes */
+        // read nodes
         mesh_p->Nodes->allocTable(numNodes);
         // Nodes_Id
         if (! ( nc_var_temp = dataFile.get_var("Nodes_Id")) )
@@ -168,7 +187,7 @@ namespace finley {
             cleanupAndThrow(mesh_p, "get(Nodes_Coordinates)");
         mesh_p->Nodes->updateTagList();
 
-        /* read elements */
+        // read elements
         if (noError()) {
             const_ReferenceElementSet_ptr refElements(new ReferenceElementSet(
                         (ElementTypeId)Elements_TypeId, order, reduced_order));
@@ -361,7 +380,7 @@ namespace finley {
            }
         }
 
-        /* get the Points (nodal elements) */
+        // get the Points (nodal elements)
         if (noError()) {
             const_ReferenceElementSet_ptr refPoints(new ReferenceElementSet(
                         (ElementTypeId)Points_TypeId, order, reduced_order));
@@ -422,9 +441,9 @@ namespace finley {
             }
         }
 
-        /* get the tags */
+        // get the tags
         if (noError()) {
-          if (num_Tags>0) {
+          if (num_Tags > 0) {
             // Temp storage to gather node IDs
             int *Tags_keys = new int[num_Tags];
             char name_temp[4096];
@@ -444,8 +463,9 @@ namespace finley {
               sprintf(name_temp, "Tags_name_%d", i);
               if (! (attr=dataFile.get_att(name_temp)) ) {
                   delete[] Tags_keys;
-                  sprintf(error_msg,"get_att(%s)", name_temp);
-                  cleanupAndThrow(mesh_p, error_msg);
+                  stringstream msg;
+                  msg << "get_att(" << name_temp << ")";
+                  cleanupAndThrow(mesh_p, msg.str());
               }
               boost::scoped_array<char> name(attr->as_string(0));
               delete attr;
@@ -457,7 +477,7 @@ namespace finley {
    
         if (noError()) {
             // Nodes_DofDistribution
-            std::vector<int> first_DofComponent(mpi_size+1);
+            std::vector<index_t> first_DofComponent(mpi_size+1);
             if (! (nc_var_temp = dataFile.get_var("Nodes_DofDistribution")) ) {
                 cleanupAndThrow(mesh_p, "get_var(Nodes_DofDistribution)");
             }
@@ -466,7 +486,7 @@ namespace finley {
             }
 
             // Nodes_NodeDistribution
-            std::vector<int> first_NodeComponent(mpi_size+1);
+            std::vector<index_t> first_NodeComponent(mpi_size+1);
             if (! (nc_var_temp = dataFile.get_var("Nodes_NodeDistribution")) ) {
                 cleanupAndThrow(mesh_p, "get_var(Nodes_NodeDistribution)");
             }
@@ -486,21 +506,17 @@ namespace finley {
     blocktimer_increment("LoadMesh()", blocktimer_start);
     return dom;
 #else
-    throw DataException("loadMesh: not compiled with NetCDF. Please contact your installation manager.");
+    throw FinleyAdapterException("loadMesh: not compiled with NetCDF. Please contact your installation manager.");
 #endif /* USE_NETCDF */
-  }
+}
 
-  Domain_ptr readMesh(esysUtils::JMPI& info,
-		     const std::string& fileName,
-                      int integrationOrder,
-                      int reducedIntegrationOrder,
-                      bool optimize,
-		     const std::vector<double>& points,
-		     const std::vector<int>& tags 
-		    )
-  {
+Domain_ptr readMesh(esysUtils::JMPI& info, const std::string& fileName,
+                    int integrationOrder, int reducedIntegrationOrder,
+                    bool optimize, const std::vector<double>& points,
+                    const std::vector<int>& tags)
+{
     if (fileName.size() == 0 )
-        throw DataException("Null file name!");
+        throw FinleyAdapterException("Null file name!");
 
     double blocktimer_start = blocktimer_time();
     Mesh* fMesh=Mesh::read(info, fileName, integrationOrder, reducedIntegrationOrder, optimize);
@@ -509,117 +525,97 @@ namespace finley {
     ma->addDiracPoints(points, tags);    
     blocktimer_increment("ReadMesh()", blocktimer_start);
     return Domain_ptr(ma);
-  }
+}
 
   
-  Domain_ptr readMesh_driver(const boost::python::list& args)
-  {
-      using boost::python::extract;
-      int l=len(args);
-      if (l<7) 
-      {
-	  throw FinleyAdapterException("Insufficient arguments to readMesh_driver");
-      }
-      std::string fileName=extract<string>(args[0])();
-      int integrationOrder=extract<int>(args[1])();
-      int reducedIntegrationOrder=extract<int>(args[2])();
-      bool optimize=extract<bool>(args[3])();
-      std::vector<double> points;
-      std::vector<int> tags;
-      
-      
-      // we need to convert lists to stl vectors
-      boost::python::list pypoints=extract<boost::python::list>(args[4]);
-      boost::python::list pytags=extract<boost::python::list>(args[5]);
-      int numpts=extract<int>(pypoints.attr("__len__")());
-      int numtags=extract<int>(pytags.attr("__len__")());
-
-      boost::python::object pworld=args[6];
-      esysUtils::JMPI info;
-      if (!pworld.is_none())
-      {
-	  extract<SubWorld_ptr> ex(pworld);
-	  if (!ex.check())
-	  {
-	      throw FinleyAdapterException("Invalid escriptWorld parameter.");
-	  }
-	  info=ex()->getMPI();
-      }
-      else
-      {
-	  info=esysUtils::makeInfo(MPI_COMM_WORLD);
-
-      }
-      Domain_ptr result=readMesh(info, fileName,
-                      integrationOrder,
-                      reducedIntegrationOrder,
-                      optimize,
-		     points,
-		     tags 
-		    );
+Domain_ptr readMesh_driver(const boost::python::list& args)
+{
+    using boost::python::extract;
+    int l=len(args);
+    if (l<7) {
+        throw FinleyAdapterException("Insufficient arguments to readMesh_driver");
+    }
+    string fileName=extract<string>(args[0])();
+    int integrationOrder=extract<int>(args[1])();
+    int reducedIntegrationOrder=extract<int>(args[2])();
+    bool optimize=extract<bool>(args[3])();
+    vector<double> points;
+    vector<int> tags;
+
+    // we need to convert lists to stl vectors
+    boost::python::list pypoints=extract<boost::python::list>(args[4]);
+    boost::python::list pytags=extract<boost::python::list>(args[5]);
+    int numpts=extract<int>(pypoints.attr("__len__")());
+    int numtags=extract<int>(pytags.attr("__len__")());
+
+    boost::python::object pworld=args[6];
+    esysUtils::JMPI info;
+    if (!pworld.is_none()) {
+        extract<SubWorld_ptr> ex(pworld);
+        if (!ex.check()) {
+            throw FinleyAdapterException("Invalid escriptWorld parameter.");
+        }
+        info=ex()->getMPI();
+    } else {
+        info=esysUtils::makeInfo(MPI_COMM_WORLD);
+    }
+    Domain_ptr result=readMesh(info, fileName, integrationOrder,
+                               reducedIntegrationOrder, optimize, points, tags);
 
-      for (int i=0;i<numpts;++i) {
-          boost::python::object temp=pypoints[i];
-          int l=extract<int>(temp.attr("__len__")());
-          for (int k=0;k<l;++k) {
+    for (int i=0; i<numpts; ++i) {
+        boost::python::object temp=pypoints[i];
+        int l=extract<int>(temp.attr("__len__")());
+        for (int k=0;k<l;++k) {
               points.push_back(extract<double>(temp[k]));
-          }
-      }
-      int curmax=40; // bricks use up to 200 but the existing tag check 
-		     // will find that
-      TagMap& tagmap=dynamic_cast<MeshAdapter*>(result.get())->getMesh()->tagMap;
-		// first we work out what tags are already in use
-      for (TagMap::iterator it=tagmap.begin();
-		it!=tagmap.end();++it)
-      {
-	  if (it->second>curmax)
-	  {
-		curmax=it->second+1;
-	  }
-      }
-
-      tags.resize(numtags, -1);
-      for (int i=0;i<numtags;++i) {
-          extract<int> ex_int(pytags[i]);
-          extract<string> ex_str(pytags[i]);
-          if (ex_int.check()) {
-              tags[i]=ex_int();
-              if (tags[i]>= curmax) {
-                  curmax=tags[i]+1;
-              }
-          } else if (ex_str.check()) {
-              string s=ex_str();
-              map<string, int>::iterator it=tagmap.find(s);
-              if (it!=tagmap.end()) {
-                  // we have the tag already so look it up
-                  tags[i]=it->second;
-              } else {
-		  result->setTagMap(s,curmax);
-                  tags[i]=curmax;
-                  curmax++;
-              }
-          } else {
-              throw FinleyAdapterException("Error - Unable to extract tag value.");
-          }
-      }
-	// now we need to add the dirac points
-      dynamic_cast<MeshAdapter*>(result.get())->addDiracPoints(points, tags);
-      return result;
-
+        }
+    }
+    // bricks use up to 200 but the existing tag check will find that
+    int curmax=40;
+    TagMap& tagmap=dynamic_cast<MeshAdapter*>(result.get())->getMesh()->tagMap;
+    // first we work out what tags are already in use
+    for (TagMap::iterator it=tagmap.begin(); it!=tagmap.end(); ++it) {
+        if (it->second>curmax) {
+            curmax=it->second+1;
+        }
+    }
 
-  }  
+    tags.resize(numtags, -1);
+    for (int i=0;i<numtags;++i) {
+        extract<int> ex_int(pytags[i]);
+        extract<string> ex_str(pytags[i]);
+        if (ex_int.check()) {
+            tags[i]=ex_int();
+            if (tags[i]>= curmax) {
+                curmax=tags[i]+1;
+            }
+        } else if (ex_str.check()) {
+            string s=ex_str();
+            map<string, int>::iterator it=tagmap.find(s);
+            if (it!=tagmap.end()) {
+                // we have the tag already so look it up
+                tags[i]=it->second;
+            } else {
+                result->setTagMap(s,curmax);
+                tags[i]=curmax;
+                curmax++;
+            }
+        } else {
+            throw FinleyAdapterException("Error - Unable to extract tag value.");
+        }
+    }
+    // now we need to add the dirac points
+    dynamic_cast<MeshAdapter*>(result.get())->addDiracPoints(points, tags);
+    return result;
+}  
   
-  Domain_ptr readGmsh(esysUtils::JMPI& info, const std::string& fileName,
-                                     int numDim,
-                                     int integrationOrder,
-                                     int reducedIntegrationOrder,
-                                     bool optimize,
-                                     bool useMacroElements,
-				   const std::vector<double>& points,
-				   const std::vector<int>& tags)
-  {
+Domain_ptr readGmsh(esysUtils::JMPI& info, const std::string& fileName,
+                    int numDim, int integrationOrder,
+                    int reducedIntegrationOrder, bool optimize,
+                    bool useMacroElements, const std::vector<double>& points,
+                    const std::vector<int>& tags)
+{
     if (fileName.size() == 0 )
-        throw DataException("Null file name!");
+        throw FinleyAdapterException("Null file name!");
 
     double blocktimer_start = blocktimer_time();
     Mesh* fMesh=Mesh::readGmsh(info, fileName, numDim, integrationOrder, reducedIntegrationOrder, optimize, useMacroElements);
@@ -628,119 +624,99 @@ namespace finley {
     MeshAdapter* ma=new MeshAdapter(fMesh);
     ma->addDiracPoints(points, tags);
     return Domain_ptr(ma);
-  }
-  
-  
+}
   
+Domain_ptr readGmsh_driver(const boost::python::list& args)
+{
+    using boost::python::extract;
+    int l=len(args);
+    if (l<7) {
+        throw FinleyAdapterException("Insufficient arguments to readMesh_driver");
+    }
+    string fileName=extract<string>(args[0])();
+    int numDim=extract<int>(args[1])();
+    int integrationOrder=extract<int>(args[2])();
+    int reducedIntegrationOrder=extract<int>(args[3])();
+    bool optimize=extract<bool>(args[4])();
+    bool useMacroElements=extract<bool>(args[5])();
+    vector<double> points;
+    vector<int> tags;
+
+    // we need to convert lists to stl vectors
+    boost::python::list pypoints=extract<boost::python::list>(args[6]);
+    boost::python::list pytags=extract<boost::python::list>(args[7]);
+    int numpts=extract<int>(pypoints.attr("__len__")());
+    int numtags=extract<int>(pytags.attr("__len__")());
+    boost::python::object pworld=args[8];
+    esysUtils::JMPI info;
+    if (!pworld.is_none()) {
+        extract<SubWorld_ptr> ex(pworld);
+        if (!ex.check()) {
+            throw FinleyAdapterException("Invalid escriptWorld parameter.");
+        }
+        info=ex()->getMPI();
+    } else {
+        info=esysUtils::makeInfo(MPI_COMM_WORLD);
+    }
+    Domain_ptr result = readGmsh(info, fileName, numDim, integrationOrder,
+                                 reducedIntegrationOrder, optimize,
+                                 useMacroElements, points, tags);      
+
+    for (int i=0;i<numpts;++i) {
+        boost::python::object temp=pypoints[i];
+        int l=extract<int>(temp.attr("__len__")());
+        for (int k=0;k<l;++k) {
+            points.push_back(extract<double>(temp[k]));
+        }
+    }
+    int curmax=40; // bricks use up to 30
+    TagMap& tagmap=dynamic_cast<MeshAdapter*>(result.get())->getMesh()->tagMap;
+    // first we work out what tags are already in use
+    for (TagMap::iterator it=tagmap.begin(); it!=tagmap.end(); ++it) {
+        if (it->second>curmax) {
+            curmax=it->second+1;
+        }
+    }
 
-  Domain_ptr readGmsh_driver(const boost::python::list& args)
-  {
-      using boost::python::extract;
-      int l=len(args);
-      if (l<7) 
-      {
-	  throw FinleyAdapterException("Insufficient arguments to readMesh_driver");
-      }
-      std::string fileName=extract<string>(args[0])();
-      int numDim=extract<int>(args[1])();
-      int integrationOrder=extract<int>(args[2])();
-      int reducedIntegrationOrder=extract<int>(args[3])();
-      bool optimize=extract<bool>(args[4])();
-      bool useMacroElements=extract<bool>(args[5])();
-      std::vector<double> points;
-      std::vector<int> tags;
-      
-      
-      // we need to convert lists to stl vectors
-      boost::python::list pypoints=extract<boost::python::list>(args[6]);
-      boost::python::list pytags=extract<boost::python::list>(args[7]);
-      int numpts=extract<int>(pypoints.attr("__len__")());
-      int numtags=extract<int>(pytags.attr("__len__")());
-      boost::python::object pworld=args[8];
-      esysUtils::JMPI info;
-      if (!pworld.is_none())
-      {
-	  extract<SubWorld_ptr> ex(pworld);
-	  if (!ex.check())
-	  {
-	      throw FinleyAdapterException("Invalid escriptWorld parameter.");
-	  }
-	  info=ex()->getMPI();
-      }
-      else
-      {
-	  info=esysUtils::makeInfo(MPI_COMM_WORLD);
-
-      }
-      Domain_ptr result = readGmsh(info, fileName,
-                                     numDim,
-                                     integrationOrder,
-                                     reducedIntegrationOrder,
-                                     optimize,
-                                     useMacroElements,
-				   points,
-				   tags);      
-
-      for (int i=0;i<numpts;++i) {
-          boost::python::object temp=pypoints[i];
-          int l=extract<int>(temp.attr("__len__")());
-          for (int k=0;k<l;++k) {
-              points.push_back(extract<double>(temp[k]));
-          }
-      }
-      int curmax=40; // bricks use up to 30
-      TagMap& tagmap=dynamic_cast<MeshAdapter*>(result.get())->getMesh()->tagMap;
-                // first we work out what tags are already in use
-      for (TagMap::iterator it=tagmap.begin();
-                it!=tagmap.end();++it)
-      {
-          if (it->second>curmax)
-          {
-                curmax=it->second+1;
-          }
-      }
-
-      tags.resize(numtags, -1);
-      for (int i=0;i<numtags;++i) {
-          extract<int> ex_int(pytags[i]);
-          extract<string> ex_str(pytags[i]);
-          if (ex_int.check()) {
-              tags[i]=ex_int();
-              if (tags[i]>= curmax) {
-                  curmax=tags[i]+1;
-              }
-          } else if (ex_str.check()) {
-              string s=ex_str();
-              map<string, int>::iterator it=tagmap.find(s);
-              if (it!=tagmap.end()) {
-                  // we have the tag already so look it up
-                  tags[i]=it->second;
-              } else {
-                  result->setTagMap(s,curmax);
-                  tags[i]=curmax;
-                  curmax++;
-              }
-          } else {
-              throw FinleyAdapterException("Error - Unable to extract tag value");
-          }
-      }
-        // now we need to add the dirac points
-      dynamic_cast<MeshAdapter*>(result.get())->addDiracPoints(points, tags);
-      return result;
-
-  }   
+    tags.resize(numtags, -1);
+    for (int i=0;i<numtags;++i) {
+        extract<int> ex_int(pytags[i]);
+        extract<string> ex_str(pytags[i]);
+        if (ex_int.check()) {
+            tags[i]=ex_int();
+            if (tags[i]>= curmax) {
+                curmax=tags[i]+1;
+            }
+        } else if (ex_str.check()) {
+            string s=ex_str();
+            map<string, int>::iterator it=tagmap.find(s);
+            if (it!=tagmap.end()) {
+                // we have the tag already so look it up
+                tags[i]=it->second;
+            } else {
+                result->setTagMap(s,curmax);
+                tags[i]=curmax;
+                curmax++;
+            }
+        } else {
+            throw FinleyAdapterException("Error - Unable to extract tag value");
+        }
+    }
+    // now we need to add the dirac points
+    dynamic_cast<MeshAdapter*>(result.get())->addDiracPoints(points, tags);
+    return result;
+}   
   
-  Domain_ptr brick(esysUtils::JMPI& info, int n0, int n1, int n2, int order,
-                   double l0, double l1, double l2,
-                   bool periodic0, bool periodic1, bool periodic2,
-                   int integrationOrder, int reducedIntegrationOrder,
-                   bool useElementsOnFace, bool useFullElementOrder,
-                   bool optimize, const std::vector<double>& points,
-                   const std::vector<int>& tags,
-                   const std::map<std::string, int>& tagnamestonums
-		  )
-  {
-    const int numElements[] = {n0, n1, n2};
+Domain_ptr brick(esysUtils::JMPI& info, dim_t n0, dim_t n1, dim_t n2, int order,
+                 double l0, double l1, double l2,
+                 bool periodic0, bool periodic1, bool periodic2,
+                 int integrationOrder, int reducedIntegrationOrder,
+                 bool useElementsOnFace, bool useFullElementOrder,
+                 bool optimize, const std::vector<double>& points,
+                 const std::vector<int>& tags,
+                 const std::map<std::string, int>& tagNamesToNums)
+{
+    const dim_t numElements[] = {n0, n1, n2};
     const double length[] = {l0, l1, l2};
     const bool periodic[] = {periodic0, periodic1, periodic2};
 
@@ -748,18 +724,15 @@ namespace finley {
     if (order==1) {
         fMesh=RectangularMesh_Hex8(numElements, length, periodic,
                 integrationOrder, reducedIntegrationOrder,
-                useElementsOnFace, useFullElementOrder, optimize,
-		info);
+                useElementsOnFace, useFullElementOrder, optimize, info);
     } else if (order==2) {
         fMesh=RectangularMesh_Hex20(numElements, length, periodic,
                 integrationOrder, reducedIntegrationOrder,
-                useElementsOnFace, useFullElementOrder, false, optimize,
-		info);
+                useElementsOnFace, useFullElementOrder, false, optimize, info);
     } else if (order==-1) {
         fMesh=RectangularMesh_Hex20(numElements, length, periodic,
                 integrationOrder, reducedIntegrationOrder,
-                useElementsOnFace, useFullElementOrder, true, optimize,
-		info);
+                useElementsOnFace, useFullElementOrder, true, optimize, info);
     } else {
         stringstream message;
         message << "Illegal interpolation order " << order;
@@ -771,102 +744,91 @@ namespace finley {
     MeshAdapter* dom = new MeshAdapter(fMesh);
     dom->addDiracPoints(points, tags);
     Mesh* out=dom->getMesh().get();     
-    for (map<string, int>::const_iterator it=tagnamestonums.begin();it!=tagnamestonums.end();++it)
+    for (map<string, int>::const_iterator it=tagNamesToNums.begin();it!=tagNamesToNums.end();++it)
     {
         out->addTagMap(it->first.c_str(), it->second);
     }
     out->Points->updateTagList();
     return Domain_ptr(dom);
-  }
-
-  Domain_ptr brick_driver(const boost::python::list& args)
-  {
-      using boost::python::extract;
-
-      // we need to convert lists to stl vectors
-      boost::python::list pypoints=extract<boost::python::list>(args[15]);
-      boost::python::list pytags=extract<boost::python::list>(args[16]);
-      int numpts=extract<int>(pypoints.attr("__len__")());
-      int numtags=extract<int>(pytags.attr("__len__")());
-      vector<double> points;
-      vector<int> tags;
-      tags.resize(numtags, -1);
-      for (int i=0;i<numpts;++i) {
-          boost::python::object temp=pypoints[i];
-          int l=extract<int>(temp.attr("__len__")());
-          for (int k=0;k<l;++k) {
-              points.push_back(extract<double>(temp[k]));           
-          }
-      }
-      map<string, int> namestonums;
-      int curmax=40; // bricks use up to 30
-      for (int i=0;i<numtags;++i) {
-          extract<int> ex_int(pytags[i]);
-          extract<string> ex_str(pytags[i]);
-          if (ex_int.check()) {
-              tags[i]=ex_int();
-              if (tags[i]>= curmax) {
-                  curmax=tags[i]+1;
-              }
-          } else if (ex_str.check()) {
-              string s=ex_str();
-              map<string, int>::iterator it=namestonums.find(s);
-              if (it!=namestonums.end()) {
-                  // we have the tag already so look it up
-                  tags[i]=it->second;
-              } else {
-                  namestonums[s]=curmax;
-                  tags[i]=curmax;
-                  curmax++;
-              }
-          } else {
-              throw FinleyAdapterException("Error - Unable to extract tag value.");
-          }
-        
-      }
-      boost::python::object pworld=args[17];
-      esysUtils::JMPI info;
-      if (!pworld.is_none())
-      {
-	  extract<SubWorld_ptr> ex(pworld);
-	  if (!ex.check())
-	  {
-	      throw FinleyAdapterException("Invalid escriptWorld parameter.");
-	  }
-	  info=ex()->getMPI();
-      }
-      else
-      {
-	  info=esysUtils::makeInfo(MPI_COMM_WORLD);
+}
 
-      }
-      return brick(info, static_cast<int>(extract<float>(args[0])),
-                   static_cast<int>(extract<float>(args[1])),
-                   static_cast<int>(extract<float>(args[2])),
+Domain_ptr brick_driver(const boost::python::list& args)
+{
+    using boost::python::extract;
+
+    // we need to convert lists to stl vectors
+    boost::python::list pypoints=extract<boost::python::list>(args[15]);
+    boost::python::list pytags=extract<boost::python::list>(args[16]);
+    int numpts=extract<int>(pypoints.attr("__len__")());
+    int numtags=extract<int>(pytags.attr("__len__")());
+    vector<double> points;
+    vector<int> tags;
+    tags.resize(numtags, -1);
+    for (int i=0;i<numpts;++i) {
+        boost::python::object temp=pypoints[i];
+        int l=extract<int>(temp.attr("__len__")());
+        for (int k=0;k<l;++k) {
+            points.push_back(extract<double>(temp[k]));           
+        }
+    }
+    map<string, int> namestonums;
+    int curmax=40; // bricks use up to 30
+    for (int i=0;i<numtags;++i) {
+        extract<int> ex_int(pytags[i]);
+        extract<string> ex_str(pytags[i]);
+        if (ex_int.check()) {
+            tags[i]=ex_int();
+            if (tags[i]>= curmax) {
+                curmax=tags[i]+1;
+            }
+        } else if (ex_str.check()) {
+            string s=ex_str();
+            map<string, int>::iterator it=namestonums.find(s);
+            if (it!=namestonums.end()) {
+                // we have the tag already so look it up
+                tags[i]=it->second;
+            } else {
+                namestonums[s]=curmax;
+                tags[i]=curmax;
+                curmax++;
+            }
+        } else {
+            throw FinleyAdapterException("Error - Unable to extract tag value.");
+        }
+    }
+    boost::python::object pworld=args[17];
+    esysUtils::JMPI info;
+    if (!pworld.is_none()) {
+        extract<SubWorld_ptr> ex(pworld);
+        if (!ex.check())
+        {
+            throw FinleyAdapterException("Invalid escriptWorld parameter.");
+        }
+        info=ex()->getMPI();
+    } else {
+        info=esysUtils::makeInfo(MPI_COMM_WORLD);
+    }
+    return brick(info, static_cast<dim_t>(extract<float>(args[0])),
+                   static_cast<dim_t>(extract<float>(args[1])),
+                   static_cast<dim_t>(extract<float>(args[2])),
                    extract<int>(args[3]), extract<double>(args[4]),
                    extract<double>(args[5]), extract<double>(args[6]),
                    extract<int>(args[7]), extract<int>(args[8]),
                    extract<int>(args[9]), extract<int>(args[10]),
                    extract<int>(args[11]), extract<int>(args[12]),
                    extract<int>(args[13]), extract<int>(args[14]),
-                   points, tags, namestonums
-		  );
-  }
+                   points, tags, namestonums);
+}
 
-  Domain_ptr rectangle(esysUtils::JMPI& info, int n0, int n1, int order,
-                       double l0, double l1,
-                       bool periodic0, bool periodic1,
-                       int integrationOrder,
-                       int reducedIntegrationOrder,
-                       bool useElementsOnFace,
-                       bool useFullElementOrder,
-                       bool optimize,
-                       const vector<double>& points,
-                       const vector<int>& tags,
-                       const std::map<std::string, int>& tagnamestonums
-		      )
-  {
-    const int numElements[] = {n0, n1};
+Domain_ptr rectangle(esysUtils::JMPI& info, dim_t n0, dim_t n1, int order,
+                     double l0, double l1, bool periodic0, bool periodic1,
+                     int integrationOrder, int reducedIntegrationOrder,
+                     bool useElementsOnFace, bool useFullElementOrder,
+                     bool optimize, const vector<double>& points,
+                     const vector<int>& tags,
+                     const std::map<std::string, int>& tagNamesToNums)
+{
+    const dim_t numElements[] = {n0, n1};
     const double length[] = {l0, l1};
     const bool periodic[] = {periodic0, periodic1};
 
@@ -874,18 +836,15 @@ namespace finley {
     if (order==1) {
         fMesh=RectangularMesh_Rec4(numElements, length, periodic,
                 integrationOrder, reducedIntegrationOrder,
-                useElementsOnFace, useFullElementOrder, optimize,
-		info);
+                useElementsOnFace, useFullElementOrder, optimize, info);
     } else if (order==2) {
         fMesh=RectangularMesh_Rec8(numElements, length, periodic,
                 integrationOrder, reducedIntegrationOrder,
-                useElementsOnFace,useFullElementOrder, false, optimize,
-		info);
+                useElementsOnFace,useFullElementOrder, false, optimize, info);
     } else if (order==-1) {
         fMesh=RectangularMesh_Rec8(numElements, length, periodic,
                 integrationOrder, reducedIntegrationOrder,
-                useElementsOnFace, useFullElementOrder, true, optimize,
-		info);
+                useElementsOnFace, useFullElementOrder, true, optimize, info);
     } else {
         stringstream message;
         message << "Illegal interpolation order " << order;
@@ -897,16 +856,16 @@ namespace finley {
     MeshAdapter* dom = new MeshAdapter(fMesh);
     dom->addDiracPoints(points, tags);
     Mesh* out=dom->getMesh().get();     
-    for (map<string, int>::const_iterator it=tagnamestonums.begin();it!=tagnamestonums.end();++it)
+    for (map<string, int>::const_iterator it=tagNamesToNums.begin();it!=tagNamesToNums.end();++it)
     {
         out->addTagMap(it->first.c_str(), it->second);
     }
     out->Points->updateTagList();
     return Domain_ptr(dom);
-  }
+}
 
-  Domain_ptr meshMerge(const boost::python::list& meshList)
-  {
+Domain_ptr meshMerge(const boost::python::list& meshList)
+{
     // extract the meshes from meshList
     int num=boost::python::extract<int>(meshList.attr("__len__")());
     vector<Mesh*> meshes(num);
@@ -922,136 +881,106 @@ namespace finley {
     // Convert any finley errors into a C++ exception
     checkFinleyError();
     return Domain_ptr(new MeshAdapter(fMesh));
-  }
-
-  Domain_ptr rectangle_driver(const boost::python::list& args)
-  {
-      using boost::python::extract;
+}
 
-      // we need to convert lists to stl vectors
-      boost::python::list pypoints=extract<boost::python::list>(args[12]);
-      boost::python::list pytags=extract<boost::python::list>(args[13]);
-      int numpts=extract<int>(pypoints.attr("__len__")());
-      int numtags=extract<int>(pytags.attr("__len__")());
-      vector<double> points;
-      vector<int> tags;
-      tags.resize(numtags, -1);
-      for (int i=0;i<numpts;++i)
-      {
-          boost::python::object temp=pypoints[i];
-          int l=extract<int>(temp.attr("__len__")());
-          for (int k=0;k<l;++k)
-          {
-              points.push_back(extract<double>(temp[k]));           
-          }
-      }
-      map<string, int> tagstonames;
-      int curmax=40;
-      // but which order to assign tags to names?????
-      for (int i=0;i<numtags;++i)
-      {
-          extract<int> ex_int(pytags[i]);
-          extract<string> ex_str(pytags[i]);
-          if (ex_int.check())
-          {
-              tags[i]=ex_int();
-              if (tags[i]>= curmax)
-              {
-                  curmax=tags[i]+1;
-              }
-          } 
-          else if (ex_str.check())
-          {
-              string s=ex_str();
-              map<string, int>::iterator it=tagstonames.find(s);
-              if (it!=tagstonames.end())
-              {
-                  // we have the tag already so look it up
-                  tags[i]=it->second;
-              }
-              else
-              {
-                  tagstonames[s]=curmax;
-                  tags[i]=curmax;
-                  curmax++;
-              }
-          }
-          else
-          {
-              throw FinleyAdapterException("Error - Unable to extract tag value.");
-          }
-      }
-      boost::python::object pworld=args[14];
-      esysUtils::JMPI info;
-      if (!pworld.is_none())
-      {
-          extract<SubWorld_ptr> ex(pworld);
-	  if (!ex.check())
-	  {
-	      throw FinleyAdapterException("Invalid escriptWorld parameter.");
-          }
-          info=ex()->getMPI();
-      }
-      else
-      {
-          info=esysUtils::makeInfo(MPI_COMM_WORLD);
-      }
+Domain_ptr rectangle_driver(const boost::python::list& args)
+{
+    using boost::python::extract;
+
+    // we need to convert lists to stl vectors
+    boost::python::list pypoints=extract<boost::python::list>(args[12]);
+    boost::python::list pytags=extract<boost::python::list>(args[13]);
+    int numpts=extract<int>(pypoints.attr("__len__")());
+    int numtags=extract<int>(pytags.attr("__len__")());
+    vector<double> points;
+    vector<int> tags;
+    tags.resize(numtags, -1);
+    for (int i=0;i<numpts;++i) {
+        boost::python::object temp=pypoints[i];
+        int l=extract<int>(temp.attr("__len__")());
+        for (int k=0;k<l;++k) {
+            points.push_back(extract<double>(temp[k]));           
+        }
+    }
+    map<string, int> tagstonames;
+    int curmax=40;
+    // but which order to assign tags to names?????
+    for (int i=0;i<numtags;++i) {
+        extract<int> ex_int(pytags[i]);
+        extract<string> ex_str(pytags[i]);
+        if (ex_int.check()) {
+            tags[i]=ex_int();
+            if (tags[i]>= curmax) {
+                curmax=tags[i]+1;
+            }
+        } else if (ex_str.check()) {
+            string s=ex_str();
+            map<string, int>::iterator it=tagstonames.find(s);
+            if (it!=tagstonames.end()) {
+                // we have the tag already so look it up
+                tags[i]=it->second;
+            } else {
+                tagstonames[s]=curmax;
+                tags[i]=curmax;
+                curmax++;
+            }
+        } else {
+            throw FinleyAdapterException("Error - Unable to extract tag value.");
+        }
+    }
+    boost::python::object pworld=args[14];
+    esysUtils::JMPI info;
+    if (!pworld.is_none()) {
+        extract<SubWorld_ptr> ex(pworld);
+        if (!ex.check()) {
+            throw FinleyAdapterException("Invalid escriptWorld parameter.");
+        }
+        info=ex()->getMPI();
+    } else {
+        info=esysUtils::makeInfo(MPI_COMM_WORLD);
+    }
 
-      return rectangle(info, static_cast<int>(extract<float>(args[0])),
-                       static_cast<int>(extract<float>(args[1])),
+    return rectangle(info, static_cast<dim_t>(extract<float>(args[0])),
+                       static_cast<dim_t>(extract<float>(args[1])),
                        extract<int>(args[2]), extract<double>(args[3]),
                        extract<double>(args[4]), extract<int>(args[5]),
                        extract<int>(args[6]), extract<int>(args[7]),
                        extract<int>(args[8]), extract<int>(args[9]),
                        extract<int>(args[10]), extract<int>(args[11]), 
-                       points, tags, tagstonames
-		       );
-  }  
-
+                       points, tags, tagstonames);
+}  
 
-  Domain_ptr glueFaces(const boost::python::list& meshList,
-                       double safety_factor, 
-                       double tolerance,
-                       bool optimize)
-  {
-    Mesh* fMesh=0;
-    //
+Domain_ptr glueFaces(const boost::python::list& meshList, double safety_factor,
+                     double tolerance, bool optimize)
+{
     // merge the meshes:
     Domain_ptr merged_meshes=meshMerge(meshList);
 
-    //
     // glue the faces:
     const MeshAdapter* merged_finley_meshes=dynamic_cast<const MeshAdapter*>(merged_meshes.get());
-    fMesh=merged_finley_meshes->getFinley_Mesh();
+    Mesh* fMesh = merged_finley_meshes->getFinley_Mesh();
     fMesh->glueFaces(safety_factor, tolerance, optimize);
 
-    //
     // Convert any finley errors into a C++ exception
     checkFinleyError();
     return merged_meshes;
-  }
+}
 
-  Domain_ptr joinFaces(const boost::python::list& meshList,
-                       double safety_factor, 
-                       double tolerance,
-                       bool optimize)
-  {
-    Mesh* fMesh=0;
-    //
+Domain_ptr joinFaces(const boost::python::list& meshList, double safety_factor,
+                     double tolerance, bool optimize)
+{
     // merge the meshes:
     Domain_ptr merged_meshes=meshMerge(meshList);
-    //
+
     // join the faces:
     const MeshAdapter* merged_finley_meshes=static_cast<const MeshAdapter*>(merged_meshes.get());
-    fMesh=merged_finley_meshes->getFinley_Mesh();
+    Mesh* fMesh=merged_finley_meshes->getFinley_Mesh();
     fMesh->joinFaces(safety_factor, tolerance, optimize);
-    //
+
     // Convert any finley errors into a C++ exception
     checkFinleyError();
     return merged_meshes;
-  }
-
-  // end of namespace
-
 }
 
+} // end of namespace
+
diff --git a/finley/src/CPPAdapter/MeshAdapterFactory.h b/finley/src/CPPAdapter/MeshAdapterFactory.h
index 4a4e9ce..31be8bb 100644
--- a/finley/src/CPPAdapter/MeshAdapterFactory.h
+++ b/finley/src/CPPAdapter/MeshAdapterFactory.h
@@ -15,24 +15,26 @@
 *****************************************************************************/
 
 
-#if !defined  finley_MeshAdapterFactory_20040526_H
-#define finley_MeshAdapterFactory_20040526_H
+#ifndef __FINLEY_MESHADAPTERFACTORY_H__
+#define __FINLEY_MESHADAPTERFACTORY_H__
+
 #include "system_dep.h"
 
-#include "finley/Finley.h"
-#include "finley/Mesh.h"
-#include "finley/RectangularMesh.h"
+#include <finley/Finley.h>
+#include <finley/Mesh.h>
+#include <finley/RectangularMesh.h>
 
 #include "MeshAdapter.h"
 
-#include "escript/AbstractContinuousDomain.h"
-#include "escript/SubWorld.h"
+#include <escript/AbstractContinuousDomain.h>
+#include <escript/SubWorld.h>
 
 #include <boost/python/list.hpp>
 
 #include <sstream>
 
 namespace finley {
+
   /**
      \brief
      A suite of factory methods for creating various MeshAdapters.
@@ -41,108 +43,102 @@ namespace finley {
      A suite of factory methods for creating various MeshAdapters.
   */
  
-  /**
-     \brief
-     recovers mesg from a dump file
-     \param fileName Input -  The name of the file.
-  */
-  FINLEY_DLL_API
-/*  escript::AbstractContinuousDomain* loadMesh(const std::string& fileName);*/
-  escript::Domain_ptr loadMesh(const std::string& fileName);
-  /**
-     \brief
-     Read a mesh from a file. For MPI parallel runs fan out the mesh to multiple processes.
-     \param fileName Input -  The name of the file.
-     \param integrationOrder Input - order of the quadrature scheme.  
-     If integrationOrder<0 the integration order is selected independently.
-     \param reducedIntegrationOrder Input - order of the reduced quadrature scheme.  
-     If reducedIntegrationOrder<0 the integration order is selected independently.
-     \param optimize Input - switches on the optimization of node labels 
-     
-     \warning These defaults are also encoded in readMesh_driver. Please ensure any changes are consistant
-  */
-  FINLEY_DLL_API
-   escript::Domain_ptr readMesh(const std::string& fileName,
-                                     int integrationOrder=-1,
-                                     int reducedIntegrationOrder=-1,
-                                     bool optimize=false,
-				   const std::vector<double>& points=std::vector<double>(),
-				   const std::vector<int>& tags=std::vector<int>()
- 			      );
-   
-   /**
-   \brief Python driver for readMesh()
-   \param args see readMesh() definition for order of params
-   */
-   FINLEY_DLL_API
-   escript::Domain_ptr readMesh_driver(const boost::python::list& args);
+/**
+    \brief
+    recovers mesg from a dump file
+    \param fileName Input -  The name of the file.
+*/
+FINLEY_DLL_API
+escript::Domain_ptr loadMesh(const std::string& fileName);
+
+/**
+    \brief
+    Read a mesh from a file. For MPI parallel runs fan out the mesh to multiple processes.
+    \param fileName Input -  The name of the file.
+    \param integrationOrder Input - order of the quadrature scheme.  
+    If integrationOrder<0 the integration order is selected independently.
+    \param reducedIntegrationOrder Input - order of the reduced quadrature scheme.  
+    If reducedIntegrationOrder<0 the integration order is selected independently.
+    \param optimize Input - switches on the optimization of node labels 
+    
+    \warning These defaults are also encoded in readMesh_driver. Please ensure any changes are consistant
+*/
+FINLEY_DLL_API
+escript::Domain_ptr readMesh(const std::string& fileName,
+                             int integrationOrder=-1,
+                             int reducedIntegrationOrder=-1,
+                             bool optimize=false,
+                             const std::vector<double>& points=std::vector<double>(),
+                             const std::vector<int>& tags=std::vector<int>());
+
+/**
+    \brief Python driver for readMesh()
+    \param args see readMesh() definition for order of params
+*/
+FINLEY_DLL_API
+escript::Domain_ptr readMesh_driver(const boost::python::list& args);
    
-  /**
-     \brief
-     Read a gmsh mesh file
-     \param fileName Input -  The name of the file.
-     \param numDim Input -  spatial dimension
-     \param integrationOrder Input - order of the quadrature scheme.  
-     If integrationOrder<0 the integration order is selected independently.
-     \param reducedIntegrationOrder Input - order of the reduced quadrature scheme.  
-     If reducedIntegrationOrder<0 the integration order is selected independently.
-     \param optimize Input - switches on the optimization of node labels 
-     \param useMacroElements
-  */
-  FINLEY_DLL_API
-  escript::Domain_ptr readGmsh(const std::string& fileName,
-                               int numDim, 
-                               int integrationOrder=-1,
-                               int reducedIntegrationOrder=-1, 
-                               bool optimize=false,
-                               bool useMacroElements=false,
-			      const std::vector<double>& points=std::vector<double>(),
-			      const std::vector<int>& tags=std::vector<int>()
-			      );
+/**
+    \brief
+    Read a gmsh mesh file
+    \param fileName Input -  The name of the file.
+    \param numDim Input -  spatial dimension
+    \param integrationOrder Input - order of the quadrature scheme.  
+    If integrationOrder<0 the integration order is selected independently.
+    \param reducedIntegrationOrder Input - order of the reduced quadrature scheme.  
+    If reducedIntegrationOrder<0 the integration order is selected independently.
+    \param optimize Input - switches on the optimization of node labels 
+    \param useMacroElements
+*/
+FINLEY_DLL_API
+escript::Domain_ptr readGmsh(const std::string& fileName, int numDim,
+                             int integrationOrder=-1,
+                             int reducedIntegrationOrder=-1,
+                             bool optimize=false, bool useMacroElements=false,
+                             const std::vector<double>& points=std::vector<double>(),
+                             const std::vector<int>& tags=std::vector<int>());
 
-   /**
-   \brief Python driver for readGMesh()
-   \param args see readGMesh() definition for order of params
-   */
-   FINLEY_DLL_API
-   escript::Domain_ptr readGmsh_driver(const boost::python::list& args); 
+/**
+    \brief Python driver for readGMesh()
+    \param args see readGMesh() definition for order of params
+*/
+FINLEY_DLL_API
+escript::Domain_ptr readGmsh_driver(const boost::python::list& args); 
   
-  /**
-     \brief
-     Creates a rectangular mesh with n0 x n1 x n2 elements over the brick 
-     [0,l0] x [0,l1] x [0,l2].
+/**
+    \brief
+    Creates a rectangular mesh with n0 x n1 x n2 elements over the brick 
+    [0,l0] x [0,l1] x [0,l2].
 
-     \param n0,n1,n2 number of elements in each dimension
-     \param order =1, =-1 or =2 gives the order of shape function
-                  (-1= macro elements of order 1)
-     \param l0,l1,l2 length of each side of brick
-     \param periodic0, periodic1, periodic2 whether or not boundary 
-            conditions of the dimension are periodic
-     \param integrationOrder order of the quadrature scheme.  
-          If integrationOrder<0 the integration order is selected independently.
-     \param reducedIntegrationOrder order of the reduced quadrature scheme.  
-          If reducedIntegrationOrder<0 the integration order is selected independently.
-     \param useElementsOnFace whether or not to use elements on face
-     \param useFullElementOrder whether to use second order elements
-     \param optimize whether to apply optimization
-     \param points
-     \param tags
-     \param tagnamestonums
-  */
+    \param n0,n1,n2 number of elements in each dimension
+    \param order =1, =-1 or =2 gives the order of shape function
+                 (-1= macro elements of order 1)
+    \param l0,l1,l2 length of each side of brick
+    \param periodic0, periodic1, periodic2 whether or not boundary 
+           conditions of the dimension are periodic
+    \param integrationOrder order of the quadrature scheme.  
+         If integrationOrder<0 the integration order is selected independently.
+    \param reducedIntegrationOrder order of the reduced quadrature scheme.  
+         If reducedIntegrationOrder<0 the integration order is selected independently.
+    \param useElementsOnFace whether or not to use elements on face
+    \param useFullElementOrder whether to use second order elements
+    \param optimize whether to apply optimization
+    \param points
+    \param tags
+    \param tagNamesToNums
+*/
 FINLEY_DLL_API
-escript::Domain_ptr brick(esysUtils::JMPI& info, int n0=1, int n1=1, int n2=1, int order=1,
-                    double l0=1.0, double l1=1.0, double l2=1.0,
-                    bool periodic0=false, bool periodic1=false,
-                    bool periodic2=false,
-                    int integrationOrder=-1,
-                    int reducedIntegrationOrder=-1,
-                    bool useElementsOnFace=false,
-                    bool useFullElementOrder=false,
-                    bool optimize=false,
-                    const std::vector<double>& points=std::vector<double>(),
-                    const std::vector<int>& tags=std::vector<int>(),
-                    const std::map<std::string, int>& tagnamestonums=std::map<std::string, int>()
-                    );
+escript::Domain_ptr brick(esysUtils::JMPI& info,
+                          dim_t n0=1, dim_t n1=1, dim_t n2=1, int order=1,
+                          double l0=1., double l1=1., double l2=1.,
+                          bool periodic0=false, bool periodic1=false,
+                          bool periodic2=false, int integrationOrder=-1,
+                          int reducedIntegrationOrder=-1,
+                          bool useElementsOnFace=false,
+                          bool useFullElementOrder=false, bool optimize=false,
+                          const std::vector<double>& points=std::vector<double>(),
+                          const std::vector<int>& tags=std::vector<int>(),
+                          const std::map<std::string, int>& tagNamesToNums=std::map<std::string, int>());
                     
    /**
    \brief Python driver for brick()
@@ -181,58 +177,57 @@ escript::Domain_ptr brick(esysUtils::JMPI& info, int n0=1, int n1=1, int n2=1, i
      \param optimize
      \param points
      \param tags
-     \param tagnamestonums
+     \param tagNamesToNums
   */
 FINLEY_DLL_API
-  escript::Domain_ptr rectangle(esysUtils::JMPI& info, int n0=1, int n1=1, int order=1,
-                                double l0=1.0, double l1=1.0,
-                                bool periodic0=false, bool periodic1=false,
-                                int integrationOrder=-1,
-                                int reducedIntegrationOrder=-1, 
-                                bool useElementsOnFace=false,
-                                bool useFullElementOrder=false,
-                                bool optimize=false,
-                                const std::vector<double>& points=std::vector<double>(),
-                                const std::vector<int>& tags=std::vector<int>(),
-                                const std::map<std::string, int>& tagnamestonums=std::map<std::string, int>()
-                        );
-  /**
-     \brief
-     Merges a list of meshes into one list.
-     \param meshList Input - The list of meshes.
-  */
-  FINLEY_DLL_API
-//   escript::AbstractContinuousDomain* meshMerge(const boost::python::list& meshList);
-  escript::Domain_ptr meshMerge(const boost::python::list& meshList);
-  /**
-     \brief
-     Detects matching faces in the mesh, removes them from the mesh 
-     and joins the elements touched by the face elements.
-     \param meshList Input - The list of meshes.
-     \param safetyFactor Input - ??
-     \param tolerance Input - ??
-     \param optimize Input - switches on the optimization of node labels 
-  */
-  FINLEY_DLL_API
-//   escript::AbstractContinuousDomain* glueFaces(const boost::python::list& meshList,
-  escript::Domain_ptr glueFaces(const boost::python::list& meshList,
-                           double safetyFactor=0.2, 
-                           double tolerance=1.e-8,
-                           bool optimize=false);
-  /**
-     \brief
-     Detects matching faces in the mesh and replaces them by joint elements.
-     \param meshList Input - The list of meshes.
-     \param safetyFactor Input - ??
-     \param tolerance Input - ??
-     \param optimize Input - switches on the optimization of node labels 
-  */
-  FINLEY_DLL_API
-//   escript::AbstractContinuousDomain* joinFaces(const boost::python::list& meshList,
-  escript::Domain_ptr joinFaces(const boost::python::list& meshList,
-                        double safetyFactor=0.2, 
-                        double tolerance=1.e-8,
-                        bool optimize=false);
+escript::Domain_ptr rectangle(esysUtils::JMPI& info,
+                              dim_t n0=1, dim_t n1=1, dim_t order=1,
+                              double l0=1.0, double l1=1.0,
+                              bool periodic0=false, bool periodic1=false,
+                              int integrationOrder=-1,
+                              int reducedIntegrationOrder=-1,
+                              bool useElementsOnFace=false,
+                              bool useFullElementOrder=false,
+                              bool optimize=false,
+                              const std::vector<double>& points=std::vector<double>(),
+                              const std::vector<int>& tags=std::vector<int>(),
+                              const std::map<std::string, int>& tagNamesToNums=std::map<std::string, int>());
+
+/**
+    \brief
+    Merges a list of meshes into one list.
+    \param meshList Input - The list of meshes.
+*/
+FINLEY_DLL_API
+escript::Domain_ptr meshMerge(const boost::python::list& meshList);
+/**
+    \brief
+    Detects matching faces in the mesh, removes them from the mesh 
+    and joins the elements touched by the face elements.
+    \param meshList Input - The list of meshes.
+    \param safetyFactor Input - ??
+    \param tolerance Input - ??
+    \param optimize Input - switches on the optimization of node labels 
+*/
+FINLEY_DLL_API
+escript::Domain_ptr glueFaces(const boost::python::list& meshList,
+                              double safetyFactor=0.2, double tolerance=1.e-8,
+                              bool optimize=false);
+
+/**
+    \brief
+    Detects matching faces in the mesh and replaces them by joint elements.
+    \param meshList Input - The list of meshes.
+    \param safetyFactor Input - ??
+    \param tolerance Input - ??
+    \param optimize Input - switches on the optimization of node labels 
+*/
+FINLEY_DLL_API
+escript::Domain_ptr joinFaces(const boost::python::list& meshList,
+                              double safetyFactor=0.2, double tolerance=1.e-8,
+                              bool optimize=false);
  
 } // end of namespace
-#endif
+
+#endif // __FINLEY_MESHADAPTERFACTORY_H__
+
diff --git a/finley/src/CPPAdapter/finleycpp.cpp b/finley/src/CPPAdapter/finleycpp.cpp
index 9374042..6de0316 100644
--- a/finley/src/CPPAdapter/finleycpp.cpp
+++ b/finley/src/CPPAdapter/finleycpp.cpp
@@ -101,16 +101,23 @@ BOOST_PYTHON_MODULE(finleycpp)
   def ("__Brick_driver",finley::brick_driver,
       (arg("params"))
 ,"Creates a rectangular mesh with n0 x n1 x n2 elements over the brick [0,l0] x [0,l1] x [0,l2]."
-"\n\n:param n0:\n:type n0:\n:param n1:\n:type n1:\n:param n2:\n:type n2:\n"
+"\n\n:param n0: number of elements in direction 0\n:type n0: ``int``\n:param n1: number of elements in direction 1\n:type n1: ``int``\n"
+":param n2: number of elements in direction 2\n:type n2: ``int``\n"
 ":param order: =1, =-1 or =2 gives the order of shape function. If -1 macro elements of order 1 are used.\n"
-":param l0: length of side 0\n:param l1:\n:param l2:\n"
+":param l0: length of side 0\n"
+":type  l0: ``float``\n"
+":param l1: length of side 1\n"
+":type  l1: ``float``\n"
+":param l2: length of side 2\n"
+":type  l2: ``float``\n"
+":param periodic0: whether or not boundary conditions are periodic in direction 0\n:type periodic0: ``bool``\n"
+":param periodic1: whether or not boundary conditions are periodic in direction 1\n:type periodic1: ``bool``\n"
+":param periodic2: whether or not boundary conditions are periodic in direction 2\n:type periodic2: ``bool``\n"
 ":param integrationOrder: order of the quadrature scheme. If integrationOrder<0 the integration order is selected independently.\n"
 ":param reducedIntegrationOrder: order of the quadrature scheme. If reducedIntegrationOrder<0 the integration order is selected independently.\n"
 ":param useElementsOnFace:  whether or not to use elements on face\n"
-":type useElementsOnFace: ``int``"
-":param periodic0:  whether or not boundary conditions are periodic\n"
-":param periodic1:\n:param periodic2:\n"
-":param useFullElementOrder:\n:param optimize:\n"
+":type useElementsOnFace: ``int``\n"
+":param useFullElementOrder: Whether or not to use Hex27 elements\n"":type useFullElementOrder: ``bool``\n"
 ":param optimize: Enable optimisation of node labels\n:type optimize: ``bool``"
 );
 
@@ -126,7 +133,7 @@ BOOST_PYTHON_MODULE(finleycpp)
 ":type useElementsOnFace: ``int``"
 ":param periodic0:  whether or not boundary conditions are periodic\n"
 ":param periodic1:\n"
-":param useFullElementOrder:\n:param optimize:\n"
+":param useFullElementOrder: Whether or not to use Rec9 elements\n"":type useFullElementOrder: ``bool``\n"
 ":param optimize: Enable optimisation of node labels\n:type optimize: ``bool``"
 );
 
diff --git a/finley/src/ElementFile.cpp b/finley/src/ElementFile.cpp
index ccc2db0..ec6baea 100644
--- a/finley/src/ElementFile.cpp
+++ b/finley/src/ElementFile.cpp
@@ -70,22 +70,22 @@ ElementFile::~ElementFile()
 }
 
 /// allocates the element table within this element file to hold NE elements.
-void ElementFile::allocTable(int NE) 
+void ElementFile::allocTable(dim_t NE) 
 {
     if (numElements>0)
         freeTable();
 
     numElements=NE;
     Owner=new int[numElements];
-    Id=new int[numElements];
-    Nodes=new int[numElements*numNodes];
+    Id=new index_t[numElements];
+    Nodes=new index_t[numElements*numNodes];
     Tag=new int[numElements];
     Color=new int[numElements];
   
     // this initialization makes sure that data are located on the right
     // processor
 #pragma omp parallel for
-    for (int e=0; e<numElements; e++) {
+    for (index_t e=0; e<numElements; e++) {
         for (int i=0; i<numNodes; i++)
             Nodes[INDEX2(i,e,numNodes)]=-1;
         Owner[e]=-1;
@@ -121,7 +121,7 @@ void ElementFile::copyTable(int offset, int nodeOffset, int idOffset,
     }
 
 #pragma omp parallel for
-    for (int n=0; n<in->numElements; n++) {
+    for (index_t n=0; n<in->numElements; n++) {
           Owner[offset+n]=in->Owner[n];
           Id[offset+n]=in->Id[n]+idOffset;
           Tag[offset+n]=in->Tag[n];
@@ -135,7 +135,7 @@ void ElementFile::gather(int* index, const ElementFile* in)
 {
     const int NN_in=in->numNodes;
 #pragma omp parallel for
-    for (int e=0; e<numElements; e++) {
+    for (index_t e=0; e<numElements; e++) {
         const int k=index[e];
         Id[e]=in->Id[k];
         Tag[e]=in->Tag[k];
@@ -154,7 +154,7 @@ void ElementFile::scatter(int* index, const ElementFile* in)
 {
     const int NN_in=in->numNodes;
 #pragma omp parallel for
-    for (int e=0; e<in->numElements; e++) {
+    for (index_t e=0; e<in->numElements; e++) {
         const int k=index[e];
         Owner[k]=in->Owner[e];
         Id[k]=in->Id[e];
@@ -192,15 +192,15 @@ void ElementFile::optimizeOrdering()
     out->allocTable(numElements);
     if (noError()) {
 #pragma omp parallel for
-        for (int e=0; e<numElements; e++) {
-            std::pair<int,int> entry(Nodes[INDEX2(0,e,NN)], e);
+        for (index_t e=0; e<numElements; e++) {
+            std::pair<index_t,index_t> entry(Nodes[INDEX2(0,e,NN)], e);
             for (int i=1; i<NN; i++)
                 entry.first=std::min(entry.first, Nodes[INDEX2(i,e,NN)]);
             item_list[e] = entry;
         }
         util::sortValueAndIndex(item_list);
 #pragma omp parallel for
-        for (int e=0; e<numElements; e++)
+        for (index_t e=0; e<numElements; e++)
             index[e]=item_list[e].second;
         out->gather(index, this);
         swapTable(out);
@@ -211,10 +211,10 @@ void ElementFile::optimizeOrdering()
 
 /// assigns new node reference numbers to the elements.
 /// If k is the old node, the new node is newNode[k-offset].
-void ElementFile::relabelNodes(const std::vector<int>& newNode, int offset)
+void ElementFile::relabelNodes(const std::vector<index_t>& newNode, index_t offset)
 {
 #pragma omp parallel for
-    for (int j=0; j<numElements; j++) {
+    for (index_t j=0; j<numElements; j++) {
         for (int i=0; i<numNodes; i++) {
             Nodes[INDEX2(i,j,numNodes)]=
                         newNode[Nodes[INDEX2(i,j,numNodes)]-offset];
@@ -240,13 +240,13 @@ void ElementFile::setTags(const int newTag, const escript::Data& mask)
 
     if (mask.actsExpanded()) {
 #pragma omp parallel for
-        for (int n=0; n<numElements; n++) {
+        for (index_t n=0; n<numElements; n++) {
             if (mask.getSampleDataRO(n)[0] > 0)
                 Tag[n]=newTag;
         }
     } else {
 #pragma omp parallel for
-        for (int n=0; n<numElements; n++) {
+        for (index_t n=0; n<numElements; n++) {
             const double *mask_array=mask.getSampleDataRO(n);
             bool check=false;
             for (int q=0; q<numQuad; q++)
@@ -259,31 +259,31 @@ void ElementFile::setTags(const int newTag, const escript::Data& mask)
 }
 
 /// Tries to reduce the number of colours used to colour the elements
-void ElementFile::createColoring(const std::vector<int>& dofMap)
+void ElementFile::createColoring(const std::vector<index_t>& dofMap)
 {
     if (numElements < 1)
         return;
 
     const int NN = numNodes;
-    const std::pair<int,int> idRange(util::getMinMaxInt(
+    const std::pair<index_t,index_t> idRange(util::getMinMaxInt(
                                             1, dofMap.size(), &dofMap[0]));
-    const int len=idRange.second-idRange.first+1;
+    const index_t len=idRange.second-idRange.first+1;
 
     // reset color vector
 #pragma omp parallel for
-    for (int e=0; e<numElements; e++)
+    for (index_t e=0; e<numElements; e++)
         Color[e]=-1;
 
-    int numUncoloredElements=numElements;
+    index_t numUncoloredElements=numElements;
     minColor=0;
     maxColor=-1;
     while (numUncoloredElements>0) {
         // initialize the mask marking nodes used by a color
-        std::vector<int> maskDOF(len, -1);
+        std::vector<index_t> maskDOF(len, -1);
         numUncoloredElements=0;
 
         // TODO: OMP
-        for (int e=0; e<numElements; e++) {
+        for (index_t e=0; e<numElements; e++) {
             if (Color[e] < 0) {
                 // find out if element e is independent from the elements
                 // already coloured:
@@ -330,7 +330,7 @@ void ElementFile::markNodes(std::vector<short>& mask, int offset, bool useLinear
         const int NN=refElement->numLinearNodes;
         const int *lin_nodes=refElement->Type->linearNodes;
 #pragma omp parallel for
-        for (int e=0; e<numElements; e++) {
+        for (index_t e=0; e<numElements; e++) {
             for (int i=0; i<NN; i++) {
                 mask[Nodes[INDEX2(lin_nodes[i],e,numNodes)]-offset]=1;
             }
@@ -338,7 +338,7 @@ void ElementFile::markNodes(std::vector<short>& mask, int offset, bool useLinear
     } else {
         const int NN=refElement->Type->numNodes;
 #pragma omp parallel for
-        for (int e=0; e<numElements; e++) {
+        for (index_t e=0; e<numElements; e++) {
             for (int i=0; i<NN; i++) {
                 mask[Nodes[INDEX2(i,e,numNodes)]-offset]=1;
             }
@@ -347,7 +347,7 @@ void ElementFile::markNodes(std::vector<short>& mask, int offset, bool useLinear
 }
 
 void ElementFile::markDOFsConnectedToRange(int* mask, int offset, int marker,
-        int firstDOF, int lastDOF, const int *dofIndex, bool useLinear) 
+        index_t firstDOF, index_t lastDOF, const index_t *dofIndex, bool useLinear) 
 {
     const_ReferenceElement_ptr refElement(referenceElementSet->
                                             borrowReferenceElement(false));
@@ -356,10 +356,10 @@ void ElementFile::markDOFsConnectedToRange(int* mask, int offset, int marker,
         const int *lin_nodes=refElement->Type->linearNodes;
         for (int color=minColor; color<=maxColor; color++) {
 #pragma omp parallel for
-            for (int e=0; e<numElements; e++) {
+            for (index_t e=0; e<numElements; e++) {
                 if (Color[e]==color) {
                     for (int i=0; i<NN; i++) {
-                        const int k=dofIndex[Nodes[INDEX2(lin_nodes[i],e,numNodes)]];
+                        const index_t k=dofIndex[Nodes[INDEX2(lin_nodes[i],e,numNodes)]];
                         if (firstDOF<=k && k<lastDOF) {
                             for (int j=0; j<NN; j++)
                                 mask[dofIndex[Nodes[INDEX2(lin_nodes[j],e,numNodes)]]-offset]=marker;
@@ -373,10 +373,10 @@ void ElementFile::markDOFsConnectedToRange(int* mask, int offset, int marker,
         const int NN=refElement->Type->numNodes;
         for (int color=minColor; color<=maxColor; color++) {
 #pragma omp parallel for
-            for (int e=0; e<numElements; e++) {
+            for (index_t e=0; e<numElements; e++) {
                 if (Color[e]==color) {
                     for (int i=0; i<NN; i++) {
-                        const int k=dofIndex[Nodes[INDEX2(i,e,numNodes)]];
+                        const index_t k=dofIndex[Nodes[INDEX2(i,e,numNodes)]];
                         if (firstDOF<=k && k<lastDOF) {
                             for (int j=0; j<NN; j++)
                                 mask[dofIndex[Nodes[INDEX2(j,e,numNodes)]]-offset]=marker;
@@ -390,7 +390,7 @@ void ElementFile::markDOFsConnectedToRange(int* mask, int offset, int marker,
 }
 
 /// redistributes the elements including overlap by rank
-void ElementFile::distributeByRankOfDOF(const std::vector<int>& mpiRankOfDOF, int* index)
+void ElementFile::distributeByRankOfDOF(const std::vector<int>& mpiRankOfDOF, index_t* index)
 {
     const int size=MPIInfo->size;
 
@@ -411,7 +411,7 @@ void ElementFile::distributeByRankOfDOF(const std::vector<int>& mpiRankOfDOF, in
         {
             std::vector<int> loc_send_count(size);
 #pragma omp for
-            for (int e=0; e<numElements; e++) {
+            for (index_t e=0; e<numElements; e++) {
                 if (Owner[e] == myRank) {
                     newOwner[e]=myRank;
                     std::vector<int> loc_proc_mask(size);
@@ -441,17 +441,17 @@ void ElementFile::distributeByRankOfDOF(const std::vector<int>& mpiRankOfDOF, in
         MPI_Alltoall(&send_count[0], 1, MPI_INT, &recv_count[0], 1, MPI_INT,
                      MPIInfo->comm);
         // get the new number of elements for this processor
-        int newNumElements=0;
+        index_t newNumElements=0;
         int numElementsInBuffer=0;
         for (int p=0; p<size; ++p) {
             newNumElements+=recv_count[p];
             numElementsInBuffer+=send_count[p];
         }
 
-        std::vector<int> Id_buffer(numElementsInBuffer);
+        std::vector<index_t> Id_buffer(numElementsInBuffer);
         std::vector<int> Tag_buffer(numElementsInBuffer);
         std::vector<int> Owner_buffer(numElementsInBuffer);
-        std::vector<int> Nodes_buffer(numElementsInBuffer*numNodes);
+        std::vector<index_t> Nodes_buffer(numElementsInBuffer*numNodes);
         std::vector<int> send_offset(size);
         std::vector<int> recv_offset(size);
         std::vector<unsigned char> proc_mask(size);
@@ -465,7 +465,7 @@ void ElementFile::distributeByRankOfDOF(const std::vector<int>& mpiRankOfDOF, in
         send_count.assign(size, 0);
         // copy element into buffers. proc_mask makes sure that an element is
         // copied once only for each processor
-        for (int e=0; e<numElements; e++) {
+        for (index_t e=0; e<numElements; e++) {
             if (Owner[e] == myRank) {
                 proc_mask.assign(size, TRUE);
                 for (int j=0; j<numNodes; j++) {
@@ -490,7 +490,7 @@ void ElementFile::distributeByRankOfDOF(const std::vector<int>& mpiRankOfDOF, in
         // start to receive new elements
         for (int p=0; p<size; ++p) {
             if (recv_count[p] > 0) {
-                MPI_Irecv(&Id[recv_offset[p]], recv_count[p], MPI_INT, p,
+                MPI_Irecv(&Id[recv_offset[p]], recv_count[p], MPI_DIM_T, p,
                         MPIInfo->msg_tag_counter+myRank, MPIInfo->comm,
                         &mpi_requests[numRequests]);
                 numRequests++;
@@ -503,7 +503,7 @@ void ElementFile::distributeByRankOfDOF(const std::vector<int>& mpiRankOfDOF, in
                         &mpi_requests[numRequests]);
                 numRequests++;
                 MPI_Irecv(&Nodes[recv_offset[p]*numNodes],
-                        recv_count[p]*numNodes, MPI_INT, p,
+                        recv_count[p]*numNodes, MPI_DIM_T, p,
                         MPIInfo->msg_tag_counter+3*size+myRank, MPIInfo->comm,
                         &mpi_requests[numRequests]);
                 numRequests++;
@@ -512,7 +512,7 @@ void ElementFile::distributeByRankOfDOF(const std::vector<int>& mpiRankOfDOF, in
         // now the buffers can be sent away
         for (int p=0; p<size; ++p) {
             if (send_count[p] > 0) {
-                MPI_Issend(&Id_buffer[send_offset[p]], send_count[p], MPI_INT,
+                MPI_Issend(&Id_buffer[send_offset[p]], send_count[p], MPI_DIM_T,
                         p, MPIInfo->msg_tag_counter+p, MPIInfo->comm,
                         &mpi_requests[numRequests]);
                 numRequests++;
@@ -525,7 +525,7 @@ void ElementFile::distributeByRankOfDOF(const std::vector<int>& mpiRankOfDOF, in
                         MPIInfo->comm, &mpi_requests[numRequests]);
                 numRequests++;
                 MPI_Issend(&Nodes_buffer[send_offset[p]*numNodes],
-                        send_count[p]*numNodes, MPI_INT, p,
+                        send_count[p]*numNodes, MPI_DIM_T, p,
                         MPIInfo->msg_tag_counter+3*size+p, MPIInfo->comm,
                         &mpi_requests[numRequests]);
                 numRequests++;
@@ -537,7 +537,7 @@ void ElementFile::distributeByRankOfDOF(const std::vector<int>& mpiRankOfDOF, in
 #endif
     } else { // single rank
 #pragma omp parallel for
-        for (int e=0; e<numElements; e++) {
+        for (index_t e=0; e<numElements; e++) {
             Owner[e]=0;
             for (int i=0; i<numNodes; i++)
                 Nodes[INDEX2(i,e,numNodes)]=index[Nodes[INDEX2(i,e,numNodes)]];
diff --git a/finley/src/ElementFile.h b/finley/src/ElementFile.h
index b8c9f43..d1928e0 100644
--- a/finley/src/ElementFile.h
+++ b/finley/src/ElementFile.h
@@ -49,7 +49,7 @@ struct ElementFile_Jacobians {
     /// (borrowed reference)
     const int* node_selection;
     /// number of elements
-    int numElements;
+    dim_t numElements;
     /// local volume
     double* volume;
     /// derivatives of shape functions in global coordinates at quadrature
@@ -64,15 +64,15 @@ public:
                 esysUtils::JMPI& mpiInfo);
     ~ElementFile();
 
-    void allocTable(int numElements);
+    void allocTable(dim_t numElements);
     void freeTable();
 
-    void distributeByRankOfDOF(const std::vector<int>& mpiRankOfDOF, int *Id);
-    void createColoring(const std::vector<int>& dofMap);
+    void distributeByRankOfDOF(const std::vector<int>& mpiRankOfDOF, index_t *Id);
+    void createColoring(const std::vector<index_t>& dofMap);
     /// reorders the elements so that they are stored close to the nodes
     void optimizeOrdering();
     /// assigns new node reference numbers to the elements
-    void relabelNodes(const std::vector<int>& newNode, int offset);
+    void relabelNodes(const std::vector<index_t>& newNode, index_t offset);
     void markNodes(std::vector<short>& mask, int offset, bool useLinear);
     void scatter(int* index, const ElementFile* in);
     void gather(int* index, const ElementFile* in);
@@ -80,14 +80,14 @@ public:
                    const ElementFile* in);
 
     void markDOFsConnectedToRange(int* mask, int offset, int marker,
-                                  int firstDOF, int lastDOF,
-                                  const int *dofIndex, bool useLinear);
+                                  index_t firstDOF, index_t lastDOF,
+                                  const index_t *dofIndex, bool useLinear);
 
     void setTags(const int newTag, const escript::Data& mask);
     ElementFile_Jacobians* borrowJacobians(const NodeFile*, bool, bool) const;
     /// returns the minimum and maximum reference number of nodes describing
     /// the elements
-    inline std::pair<int,int> getNodeRange() const;
+    inline std::pair<index_t,index_t> getNodeRange() const;
 
     /// updates the list of tags in use. This method must be called by all
     /// ranks.
@@ -102,11 +102,11 @@ public:
     /// the reference element to be used
     const_ReferenceElementSet_ptr referenceElementSet;
     /// number of elements
-    int numElements;
+    dim_t numElements;
     /// Id[i] is the id number of node i. This number is used when elements
     /// are resorted. In the entire code the term 'element id' refers to i and
     /// not to Id[i] unless explicitly stated otherwise.
-    int *Id;
+    index_t *Id;
     /// Tag[i] is the tag of element i
     int *Tag;
     /// Owner[i] contains the rank that owns element i
@@ -119,7 +119,7 @@ public:
     /// Note that in the way the nodes are ordered Nodes[INDEX(k, i, numNodes)
     /// is the k-th node of element i when referring to the linear version of
     /// the mesh.
-    int *Nodes;
+    index_t *Nodes;
     /// assigns each element a color. Elements with the same color don't share
     /// a node so they can be processed simultaneously.
     /// At any time Color must provide a valid value. In any case one can set
@@ -142,7 +142,7 @@ public:
     ElementFile_Jacobians* jacobians_reducedS_reducedQ;
 };
 
-inline std::pair<int,int> ElementFile::getNodeRange() const
+inline std::pair<index_t,index_t> ElementFile::getNodeRange() const
 {
     return util::getMinMaxInt(numNodes, numElements, Nodes);
 }
diff --git a/finley/src/IndexList.cpp b/finley/src/IndexList.cpp
index baa4736..7801a22 100644
--- a/finley/src/IndexList.cpp
+++ b/finley/src/IndexList.cpp
@@ -39,8 +39,8 @@
 namespace finley {
 
 void IndexList_insertElements(IndexList* index_list, ElementFile* elements,
-                              bool reduce_row_order, const int* row_map,
-                              bool reduce_col_order, const int* col_map)
+                              bool reduce_row_order, const index_t* row_map,
+                              bool reduce_col_order, const index_t* col_map)
 {
     // index_list is an array of linked lists. Each entry is a row (DOF) and
     // contains the indices to the non-zero columns
@@ -75,13 +75,13 @@ void IndexList_insertElements(IndexList* index_list, ElementFile* elements,
 
     for (int color=elements->minColor; color<=elements->maxColor; color++) {
 #pragma omp for
-        for (int e=0; e<elements->numElements; e++) {
+        for (index_t e=0; e<elements->numElements; e++) {
             if (elements->Color[e]==color) {
                 for (int isub=0; isub<numSub; isub++) {
                     for (int kr=0; kr<NN_row; kr++) {
-                        const int irow=row_map[elements->Nodes[INDEX2(row_node[INDEX2(kr,isub,NN_row)],e,NN)]];
+                        const index_t irow=row_map[elements->Nodes[INDEX2(row_node[INDEX2(kr,isub,NN_row)],e,NN)]];
                         for (int kc=0; kc<NN_col; kc++) {
-                            const int icol=col_map[elements->Nodes[INDEX2(col_node[INDEX2(kc,isub,NN_col)],e,NN)]];
+                            const index_t icol=col_map[elements->Nodes[INDEX2(col_node[INDEX2(kc,isub,NN_col)],e,NN)]];
                             index_list[irow].insertIndex(icol);
                         }
                     }
@@ -92,9 +92,9 @@ void IndexList_insertElements(IndexList* index_list, ElementFile* elements,
 }
 
 void IndexList_insertElementsWithRowRangeNoMainDiagonal(
-                            IndexList* index_list, int firstRow,
-                            int lastRow, ElementFile* elements,
-                            int* row_map, int* col_map)
+                            IndexList* index_list, index_t firstRow,
+                            index_t lastRow, ElementFile* elements,
+                            index_t* row_map, index_t* col_map)
 {
     if (!elements)
         return;
@@ -103,14 +103,14 @@ void IndexList_insertElementsWithRowRangeNoMainDiagonal(
     const int NN=elements->numNodes;
     for (int color=elements->minColor; color<=elements->maxColor; color++) {
 #pragma omp for
-        for (int e=0; e<elements->numElements; e++) {
+        for (index_t e=0; e<elements->numElements; e++) {
             if (elements->Color[e]==color) {
                 for (int kr=0; kr<NN; kr++) {
-                    const int irow=row_map[elements->Nodes[INDEX2(kr,e,NN)]];
+                    const index_t irow=row_map[elements->Nodes[INDEX2(kr,e,NN)]];
                     if (firstRow<=irow && irow<lastRow) {
-                        const int irow_loc=irow-firstRow;
+                        const index_t irow_loc=irow-firstRow;
                         for (int kc=0; kc<NN; kc++) {
-                            const int icol=col_map[elements->Nodes[INDEX2(kc,e,NN)]];
+                            const index_t icol=col_map[elements->Nodes[INDEX2(kc,e,NN)]];
                             if (icol != irow)
                                 index_list[irow_loc].insertIndex(icol);
                         }
diff --git a/finley/src/IndexList.h b/finley/src/IndexList.h
index 3f84429..ad6e9fb 100644
--- a/finley/src/IndexList.h
+++ b/finley/src/IndexList.h
@@ -37,12 +37,12 @@ using esysUtils::IndexList;
 class ElementFile;
 
 void IndexList_insertElements(IndexList* index_list, ElementFile* elements,
-                              bool reduce_row_order, const int* row_map,
-                              bool reduce_col_order, const int* col_map);
+                              bool reduce_row_order, const index_t* row_map,
+                              bool reduce_col_order, const index_t* col_map);
 
 void IndexList_insertElementsWithRowRangeNoMainDiagonal(
-        IndexList* index_list, int firstRow, int lastRow,
-        ElementFile* elements, int* row_map, int* col_map);
+        IndexList* index_list, index_t firstRow, index_t lastRow,
+        ElementFile* elements, index_t* row_map, index_t* col_map);
 
 } // namespace finley
 
diff --git a/finley/src/Mesh.cpp b/finley/src/Mesh.cpp
index 885f5ed..a26df2a 100644
--- a/finley/src/Mesh.cpp
+++ b/finley/src/Mesh.cpp
@@ -126,12 +126,12 @@ void Mesh::setOrders()
 }
 
 /// creates node mappings without (re-)distributing anything
-void Mesh::createMappings(const std::vector<int>& dofDistribution,
-                          const std::vector<int>& nodeDistribution)
+void Mesh::createMappings(const std::vector<index_t>& dofDistribution,
+                          const std::vector<index_t>& nodeDistribution)
 {
     std::vector<short> maskReducedNodes(Nodes->numNodes, -1);
     markNodes(maskReducedNodes, 0, true);
-    std::vector<int> indexReducedNodes = util::packMask(maskReducedNodes);
+    std::vector<index_t> indexReducedNodes = util::packMask(maskReducedNodes);
     if (noError())
         Nodes->createNodeMappings(indexReducedNodes, dofDistribution,
                                   nodeDistribution);
@@ -140,7 +140,7 @@ void Mesh::createMappings(const std::vector<int>& dofDistribution,
 /// redistributes the Nodes and Elements including overlap
 /// according to the DOF distribution. It will create an element colouring
 /// but will not create any mappings.
-void Mesh::distributeByRankOfDOF(const std::vector<int>& dof_distribution)
+void Mesh::distributeByRankOfDOF(const std::vector<index_t>& dof_distribution)
 {
     std::vector<int> mpiRankOfDOF(Nodes->numNodes);
     Nodes->assignMPIRankToDOFs(mpiRankOfDOF, dof_distribution);
@@ -163,14 +163,14 @@ void Mesh::distributeByRankOfDOF(const std::vector<int>& dof_distribution)
         resolveNodeIds();
 
     // create a local labeling of the DOFs
-    const std::pair<int,int> dof_range(Nodes->getDOFRange());
-    const int len=dof_range.second-dof_range.first+1;
+    const std::pair<index_t,index_t> dof_range(Nodes->getDOFRange());
+    const index_t len=dof_range.second-dof_range.first+1;
     // local mask for used nodes
-    std::vector<int> localDOF_mask(len, -1);
-    std::vector<int> localDOF_map(Nodes->numNodes, -1);
+    std::vector<index_t> localDOF_mask(len, -1);
+    std::vector<index_t> localDOF_map(Nodes->numNodes, -1);
 
 #pragma omp parallel for
-    for (int n=0; n<Nodes->numNodes; n++) {
+    for (index_t n=0; n<Nodes->numNodes; n++) {
 #ifdef BOUNDS_CHECK
         if ((Nodes->globalDegreesOfFreedom[n]-dof_range.first) >= len ||
                 (Nodes->globalDegreesOfFreedom[n]-dof_range.first) < 0) {
@@ -181,17 +181,17 @@ void Mesh::distributeByRankOfDOF(const std::vector<int>& dof_distribution)
         localDOF_mask[Nodes->globalDegreesOfFreedom[n]-dof_range.first]=n;
     }
 
-    int numDOFs=0;
+    index_t numDOFs=0;
     for (int n=0; n<len; n++) {
-        const int k=localDOF_mask[n];
+        const index_t k=localDOF_mask[n];
         if (k>=0) {
              localDOF_mask[n]=numDOFs;
              numDOFs++;
           }
     }
 #pragma omp parallel for
-    for (int n=0; n<Nodes->numNodes; n++) {
-        const int k=localDOF_mask[Nodes->globalDegreesOfFreedom[n]-dof_range.first];
+    for (index_t n=0; n<Nodes->numNodes; n++) {
+        const index_t k=localDOF_mask[Nodes->globalDegreesOfFreedom[n]-dof_range.first];
         localDOF_map[n]=k;
     }
     // create element coloring
@@ -210,80 +210,87 @@ void Mesh::print()
   
     // write elements
     if (Elements) {
-        printf("=== %s:\nnumber of elements=%d\ncolor range=[%d,%d]\n",
-               Elements->referenceElementSet->referenceElement->Type->Name,
-               Elements->numElements, Elements->minColor, Elements->maxColor);
+        std::cout << "=== "
+                 << Elements->referenceElementSet->referenceElement->Type->Name
+                 << ":\nnumber of elements=" << Elements->numElements
+                 << "\ncolor range=[" << Elements->minColor << ","
+                 << Elements->maxColor << "]\n";
         if (Elements->numElements > 0) {
             const int NN=Elements->referenceElementSet->referenceElement->Type->numNodes;
             const int NN2=Elements->numNodes;
-            printf("Id,Tag,Owner,Color,Nodes\n");
-            for (int i=0; i<Elements->numElements; i++) {
-                printf("%d,%d,%d,%d,", Elements->Id[i], Elements->Tag[i],
-                        Elements->Owner[i], Elements->Color[i]);
+            std::cout << "Id,Tag,Owner,Color,Nodes" << std::endl;
+            for (index_t i=0; i<Elements->numElements; i++) {
+                std::cout << Elements->Id[i] << "," << Elements->Tag[i] << ","
+                    << Elements->Owner[i] << "," << Elements->Color[i] << ",";
                 for (int j=0; j<NN; j++)
-                    printf(" %d", Nodes->Id[Elements->Nodes[INDEX2(j,i,NN2)]]);
-                printf("\n");
+                    std::cout << " " << Nodes->Id[Elements->Nodes[INDEX2(j,i,NN2)]];
+                std::cout << std::endl;
             }
         }
     }
 
     // write face elements
     if (FaceElements) {
-        printf("=== %s:\nnumber of elements=%d\ncolor range=[%d,%d]\n",
-               FaceElements->referenceElementSet->referenceElement->Type->Name,
-               FaceElements->numElements, FaceElements->minColor,
-               FaceElements->maxColor);
+        std::cout << "=== "
+                 << FaceElements->referenceElementSet->referenceElement->Type->Name
+                 << ":\nnumber of elements=" << FaceElements->numElements
+                 << "\ncolor range=[" << FaceElements->minColor << ","
+                 << FaceElements->maxColor << "]\n";
         if (FaceElements->numElements > 0) {
             const int NN=FaceElements->referenceElementSet->referenceElement->Type->numNodes;
             const int NN2=FaceElements->numNodes;
-            printf("Id,Tag,Owner,Color,Nodes\n");
-            for (int i=0; i<FaceElements->numElements; i++) {
-                printf("%d,%d,%d,%d,", FaceElements->Id[i],
-                        FaceElements->Tag[i], FaceElements->Owner[i],
-                        FaceElements->Color[i]);
+            std::cout << "Id,Tag,Owner,Color,Nodes" << std::endl;
+            for (index_t i=0; i<FaceElements->numElements; i++) {
+                std::cout << FaceElements->Id[i] << "," << FaceElements->Tag[i]
+                    << "," << FaceElements->Owner[i] << ","
+                    << FaceElements->Color[i] << ",";
                 for (int j=0; j<NN; j++)
-                    printf(" %d", Nodes->Id[FaceElements->Nodes[INDEX2(j,i,NN2)]]);
-                printf("\n");
+                    std::cout << " " << Nodes->Id[FaceElements->Nodes[INDEX2(j,i,NN2)]];
+                std::cout << std::endl;
             }
         }
     }
 
     // write Contact elements
     if (ContactElements) {
-        printf("=== %s:\nnumber of elements=%d\ncolor range=[%d,%d]\n",
-               ContactElements->referenceElementSet->referenceElement->Type->Name,
-               ContactElements->numElements, ContactElements->minColor,
-               ContactElements->maxColor);
+        std::cout << "=== "
+                 << ContactElements->referenceElementSet->referenceElement->Type->Name
+                 << ":\nnumber of elements=" << ContactElements->numElements
+                 << "\ncolor range=[" << ContactElements->minColor << ","
+                 << ContactElements->maxColor << "]\n";
         if (ContactElements->numElements > 0) {
             const int NN=ContactElements->referenceElementSet->referenceElement->Type->numNodes;
             const int NN2=ContactElements->numNodes;
-            printf("Id,Tag,Owner,Color,Nodes\n");
-            for (int i=0; i<ContactElements->numElements; i++) {
-                printf("%d,%d,%d,%d,", ContactElements->Id[i],
-                        ContactElements->Tag[i], ContactElements->Owner[i],
-                        ContactElements->Color[i]);
+            std::cout << "Id,Tag,Owner,Color,Nodes" << std::endl;
+            for (index_t i=0; i<ContactElements->numElements; i++) {
+                std::cout << ContactElements->Id[i] << ","
+                    << ContactElements->Tag[i] << ","
+                    << ContactElements->Owner[i] << ","
+                    << ContactElements->Color[i] << ",";
                 for (int j=0; j<NN; j++)
-                    printf(" %d", Nodes->Id[ContactElements->Nodes[INDEX2(j,i,NN2)]]);
-                printf("\n");
+                    std::cout << " " << Nodes->Id[ContactElements->Nodes[INDEX2(j,i,NN2)]];
+                std::cout << std::endl;
             }
         }
     }
   
     // write points
     if (Points) {
-        printf("=== %s:\nnumber of elements=%d\ncolor range=[%d,%d]\n",
-               Points->referenceElementSet->referenceElement->Type->Name,
-               Points->numElements, Points->minColor, Points->maxColor);
+        std::cout << "=== "
+                 << Points->referenceElementSet->referenceElement->Type->Name
+                 << ":\nnumber of elements=" << Points->numElements
+                 << "\ncolor range=[" << Points->minColor << ","
+                 << Points->maxColor << "]\n";
         if (Points->numElements > 0) {
             const int NN=Points->referenceElementSet->referenceElement->Type->numNodes;
             const int NN2=Points->numNodes;
-            printf("Id,Tag,Owner,Color,Nodes\n");
-            for (int i=0; i<Points->numElements; i++) {
-                printf("%d,%d,%d,%d,", Points->Id[i], Points->Tag[i],
-                        Points->Owner[i], Points->Color[i]);
+            std::cout << "Id,Tag,Owner,Color,Nodes" << std::endl;
+            for (index_t i=0; i<Points->numElements; i++) {
+                std::cout << Points->Id[i] << "," << Points->Tag[i] << ","
+                    << Points->Owner[i] << "," << Points->Color[i] << ",";
                 for (int j=0; j<NN; j++)
-                    printf(" %d", Nodes->Id[Points->Nodes[INDEX2(j,i,NN2)]]);
-                printf("\n");
+                    std::cout << " " << Nodes->Id[Points->Nodes[INDEX2(j,i,NN2)]];
+                std::cout << std::endl;
             }
         }
     }
@@ -298,9 +305,10 @@ void Mesh::markNodes(std::vector<short>& mask, int offset, bool useLinear)
 }
 
 void Mesh::markDOFsConnectedToRange(int* mask, int offset, int marker, 
-                                    int firstDOF, int lastDOF, bool useLinear)
+                                    index_t firstDOF, index_t lastDOF,
+                                    bool useLinear)
 {
-    const int *dofIndex = (useLinear ? Nodes->globalReducedDOFIndex
+    const index_t *dofIndex = (useLinear ? Nodes->globalReducedDOFIndex
                                      : Nodes->globalDegreesOfFreedom);
     Elements->markDOFsConnectedToRange(mask, offset, marker, firstDOF, lastDOF,
             dofIndex, useLinear);
@@ -313,19 +321,19 @@ void Mesh::markDOFsConnectedToRange(int* mask, int offset, int marker,
 }
 
 /// optimizes the labeling of the DOFs on each processor
-void Mesh::optimizeDOFLabeling(const std::vector<int>& distribution)
+void Mesh::optimizeDOFLabeling(const std::vector<index_t>& distribution)
 {
     const int myRank=MPIInfo->rank;
     const int mpiSize=MPIInfo->size;
-    const int myFirstVertex=distribution[myRank];
-    const int myLastVertex=distribution[myRank+1];
-    const int myNumVertices=myLastVertex-myFirstVertex;
-    int len=0;
+    const index_t myFirstVertex=distribution[myRank];
+    const index_t myLastVertex=distribution[myRank+1];
+    const dim_t myNumVertices=myLastVertex-myFirstVertex;
+    index_t len=0;
     for (int p=0; p<mpiSize; ++p)
         len=std::max(len, distribution[p+1]-distribution[p]);
 
     boost::scoped_array<IndexList> index_list(new IndexList[myNumVertices]);
-    std::vector<int> newGlobalDOFID(len);
+    std::vector<index_t> newGlobalDOFID(len);
     // create the adjacency structure xadj and adjncy
 #pragma omp parallel
     {
@@ -370,11 +378,11 @@ void Mesh::optimizeDOFLabeling(const std::vector<int>& distribution)
 #endif
         int current_rank=myRank;
         for (int p=0; p<mpiSize; ++p) {
-            const int firstVertex=distribution[current_rank];
-            const int lastVertex=distribution[current_rank+1];
+            const index_t firstVertex=distribution[current_rank];
+            const index_t lastVertex=distribution[current_rank+1];
 #pragma omp parallel for
-            for (int i=0; i<Nodes->numNodes; ++i) {
-                const int k=Nodes->globalDegreesOfFreedom[i];
+            for (index_t i=0; i<Nodes->numNodes; ++i) {
+                const index_t k=Nodes->globalDegreesOfFreedom[i];
                 if (firstVertex<=k && k<lastVertex) {
                     Nodes->globalDegreesOfFreedom[i]=newGlobalDOFID[k-firstVertex];
                 }
@@ -383,7 +391,7 @@ void Mesh::optimizeDOFLabeling(const std::vector<int>& distribution)
             if (p<mpiSize-1) { // the final send can be skipped
 #ifdef ESYS_MPI
                 MPI_Status status;
-                MPI_Sendrecv_replace(&newGlobalDOFID[0], len, MPI_INT,
+                MPI_Sendrecv_replace(&newGlobalDOFID[0], len, MPI_DIM_T,
                                      dest, MPIInfo->msg_tag_counter,
                                      source, MPIInfo->msg_tag_counter,
                                      MPIInfo->comm, &status);
@@ -402,10 +410,10 @@ void Mesh::prepare(bool optimize)
 
     // first step is to distribute the elements according to a global
     // distribution of DOF
-    std::vector<int> distribution(MPIInfo->size+1);
+    std::vector<index_t> distribution(MPIInfo->size+1);
 
     // first we create dense labeling for the DOFs
-    int newGlobalNumDOFs=Nodes->createDenseDOFLabeling();
+    index_t newGlobalNumDOFs=Nodes->createDenseDOFLabeling();
 
     // create a distribution of the global DOFs and determine the MPI rank
     // controlling the DOFs on this processor
@@ -437,9 +445,9 @@ void Mesh::prepare(bool optimize)
     // create the global indices
     if (noError()) {
         std::vector<short> maskReducedNodes(Nodes->numNodes, -1);
-        std::vector<int> nodeDistribution(MPIInfo->size+1);
+        std::vector<index_t> nodeDistribution(MPIInfo->size+1);
         markNodes(maskReducedNodes, 0, true);
-        std::vector<int> indexReducedNodes = util::packMask(maskReducedNodes);
+        std::vector<index_t> indexReducedNodes = util::packMask(maskReducedNodes);
 
         Nodes->createDenseNodeLabeling(nodeDistribution, distribution); 
         // created reduced DOF labeling
@@ -456,7 +464,7 @@ void Mesh::prepare(bool optimize)
 }
 
 /// tries to reduce the number of colours for all element files
-void Mesh::createColoring(const std::vector<int>& dofMap)
+void Mesh::createColoring(const std::vector<index_t>& dofMap)
 {
     if (noError())
         Elements->createColoring(dofMap);
@@ -492,7 +500,7 @@ void Mesh::updateTagList()
 }
 
 /// assigns new node reference numbers to all element files
-void Mesh::relabelElementNodes(const std::vector<int>& newNode, int offset)
+void Mesh::relabelElementNodes(const std::vector<index_t>& newNode, index_t offset)
 {
     Elements->relabelNodes(newNode, offset);
     FaceElements->relabelNodes(newNode, offset);
@@ -511,9 +519,9 @@ void Mesh::resolveNodeIds()
     // The function does not create a distribution of the degrees of freedom.
 
     // find the minimum and maximum id used by elements
-    int min_id=std::numeric_limits<int>::max();
-    int max_id=std::numeric_limits<int>::min();
-    std::pair<int,int> range(Elements->getNodeRange());
+    index_t min_id=std::numeric_limits<index_t>::max();
+    index_t max_id=std::numeric_limits<index_t>::min();
+    std::pair<index_t,index_t> range(Elements->getNodeRange());
     max_id=std::max(max_id,range.second);
     min_id=std::min(min_id,range.first);
     range=FaceElements->getNodeRange();
@@ -526,12 +534,12 @@ void Mesh::resolveNodeIds()
     max_id=std::max(max_id,range.second);
     min_id=std::min(min_id,range.first);
 #ifdef Finley_TRACE
-    int global_min_id, global_max_id;
+    index_t global_min_id, global_max_id;
 #ifdef ESYS_MPI
-    int id_range[2], global_id_range[2];
+    index_t id_range[2], global_id_range[2];
     id_range[0]=-min_id;
     id_range[1]=max_id;
-    MPI_Allreduce(id_range, global_id_range, 2, MPI_INT, MPI_MAX, MPIInfo->comm);
+    MPI_Allreduce(id_range, global_id_range, 2, MPI_DIM_T, MPI_MAX, MPIInfo->comm);
     global_min_id=-global_id_range[0];
     global_max_id=global_id_range[1];
 #else
@@ -549,7 +557,7 @@ void Mesh::resolveNodeIds()
     // (newLocalToGlobalNodeLabels) and global node labeling to the new local
     // node labeling (globalToNewLocalNodeLabels[i-min_id] is the new local id
     // of global node i)
-    int len=(max_id>=min_id) ? max_id-min_id+1 : 0;
+    index_t len=(max_id>=min_id) ? max_id-min_id+1 : 0;
 
     // mark the nodes referred by elements in usedMask
     std::vector<short> usedMask(len, -1);
@@ -557,16 +565,16 @@ void Mesh::resolveNodeIds()
 
     // create a local labeling newLocalToGlobalNodeLabels of the local nodes
     // by packing the mask usedMask
-    std::vector<int> newLocalToGlobalNodeLabels=util::packMask(usedMask);
-    const int newNumNodes=newLocalToGlobalNodeLabels.size();
+    std::vector<index_t> newLocalToGlobalNodeLabels=util::packMask(usedMask);
+    const dim_t newNumNodes=newLocalToGlobalNodeLabels.size();
     usedMask.clear();
 
     // invert the new labeling and shift the index newLocalToGlobalNodeLabels
     // to global node ids
-    std::vector<int> globalToNewLocalNodeLabels(len, -1);
+    std::vector<index_t> globalToNewLocalNodeLabels(len, -1);
 
 #pragma omp parallel for
-    for (int n=0; n<newNumNodes; n++) {
+    for (index_t n=0; n<newNumNodes; n++) {
 #ifdef BOUNDS_CHECK
         if (newLocalToGlobalNodeLabels[n] >= len || newLocalToGlobalNodeLabels[n] < 0) {
             printf("BOUNDS_CHECK %s %d n=%d\n", __FILE__, __LINE__, n);
diff --git a/finley/src/Mesh.h b/finley/src/Mesh.h
index 135ee4a..eaaa367 100644
--- a/finley/src/Mesh.h
+++ b/finley/src/Mesh.h
@@ -82,11 +82,11 @@ public:
     ~Mesh();
 
     static Mesh* load(esysUtils::JMPI& mpi_info, const std::string fname);
-    static Mesh* read(esysUtils::JMPI& mpi_info, const std::string fname, int order, int reducedOrder,
-                      bool optimize);
-    static Mesh* readGmsh(esysUtils::JMPI& mpi_info, const std::string fname, int numDim, int order,
-                          int reducedOrder, bool optimize,
-                          bool useMacroElements);
+    static Mesh* read(esysUtils::JMPI& mpi_info, const std::string fname,
+                      int order, int reducedOrder, bool optimize);
+    static Mesh* readGmsh(esysUtils::JMPI& mpi_info, const std::string fname,
+                          int numDim, int order, int reducedOrder,
+                          bool optimize, bool useMacroElements);
 
     void write(const std::string fname) const;
 
@@ -109,12 +109,12 @@ public:
 
     void prepare(bool optimize);
     void resolveNodeIds();
-    void createMappings(const std::vector<int>& dofDistribution,
-                        const std::vector<int>& nodeDistribution);
+    void createMappings(const std::vector<index_t>& dofDistribution,
+                        const std::vector<index_t>& nodeDistribution);
     void markDOFsConnectedToRange(int* mask, int offset, int marker,
-                                  int firstDOF, int lastDOF, bool useLinear);
+                                  index_t firstDOF, index_t lastDOF, bool useLinear);
     
-    void relabelElementNodes(const std::vector<int>&, int offset);
+    void relabelElementNodes(const std::vector<index_t>&, index_t offset);
 
     void glueFaces(double safetyFactor, double tolerance, bool);
     void joinFaces(double safetyFactor, double tolerance, bool);
@@ -123,20 +123,20 @@ public:
     void print();
 
 private:
-    void createColoring(const std::vector<int>& dofMap);
-    void distributeByRankOfDOF(const std::vector<int>& distribution);
+    void createColoring(const std::vector<index_t>& dofMap);
+    void distributeByRankOfDOF(const std::vector<index_t>& distribution);
     void markNodes(std::vector<short>& mask, int offset, bool useLinear);
-    void optimizeDOFDistribution(std::vector<int>& distribution);
-    void optimizeDOFLabeling(const std::vector<int>& distribution);
+    void optimizeDOFDistribution(std::vector<index_t>& distribution);
+    void optimizeDOFLabeling(const std::vector<index_t>& distribution);
     void optimizeElementOrdering();
     void setOrders();
     void updateTagList();
-    static Mesh* readGmshSlave(esysUtils::JMPI& mpi_info, const std::string fname, int numDim, int order,
-                          int reducedOrder, bool optimize,
-                          bool useMacroElements);
-    static Mesh* readGmshMaster(esysUtils::JMPI& mpi_info, const std::string fname, int numDim, int order,
-                          int reducedOrder, bool optimize,
-                          bool useMacroElements);
+    static Mesh* readGmshSlave(esysUtils::JMPI& mpi_info, const std::string fname,
+                               int numDim, int order, int reducedOrder,
+                               bool optimize, bool useMacroElements);
+    static Mesh* readGmshMaster(esysUtils::JMPI& mpi_info, const std::string fname,
+                                int numDim, int order, int reducedOrder,
+                                bool optimize, bool useMacroElements);
 
 public:
     // the name of the mesh
diff --git a/finley/src/Mesh_getPattern.cpp b/finley/src/Mesh_getPattern.cpp
index 1eb905d..f7d721f 100644
--- a/finley/src/Mesh_getPattern.cpp
+++ b/finley/src/Mesh_getPattern.cpp
@@ -82,7 +82,7 @@ paso::SystemMatrixPattern_ptr Mesh::makePattern(bool reduce_row_order, bool redu
 
     int myNumColTargets, myNumRowTargets;
     int numColTargets, numRowTargets;
-    const int *colTarget, *rowTarget;
+    const index_t *colTarget, *rowTarget;
 
     if (reduce_col_order) {
         myNumColTargets=Nodes->getNumReducedDegreesOfFreedom();
diff --git a/finley/src/Mesh_glueFaces.cpp b/finley/src/Mesh_glueFaces.cpp
index fa34164..aeba92b 100644
--- a/finley/src/Mesh_glueFaces.cpp
+++ b/finley/src/Mesh_glueFaces.cpp
@@ -58,13 +58,13 @@ void Mesh::glueFaces(double safety_factor, double tolerance, bool optimize)
     int* elem0=new int[FaceElements->numElements];
     std::vector<int> elem_mask(FaceElements->numElements, 0);
     int* matching_nodes_in_elem1=new int[FaceElements->numElements*NN];
-    std::vector<int> new_node_label(Nodes->numNodes);
+    std::vector<index_t> new_node_label(Nodes->numNodes);
     // find the matching face elements
     int numPairs;
     findMatchingFaces(safety_factor, tolerance, &numPairs, elem0, elem1,
                       matching_nodes_in_elem1);
     if (noError()) {
-        for (int n=0; n<Nodes->numNodes; n++)
+        for (index_t n=0; n<Nodes->numNodes; n++)
             new_node_label[n]=n;
         // mark matching face elements to be removed
         for (int e=0; e<numPairs; e++) {
@@ -76,27 +76,27 @@ void Mesh::glueFaces(double safety_factor, double tolerance, bool optimize)
             }
         }
         // create an index of face elements
-        int new_numFaceElements=0;
-        for (int e=0; e<FaceElements->numElements; e++) {
+        dim_t new_numFaceElements=0;
+        for (index_t e=0; e<FaceElements->numElements; e++) {
             if (elem_mask[e] < 1) {
                 elem_mask[new_numFaceElements]=e;
                 new_numFaceElements++;
             }
         }
         // get the new number of nodes
-        std::vector<int> new_node_mask(Nodes->numNodes, -1);
-        std::vector<int> new_node_list;
-        int newNumNodes=0;
-        for (int n=0; n<Nodes->numNodes; n++)
+        std::vector<index_t> new_node_mask(Nodes->numNodes, -1);
+        std::vector<index_t> new_node_list;
+        dim_t newNumNodes=0;
+        for (index_t n=0; n<Nodes->numNodes; n++)
             new_node_mask[new_node_label[n]]=1;
-        for (int n=0; n<Nodes->numNodes; n++) {
+        for (index_t n=0; n<Nodes->numNodes; n++) {
             if (new_node_mask[n]>0) {
                 new_node_mask[n]=newNumNodes;
                 new_node_list.push_back(n);
                 newNumNodes++;
             }
         }
-        for (int n=0; n<Nodes->numNodes; n++)
+        for (index_t n=0; n<Nodes->numNodes; n++)
             new_node_label[n]=new_node_mask[new_node_label[n]];
         // allocate new node and element files
         NodeFile *newNodeFile=new NodeFile(numDim, MPIInfo); 
diff --git a/finley/src/Mesh_hex20.cpp b/finley/src/Mesh_hex20.cpp
index 3ec58cd..35c0762 100644
--- a/finley/src/Mesh_hex20.cpp
+++ b/finley/src/Mesh_hex20.cpp
@@ -27,581 +27,581 @@
 *****************************************************************************/
 
 #define ESNEEDPYTHON
-#include "esysUtils/first.h"
-
+#include <esysUtils/first.h>
 
 #include "RectangularMesh.h"
 
 namespace finley {
 
-Mesh* RectangularMesh_Hex20(const int* numElements, const double* Length,
-                            const bool* periodic, int order, int reduced_order, 
+Mesh* RectangularMesh_Hex20(const dim_t* numElements, const double* Length,
+                            const bool* periodic, int order, int reduced_order,
                             bool useElementsOnFace, bool useFullElementOrder,
                             bool useMacroElements, bool optimize,
-			    esysUtils::JMPI& mpi_info) 
+                            esysUtils::JMPI& mpiInfo)
 {
-#define N_PER_E 2
-#define DIM 3
-  dim_t N0,N1,N2,NE0,NE1,NE2,i0,i1,i2,k,Nstride0=0, Nstride1=0, Nstride2=0, local_NE0, local_NE1, local_NE2;
-  dim_t totalNECount,faceNECount,NDOF0=0, NDOF1=0, NDOF2=0, NFaceElements=0, local_N0=0, local_N1=0, local_N2=0, NN;
-  index_t node0, myRank, e_offset0, e_offset1, e_offset2, offset0=0, offset1=0, offset2=0, global_i0, global_i1, global_i2;
-  const_ReferenceElementSet_ptr refPoints, refContactElements, refFaceElements, refElements;
-  char name[50];
-  bool generateAllNodes=(useFullElementOrder || useMacroElements);
-#ifdef Finley_TRACE
-  double time0=timer();
-#endif
-
-  /* get MPI information */
-  if (!noError()) {
-        return NULL;
-  }
-  myRank=mpi_info->rank;
-
-  /* set up the global dimensions of the mesh */
-
-  NE0=MAX(1,numElements[0]);
-  NE1=MAX(1,numElements[1]);
-  NE2=MAX(1,numElements[2]);
-  N0=N_PER_E*NE0+1;
-  N1=N_PER_E*NE1+1;
-  N2=N_PER_E*NE2+1;
-
-  /*  allocate mesh: */  
-  sprintf(name,"Brick %d x %d x %d mesh",N0,N1,N2);
-  Mesh* out=new Mesh(name, DIM, mpi_info);
-
-  if (generateAllNodes) {
-     /* setError(SYSTEM_ERROR,"full element order for Hex elements is not supported yet."); */
-     if (useMacroElements) {
-          refElements.reset(new ReferenceElementSet(Hex27Macro, order, reduced_order));
-     } else {
-          refElements.reset(new ReferenceElementSet(Hex27, order, reduced_order));
-     }
-     if (useElementsOnFace) {
-         setError(SYSTEM_ERROR, "rich elements for Hex27 elements is not supported yet.");
-     } else {
-         if (useMacroElements) { 
-             refFaceElements.reset(new ReferenceElementSet(Rec9Macro, order, reduced_order));
-         } else {
-             refFaceElements.reset(new ReferenceElementSet(Rec9, order, reduced_order));
-         }
-         refContactElements.reset(new ReferenceElementSet(Rec9_Contact, order, reduced_order));
-     }
-
-  } else  {
-     refElements.reset(new ReferenceElementSet(Hex20, order, reduced_order));
-     if (useElementsOnFace) {
-         refFaceElements.reset(new ReferenceElementSet(Hex20Face, order, reduced_order));
-         refContactElements.reset(new ReferenceElementSet(Hex20Face_Contact, order, reduced_order));
-     } else {
-         refFaceElements.reset(new ReferenceElementSet(Rec8, order, reduced_order));
-         refContactElements.reset(new ReferenceElementSet(Rec8_Contact, order, reduced_order));
-
-     }
-  }
-  refPoints.reset(new ReferenceElementSet(Point1, order, reduced_order));
-
-  if (noError()) {
-      out->setPoints(new ElementFile(refPoints, mpi_info));
-      out->setContactElements(new ElementFile(refContactElements, mpi_info));
-      out->setFaceElements(new ElementFile(refFaceElements, mpi_info));
-      out->setElements(new ElementFile(refElements, mpi_info));
-
-      /* work out the largest dimension */
-      if (N2==MAX3(N0,N1,N2)) {
-          Nstride0=1;
-          Nstride1=N0;
-          Nstride2=N0*N1;
-          local_NE0=NE0;
-          e_offset0=0;
-          local_NE1=NE1;
-          e_offset1=0;
-          mpi_info->split(NE2,&local_NE2,&e_offset2);
-      } else if (N1==MAX3(N0,N1,N2)) {
-          Nstride0=N2;
-          Nstride1=N0*N2;
-          Nstride2=1;
-          local_NE0=NE0;
-          e_offset0=0;
-          mpi_info->split(NE1,&local_NE1,&e_offset1);
-          local_NE2=NE2;
-          e_offset2=0;
-      } else {
-          Nstride0=N1*N2;
-          Nstride1=1;
-          Nstride2=N1;
-          mpi_info->split(NE0,&local_NE0,&e_offset0);
-          local_NE1=NE1;
-          e_offset1=0;
-          local_NE2=NE2;
-          e_offset2=0;
-      }
-      offset0=e_offset0*N_PER_E;
-      offset1=e_offset1*N_PER_E;
-      offset2=e_offset2*N_PER_E;
-      local_N0=local_NE0>0 ? local_NE0*N_PER_E+1 : 0;
-      local_N1=local_NE1>0 ? local_NE1*N_PER_E+1 : 0;
-      local_N2=local_NE2>0 ? local_NE2*N_PER_E+1 : 0;
-
-      /* get the number of surface elements */
-
-      NFaceElements=0;  
-      if (!periodic[2] && (local_NE2>0) ) {
-          NDOF2=N2;
-          if (offset2==0) NFaceElements+=local_NE1*local_NE0;
-          if (local_NE2+e_offset2 == NE2) NFaceElements+=local_NE1*local_NE0;
-      } else {
-          NDOF2=N2-1;
-      }
- 
-      if (!periodic[0] && (local_NE0>0) ) {
-          NDOF0=N0;
-          if (e_offset0 == 0) NFaceElements+=local_NE1*local_NE2;
-          if (local_NE0+e_offset0 == NE0) NFaceElements+=local_NE1*local_NE2;
-      } else {
-          NDOF0=N0-1;
-      }
-      if (!periodic[1] && (local_NE1>0) ) {
-          NDOF1=N1;
-          if (e_offset1 == 0) NFaceElements+=local_NE0*local_NE2;
-          if (local_NE1+e_offset1 == NE1) NFaceElements+=local_NE0*local_NE2;
-      } else {
-          NDOF1=N1-1;
-      }
-  }
-  /*  allocate tables: */
-  if (noError()) {
-      out->Nodes->allocTable(local_N0*local_N1*local_N2);
-      out->Elements->allocTable(local_NE0*local_NE1*local_NE2);
-      out->FaceElements->allocTable(NFaceElements);
-  }
-  if (noError()) {
-
-      /* create nodes */
-   
-#pragma omp parallel for private(i0,i1,i2,k,global_i0,global_i1,global_i2)
-     for (i2=0;i2<local_N2;i2++) {
-       for (i1=0;i1<local_N1;i1++) {
-         for (i0=0;i0<local_N0;i0++) {
-           k=i0+local_N0*i1+local_N0*local_N1*i2;
-           global_i0=i0+offset0;
-           global_i1=i1+offset1;
-           global_i2=i2+offset2;
-           out->Nodes->Coordinates[INDEX2(0,k,DIM)]=DBLE(global_i0)/DBLE(N0-1)*Length[0];
-           out->Nodes->Coordinates[INDEX2(1,k,DIM)]=DBLE(global_i1)/DBLE(N1-1)*Length[1];
-           out->Nodes->Coordinates[INDEX2(2,k,DIM)]=DBLE(global_i2)/DBLE(N2-1)*Length[2];
-           out->Nodes->Id[k]=Nstride0*global_i0+Nstride1*global_i1+Nstride2*global_i2;
-           out->Nodes->Tag[k]=0;
-           out->Nodes->globalDegreesOfFreedom[k]=Nstride0*(global_i0%NDOF0) 
-                                               +Nstride1*(global_i1%NDOF1) 
-                                               +Nstride2*(global_i2%NDOF2);
-         }
-       }
-     }
-     /*   set the elements: */
-     NN=out->Elements->numNodes;
-#pragma omp parallel for private(i0,i1,i2,k,node0) 
-     for (i2=0;i2<local_NE2;i2++) {
-       for (i1=0;i1<local_NE1;i1++) {
-         for (i0=0;i0<local_NE0;i0++) {
-           
-           k=i0+local_NE0*i1+local_NE0*local_NE1*i2;        
-           node0=Nstride0*N_PER_E*(i0+e_offset0)+Nstride1*N_PER_E*(i1+e_offset1)+Nstride2*N_PER_E*(i2+e_offset2);
-   
-           out->Elements->Id[k]=(i0+e_offset0)+NE0*(i1+e_offset1)+NE0*NE1*(i2+e_offset2);
-           out->Elements->Tag[k]=0;
-           out->Elements->Owner[k]=myRank;
-
-           out->Elements->Nodes[INDEX2(0,k,NN)] =node0                                 ;
-           out->Elements->Nodes[INDEX2(1,k,NN)] =node0+                      2*Nstride0;
-           out->Elements->Nodes[INDEX2(2,k,NN)] =node0+           2*Nstride1+2*Nstride0;
-           out->Elements->Nodes[INDEX2(3,k,NN)] =node0+           2*Nstride1;
-           out->Elements->Nodes[INDEX2(4,k,NN)] =node0+2*Nstride2                      ;
-           out->Elements->Nodes[INDEX2(5,k,NN)] =node0+2*Nstride2           +2*Nstride0;
-           out->Elements->Nodes[INDEX2(6,k,NN)] =node0+2*Nstride2+2*Nstride1+2*Nstride0;
-           out->Elements->Nodes[INDEX2(7,k,NN)] =node0+2*Nstride2+2*Nstride1           ;
-           out->Elements->Nodes[INDEX2(8,k,NN)] =node0+                      1*Nstride0;
-           out->Elements->Nodes[INDEX2(9,k,NN)] =node0+           1*Nstride1+2*Nstride0;
-           out->Elements->Nodes[INDEX2(10,k,NN)]=node0+           2*Nstride1+1*Nstride0;
-           out->Elements->Nodes[INDEX2(11,k,NN)]=node0+           1*Nstride1           ;
-           out->Elements->Nodes[INDEX2(12,k,NN)]=node0+1*Nstride2                      ;
-           out->Elements->Nodes[INDEX2(13,k,NN)]=node0+1*Nstride2           +2*Nstride0;
-           out->Elements->Nodes[INDEX2(14,k,NN)]=node0+1*Nstride2+2*Nstride1+2*Nstride0;
-           out->Elements->Nodes[INDEX2(15,k,NN)]=node0+1*Nstride2+2*Nstride1           ;
-           out->Elements->Nodes[INDEX2(16,k,NN)]=node0+2*Nstride2           +1*Nstride0;
-           out->Elements->Nodes[INDEX2(17,k,NN)]=node0+2*Nstride2+1*Nstride1+2*Nstride0;
-           out->Elements->Nodes[INDEX2(18,k,NN)]=node0+2*Nstride2+2*Nstride1+1*Nstride0;
-           out->Elements->Nodes[INDEX2(19,k,NN)]=node0+2*Nstride2+1*Nstride1           ;
-           if (generateAllNodes) {
-              out->Elements->Nodes[INDEX2(20,k,NN)]=node0+           1*Nstride1+1*Nstride0;
-              out->Elements->Nodes[INDEX2(21,k,NN)]=node0+1*Nstride2           +1*Nstride0;
-              out->Elements->Nodes[INDEX2(22,k,NN)]=node0+1*Nstride2+1*Nstride1+2*Nstride0;
-              out->Elements->Nodes[INDEX2(23,k,NN)]=node0+1*Nstride2+2*Nstride1+1*Nstride0;
-              out->Elements->Nodes[INDEX2(24,k,NN)]=node0+1*Nstride2+1*Nstride1           ;
-              out->Elements->Nodes[INDEX2(25,k,NN)]=node0+2*Nstride2+1*Nstride1+1*Nstride0;
-              out->Elements->Nodes[INDEX2(26,k,NN)]=node0+1*Nstride2+1*Nstride1+1*Nstride0;        
-           }
-         }
-       }
-     }
-     /* face elements */
-     NN=out->FaceElements->numNodes;
-     totalNECount=NE0*NE1*NE2;
-     faceNECount=0;
-     /*   these are the quadrilateral elements on boundary 1 (x3=0): */
-     if (!periodic[2] && (local_NE2>0)) {
-       /* **  elements on boundary 100 (x3=0): */
-       if (offset2==0) {
-#pragma omp parallel for private(i0,i1,k,node0) 
-          for (i1=0;i1<local_NE1;i1++) {
-            for (i0=0;i0<local_NE0;i0++) {
-           
-              k=i0+local_NE0*i1+faceNECount;
-              node0=Nstride0*N_PER_E*(i0+e_offset0)+Nstride1*N_PER_E*(i1+e_offset1);
-     
-              out->FaceElements->Id[k]=(i0+e_offset0)+NE0*(i1+e_offset1)+totalNECount;
-              out->FaceElements->Tag[k]=100;
-              out->FaceElements->Owner[k]=myRank;
-        
-              if  (useElementsOnFace) {
-                 out->FaceElements->Nodes[INDEX2(0,k,NN)] =node0                                 ;
-                 out->FaceElements->Nodes[INDEX2(1,k,NN)] =node0           +2*Nstride1           ;
-                 out->FaceElements->Nodes[INDEX2(2,k,NN)] =node0           +2*Nstride1+2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(3,k,NN)] =node0+           2*Nstride0           ;
-                 out->FaceElements->Nodes[INDEX2(4,k,NN)] =node0+2*Nstride2                      ;
-                 out->FaceElements->Nodes[INDEX2(5,k,NN)] =node0+2*Nstride2+2*Nstride1           ;
-                 out->FaceElements->Nodes[INDEX2(6,k,NN)] =node0+2*Nstride2+2*Nstride1+2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(7,k,NN)] =node0+2*Nstride2           +2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(8,k,NN)] =node0+           1*Nstride1           ;
-                 out->FaceElements->Nodes[INDEX2(9,k,NN)] =node0+           2*Nstride1+1*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(10,k,NN)]=node0+           1*Nstride1+2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(11,k,NN)]=node0+                      1*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(12,k,NN)]=node0+1*Nstride2                      ;
-                 out->FaceElements->Nodes[INDEX2(13,k,NN)]=node0+1*Nstride2+2*Nstride1           ;
-                 out->FaceElements->Nodes[INDEX2(14,k,NN)]=node0+1*Nstride2+2*Nstride1+2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(15,k,NN)]=node0+1*Nstride2           +2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(16,k,NN)]=node0+2*Nstride2+1*Nstride1;
-                 out->FaceElements->Nodes[INDEX2(17,k,NN)]=node0+2*Nstride2+2*Nstride1+1*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(18,k,NN)]=node0+2*Nstride2+1*Nstride1+2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(19,k,NN)]=node0+2*Nstride2           +1*Nstride0;
-              } else {
-                 out->FaceElements->Nodes[INDEX2(0,k,NN)] =node0                                 ;
-                 out->FaceElements->Nodes[INDEX2(1,k,NN)] =node0+           2*Nstride1           ;
-                 out->FaceElements->Nodes[INDEX2(2,k,NN)] =node0+           2*Nstride1+2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(3,k,NN)] =node0+                      2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(4,k,NN)] =node0+           1*Nstride1           ;
-                 out->FaceElements->Nodes[INDEX2(5,k,NN)] =node0+           2*Nstride1+1*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(6,k,NN)] =node0+           1*Nstride1+2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(7,k,NN)] =node0+                      1*Nstride0;
-                 if (generateAllNodes){
-                    out->FaceElements->Nodes[INDEX2(8,k,NN)] =node0+           1*Nstride1+1*Nstride0;
-                 }
-              }
+    const int N_PER_E = 2;
+    const int DIM = 3;
+    dim_t Nstride0=0, Nstride1=0, Nstride2=0, local_NE0, local_NE1, local_NE2;
+    index_t e_offset0, e_offset1, e_offset2;
+
+    const Esys_MPI_rank myRank = mpiInfo->rank;
+
+    // set up the global dimensions of the mesh
+    const dim_t NE0 = std::max(dim_t(1),numElements[0]);
+    const dim_t NE1 = std::max(dim_t(1),numElements[1]);
+    const dim_t NE2 = std::max(dim_t(1),numElements[2]);
+    const dim_t N0 = N_PER_E*NE0+1;
+    const dim_t N1 = N_PER_E*NE1+1;
+    const dim_t N2 = N_PER_E*NE2+1;
+
+    // allocate mesh
+    std::stringstream name;
+    name << "Brick " << N0 << " x " << N1 << " x " << N2;
+    Mesh* out = new Mesh(name.str(), DIM, mpiInfo);
+
+    const_ReferenceElementSet_ptr refPoints, refContactElements, refFaceElements, refElements;
+    bool generateAllNodes=(useFullElementOrder || useMacroElements);
+
+    if (generateAllNodes) {
+        if (useMacroElements) {
+            refElements.reset(new ReferenceElementSet(Hex27Macro, order, reduced_order));
+        } else {
+            refElements.reset(new ReferenceElementSet(Hex27, order, reduced_order));
+        }
+        if (useElementsOnFace) {
+            setError(SYSTEM_ERROR, "rich elements for Hex27 elements are not supported.");
+            delete out;
+            return NULL;
+        } else {
+            if (useMacroElements) {
+                refFaceElements.reset(new ReferenceElementSet(Rec9Macro, order, reduced_order));
+            } else {
+                refFaceElements.reset(new ReferenceElementSet(Rec9, order, reduced_order));
+            }
+            refContactElements.reset(new ReferenceElementSet(Rec9_Contact, order, reduced_order));
+        }
+    } else { // !generateAllNodes
+        refElements.reset(new ReferenceElementSet(Hex20, order, reduced_order));
+        if (useElementsOnFace) {
+            refFaceElements.reset(new ReferenceElementSet(Hex20Face, order, reduced_order));
+            refContactElements.reset(new ReferenceElementSet(Hex20Face_Contact, order, reduced_order));
+        } else {
+            refFaceElements.reset(new ReferenceElementSet(Rec8, order, reduced_order));
+            refContactElements.reset(new ReferenceElementSet(Rec8_Contact, order, reduced_order));
+        }
+    }
+    refPoints.reset(new ReferenceElementSet(Point1, order, reduced_order));
+
+    out->setPoints(new ElementFile(refPoints, mpiInfo));
+    out->setContactElements(new ElementFile(refContactElements, mpiInfo));
+    out->setFaceElements(new ElementFile(refFaceElements, mpiInfo));
+    out->setElements(new ElementFile(refElements, mpiInfo));
+
+    // work out the largest dimension
+    if (N2==MAX3(N0,N1,N2)) {
+        Nstride0 = 1;
+        Nstride1 = N0;
+        Nstride2 = N0*N1;
+        local_NE0 = NE0;
+        e_offset0 = 0;
+        local_NE1 = NE1;
+        e_offset1 = 0;
+        mpiInfo->split(NE2, &local_NE2, &e_offset2);
+    } else if (N1==MAX3(N0,N1,N2)) {
+        Nstride0 = N2;
+        Nstride1 = N0*N2;
+        Nstride2 = 1;
+        local_NE0 = NE0;
+        e_offset0 = 0;
+        mpiInfo->split(NE1, &local_NE1, &e_offset1);
+        local_NE2 = NE2;
+        e_offset2 = 0;
+    } else {
+        Nstride0 = N1*N2;
+        Nstride1 = 1;
+        Nstride2 = N1;
+        mpiInfo->split(NE0, &local_NE0, &e_offset0);
+        local_NE1 = NE1;
+        e_offset1 = 0;
+        local_NE2 = NE2;
+        e_offset2 = 0;
+    }
+    const index_t offset0 = e_offset0*N_PER_E;
+    const index_t offset1 = e_offset1*N_PER_E;
+    const index_t offset2 = e_offset2*N_PER_E;
+    const dim_t local_N0 = local_NE0>0 ? local_NE0*N_PER_E+1 : 0;
+    const dim_t local_N1 = local_NE1>0 ? local_NE1*N_PER_E+1 : 0;
+    const dim_t local_N2 = local_NE2>0 ? local_NE2*N_PER_E+1 : 0;
+    dim_t NDOF0=0, NDOF1=0, NDOF2=0;
+
+    // get the number of surface elements
+    dim_t NFaceElements = 0;
+    if (!periodic[2] && local_NE2>0) {
+        NDOF2=N2;
+        if (offset2==0) NFaceElements+=local_NE1*local_NE0;
+        if (local_NE2+e_offset2 == NE2) NFaceElements+=local_NE1*local_NE0;
+    } else {
+        NDOF2=N2-1;
+    }
+
+    if (!periodic[0] && local_NE0>0) {
+        NDOF0=N0;
+        if (e_offset0 == 0) NFaceElements+=local_NE1*local_NE2;
+        if (local_NE0+e_offset0 == NE0) NFaceElements+=local_NE1*local_NE2;
+    } else {
+        NDOF0=N0-1;
+    }
+    if (!periodic[1] && local_NE1>0) {
+        NDOF1=N1;
+        if (e_offset1 == 0) NFaceElements+=local_NE0*local_NE2;
+        if (local_NE1+e_offset1 == NE1) NFaceElements+=local_NE0*local_NE2;
+    } else {
+        NDOF1=N1-1;
+    }
+
+    // allocate tables
+    out->Nodes->allocTable(local_N0*local_N1*local_N2);
+    out->Elements->allocTable(local_NE0*local_NE1*local_NE2);
+    out->FaceElements->allocTable(NFaceElements);
+
+    // create nodes
+#pragma omp parallel for
+    for (index_t i2=0; i2<local_N2; i2++) {
+        for (index_t i1=0; i1<local_N1; i1++) {
+            for (index_t i0=0; i0<local_N0; i0++) {
+                const dim_t k = i0+local_N0*i1+local_N0*local_N1*i2;
+                const index_t global_i0 = i0+offset0;
+                const index_t global_i1 = i1+offset1;
+                const index_t global_i2 = i2+offset2;
+                out->Nodes->Coordinates[INDEX2(0,k,DIM)]=DBLE(global_i0)/DBLE(N0-1)*Length[0];
+                out->Nodes->Coordinates[INDEX2(1,k,DIM)]=DBLE(global_i1)/DBLE(N1-1)*Length[1];
+                out->Nodes->Coordinates[INDEX2(2,k,DIM)]=DBLE(global_i2)/DBLE(N2-1)*Length[2];
+                out->Nodes->Id[k]=Nstride0*global_i0+Nstride1*global_i1+Nstride2*global_i2;
+                out->Nodes->Tag[k]=0;
+                out->Nodes->globalDegreesOfFreedom[k]=Nstride0*(global_i0%NDOF0)
+                                                +Nstride1*(global_i1%NDOF1)
+                                                +Nstride2*(global_i2%NDOF2);
             }
-          }
-          faceNECount+=local_NE1*local_NE0;
-       }
-       totalNECount+=NE1*NE0;
-       /* **  elements on boundary 200 (x3=1) */
-       if (local_NE2+e_offset2 == NE2) {
-#pragma omp parallel for private(i0,i1,k,node0) 
-          for (i1=0;i1<local_NE1;i1++) {
-            for (i0=0;i0<local_NE0;i0++) {
-      
-              k=i0+local_NE0*i1+faceNECount;
-              node0=Nstride0*N_PER_E*(i0+e_offset0)+Nstride1*N_PER_E*(i1+e_offset1)+Nstride2*N_PER_E*(NE2-1);
-        
-              out->FaceElements->Id[k]=(i0+e_offset0)+NE0*(i1+e_offset1)+totalNECount;
-              out->FaceElements->Tag[k]=200;
-              out->FaceElements->Owner[k]=myRank;
-              if  (useElementsOnFace) {
-                 out->FaceElements->Nodes[INDEX2(0,k,NN)] =node0+2*Nstride2                      ;
-                 out->FaceElements->Nodes[INDEX2(1,k,NN)] =node0+2*Nstride2+           2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(2,k,NN)] =node0+2*Nstride2+2*Nstride1+2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(3,k,NN)] =node0+2*Nstride2+2*Nstride1           ;
-      
-                 out->FaceElements->Nodes[INDEX2(4,k,NN)] =node0                                 ;
-                 out->FaceElements->Nodes[INDEX2(5,k,NN)] =node0+2*Nstride0                      ;
-                 out->FaceElements->Nodes[INDEX2(6,k,NN)] =node0+           2*Nstride1+2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(7,k,NN)] =node0+           2*Nstride1;
-      
-                 out->FaceElements->Nodes[INDEX2(8,k,NN)] =node0+2*Nstride2+           1*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(9,k,NN)] =node0+2*Nstride2+1*Nstride1+2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(10,k,NN)]=node0+2*Nstride2+2*Nstride1+1*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(11,k,NN)]=node0+2*Nstride2+1*Nstride1           ;
-      
-                 out->FaceElements->Nodes[INDEX2(12,k,NN)]=node0+1*Nstride2;
-                 out->FaceElements->Nodes[INDEX2(13,k,NN)]=node0+1*Nstride2           +2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(14,k,NN)]=node0+1*Nstride2+2*Nstride1+2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(15,k,NN)]=node0+1*Nstride2+2*Nstride1           ;
-      
-                 out->FaceElements->Nodes[INDEX2(16,k,NN)]=node0+                      1*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(17,k,NN)]=node0+           1*Nstride1+2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(18,k,NN)]=node0+           2*Nstride1+1*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(19,k,NN)]=node0+           1*Nstride1           ;
-      
-              } else {
-                 out->FaceElements->Nodes[INDEX2(0,k,NN)] =node0+2*Nstride2                      ;
-                 out->FaceElements->Nodes[INDEX2(1,k,NN)] =node0+2*Nstride2           +2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(2,k,NN)] =node0+2*Nstride2+2*Nstride1+2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(3,k,NN)] =node0+2*Nstride2+2*Nstride1           ;
-                 out->FaceElements->Nodes[INDEX2(4,k,NN)] =node0+2*Nstride2           +1*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(5,k,NN)] =node0+2*Nstride2+1*Nstride1+2*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(6,k,NN)] =node0+2*Nstride2+2*Nstride1+1*Nstride0;
-                 out->FaceElements->Nodes[INDEX2(7,k,NN)] =node0+2*Nstride2+1*Nstride1           ;
-                 if (generateAllNodes){
-                 out->FaceElements->Nodes[INDEX2(8,k,NN)] =node0+2*Nstride2+1*Nstride1+1*Nstride0;
-                 }
-              }
+        }
+    }
+
+    // set the elements
+    dim_t NN = out->Elements->numNodes;
+    index_t* eNodes = out->Elements->Nodes;
+#pragma omp parallel for
+    for (index_t i2=0; i2<local_NE2; i2++) {
+        for (index_t i1=0; i1<local_NE1; i1++) {
+            for (index_t i0=0; i0<local_NE0; i0++) {
+                const dim_t k = i0+local_NE0*i1+local_NE0*local_NE1*i2;
+                const index_t node0 = Nstride0*N_PER_E*(i0+e_offset0)
+                                    + Nstride1*N_PER_E*(i1+e_offset1)
+                                    + Nstride2*N_PER_E*(i2+e_offset2);
+
+                out->Elements->Id[k] = (i0+e_offset0)
+                                     + NE0*(i1+e_offset1)
+                                     + NE0*NE1*(i2+e_offset2);
+                out->Elements->Tag[k]=0;
+                out->Elements->Owner[k]=myRank;
+
+                eNodes[INDEX2(0,k,NN)] =node0;
+                eNodes[INDEX2(1,k,NN)] =node0+                      2*Nstride0;
+                eNodes[INDEX2(2,k,NN)] =node0+           2*Nstride1+2*Nstride0;
+                eNodes[INDEX2(3,k,NN)] =node0+           2*Nstride1;
+                eNodes[INDEX2(4,k,NN)] =node0+2*Nstride2;
+                eNodes[INDEX2(5,k,NN)] =node0+2*Nstride2           +2*Nstride0;
+                eNodes[INDEX2(6,k,NN)] =node0+2*Nstride2+2*Nstride1+2*Nstride0;
+                eNodes[INDEX2(7,k,NN)] =node0+2*Nstride2+2*Nstride1;
+                eNodes[INDEX2(8,k,NN)] =node0+                      1*Nstride0;
+                eNodes[INDEX2(9,k,NN)] =node0+           1*Nstride1+2*Nstride0;
+                eNodes[INDEX2(10,k,NN)]=node0+           2*Nstride1+1*Nstride0;
+                eNodes[INDEX2(11,k,NN)]=node0+           1*Nstride1;
+                eNodes[INDEX2(12,k,NN)]=node0+1*Nstride2;
+                eNodes[INDEX2(13,k,NN)]=node0+1*Nstride2           +2*Nstride0;
+                eNodes[INDEX2(14,k,NN)]=node0+1*Nstride2+2*Nstride1+2*Nstride0;
+                eNodes[INDEX2(15,k,NN)]=node0+1*Nstride2+2*Nstride1;
+                eNodes[INDEX2(16,k,NN)]=node0+2*Nstride2           +1*Nstride0;
+                eNodes[INDEX2(17,k,NN)]=node0+2*Nstride2+1*Nstride1+2*Nstride0;
+                eNodes[INDEX2(18,k,NN)]=node0+2*Nstride2+2*Nstride1+1*Nstride0;
+                eNodes[INDEX2(19,k,NN)]=node0+2*Nstride2+1*Nstride1;
+                if (generateAllNodes) {
+                    eNodes[INDEX2(20,k,NN)]=node0+           1*Nstride1+1*Nstride0;
+                    eNodes[INDEX2(21,k,NN)]=node0+1*Nstride2           +1*Nstride0;
+                    eNodes[INDEX2(22,k,NN)]=node0+1*Nstride2+1*Nstride1+2*Nstride0;
+                    eNodes[INDEX2(23,k,NN)]=node0+1*Nstride2+2*Nstride1+1*Nstride0;
+                    eNodes[INDEX2(24,k,NN)]=node0+1*Nstride2+1*Nstride1;
+                    eNodes[INDEX2(25,k,NN)]=node0+2*Nstride2+1*Nstride1+1*Nstride0;
+                    eNodes[INDEX2(26,k,NN)]=node0+1*Nstride2+1*Nstride1+1*Nstride0;
+                }
             }
-          }
-          faceNECount+=local_NE1*local_NE0;
-       }
-       totalNECount+=NE1*NE0;
-     }
-     if (!periodic[0] && (local_NE0>0)) {
-        /* **  elements on boundary 001 (x1=0): */
-     
-        if (e_offset0 == 0) {
-#pragma omp parallel for private(i1,i2,k,node0) 
-           for (i2=0;i2<local_NE2;i2++) {
-             for (i1=0;i1<local_NE1;i1++) {
-      
-               k=i1+local_NE1*i2+faceNECount;
-               node0=Nstride1*N_PER_E*(i1+e_offset1)+Nstride2*N_PER_E*(i2+e_offset2);
-               out->FaceElements->Id[k]=(i1+e_offset1)+NE1*(i2+e_offset2)+totalNECount;
-               out->FaceElements->Tag[k]=1;
-               out->FaceElements->Owner[k]=myRank;
-      
-               if  (useElementsOnFace) {
-                  out->FaceElements->Nodes[INDEX2(0,k,NN)] =node0                                 ;
-                  out->FaceElements->Nodes[INDEX2(1,k,NN)] =node0+2*Nstride2                      ;
-                  out->FaceElements->Nodes[INDEX2(2,k,NN)] =node0+2*Nstride2+2*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(3,k,NN)] =node0+2*Nstride1                      ;
-      
-                  out->FaceElements->Nodes[INDEX2(4,k,NN)] =node0+2*Nstride0                      ;
-                  out->FaceElements->Nodes[INDEX2(5,k,NN)] =node0+2*Nstride2+2*Nstride0           ;
-                  out->FaceElements->Nodes[INDEX2(6,k,NN)] =node0+2*Nstride2+2*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(7,k,NN)] =node0+2*Nstride1+2*Nstride0           ;
-      
-                  out->FaceElements->Nodes[INDEX2(8,k,NN)] =node0+1*Nstride2                      ;
-                  out->FaceElements->Nodes[INDEX2(9,k,NN)] =node0+2*Nstride2+1*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(10,k,NN)]=node0+1*Nstride2+2*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(11,k,NN)]=node0+           1*Nstride1           ;
-      
-                  out->FaceElements->Nodes[INDEX2(12,k,NN)]=node0+                      1*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(13,k,NN)]=node0+2*Nstride2           +1*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(14,k,NN)]=node0+2*Nstride2+2*Nstride1+1*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(15,k,NN)]=node0+2*Nstride1+           1*Nstride0;
-      
-                  out->FaceElements->Nodes[INDEX2(16,k,NN)]=node0+1*Nstride2+           2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(17,k,NN)]=node0+2*Nstride2+1*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(18,k,NN)]=node0+1*Nstride2+2*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(19,k,NN)]=node0+1*Nstride1+           2*Nstride0;
-      
-               } else {
-                  out->FaceElements->Nodes[INDEX2(0,k,NN)] =node0                                 ;
-                  out->FaceElements->Nodes[INDEX2(1,k,NN)] =node0+2*Nstride2                      ;
-                  out->FaceElements->Nodes[INDEX2(2,k,NN)] =node0+2*Nstride2+2*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(3,k,NN)] =node0+           2*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(4,k,NN)] =node0+1*Nstride2                      ;
-                  out->FaceElements->Nodes[INDEX2(5,k,NN)] =node0+2*Nstride2+1*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(6,k,NN)] =node0+1*Nstride2+2*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(7,k,NN)] =node0+           1*Nstride1           ;
-                 if (generateAllNodes){
-                    out->FaceElements->Nodes[INDEX2(8,k,NN)] =node0+1*Nstride2+1*Nstride1           ;
-                 }
-               }
-             }
-           }
-           faceNECount+=local_NE1*local_NE2;
         }
+    }
+
+    // face elements
+    NN=out->FaceElements->numNodes;
+    dim_t totalNECount=NE0*NE1*NE2;
+    dim_t faceNECount = 0;
+    eNodes = out->FaceElements->Nodes;
+
+    // these are the quadrilateral elements on boundary 1 (x3=0):
+    if (!periodic[2] && local_NE2>0) {
+        // **  elements on boundary 100 (x3=0):
+        if (offset2==0) {
+#pragma omp parallel for
+            for (index_t i1=0; i1<local_NE1; i1++) {
+                for (index_t i0=0; i0<local_NE0; i0++) {
+                    const dim_t k = i0+local_NE0*i1+faceNECount;
+                    const index_t node0 = Nstride0*N_PER_E*(i0+e_offset0)
+                                        + Nstride1*N_PER_E*(i1+e_offset1);
+
+                    out->FaceElements->Id[k] = (i0+e_offset0)
+                                             + NE0*(i1+e_offset1)+totalNECount;
+                    out->FaceElements->Tag[k]=100;
+                    out->FaceElements->Owner[k]=myRank;
+
+                    if (useElementsOnFace) {
+                        eNodes[INDEX2(0,k,NN)] =node0;
+                        eNodes[INDEX2(1,k,NN)] =node0           +2*Nstride1;
+                        eNodes[INDEX2(2,k,NN)] =node0           +2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(3,k,NN)] =node0+           2*Nstride0;
+                        eNodes[INDEX2(4,k,NN)] =node0+2*Nstride2;
+                        eNodes[INDEX2(5,k,NN)] =node0+2*Nstride2+2*Nstride1;
+                        eNodes[INDEX2(6,k,NN)] =node0+2*Nstride2+2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(7,k,NN)] =node0+2*Nstride2           +2*Nstride0;
+                        eNodes[INDEX2(8,k,NN)] =node0+           1*Nstride1;
+                        eNodes[INDEX2(9,k,NN)] =node0+           2*Nstride1+1*Nstride0;
+                        eNodes[INDEX2(10,k,NN)]=node0+           1*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(11,k,NN)]=node0+                      1*Nstride0;
+                        eNodes[INDEX2(12,k,NN)]=node0+1*Nstride2;
+                        eNodes[INDEX2(13,k,NN)]=node0+1*Nstride2+2*Nstride1;
+                        eNodes[INDEX2(14,k,NN)]=node0+1*Nstride2+2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(15,k,NN)]=node0+1*Nstride2           +2*Nstride0;
+                        eNodes[INDEX2(16,k,NN)]=node0+2*Nstride2+1*Nstride1;
+                        eNodes[INDEX2(17,k,NN)]=node0+2*Nstride2+2*Nstride1+1*Nstride0;
+                        eNodes[INDEX2(18,k,NN)]=node0+2*Nstride2+1*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(19,k,NN)]=node0+2*Nstride2           +1*Nstride0;
+                    } else { // !useElementsOnFace
+                        eNodes[INDEX2(0,k,NN)] =node0;
+                        eNodes[INDEX2(1,k,NN)] =node0+           2*Nstride1;
+                        eNodes[INDEX2(2,k,NN)] =node0+           2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(3,k,NN)] =node0+                      2*Nstride0;
+                        eNodes[INDEX2(4,k,NN)] =node0+           1*Nstride1;
+                        eNodes[INDEX2(5,k,NN)] =node0+           2*Nstride1+1*Nstride0;
+                        eNodes[INDEX2(6,k,NN)] =node0+           1*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(7,k,NN)] =node0+                      1*Nstride0;
+                        if (generateAllNodes) {
+                            eNodes[INDEX2(8,k,NN)]=node0+        1*Nstride1+1*Nstride0;
+                        }
+                    }
+                }
+            }
+            faceNECount+=local_NE1*local_NE0;
+        }
+        totalNECount+=NE1*NE0;
+
+        // **  elements on boundary 200 (x3=1):
+        if (local_NE2+e_offset2 == NE2) {
+#pragma omp parallel for
+            for (index_t i1=0; i1<local_NE1; i1++) {
+                for (index_t i0=0; i0<local_NE0; i0++) {
+                    const dim_t k = i0+local_NE0*i1+faceNECount;
+                    const index_t node0 = Nstride0*N_PER_E*(i0+e_offset0)
+                                        + Nstride1*N_PER_E*(i1+e_offset1)
+                                        + Nstride2*N_PER_E*(NE2-1);
+
+                    out->FaceElements->Id[k] = (i0+e_offset0)
+                                             + NE0*(i1+e_offset1)+totalNECount;
+                    out->FaceElements->Tag[k]=200;
+                    out->FaceElements->Owner[k]=myRank;
+                    if (useElementsOnFace) {
+                        eNodes[INDEX2(0,k,NN)] =node0+2*Nstride2;
+                        eNodes[INDEX2(1,k,NN)] =node0+2*Nstride2+           2*Nstride0;
+                        eNodes[INDEX2(2,k,NN)] =node0+2*Nstride2+2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(3,k,NN)] =node0+2*Nstride2+2*Nstride1;
+
+                        eNodes[INDEX2(4,k,NN)] =node0;
+                        eNodes[INDEX2(5,k,NN)] =node0+2*Nstride0;
+                        eNodes[INDEX2(6,k,NN)] =node0+           2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(7,k,NN)] =node0+           2*Nstride1;
+
+                        eNodes[INDEX2(8,k,NN)] =node0+2*Nstride2+           1*Nstride0;
+                        eNodes[INDEX2(9,k,NN)] =node0+2*Nstride2+1*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(10,k,NN)]=node0+2*Nstride2+2*Nstride1+1*Nstride0;
+                        eNodes[INDEX2(11,k,NN)]=node0+2*Nstride2+1*Nstride1;
+
+                        eNodes[INDEX2(12,k,NN)]=node0+1*Nstride2;
+                        eNodes[INDEX2(13,k,NN)]=node0+1*Nstride2           +2*Nstride0;
+                        eNodes[INDEX2(14,k,NN)]=node0+1*Nstride2+2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(15,k,NN)]=node0+1*Nstride2+2*Nstride1;
+
+                        eNodes[INDEX2(16,k,NN)]=node0+                      1*Nstride0;
+                        eNodes[INDEX2(17,k,NN)]=node0+           1*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(18,k,NN)]=node0+           2*Nstride1+1*Nstride0;
+                        eNodes[INDEX2(19,k,NN)]=node0+           1*Nstride1;
+                    } else { // !useElementsOnFace
+                        eNodes[INDEX2(0,k,NN)] =node0+2*Nstride2;
+                        eNodes[INDEX2(1,k,NN)] =node0+2*Nstride2           +2*Nstride0;
+                        eNodes[INDEX2(2,k,NN)] =node0+2*Nstride2+2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(3,k,NN)] =node0+2*Nstride2+2*Nstride1;
+                        eNodes[INDEX2(4,k,NN)] =node0+2*Nstride2           +1*Nstride0;
+                        eNodes[INDEX2(5,k,NN)] =node0+2*Nstride2+1*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(6,k,NN)] =node0+2*Nstride2+2*Nstride1+1*Nstride0;
+                        eNodes[INDEX2(7,k,NN)] =node0+2*Nstride2+1*Nstride1;
+                        if (generateAllNodes) {
+                            eNodes[INDEX2(8,k,NN)]=node0+2*Nstride2+1*Nstride1+1*Nstride0;
+                        }
+                    }
+                }
+            }
+            faceNECount+=local_NE1*local_NE0;
+        }
+        totalNECount+=NE1*NE0;
+    } // !periodic[2] && local_NE2>0
+
+    if (!periodic[0] && local_NE0>0) {
+        // **  elements on boundary 001 (x1=0):
+        if (e_offset0 == 0) {
+#pragma omp parallel for
+            for (index_t i2=0; i2<local_NE2; i2++) {
+                for (index_t i1=0; i1<local_NE1; i1++) {
+                    const dim_t k = i1+local_NE1*i2+faceNECount;
+                    const index_t node0 = Nstride1*N_PER_E*(i1+e_offset1)
+                                        + Nstride2*N_PER_E*(i2+e_offset2);
+                    out->FaceElements->Id[k] = (i1+e_offset1)
+                                             + NE1*(i2+e_offset2)+totalNECount;
+                    out->FaceElements->Tag[k]=1;
+                    out->FaceElements->Owner[k]=myRank;
+
+                    if (useElementsOnFace) {
+                        eNodes[INDEX2(0,k,NN)] =node0;
+                        eNodes[INDEX2(1,k,NN)] =node0+2*Nstride2;
+                        eNodes[INDEX2(2,k,NN)] =node0+2*Nstride2+2*Nstride1;
+                        eNodes[INDEX2(3,k,NN)] =node0+2*Nstride1;
+
+                        eNodes[INDEX2(4,k,NN)] =node0+2*Nstride0;
+                        eNodes[INDEX2(5,k,NN)] =node0+2*Nstride2+2*Nstride0;
+                        eNodes[INDEX2(6,k,NN)] =node0+2*Nstride2+2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(7,k,NN)] =node0+2*Nstride1+2*Nstride0;
+
+                        eNodes[INDEX2(8,k,NN)] =node0+1*Nstride2;
+                        eNodes[INDEX2(9,k,NN)] =node0+2*Nstride2+1*Nstride1;
+                        eNodes[INDEX2(10,k,NN)]=node0+1*Nstride2+2*Nstride1;
+                        eNodes[INDEX2(11,k,NN)]=node0+           1*Nstride1;
+
+                        eNodes[INDEX2(12,k,NN)]=node0+                      1*Nstride0;
+                        eNodes[INDEX2(13,k,NN)]=node0+2*Nstride2           +1*Nstride0;
+                        eNodes[INDEX2(14,k,NN)]=node0+2*Nstride2+2*Nstride1+1*Nstride0;
+                        eNodes[INDEX2(15,k,NN)]=node0+2*Nstride1+           1*Nstride0;
+
+                        eNodes[INDEX2(16,k,NN)]=node0+1*Nstride2+           2*Nstride0;
+                        eNodes[INDEX2(17,k,NN)]=node0+2*Nstride2+1*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(18,k,NN)]=node0+1*Nstride2+2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(19,k,NN)]=node0+1*Nstride1+           2*Nstride0;
+                    } else { // !useElementsOnFace
+                        eNodes[INDEX2(0,k,NN)] =node0;
+                        eNodes[INDEX2(1,k,NN)] =node0+2*Nstride2;
+                        eNodes[INDEX2(2,k,NN)] =node0+2*Nstride2+2*Nstride1;
+                        eNodes[INDEX2(3,k,NN)] =node0+           2*Nstride1;
+                        eNodes[INDEX2(4,k,NN)] =node0+1*Nstride2;
+                        eNodes[INDEX2(5,k,NN)] =node0+2*Nstride2+1*Nstride1;
+                        eNodes[INDEX2(6,k,NN)] =node0+1*Nstride2+2*Nstride1;
+                        eNodes[INDEX2(7,k,NN)] =node0+           1*Nstride1;
+                        if (generateAllNodes) {
+                            eNodes[INDEX2(8,k,NN)] =node0+1*Nstride2+1*Nstride1;
+                        }
+                    }
+                }
+            }
+            faceNECount+=local_NE1*local_NE2;
+        } // e_offset0 == 0
         totalNECount+=NE1*NE2;
-     
-        /* **  elements on boundary 002 (x1=1): */
+
+        // **  elements on boundary 002 (x1=1):
         if (local_NE0+e_offset0 == NE0) {
-#pragma omp parallel for private(i1,i2,k,node0) 
-           for (i2=0;i2<local_NE2;i2++) {
-             for (i1=0;i1<local_NE1;i1++) {
-               k=i1+local_NE1*i2+faceNECount;
-               node0=Nstride0*N_PER_E*(NE0-1)+Nstride1*N_PER_E*(i1+e_offset1)+Nstride2*N_PER_E*(i2+e_offset2);
-               out->FaceElements->Id[k]=(i1+e_offset1)+NE1*(i2+e_offset2)+totalNECount;
-               out->FaceElements->Tag[k]=2;
-               out->FaceElements->Owner[k]=myRank;
-   
-               if  (useElementsOnFace) {
-                  out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+                      2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+           2*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+2*Nstride2+2*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+2*Nstride2+           2*Nstride0;
-      
-                  out->FaceElements->Nodes[INDEX2(4,k,NN)]=node0                                 ;
-                  out->FaceElements->Nodes[INDEX2(5,k,NN)]=node0+           2*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(6,k,NN)]=node0+2*Nstride2+2*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(7,k,NN)]=node0+2*Nstride2                      ;
-      
-                  out->FaceElements->Nodes[INDEX2(8,k,NN)]=node0+           1*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(9,k,NN)]=node0+1*Nstride2+2*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(10,k,NN)]=node0+2*Nstride2+1*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(11,k,NN)]=node0+1*Nstride2+           2*Nstride0;
-      
-                  out->FaceElements->Nodes[INDEX2(12,k,NN)]=node0+                      1*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(13,k,NN)]=node0+           2*Nstride1+1*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(14,k,NN)]=node0+2*Nstride2+2*Nstride1+1*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(15,k,NN)]=node0+2*Nstride2+           1*Nstride0;
-      
-                  out->FaceElements->Nodes[INDEX2(16,k,NN)]=node0+           1*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(17,k,NN)]=node0+1*Nstride2+2*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(18,k,NN)]=node0+2*Nstride2+1*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(19,k,NN)]=node0+1*Nstride2                      ;
-      
-               } else {
-                  out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0                      +2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+           2*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+2*Nstride2+2*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+2*Nstride2+           2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(4,k,NN)]=node0+           1*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(5,k,NN)]=node0+1*Nstride2+2*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(6,k,NN)]=node0+2*Nstride2+1*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(7,k,NN)]=node0+1*Nstride2           +2*Nstride0;
-                 if (generateAllNodes){
-                    out->FaceElements->Nodes[INDEX2(8,k,NN)] =node0+1*Nstride2+1*Nstride1+2*Nstride0;
-                 }
-               }
-         
-             }
-           }
-           faceNECount+=local_NE1*local_NE2;
-         }
-         totalNECount+=NE1*NE2;
-     }
-     if (!periodic[1] && (local_NE1>0)) {
-        /* **  elements on boundary 010 (x2=0): */
-        if (e_offset1 == 0) {
-#pragma omp parallel for private(i0,i2,k,node0) 
-           for (i2=0;i2<local_NE2;i2++) {
-             for (i0=0;i0<local_NE0;i0++) {
-               k=i0+local_NE0*i2+faceNECount;
-               node0=Nstride0*N_PER_E*(i0+e_offset0)+Nstride2*N_PER_E*(i2+e_offset2);
-         
-               out->FaceElements->Id[k]=(i2+e_offset2)+NE2*(e_offset0+i0)+totalNECount;
-               out->FaceElements->Tag[k]=10;
-               out->FaceElements->Owner[k]=myRank;
-               if  (useElementsOnFace) {
-                  out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0                                 ;
-                  out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+                      2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+2*Nstride2           +2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+2*Nstride2                      ;
-      
-                  out->FaceElements->Nodes[INDEX2(4,k,NN)]=node0+           2*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(5,k,NN)]=node0+2*Nstride1+           2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(6,k,NN)]=node0+2*Nstride2+2*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(7,k,NN)]=node0+2*Nstride2+2*Nstride1           ;
-      
-                  out->FaceElements->Nodes[INDEX2(8,k,NN)]=node0+                      1*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(9,k,NN)]=node0+1*Nstride2+           2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(10,k,NN)]=node0+2*Nstride2+           1*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(11,k,NN)]=node0+1*Nstride2                      ;
-      
-                  out->FaceElements->Nodes[INDEX2(12,k,NN)]=node0+           1*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(13,k,NN)]=node0+           1*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(14,k,NN)]=node0+2*Nstride2+1*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(15,k,NN)]=node0+2*Nstride2+1*Nstride1           ;
-   
-                  out->FaceElements->Nodes[INDEX2(16,k,NN)]=node0+           2*Nstride1+1*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(17,k,NN)]=node0+1*Nstride2+2*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(18,k,NN)]=node0+2*Nstride2+2*Nstride1+1*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(19,k,NN)]=node0+1*Nstride2+2*Nstride1           ;
-      
-               } else {
-                  out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0                                 ;
-                  out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+                      2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+2*Nstride2+           2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+2*Nstride2                      ;
-                  out->FaceElements->Nodes[INDEX2(4,k,NN)]=node0+                      1*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(5,k,NN)]=node0+1*Nstride2+           2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(6,k,NN)]=node0+2*Nstride2+           1*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(7,k,NN)]=node0+1*Nstride2                      ;
-                 if (generateAllNodes){
-                    out->FaceElements->Nodes[INDEX2(8,k,NN)] =node0+1*Nstride2+         1*Nstride0;
-                 }
-               }
-             }
-           }
-           faceNECount+=local_NE0*local_NE2;
+#pragma omp parallel for
+            for (index_t i2=0; i2<local_NE2; i2++) {
+                for (index_t i1=0; i1<local_NE1; i1++) {
+                    const dim_t k = i1+local_NE1*i2+faceNECount;
+                    const index_t node0 = Nstride0*N_PER_E*(NE0-1)
+                                        + Nstride1*N_PER_E*(i1+e_offset1)
+                                        + Nstride2*N_PER_E*(i2+e_offset2);
+                    out->FaceElements->Id[k] = (i1+e_offset1)
+                                             + NE1*(i2+e_offset2)+totalNECount;
+                    out->FaceElements->Tag[k]=2;
+                    out->FaceElements->Owner[k]=myRank;
+
+                    if (useElementsOnFace) {
+                        eNodes[INDEX2(0,k,NN)]=node0+                      2*Nstride0;
+                        eNodes[INDEX2(1,k,NN)]=node0+           2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(2,k,NN)]=node0+2*Nstride2+2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(3,k,NN)]=node0+2*Nstride2+           2*Nstride0;
+
+                        eNodes[INDEX2(4,k,NN)]=node0;
+                        eNodes[INDEX2(5,k,NN)]=node0+           2*Nstride1;
+                        eNodes[INDEX2(6,k,NN)]=node0+2*Nstride2+2*Nstride1;
+                        eNodes[INDEX2(7,k,NN)]=node0+2*Nstride2;
+
+                        eNodes[INDEX2(8,k,NN)]=node0+           1*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(9,k,NN)]=node0+1*Nstride2+2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(10,k,NN)]=node0+2*Nstride2+1*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(11,k,NN)]=node0+1*Nstride2+           2*Nstride0;
+
+                        eNodes[INDEX2(12,k,NN)]=node0+                      1*Nstride0;
+                        eNodes[INDEX2(13,k,NN)]=node0+           2*Nstride1+1*Nstride0;
+                        eNodes[INDEX2(14,k,NN)]=node0+2*Nstride2+2*Nstride1+1*Nstride0;
+                        eNodes[INDEX2(15,k,NN)]=node0+2*Nstride2+           1*Nstride0;
+
+                        eNodes[INDEX2(16,k,NN)]=node0+           1*Nstride1;
+                        eNodes[INDEX2(17,k,NN)]=node0+1*Nstride2+2*Nstride1;
+                        eNodes[INDEX2(18,k,NN)]=node0+2*Nstride2+1*Nstride1;
+                        eNodes[INDEX2(19,k,NN)]=node0+1*Nstride2;
+                    } else { // !useElementsOnFace
+                        eNodes[INDEX2(0,k,NN)]=node0                      +2*Nstride0;
+                        eNodes[INDEX2(1,k,NN)]=node0+           2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(2,k,NN)]=node0+2*Nstride2+2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(3,k,NN)]=node0+2*Nstride2+           2*Nstride0;
+                        eNodes[INDEX2(4,k,NN)]=node0+           1*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(5,k,NN)]=node0+1*Nstride2+2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(6,k,NN)]=node0+2*Nstride2+1*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(7,k,NN)]=node0+1*Nstride2           +2*Nstride0;
+                        if (generateAllNodes) {
+                            eNodes[INDEX2(8,k,NN)]=node0+1*Nstride2+1*Nstride1+2*Nstride0;
+                        }
+                    }
+                }
+            }
+            faceNECount+=local_NE1*local_NE2;
         }
+        totalNECount+=NE1*NE2;
+    } // !periodic[0] && local_NE0>0
+
+    if (!periodic[1] && local_NE1>0) {
+        // **  elements on boundary 010 (x2=0):
+        if (e_offset1 == 0) {
+#pragma omp parallel for
+            for (index_t i2=0; i2<local_NE2; i2++) {
+                for (index_t i0=0; i0<local_NE0; i0++) {
+                    const dim_t k = i0+local_NE0*i2+faceNECount;
+                    const index_t node0 = Nstride0*N_PER_E*(i0+e_offset0)
+                                        + Nstride2*N_PER_E*(i2+e_offset2);
+
+                    out->FaceElements->Id[k] = (i2+e_offset2)
+                                             + NE2*(e_offset0+i0)+totalNECount;
+                    out->FaceElements->Tag[k]=10;
+                    out->FaceElements->Owner[k]=myRank;
+                    if (useElementsOnFace) {
+                        eNodes[INDEX2(0,k,NN)]=node0;
+                        eNodes[INDEX2(1,k,NN)]=node0+                      2*Nstride0;
+                        eNodes[INDEX2(2,k,NN)]=node0+2*Nstride2           +2*Nstride0;
+                        eNodes[INDEX2(3,k,NN)]=node0+2*Nstride2;
+
+                        eNodes[INDEX2(4,k,NN)]=node0+           2*Nstride1;
+                        eNodes[INDEX2(5,k,NN)]=node0+2*Nstride1+           2*Nstride0;
+                        eNodes[INDEX2(6,k,NN)]=node0+2*Nstride2+2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(7,k,NN)]=node0+2*Nstride2+2*Nstride1;
+
+                        eNodes[INDEX2(8,k,NN)]=node0+                      1*Nstride0;
+                        eNodes[INDEX2(9,k,NN)]=node0+1*Nstride2+           2*Nstride0;
+                        eNodes[INDEX2(10,k,NN)]=node0+2*Nstride2+           1*Nstride0;
+                        eNodes[INDEX2(11,k,NN)]=node0+1*Nstride2;
+
+                        eNodes[INDEX2(12,k,NN)]=node0+           1*Nstride1;
+                        eNodes[INDEX2(13,k,NN)]=node0+           1*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(14,k,NN)]=node0+2*Nstride2+1*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(15,k,NN)]=node0+2*Nstride2+1*Nstride1;
+
+                        eNodes[INDEX2(16,k,NN)]=node0+           2*Nstride1+1*Nstride0;
+                        eNodes[INDEX2(17,k,NN)]=node0+1*Nstride2+2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(18,k,NN)]=node0+2*Nstride2+2*Nstride1+1*Nstride0;
+                        eNodes[INDEX2(19,k,NN)]=node0+1*Nstride2+2*Nstride1;
+                    } else { // !useElementsOnFace
+                        eNodes[INDEX2(0,k,NN)]=node0;
+                        eNodes[INDEX2(1,k,NN)]=node0+                      2*Nstride0;
+                        eNodes[INDEX2(2,k,NN)]=node0+2*Nstride2+           2*Nstride0;
+                        eNodes[INDEX2(3,k,NN)]=node0+2*Nstride2;
+                        eNodes[INDEX2(4,k,NN)]=node0+                      1*Nstride0;
+                        eNodes[INDEX2(5,k,NN)]=node0+1*Nstride2+           2*Nstride0;
+                        eNodes[INDEX2(6,k,NN)]=node0+2*Nstride2+           1*Nstride0;
+                        eNodes[INDEX2(7,k,NN)]=node0+1*Nstride2;
+                        if (generateAllNodes) {
+                            eNodes[INDEX2(8,k,NN)]=node0+1*Nstride2+         1*Nstride0;
+                        }
+                    }
+                }
+            }
+            faceNECount+=local_NE0*local_NE2;
+        } // e_offset1==0
         totalNECount+=NE0*NE2;
-        /* **  elements on boundary 020 (x2=1): */
+
+        // **  elements on boundary 020 (x2=1):
         if (local_NE1+e_offset1 == NE1) {
-#pragma omp parallel for private(i0,i2,k,node0) 
-           for (i2=0;i2<local_NE2;i2++) {
-             for (i0=0;i0<local_NE0;i0++) {
-               k=i0+local_NE0*i2+faceNECount;
-               node0=Nstride0*N_PER_E*(i0+e_offset0)+Nstride1*N_PER_E*(NE1-1)+Nstride2*N_PER_E*(i2+e_offset2);
-   
-               out->FaceElements->Id[k]=(i2+e_offset2)+NE2*(i0+e_offset0)+totalNECount;
-               out->FaceElements->Tag[k]=20;
-               out->FaceElements->Owner[k]=myRank;
-      
-               if  (useElementsOnFace) {
-                  out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+           2*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+2*Nstride2+2*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+2*Nstride2+2*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+2*Nstride1+2*Nstride0           ;
-      
-                  out->FaceElements->Nodes[INDEX2(4,k,NN)]=node0                                 ;
-                  out->FaceElements->Nodes[INDEX2(5,k,NN)]=node0+2*Nstride2                      ;
-                  out->FaceElements->Nodes[INDEX2(6,k,NN)]=node0+2*Nstride2+           2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(7,k,NN)]=node0+                      2*Nstride0;
-      
-                  out->FaceElements->Nodes[INDEX2(8,k,NN)]=node0+1*Nstride2+2*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(9,k,NN)]=node0+2*Nstride2+2*Nstride1+1*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(10,k,NN)]=node0+1*Nstride2+2*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(11,k,NN)]=node0+           2*Nstride1+1*Nstride0;
-      
-                  out->FaceElements->Nodes[INDEX2(12,k,NN)]=node0+           1*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(13,k,NN)]=node0+2*Nstride2+1*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(14,k,NN)]=node0+2*Nstride2+1*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(15,k,NN)]=node0+           1*Nstride1+2*Nstride0;
-      
-                  out->FaceElements->Nodes[INDEX2(16,k,NN)]=node0+1*Nstride2                      ;
-                  out->FaceElements->Nodes[INDEX2(17,k,NN)]=node0+2*Nstride2           +1*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(18,k,NN)]=node0+1*Nstride2           +2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(19,k,NN)]=node0+                      1*Nstride0;
-               } else {
-                  out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+           2*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+2*Nstride2+2*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+2*Nstride2+2*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+           2*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(4,k,NN)]=node0+1*Nstride2+2*Nstride1           ;
-                  out->FaceElements->Nodes[INDEX2(5,k,NN)]=node0+2*Nstride2+2*Nstride1+1*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(6,k,NN)]=node0+1*Nstride2+2*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(7,k,NN)]=node0+           2*Nstride1+1*Nstride0;
-                 if (generateAllNodes){
-                    out->FaceElements->Nodes[INDEX2(8,k,NN)]=node0+1*Nstride2+2*Nstride1+1*Nstride0;
-                 }
-               }
-             }
-           }
-           faceNECount+=local_NE0*local_NE2;
+#pragma omp parallel for
+            for (index_t i2=0; i2<local_NE2; i2++) {
+                for (index_t i0=0; i0<local_NE0; i0++) {
+                    const dim_t k = i0+local_NE0*i2+faceNECount;
+                    const index_t node0 = Nstride0*N_PER_E*(i0+e_offset0)
+                                        + Nstride1*N_PER_E*(NE1-1)
+                                        + Nstride2*N_PER_E*(i2+e_offset2);
+
+                    out->FaceElements->Id[k] = (i2+e_offset2)
+                                             + NE2*(i0+e_offset0)+totalNECount;
+                    out->FaceElements->Tag[k]=20;
+                    out->FaceElements->Owner[k]=myRank;
+
+                    if (useElementsOnFace) {
+                        eNodes[INDEX2(0,k,NN)]=node0+           2*Nstride1;
+                        eNodes[INDEX2(1,k,NN)]=node0+2*Nstride2+2*Nstride1;
+                        eNodes[INDEX2(2,k,NN)]=node0+2*Nstride2+2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(3,k,NN)]=node0+2*Nstride1+2*Nstride0;
+
+                        eNodes[INDEX2(4,k,NN)]=node0;
+                        eNodes[INDEX2(5,k,NN)]=node0+2*Nstride2;
+                        eNodes[INDEX2(6,k,NN)]=node0+2*Nstride2+           2*Nstride0;
+                        eNodes[INDEX2(7,k,NN)]=node0+                      2*Nstride0;
+
+                        eNodes[INDEX2(8,k,NN)]=node0+1*Nstride2+2*Nstride1;
+                        eNodes[INDEX2(9,k,NN)]=node0+2*Nstride2+2*Nstride1+1*Nstride0;
+                        eNodes[INDEX2(10,k,NN)]=node0+1*Nstride2+2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(11,k,NN)]=node0+           2*Nstride1+1*Nstride0;
+
+                        eNodes[INDEX2(12,k,NN)]=node0+           1*Nstride1;
+                        eNodes[INDEX2(13,k,NN)]=node0+2*Nstride2+1*Nstride1;
+                        eNodes[INDEX2(14,k,NN)]=node0+2*Nstride2+1*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(15,k,NN)]=node0+           1*Nstride1+2*Nstride0;
+
+                        eNodes[INDEX2(16,k,NN)]=node0+1*Nstride2;
+                        eNodes[INDEX2(17,k,NN)]=node0+2*Nstride2           +1*Nstride0;
+                        eNodes[INDEX2(18,k,NN)]=node0+1*Nstride2           +2*Nstride0;
+                        eNodes[INDEX2(19,k,NN)]=node0+                      1*Nstride0;
+                    } else { // !useElementsOnFace
+                        eNodes[INDEX2(0,k,NN)]=node0+           2*Nstride1;
+                        eNodes[INDEX2(1,k,NN)]=node0+2*Nstride2+2*Nstride1;
+                        eNodes[INDEX2(2,k,NN)]=node0+2*Nstride2+2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(3,k,NN)]=node0+           2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(4,k,NN)]=node0+1*Nstride2+2*Nstride1;
+                        eNodes[INDEX2(5,k,NN)]=node0+2*Nstride2+2*Nstride1+1*Nstride0;
+                        eNodes[INDEX2(6,k,NN)]=node0+1*Nstride2+2*Nstride1+2*Nstride0;
+                        eNodes[INDEX2(7,k,NN)]=node0+           2*Nstride1+1*Nstride0;
+                        if (generateAllNodes) {
+                            eNodes[INDEX2(8,k,NN)]=node0+1*Nstride2+2*Nstride1+1*Nstride0;
+                        }
+                    }
+                }
+            }
+            faceNECount+=local_NE0*local_NE2;
         }
         totalNECount+=NE0*NE2;
-     }
-     if (noError()) {
-         /* add tag names */
-         out->addTagMap("top", 200);
-         out->addTagMap("bottom", 100);
-         out->addTagMap("left", 1);
-         out->addTagMap("right", 2);
-         out->addTagMap("front", 10);
-         out->addTagMap("back", 20);
-     }
-  }
-    // prepare mesh for further calculations
-    if (noError()) {
-        out->resolveNodeIds();
     }
+
+    // add tag names
+    out->addTagMap("top", 200);
+    out->addTagMap("bottom", 100);
+    out->addTagMap("left", 1);
+    out->addTagMap("right", 2);
+    out->addTagMap("front", 10);
+    out->addTagMap("back", 20);
+
+    // prepare mesh for further calculations
+    out->resolveNodeIds();
     if (noError()) {
         out->prepare(optimize);
     }
diff --git a/finley/src/Mesh_hex8.cpp b/finley/src/Mesh_hex8.cpp
index bb0e9ed..2967b7f 100644
--- a/finley/src/Mesh_hex8.cpp
+++ b/finley/src/Mesh_hex8.cpp
@@ -27,48 +27,38 @@
 *****************************************************************************/
 
 #define ESNEEDPYTHON
-#include "esysUtils/first.h"
-
+#include <esysUtils/first.h>
 
 #include "RectangularMesh.h"
 
 namespace finley {
 
-Mesh* RectangularMesh_Hex8(const int* numElements, const double* Length,
+Mesh* RectangularMesh_Hex8(const dim_t* numElements, const double* Length,
                            const bool* periodic, int order, int reduced_order,
                            bool useElementsOnFace, bool useFullElementOrder,
-                           bool optimize,
-			   esysUtils::JMPI& mpiInfo)
+                           bool optimize, esysUtils::JMPI& mpiInfo)
 {
     const int N_PER_E = 1;
     const int DIM = 3;
-    int i0,i1,i2,k,Nstride0=0, Nstride1=0,Nstride2=0, local_NE0, local_NE1, local_NE2, local_N0=0, local_N1=0, local_N2=0;
-    int totalNECount,faceNECount,NDOF0=0,NDOF1=0,NDOF2=0,NFaceElements=0, NN;
-    int node0, e_offset2, e_offset1, e_offset0=0, offset1=0, offset2=0, offset0=0, global_i0, global_i1, global_i2;
-    const_ReferenceElementSet_ptr refPoints, refContactElements, refFaceElements, refElements;
-#ifdef Finley_TRACE
-    double time0=timer();
-#endif
+    dim_t Nstride0=0, Nstride1=0, Nstride2=0, local_NE0, local_NE1, local_NE2;
+    index_t e_offset0, e_offset1, e_offset2;
 
-    // get MPI information
-    if (!noError()) {
-        return NULL;
-    }
-    const int myRank=mpiInfo->rank;
+    const Esys_MPI_rank myRank = mpiInfo->rank;
 
     // set up the global dimensions of the mesh
-    int NE0=std::max(1,numElements[0]);
-    int NE1=std::max(1,numElements[1]);
-    int NE2=std::max(1,numElements[2]);
-    int N0=N_PER_E*NE0+1;
-    int N1=N_PER_E*NE1+1;
-    int N2=N_PER_E*NE2+1;
+    const dim_t NE0 = std::max(dim_t(1),numElements[0]);
+    const dim_t NE1 = std::max(dim_t(1),numElements[1]);
+    const dim_t NE2 = std::max(dim_t(1),numElements[2]);
+    const dim_t N0 = N_PER_E*NE0+1;
+    const dim_t N1 = N_PER_E*NE1+1;
+    const dim_t N2 = N_PER_E*NE2+1;
 
     // allocate mesh
     std::stringstream name;
     name << "Rectangular " << N0 << " x " << N1 << " x " << N2 << " mesh";
     Mesh* out = new Mesh(name.str(), DIM, mpiInfo);
-    refElements.reset(new ReferenceElementSet(Hex8, order, reduced_order));
+
+    const_ReferenceElementSet_ptr refPoints, refContactElements, refFaceElements, refElements;
     if (useElementsOnFace) {
         refFaceElements.reset(new ReferenceElementSet(Hex8Face, order, reduced_order));
         refContactElements.reset(new ReferenceElementSet(Hex8Face_Contact, order, reduced_order));
@@ -76,355 +66,374 @@ Mesh* RectangularMesh_Hex8(const int* numElements, const double* Length,
         refFaceElements.reset(new ReferenceElementSet(Rec4, order, reduced_order));
         refContactElements.reset(new ReferenceElementSet(Rec4_Contact, order, reduced_order));
     }
+    refElements.reset(new ReferenceElementSet(Hex8, order, reduced_order));
     refPoints.reset(new ReferenceElementSet(Point1, order, reduced_order));
 
-    if (noError()) {
-        out->setPoints(new ElementFile(refPoints, mpiInfo));
-        out->setContactElements(new ElementFile(refContactElements, mpiInfo));
-        out->setFaceElements(new ElementFile(refFaceElements, mpiInfo));
-        out->setElements(new ElementFile(refElements, mpiInfo));
-
-        // work out the largest dimension
-        if (N2==MAX3(N0,N1,N2)) {
-            Nstride0=1;
-            Nstride1=N0;
-            Nstride2=N0*N1;
-            local_NE0=NE0;
-            e_offset0=0;
-            local_NE1=NE1;
-            e_offset1=0;
-            mpiInfo->split(NE2,&local_NE2,&e_offset2);
-        } else if (N1==MAX3(N0,N1,N2)) {
-            Nstride0=N2;
-            Nstride1=N0*N2;
-            Nstride2=1;
-            local_NE0=NE0;
-            e_offset0=0;
-            mpiInfo->split(NE1,&local_NE1,&e_offset1);
-            local_NE2=NE2;
-            e_offset2=0;
-        } else {
-            Nstride0=N1*N2;
-            Nstride1=1;
-            Nstride2=N1;
-            mpiInfo->split(NE0,&local_NE0,&e_offset0);
-            local_NE1=NE1;
-            e_offset1=0;
-            local_NE2=NE2;
-            e_offset2=0;
-        }
-        offset0=e_offset0*N_PER_E;
-        offset1=e_offset1*N_PER_E;
-        offset2=e_offset2*N_PER_E;
-        local_N0=local_NE0>0 ? local_NE0*N_PER_E+1 : 0;
-        local_N1=local_NE1>0 ? local_NE1*N_PER_E+1 : 0;
-        local_N2=local_NE2>0 ? local_NE2*N_PER_E+1 : 0;
-
-        // get the number of surface elements
-        NFaceElements=0;
-        if (!periodic[2] && (local_NE2>0)) {
-            NDOF2=N2;
-            if (offset2==0)
-                NFaceElements+=local_NE1*local_NE0;
-            if (local_NE2+e_offset2 == NE2)
-                NFaceElements+=local_NE1*local_NE0;
-        } else {
-            NDOF2=N2-1;
-        }
-
-        if (!periodic[0] && (local_NE0>0)) {
-            NDOF0=N0;
-            if (e_offset0 == 0)
-                NFaceElements+=local_NE1*local_NE2;
-            if (local_NE0+e_offset0 == NE0)
-                NFaceElements+=local_NE1*local_NE2;
-        } else {
-            NDOF0=N0-1;
-        }
-        if (!periodic[1] && (local_NE1>0)) {
-            NDOF1=N1;
-            if (e_offset1 == 0)
-                NFaceElements+=local_NE0*local_NE2;
-            if (local_NE1+e_offset1 == NE1)
-                NFaceElements+=local_NE0*local_NE2;
-        } else {
-            NDOF1=N1-1;
-        }
+    out->setPoints(new ElementFile(refPoints, mpiInfo));
+    out->setContactElements(new ElementFile(refContactElements, mpiInfo));
+    out->setFaceElements(new ElementFile(refFaceElements, mpiInfo));
+    out->setElements(new ElementFile(refElements, mpiInfo));
+
+    // work out the largest dimension
+    if (N2==MAX3(N0,N1,N2)) {
+        Nstride0 = 1;
+        Nstride1 = N0;
+        Nstride2 = N0*N1;
+        local_NE0 = NE0;
+        e_offset0 = 0;
+        local_NE1 = NE1;
+        e_offset1 = 0;
+        mpiInfo->split(NE2, &local_NE2, &e_offset2);
+    } else if (N1==MAX3(N0,N1,N2)) {
+        Nstride0 = N2;
+        Nstride1 = N0*N2;
+        Nstride2 = 1;
+        local_NE0 = NE0;
+        e_offset0 = 0;
+        mpiInfo->split(NE1, &local_NE1, &e_offset1);
+        local_NE2 = NE2;
+        e_offset2 = 0;
+    } else {
+        Nstride0 = N1*N2;
+        Nstride1 = 1;
+        Nstride2 = N1;
+        mpiInfo->split(NE0, &local_NE0, &e_offset0);
+        local_NE1 = NE1;
+        e_offset1 = 0;
+        local_NE2 = NE2;
+        e_offset2 = 0;
+    }
+    const index_t offset0 = e_offset0*N_PER_E;
+    const index_t offset1 = e_offset1*N_PER_E;
+    const index_t offset2 = e_offset2*N_PER_E;
+    const dim_t local_N0 = local_NE0>0 ? local_NE0*N_PER_E+1 : 0;
+    const dim_t local_N1 = local_NE1>0 ? local_NE1*N_PER_E+1 : 0;
+    const dim_t local_N2 = local_NE2>0 ? local_NE2*N_PER_E+1 : 0;
+    dim_t NDOF0=0, NDOF1=0, NDOF2=0;
+
+    // get the number of surface elements
+    dim_t NFaceElements = 0;
+    if (!periodic[2] && local_NE2>0) {
+        NDOF2=N2;
+        if (offset2==0)
+            NFaceElements+=local_NE1*local_NE0;
+        if (local_NE2+e_offset2 == NE2)
+            NFaceElements+=local_NE1*local_NE0;
+    } else {
+        NDOF2=N2-1;
     }
 
-    // allocate tables
-    if (noError()) {
-        out->Nodes->allocTable(local_N0*local_N1*local_N2);
-        out->Elements->allocTable(local_NE0*local_NE1*local_NE2);
-        out->FaceElements->allocTable(NFaceElements);
+    if (!periodic[0] && local_NE0>0) {
+        NDOF0=N0;
+        if (e_offset0 == 0)
+            NFaceElements+=local_NE1*local_NE2;
+        if (local_NE0+e_offset0 == NE0)
+            NFaceElements+=local_NE1*local_NE2;
+    } else {
+        NDOF0=N0-1;
+    }
+    if (!periodic[1] && local_NE1>0) {
+        NDOF1=N1;
+        if (e_offset1 == 0)
+            NFaceElements+=local_NE0*local_NE2;
+        if (local_NE1+e_offset1 == NE1)
+            NFaceElements+=local_NE0*local_NE2;
+    } else {
+        NDOF1=N1-1;
     }
 
-    if (noError()) {
-        // create nodes
-#pragma omp parallel for private(i0,i1,i2,k,global_i0,global_i1,global_i2)
-        for (i2=0;i2<local_N2;i2++) {
-            for (i1=0;i1<local_N1;i1++) {
-                for (i0=0;i0<local_N0;i0++) {
-                    k=i0+local_N0*i1+local_N0*local_N1*i2;
-                    global_i0=i0+offset0;
-                    global_i1=i1+offset1;
-                    global_i2=i2+offset2;
-                    out->Nodes->Coordinates[INDEX2(0,k,DIM)]=DBLE(global_i0)/DBLE(N0-1)*Length[0];
-                    out->Nodes->Coordinates[INDEX2(1,k,DIM)]=DBLE(global_i1)/DBLE(N1-1)*Length[1];
-                    out->Nodes->Coordinates[INDEX2(2,k,DIM)]=DBLE(global_i2)/DBLE(N2-1)*Length[2];
-                    out->Nodes->Id[k]=Nstride0*global_i0+Nstride1*global_i1+Nstride2*global_i2;
-                    out->Nodes->Tag[k]=0;
-                    out->Nodes->globalDegreesOfFreedom[k]=Nstride0*(global_i0%NDOF0)
-                                               +Nstride1*(global_i1%NDOF1)
-                                               +Nstride2*(global_i2%NDOF2);
-                }
+    // allocate tables
+    out->Nodes->allocTable(local_N0*local_N1*local_N2);
+    out->Elements->allocTable(local_NE0*local_NE1*local_NE2);
+    out->FaceElements->allocTable(NFaceElements);
+
+    // create nodes
+#pragma omp parallel for
+    for (index_t i2=0; i2<local_N2; i2++) {
+        for (index_t i1=0; i1<local_N1; i1++) {
+            for (index_t i0=0; i0<local_N0; i0++) {
+                const dim_t k = i0+local_N0*i1+local_N0*local_N1*i2;
+                const index_t global_i0 = i0+offset0;
+                const index_t global_i1 = i1+offset1;
+                const index_t global_i2 = i2+offset2;
+                out->Nodes->Coordinates[INDEX2(0,k,DIM)]=DBLE(global_i0)/DBLE(N0-1)*Length[0];
+                out->Nodes->Coordinates[INDEX2(1,k,DIM)]=DBLE(global_i1)/DBLE(N1-1)*Length[1];
+                out->Nodes->Coordinates[INDEX2(2,k,DIM)]=DBLE(global_i2)/DBLE(N2-1)*Length[2];
+                out->Nodes->Id[k]=Nstride0*global_i0+Nstride1*global_i1+Nstride2*global_i2;
+                out->Nodes->Tag[k]=0;
+                out->Nodes->globalDegreesOfFreedom[k]=Nstride0*(global_i0%NDOF0)
+                                                +Nstride1*(global_i1%NDOF1)
+                                                +Nstride2*(global_i2%NDOF2);
             }
         }
+    }
 
-        // set the elements
-        NN=out->Elements->numNodes;
-#pragma omp parallel for private(i0,i1,i2,k,node0)
-        for (i2=0;i2<local_NE2;i2++) {
-            for (i1=0;i1<local_NE1;i1++) {
-                for (i0=0;i0<local_NE0;i0++) {
-                    k=i0+local_NE0*i1+local_NE0*local_NE1*i2;
-                    node0=Nstride0*N_PER_E*(i0+e_offset0)+Nstride1*N_PER_E*(i1+e_offset1)+Nstride2*N_PER_E*(i2+e_offset2);
-
-                    out->Elements->Id[k]=(i0+e_offset0)+NE0*(i1+e_offset1)+NE0*NE1*(i2+e_offset2);
-                    out->Elements->Tag[k]=0;
-                    out->Elements->Owner[k]=myRank;
-
-                    out->Elements->Nodes[INDEX2(0,k,NN)]=node0;
-                    out->Elements->Nodes[INDEX2(1,k,NN)]=node0+Nstride0;
-                    out->Elements->Nodes[INDEX2(2,k,NN)]=node0+Nstride1+Nstride0;
-                    out->Elements->Nodes[INDEX2(3,k,NN)]=node0+Nstride1;
-                    out->Elements->Nodes[INDEX2(4,k,NN)]=node0+Nstride2;
-                    out->Elements->Nodes[INDEX2(5,k,NN)]=node0+Nstride2+Nstride0;
-                    out->Elements->Nodes[INDEX2(6,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
-                    out->Elements->Nodes[INDEX2(7,k,NN)]=node0+Nstride2+Nstride1;
-                }
+    // set the elements
+    dim_t NN = out->Elements->numNodes;
+    index_t* eNodes = out->Elements->Nodes;
+#pragma omp parallel for
+    for (index_t i2=0; i2<local_NE2; i2++) {
+        for (index_t i1=0; i1<local_NE1; i1++) {
+            for (index_t i0=0; i0<local_NE0; i0++) {
+                const dim_t k = i0+local_NE0*i1+local_NE0*local_NE1*i2;
+                const index_t node0 = Nstride0*N_PER_E*(i0+e_offset0)
+                                    + Nstride1*N_PER_E*(i1+e_offset1)
+                                    + Nstride2*N_PER_E*(i2+e_offset2);
+
+                out->Elements->Id[k] = (i0+e_offset0)
+                                     + NE0*(i1+e_offset1)
+                                     + NE0*NE1*(i2+e_offset2);
+                out->Elements->Tag[k]=0;
+                out->Elements->Owner[k]=myRank;
+
+                eNodes[INDEX2(0,k,NN)]=node0;
+                eNodes[INDEX2(1,k,NN)]=node0+Nstride0;
+                eNodes[INDEX2(2,k,NN)]=node0+Nstride1+Nstride0;
+                eNodes[INDEX2(3,k,NN)]=node0+Nstride1;
+                eNodes[INDEX2(4,k,NN)]=node0+Nstride2;
+                eNodes[INDEX2(5,k,NN)]=node0+Nstride2+Nstride0;
+                eNodes[INDEX2(6,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
+                eNodes[INDEX2(7,k,NN)]=node0+Nstride2+Nstride1;
             }
         }
+    }
 
-        // face elements
-        NN=out->FaceElements->numNodes;
-        totalNECount=NE0*NE1*NE2;
-        faceNECount=0;
-        // these are the quadrilateral elements on boundary 1 (x3=0):
-        if (!periodic[2]  && (local_NE2>0)) {
-            // **  elements on boundary 100 (x3=0):
-            if (e_offset2==0) {
-#pragma omp parallel for private(i0,i1,k,node0)
-                for (i1=0;i1<local_NE1;i1++) {
-                    for (i0=0;i0<local_NE0;i0++) {
-                        k=i0+local_NE0*i1+faceNECount;
-                        node0=Nstride0*N_PER_E*(i0+e_offset0)+Nstride1*N_PER_E*(i1+e_offset1);
-
-                        out->FaceElements->Id[k]=(i0+e_offset0)+NE0*(i1+e_offset1)+totalNECount;
-                        out->FaceElements->Tag[k]=100;
-                        out->FaceElements->Owner[k]=myRank;
-
-                        if (useElementsOnFace) {
-                            out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0;
-                            out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+Nstride1;
-                            out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+Nstride1+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(4,k,NN)]=node0+Nstride2;
-                            out->FaceElements->Nodes[INDEX2(5,k,NN)]=node0+Nstride2+Nstride1;
-                            out->FaceElements->Nodes[INDEX2(6,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(7,k,NN)]=node0+Nstride2+Nstride0;
-                        } else {
-                            out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0;
-                            out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+Nstride1;
-                            out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+Nstride1+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+Nstride0;
-                        }
+    // face elements
+    NN=out->FaceElements->numNodes;
+    dim_t totalNECount=NE0*NE1*NE2;
+    dim_t faceNECount = 0;
+    eNodes = out->FaceElements->Nodes;
+
+    // these are the quadrilateral elements on boundary 1 (x3=0):
+    if (!periodic[2] && local_NE2>0) {
+        // **  elements on boundary 100 (x3=0):
+        if (e_offset2==0) {
+#pragma omp parallel for
+            for (index_t i1=0; i1<local_NE1; i1++) {
+                for (index_t i0=0; i0<local_NE0; i0++) {
+                    const dim_t k = i0+local_NE0*i1+faceNECount;
+                    const index_t node0 = Nstride0*N_PER_E*(i0+e_offset0)
+                                        + Nstride1*N_PER_E*(i1+e_offset1);
+
+                    out->FaceElements->Id[k] = (i0+e_offset0)
+                                             + NE0*(i1+e_offset1)+totalNECount;
+                    out->FaceElements->Tag[k]=100;
+                    out->FaceElements->Owner[k]=myRank;
+
+                    if (useElementsOnFace) {
+                        eNodes[INDEX2(0,k,NN)]=node0;
+                        eNodes[INDEX2(1,k,NN)]=node0+Nstride1;
+                        eNodes[INDEX2(2,k,NN)]=node0+Nstride1+Nstride0;
+                        eNodes[INDEX2(3,k,NN)]=node0+Nstride0;
+                        eNodes[INDEX2(4,k,NN)]=node0+Nstride2;
+                        eNodes[INDEX2(5,k,NN)]=node0+Nstride2+Nstride1;
+                        eNodes[INDEX2(6,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
+                        eNodes[INDEX2(7,k,NN)]=node0+Nstride2+Nstride0;
+                    } else {
+                        eNodes[INDEX2(0,k,NN)]=node0;
+                        eNodes[INDEX2(1,k,NN)]=node0+Nstride1;
+                        eNodes[INDEX2(2,k,NN)]=node0+Nstride1+Nstride0;
+                        eNodes[INDEX2(3,k,NN)]=node0+Nstride0;
                     }
                 }
-                faceNECount+=local_NE1*local_NE0;
             }
-            totalNECount+=NE1*NE0;
-
-            // **  elements on boundary 200 (x3=1):
-            if (local_NE2+e_offset2 == NE2) {
-#pragma omp parallel for private(i0,i1,k,node0)
-                for (i1=0;i1<local_NE1;i1++) {
-                    for (i0=0;i0<local_NE0;i0++) {
-                        k=i0+local_NE0*i1+faceNECount;
-                        node0=Nstride0*N_PER_E*(i0+e_offset0)+Nstride1*N_PER_E*(i1+e_offset1)+Nstride2*N_PER_E*(NE2-1);
-
-                        out->FaceElements->Id[k]=(i0+e_offset0)+NE0*(i1+e_offset1)+totalNECount;
-                        out->FaceElements->Tag[k]=200;
-                        out->FaceElements->Owner[k]=myRank;
-                        if  (useElementsOnFace) {
-                            out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+Nstride2;
-                            out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+Nstride2+         Nstride0;
-                            out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+Nstride2+Nstride1;
-
-                            out->FaceElements->Nodes[INDEX2(4,k,NN)]=node0;
-                            out->FaceElements->Nodes[INDEX2(5,k,NN)]=node0+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(6,k,NN)]=node0+           Nstride1+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(7,k,NN)]=node0+           Nstride1;
-                        } else {
-                            out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+Nstride2;
-                            out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+Nstride2         +Nstride0;
-                            out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+Nstride2+Nstride1;
-                        }
+            faceNECount+=local_NE1*local_NE0;
+        }
+        totalNECount+=NE1*NE0;
+
+        // **  elements on boundary 200 (x3=1):
+        if (local_NE2+e_offset2 == NE2) {
+#pragma omp parallel for
+            for (index_t i1=0; i1<local_NE1; i1++) {
+                for (index_t i0=0; i0<local_NE0; i0++) {
+                    const dim_t k = i0+local_NE0*i1+faceNECount;
+                    const index_t node0 = Nstride0*N_PER_E*(i0+e_offset0)
+                                        + Nstride1*N_PER_E*(i1+e_offset1)
+                                        + Nstride2*N_PER_E*(NE2-1);
+
+                    out->FaceElements->Id[k] = (i0+e_offset0)
+                                             + NE0*(i1+e_offset1)+totalNECount;
+                    out->FaceElements->Tag[k]=200;
+                    out->FaceElements->Owner[k]=myRank;
+                    if (useElementsOnFace) {
+                        eNodes[INDEX2(0,k,NN)]=node0+Nstride2;
+                        eNodes[INDEX2(1,k,NN)]=node0+Nstride2+         Nstride0;
+                        eNodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
+                        eNodes[INDEX2(3,k,NN)]=node0+Nstride2+Nstride1;
+
+                        eNodes[INDEX2(4,k,NN)]=node0;
+                        eNodes[INDEX2(5,k,NN)]=node0+Nstride0;
+                        eNodes[INDEX2(6,k,NN)]=node0+         Nstride1+Nstride0;
+                        eNodes[INDEX2(7,k,NN)]=node0+         Nstride1;
+                    } else {
+                        eNodes[INDEX2(0,k,NN)]=node0+Nstride2;
+                        eNodes[INDEX2(1,k,NN)]=node0+Nstride2         +Nstride0;
+                        eNodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
+                        eNodes[INDEX2(3,k,NN)]=node0+Nstride2+Nstride1;
                     }
                 }
-                faceNECount+=local_NE1*local_NE0;
             }
-            totalNECount+=NE1*NE0;
+            faceNECount+=local_NE1*local_NE0;
         }
-        if (!periodic[0] && (local_NE0>0)) {
-            // **  elements on boundary 001 (x1=0):
-            if (e_offset0 == 0) {
-#pragma omp parallel for private(i1,i2,k,node0)
-                for (i2=0;i2<local_NE2;i2++) {
-                    for (i1=0;i1<local_NE1;i1++) {
-                        k=i1+local_NE1*i2+faceNECount;
-                        node0=Nstride1*N_PER_E*(i1+e_offset1)+Nstride2*N_PER_E*(i2+e_offset2);
-                        out->FaceElements->Id[k]=(i1+e_offset1)+NE1*(i2+e_offset2)+totalNECount;
-                        out->FaceElements->Tag[k]=1;
-                        out->FaceElements->Owner[k]=myRank;
-
-                        if (useElementsOnFace) {
-                            out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0;
-                            out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+Nstride2;
-                            out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride1;
-                            out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+Nstride1;
-                            out->FaceElements->Nodes[INDEX2(4,k,NN)]=node0+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(5,k,NN)]=node0+Nstride2+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(6,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(7,k,NN)]=node0+Nstride1+Nstride0;
-                        } else {
-                            out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0;
-                            out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+Nstride2;
-                            out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride1;
-                            out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+Nstride1;
-                        }
+        totalNECount+=NE1*NE0;
+    } // !periodic[2] && local_NE2>0
+
+    if (!periodic[0] && local_NE0>0) {
+        // **  elements on boundary 001 (x1=0):
+        if (e_offset0 == 0) {
+#pragma omp parallel for
+            for (index_t i2=0; i2<local_NE2; i2++) {
+                for (index_t i1=0; i1<local_NE1; i1++) {
+                    const dim_t k = i1+local_NE1*i2+faceNECount;
+                    const index_t node0 = Nstride1*N_PER_E*(i1+e_offset1)
+                                        + Nstride2*N_PER_E*(i2+e_offset2);
+                    out->FaceElements->Id[k] = (i1+e_offset1)
+                                             + NE1*(i2+e_offset2)+totalNECount;
+                    out->FaceElements->Tag[k]=1;
+                    out->FaceElements->Owner[k]=myRank;
+
+                    if (useElementsOnFace) {
+                        eNodes[INDEX2(0,k,NN)]=node0;
+                        eNodes[INDEX2(1,k,NN)]=node0+Nstride2;
+                        eNodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride1;
+                        eNodes[INDEX2(3,k,NN)]=node0+Nstride1;
+                        eNodes[INDEX2(4,k,NN)]=node0+Nstride0;
+                        eNodes[INDEX2(5,k,NN)]=node0+Nstride2+Nstride0;
+                        eNodes[INDEX2(6,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
+                        eNodes[INDEX2(7,k,NN)]=node0+Nstride1+Nstride0;
+                    } else {
+                        eNodes[INDEX2(0,k,NN)]=node0;
+                        eNodes[INDEX2(1,k,NN)]=node0+Nstride2;
+                        eNodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride1;
+                        eNodes[INDEX2(3,k,NN)]=node0+Nstride1;
                     }
                 }
-                faceNECount+=local_NE1*local_NE2;
             }
-            totalNECount+=NE1*NE2;
-            // **  elements on boundary 002 (x1=1):
-            if (local_NE0+e_offset0 == NE0) {
-#pragma omp parallel for private(i1,i2,k,node0)
-                for (i2=0;i2<local_NE2;i2++) {
-                    for (i1=0;i1<local_NE1;i1++) {
-                        k=i1+local_NE1*i2+faceNECount;
-                        node0=Nstride0*N_PER_E*(NE0-1)+Nstride1*N_PER_E*(i1+e_offset1)+Nstride2*N_PER_E*(i2+e_offset2);
-                        out->FaceElements->Id[k]=(i1+e_offset1)+NE1*(i2+e_offset2)+totalNECount;
-                        out->FaceElements->Tag[k]=2;
-                        out->FaceElements->Owner[k]=myRank;
-
-                        if (useElementsOnFace) {
-                            out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+Nstride1+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+Nstride2+Nstride0;
-
-                            out->FaceElements->Nodes[INDEX2(4,k,NN)]=node0;
-                            out->FaceElements->Nodes[INDEX2(5,k,NN)]=node0+Nstride1;
-                            out->FaceElements->Nodes[INDEX2(6,k,NN)]=node0+Nstride2+Nstride1;
-                            out->FaceElements->Nodes[INDEX2(7,k,NN)]=node0+Nstride2;
-                        } else {
-                            out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+Nstride1+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+Nstride2+Nstride0;
-                        }
+            faceNECount+=local_NE1*local_NE2;
+        }
+        totalNECount+=NE1*NE2;
+
+        // **  elements on boundary 002 (x1=1):
+        if (local_NE0+e_offset0 == NE0) {
+#pragma omp parallel for
+            for (index_t i2=0; i2<local_NE2; i2++) {
+                for (index_t i1=0; i1<local_NE1; i1++) {
+                    const dim_t k = i1+local_NE1*i2+faceNECount;
+                    const index_t node0 = Nstride0*N_PER_E*(NE0-1)
+                                        + Nstride1*N_PER_E*(i1+e_offset1)
+                                        + Nstride2*N_PER_E*(i2+e_offset2);
+                    out->FaceElements->Id[k] = (i1+e_offset1)
+                                             + NE1*(i2+e_offset2)+totalNECount;
+                    out->FaceElements->Tag[k]=2;
+                    out->FaceElements->Owner[k]=myRank;
+
+                    if (useElementsOnFace) {
+                        eNodes[INDEX2(0,k,NN)]=node0+Nstride0;
+                        eNodes[INDEX2(1,k,NN)]=node0+Nstride1+Nstride0;
+                        eNodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
+                        eNodes[INDEX2(3,k,NN)]=node0+Nstride2+Nstride0;
+
+                        eNodes[INDEX2(4,k,NN)]=node0;
+                        eNodes[INDEX2(5,k,NN)]=node0+Nstride1;
+                        eNodes[INDEX2(6,k,NN)]=node0+Nstride2+Nstride1;
+                        eNodes[INDEX2(7,k,NN)]=node0+Nstride2;
+                    } else {
+                        eNodes[INDEX2(0,k,NN)]=node0+Nstride0;
+                        eNodes[INDEX2(1,k,NN)]=node0+Nstride1+Nstride0;
+                        eNodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
+                        eNodes[INDEX2(3,k,NN)]=node0+Nstride2+Nstride0;
                     }
                 }
-                faceNECount+=local_NE1*local_NE2;
             }
-            totalNECount+=NE1*NE2;
+            faceNECount+=local_NE1*local_NE2;
         }
-        if (!periodic[1] && (local_NE1>0)) {
-            // **  elements on boundary 010 (x2=0):
-            if (e_offset1 == 0) {
-#pragma omp parallel for private(i0,i2,k,node0)
-                for (i2=0;i2<local_NE2;i2++) {
-                    for (i0=0;i0<local_NE0;i0++) {
-                        k=i0+local_NE0*i2+faceNECount;
-                        node0=Nstride0*N_PER_E*(i0+e_offset0)+Nstride2*N_PER_E*(i2+e_offset2);
-
-                        out->FaceElements->Id[k]=(i2+e_offset2)+NE2*(e_offset0+i0)+totalNECount;
-                        out->FaceElements->Tag[k]=10;
-                        out->FaceElements->Owner[k]=myRank;
-                        if (useElementsOnFace) {
-                            out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0;
-                            out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+Nstride2;
-
-                            out->FaceElements->Nodes[INDEX2(4,k,NN)]=node0+Nstride1;
-                            out->FaceElements->Nodes[INDEX2(5,k,NN)]=node0+Nstride1+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(6,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(7,k,NN)]=node0+Nstride2+Nstride1;
-                        } else {
-                            out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0;
-                            out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+Nstride2;
-                        }
+        totalNECount+=NE1*NE2;
+    } // !periodic[0] && local_NE0>0
+
+    if (!periodic[1] && local_NE1>0) {
+        // **  elements on boundary 010 (x2=0):
+        if (e_offset1 == 0) {
+#pragma omp parallel for
+            for (index_t i2=0; i2<local_NE2; i2++) {
+                for (index_t i0=0; i0<local_NE0; i0++) {
+                    const dim_t k = i0+local_NE0*i2+faceNECount;
+                    const index_t node0 = Nstride0*N_PER_E*(i0+e_offset0)
+                                        + Nstride2*N_PER_E*(i2+e_offset2);
+
+                    out->FaceElements->Id[k] = (i2+e_offset2)
+                                             + NE2*(e_offset0+i0)+totalNECount;
+                    out->FaceElements->Tag[k]=10;
+                    out->FaceElements->Owner[k]=myRank;
+                    if (useElementsOnFace) {
+                        eNodes[INDEX2(0,k,NN)]=node0;
+                        eNodes[INDEX2(1,k,NN)]=node0+Nstride0;
+                        eNodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride0;
+                        eNodes[INDEX2(3,k,NN)]=node0+Nstride2;
+
+                        eNodes[INDEX2(4,k,NN)]=node0+Nstride1;
+                        eNodes[INDEX2(5,k,NN)]=node0+Nstride1+Nstride0;
+                        eNodes[INDEX2(6,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
+                        eNodes[INDEX2(7,k,NN)]=node0+Nstride2+Nstride1;
+                    } else {
+                        eNodes[INDEX2(0,k,NN)]=node0;
+                        eNodes[INDEX2(1,k,NN)]=node0+Nstride0;
+                        eNodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride0;
+                        eNodes[INDEX2(3,k,NN)]=node0+Nstride2;
                     }
                 }
-                faceNECount+=local_NE0*local_NE2;
             }
-            totalNECount+=NE0*NE2;
-            // **  elements on boundary 020 (x2=1):
-            if (local_NE1+e_offset1 == NE1) {
-#pragma omp parallel for private(i0,i2,k,node0)
-                for (i2=0;i2<local_NE2;i2++) {
-                    for (i0=0;i0<local_NE0;i0++) {
-                        k=i0+local_NE0*i2+faceNECount;
-                        node0=Nstride0*N_PER_E*(i0+e_offset0)+Nstride1*N_PER_E*(NE1-1)+Nstride2*N_PER_E*(i2+e_offset2);
-
-                        out->FaceElements->Id[k]=(i2+e_offset2)+NE2*(i0+e_offset0)+totalNECount;
-                        out->FaceElements->Tag[k]=20;
-                        out->FaceElements->Owner[k]=myRank;
-
-                        if  (useElementsOnFace) {
-                            out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+Nstride1;
-                            out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+Nstride2+Nstride1;
-                            out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+Nstride1+Nstride0;
-
-                            out->FaceElements->Nodes[INDEX2(4,k,NN)]=node0;
-                            out->FaceElements->Nodes[INDEX2(5,k,NN)]=node0+Nstride2;
-                            out->FaceElements->Nodes[INDEX2(6,k,NN)]=node0+Nstride2+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(7,k,NN)]=node0+Nstride0;
-                        } else {
-                            out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+Nstride1;
-                            out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+Nstride2+Nstride1;
-                            out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
-                            out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+Nstride1+Nstride0;
-                        }
+            faceNECount+=local_NE0*local_NE2;
+        }
+        totalNECount+=NE0*NE2;
+
+        // **  elements on boundary 020 (x2=1):
+        if (local_NE1+e_offset1 == NE1) {
+#pragma omp parallel for
+            for (index_t i2=0; i2<local_NE2; i2++) {
+                for (index_t i0=0; i0<local_NE0; i0++) {
+                    const dim_t k = i0+local_NE0*i2+faceNECount;
+                    const index_t node0 = Nstride0*N_PER_E*(i0+e_offset0)
+                                        + Nstride1*N_PER_E*(NE1-1)
+                                        + Nstride2*N_PER_E*(i2+e_offset2);
+
+                    out->FaceElements->Id[k] = (i2+e_offset2)
+                                             + NE2*(i0+e_offset0)+totalNECount;
+                    out->FaceElements->Tag[k]=20;
+                    out->FaceElements->Owner[k]=myRank;
+
+                    if (useElementsOnFace) {
+                        eNodes[INDEX2(0,k,NN)]=node0+Nstride1;
+                        eNodes[INDEX2(1,k,NN)]=node0+Nstride2+Nstride1;
+                        eNodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
+                        eNodes[INDEX2(3,k,NN)]=node0+Nstride1+Nstride0;
+
+                        eNodes[INDEX2(4,k,NN)]=node0;
+                        eNodes[INDEX2(5,k,NN)]=node0+Nstride2;
+                        eNodes[INDEX2(6,k,NN)]=node0+Nstride2+Nstride0;
+                        eNodes[INDEX2(7,k,NN)]=node0+Nstride0;
+                    } else {
+                        eNodes[INDEX2(0,k,NN)]=node0+Nstride1;
+                        eNodes[INDEX2(1,k,NN)]=node0+Nstride2+Nstride1;
+                        eNodes[INDEX2(2,k,NN)]=node0+Nstride2+Nstride1+Nstride0;
+                        eNodes[INDEX2(3,k,NN)]=node0+Nstride1+Nstride0;
                     }
                 }
-                faceNECount+=local_NE0*local_NE2;
             }
-            totalNECount+=NE0*NE2;
+            faceNECount+=local_NE0*local_NE2;
         }
+        totalNECount+=NE0*NE2;
     }
-    if (noError()) {
-        // add tag names
-        out->addTagMap("top", 200);
-        out->addTagMap("bottom", 100);
-        out->addTagMap("left", 1);
-        out->addTagMap("right", 2);
-        out->addTagMap("front", 10);
-        out->addTagMap("back", 20);
-    }
+
+    // add tag names
+    out->addTagMap("top", 200);
+    out->addTagMap("bottom", 100);
+    out->addTagMap("left", 1);
+    out->addTagMap("right", 2);
+    out->addTagMap("front", 10);
+    out->addTagMap("back", 20);
 
     // prepare mesh for further calculations
-    if (noError()) {
-        out->resolveNodeIds();
-    }
+    out->resolveNodeIds();
     if (noError()) {
         out->prepare(optimize);
     }
diff --git a/finley/src/Mesh_optimizeDOFDistribution.cpp b/finley/src/Mesh_optimizeDOFDistribution.cpp
index 06d25bc..daeddbd 100644
--- a/finley/src/Mesh_optimizeDOFDistribution.cpp
+++ b/finley/src/Mesh_optimizeDOFDistribution.cpp
@@ -45,7 +45,7 @@ namespace finley {
 // that every rank has at least 1 vertex (at line 129 of file
 // "xyzpart.c" in parmetis 3.1.1, variable "nvtxs" would be 0 if 
 // any rank has no vertex).
-bool allRanksHaveNodes(esysUtils::JMPI& mpiInfo, const std::vector<int>& distribution)
+bool allRanksHaveNodes(esysUtils::JMPI& mpiInfo, const std::vector<index_t>& distribution)
 {
     int ret = 1;
 
@@ -70,18 +70,18 @@ bool allRanksHaveNodes(esysUtils::JMPI& mpiInfo, const std::vector<int>& distrib
 
 /****************************************************************************/
 
-void Mesh::optimizeDOFDistribution(std::vector<int>& distribution)
+void Mesh::optimizeDOFDistribution(std::vector<index_t>& distribution)
 {
     // these two are not const because of parmetis call
     int mpiSize=MPIInfo->size;
     const int myRank=MPIInfo->rank;
-    const int myFirstVertex=distribution[myRank];
-    const int myLastVertex=distribution[myRank+1];
-    const int myNumVertices=myLastVertex-myFirstVertex;
+    const index_t myFirstVertex=distribution[myRank];
+    const index_t myLastVertex=distribution[myRank+1];
+    const dim_t myNumVertices=myLastVertex-myFirstVertex;
 
     // first step is to distribute the elements according to a global X of DOF
     // len is used for the sending around of partition later on
-    int len=0;
+    index_t len=0;
     for (int p=0; p<mpiSize; ++p)
         len=std::max(len, distribution[p+1]-distribution[p]);
     std::vector<int> partition(len);
@@ -109,14 +109,14 @@ void Mesh::optimizeDOFDistribution(std::vector<int>& distribution)
         }
        
         // create the local matrix pattern
-        const int globalNumVertices=distribution[mpiSize];
+        const dim_t globalNumVertices=distribution[mpiSize];
         paso::Pattern_ptr pattern(paso::Pattern::fromIndexListArray(0,
                 myNumVertices, index_list.get(), 0, globalNumVertices, 0));
         // set the coordinates
         std::vector<real_t> xyz(myNumVertices*dim);
 #pragma omp parallel for
-        for (int i=0; i<Nodes->numNodes; ++i) {
-            const int k=Nodes->globalDegreesOfFreedom[i]-myFirstVertex;
+        for (index_t i=0; i<Nodes->numNodes; ++i) {
+            const index_t k=Nodes->globalDegreesOfFreedom[i]-myFirstVertex;
             if (k>=0 && k<myNumVertices) {
                 for (int j=0; j<dim; ++j)
                     xyz[k*dim+j]=static_cast<real_t>(Nodes->Coordinates[INDEX2(j,i,dim)]); 
@@ -138,21 +138,21 @@ void Mesh::optimizeDOFDistribution(std::vector<int>& distribution)
                               &ncon, &mpiSize, &tpwgts[0], &ubvec[0], options,
                               &edgecut, &partition[0], &MPIInfo->comm);
     } else {
-        for (int i=0; i<myNumVertices; ++i)
+        for (index_t i=0; i<myNumVertices; ++i)
             partition[i]=0; // CPU 0 owns all
     }
 #else
-    for (int i=0; i<myNumVertices; ++i)
+    for (index_t i=0; i<myNumVertices; ++i)
         partition[i]=myRank; // CPU myRank owns all
 #endif
 
     // create a new distribution and labeling of the DOF
-    std::vector<int> new_distribution(mpiSize+1, 0);
+    std::vector<index_t> new_distribution(mpiSize+1, 0);
 #pragma omp parallel
     {
         std::vector<int> loc_partition_count(mpiSize, 0);
 #pragma omp for
-        for (int i=0; i<myNumVertices; ++i)
+        for (index_t i=0; i<myNumVertices; ++i)
             loc_partition_count[partition[i]]++;
 #pragma omp critical
         {
@@ -164,19 +164,19 @@ void Mesh::optimizeDOFDistribution(std::vector<int>& distribution)
 #ifdef ESYS_MPI
     // recvbuf will be the concatenation of each CPU's contribution to
     // new_distribution
-    MPI_Allgather(&new_distribution[0], mpiSize, MPI_INT, recvbuf, mpiSize,
+    MPI_Allgather(&new_distribution[0], mpiSize, MPI_DIM_T, recvbuf, mpiSize,
                   MPI_INT, MPIInfo->comm);
 #else
     for (int i=0; i<mpiSize; ++i)
         recvbuf[i]=new_distribution[i];
 #endif
     new_distribution[0]=0;
-    std::vector<int> newGlobalDOFID(len);
+    std::vector<index_t> newGlobalDOFID(len);
     for (int rank=0; rank<mpiSize; rank++) {
         int c=0;
         for (int i=0; i<myRank; ++i)
             c+=recvbuf[rank+mpiSize*i];
-        for (int i=0; i<myNumVertices; ++i) {
+        for (index_t i=0; i<myNumVertices; ++i) {
             if (rank==partition[i]) {
                 newGlobalDOFID[i]=new_distribution[rank]+c;
                 c++;
@@ -197,11 +197,11 @@ void Mesh::optimizeDOFDistribution(std::vector<int>& distribution)
     std::vector<short> setNewDOFId(Nodes->numNodes, 1);
 
     for (int p=0; p<mpiSize; ++p) {
-        const int firstVertex=distribution[current_rank];
-        const int lastVertex=distribution[current_rank+1];
+        const index_t firstVertex=distribution[current_rank];
+        const index_t lastVertex=distribution[current_rank+1];
 #pragma omp parallel for
-        for (int i=0; i<Nodes->numNodes; ++i) {
-            const int k=Nodes->globalDegreesOfFreedom[i];
+        for (index_t i=0; i<Nodes->numNodes; ++i) {
+            const index_t k=Nodes->globalDegreesOfFreedom[i];
             if (setNewDOFId[i] && (firstVertex<=k) && (k<lastVertex)) {
                 Nodes->globalDegreesOfFreedom[i]=newGlobalDOFID[k-firstVertex];
                 setNewDOFId[i]=0;
@@ -211,7 +211,7 @@ void Mesh::optimizeDOFDistribution(std::vector<int>& distribution)
         if (p<mpiSize-1) { // the final send can be skipped
 #ifdef ESYS_MPI
             MPI_Status status;
-            MPI_Sendrecv_replace(&newGlobalDOFID[0], len, MPI_INT,
+            MPI_Sendrecv_replace(&newGlobalDOFID[0], len, MPI_DIM_T,
                                dest, MPIInfo->msg_tag_counter,
                                source, MPIInfo->msg_tag_counter,
                                MPIInfo->comm, &status);
diff --git a/finley/src/Mesh_read.cpp b/finley/src/Mesh_read.cpp
index 1c5717c..a25b709 100644
--- a/finley/src/Mesh_read.cpp
+++ b/finley/src/Mesh_read.cpp
@@ -38,8 +38,8 @@ namespace finley {
 }
 
 
-Mesh* Mesh::read(esysUtils::JMPI& mpi_info, const std::string fname, int order, int reduced_order,
-                 bool optimize)
+Mesh* Mesh::read(esysUtils::JMPI& mpi_info, const std::string fname,
+                 int order, int reduced_order, bool optimize)
 {
     int numNodes, numDim=0, numEle, i0, i1;
     const_ReferenceElementSet_ptr refPoints, refContactElements, refFaceElements, refElements;
@@ -96,7 +96,7 @@ Mesh* Mesh::read(esysUtils::JMPI& mpi_info, const std::string fname, int order,
     if (noError()) {
         /* Each CPU will get at most chunkSize nodes so the message has to be sufficiently large */
         int chunkSize = numNodes / mpi_info->size + 1, totalNodes=0, chunkNodes=0,  nextCPU=1;
-        int *tempInts = new index_t[chunkSize*3+1];        /* Stores the integer message data */
+        int *tempInts = new int[chunkSize*3+1];        /* Stores the integer message data */
         double *tempCoords = new double[chunkSize*numDim]; /* Stores the double message data */
 
         /*
@@ -325,7 +325,7 @@ Mesh* Mesh::read(esysUtils::JMPI& mpi_info, const std::string fname, int order,
 
     if (noError()) {
         int chunkSize = numEle / mpi_info->size + 1, totalEle=0, nextCPU=1, chunkEle=0;
-        int *tempInts = new index_t[chunkSize*(2+numNodes)+1]; /* Store Id + Tag + node list (+ one int at end for chunkEle) */
+        int *tempInts = new int[chunkSize*(2+numNodes)+1]; /* Store Id + Tag + node list (+ one int at end for chunkEle) */
         /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
         if (mpi_info->rank == 0) {  /* Master */
             for (;;) {            /* Infinite loop */
@@ -425,7 +425,7 @@ Mesh* Mesh::read(esysUtils::JMPI& mpi_info, const std::string fname, int order,
     /******************* Read the contact element data **********************/
     if (noError()) {
         int chunkSize = numEle / mpi_info->size + 1, totalEle=0, nextCPU=1, chunkEle=0;
-        int *tempInts = new index_t[chunkSize*(2+numNodes)+1]; /* Store Id + Tag + node list (+ one int at end for chunkEle) */
+        int *tempInts = new int[chunkSize*(2+numNodes)+1]; /* Store Id + Tag + node list (+ one int at end for chunkEle) */
         /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
         if (mpi_info->rank == 0) {  /* Master */
             for (;;) {            /* Infinite loop */
@@ -528,7 +528,7 @@ Mesh* Mesh::read(esysUtils::JMPI& mpi_info, const std::string fname, int order,
     if (noError()) {
         int chunkSize = numEle / mpi_info->size + 1, totalEle=0, nextCPU=1, chunkEle=0;
         // Store Id + Tag + node list (+ one int at end for chunkEle)
-        int *tempInts = new index_t[chunkSize*(2+numNodes)+1];
+        int *tempInts = new int[chunkSize*(2+numNodes)+1];
         // Elements are specified as a list of integers...only need one
         // message instead of two as with the nodes
         if (mpi_info->rank == 0) {  // Master
diff --git a/finley/src/Mesh_readGmsh.cpp b/finley/src/Mesh_readGmsh.cpp
index 9f0606d..4b8e2b7 100644
--- a/finley/src/Mesh_readGmsh.cpp
+++ b/finley/src/Mesh_readGmsh.cpp
@@ -1092,7 +1092,7 @@ Mesh* Mesh::readGmshMaster(esysUtils::JMPI& mpi_info, const std::string fname, i
                     errorFlag = EARLY_EOF;
                 int tag_info[2] = {0};
                 char *position = &line[0];
-                //skip the first int, unsure why
+                //skip the first int, it's the physical dimension
                 if (next_space(&position, 1) == NULL 
                         || sscanf(position, "%d", tag_info) != 1 
                         || next_space(&position, 1) == NULL
diff --git a/finley/src/Mesh_rec4.cpp b/finley/src/Mesh_rec4.cpp
index e8433f7..6ec1d71 100644
--- a/finley/src/Mesh_rec4.cpp
+++ b/finley/src/Mesh_rec4.cpp
@@ -32,254 +32,250 @@
 
 namespace finley {
 
-Mesh* RectangularMesh_Rec4(const int* numElements, const double* Length,
+Mesh* RectangularMesh_Rec4(const dim_t* numElements, const double* Length,
                            const bool* periodic, int order, int reduced_order,
                            bool useElementsOnFace, bool useFullElementOrder,
-                           bool optimize,
-			   esysUtils::JMPI& mpi_info)
+                           bool optimize, esysUtils::JMPI& mpiInfo)
 {
-#define N_PER_E 1
-#define DIM 2
-  int N0,N1,NE0,NE1,i0,i1,k,Nstride0=0,Nstride1=0, local_NE0, local_NE1, local_N0=0, local_N1=0, global_i0, global_i1;
-  int offset0=0, offset1=0, e_offset0=0, e_offset1=0;
-  int totalNECount,faceNECount,NDOF0=0,NDOF1=0,NFaceElements,NN;
-  const_ReferenceElementSet_ptr refPoints, refContactElements, refFaceElements, refElements;
-  int node0, myRank;
-  char name[50];
-#ifdef Finley_TRACE
-  double time0=timer();
-#endif
-
-  /* get MPI information */
-  if (! noError()) {
-        return NULL;
-  }
-  myRank=mpi_info->rank;
-
-  /* set up the global dimensions of the mesh */
-
-  NE0=MAX(1,numElements[0]);
-  NE1=MAX(1,numElements[1]);
-  N0=N_PER_E*NE0+1;
-  N1=N_PER_E*NE1+1;
-
-  /*  allocate mesh: */
-  sprintf(name,"Rectangular %d x %d mesh",N0,N1);
-  Mesh* out=new Mesh(name, DIM, mpi_info);
-  refElements.reset(new ReferenceElementSet(Rec4, order, reduced_order));
-  if (useElementsOnFace) {
+    const int N_PER_E = 1;
+    const int DIM = 2;
+    dim_t Nstride0=0, Nstride1=0, local_NE0, local_NE1;
+    index_t e_offset0=0, e_offset1=0;
+
+    const Esys_MPI_rank myRank = mpiInfo->rank;
+
+    // set up the global dimensions of the mesh
+    const dim_t NE0 = std::max(dim_t(1),numElements[0]);
+    const dim_t NE1 = std::max(dim_t(1),numElements[1]);
+    const dim_t N0 = N_PER_E*NE0+1;
+    const dim_t N1 = N_PER_E*NE1+1;
+
+    // allocate mesh
+    std::stringstream name;
+    name << "Rectangular " << N0 << " x " << N1 << " mesh";
+    Mesh* out = new Mesh(name.str(), DIM, mpiInfo);
+
+    const_ReferenceElementSet_ptr refPoints, refContactElements, refFaceElements, refElements;
+    if (useElementsOnFace) {
         refFaceElements.reset(new ReferenceElementSet(Rec4Face, order, reduced_order));
         refContactElements.reset(new ReferenceElementSet(Rec4Face_Contact, order, reduced_order));
-  } else {
+    } else {
         refFaceElements.reset(new ReferenceElementSet(Line2, order, reduced_order));
         refContactElements.reset(new ReferenceElementSet(Line2_Contact, order, reduced_order));
-  }
-  refPoints.reset(new ReferenceElementSet(Point1, order, reduced_order));
-
-  if (noError()) {
-      out->setPoints(new ElementFile(refPoints, mpi_info));
-      out->setContactElements(new ElementFile(refContactElements, mpi_info));
-      out->setFaceElements(new ElementFile(refFaceElements, mpi_info));
-      out->setElements(new ElementFile(refElements, mpi_info));
-
-    /* work out the largest dimension */
-    if (N1==MAX(N0,N1)) {
-      Nstride0=1;
-        Nstride1=N0;
-        local_NE0=NE0;
-         e_offset0=0;
-         mpi_info->split(NE1,&local_NE1,&e_offset1);
+    }
+    refElements.reset(new ReferenceElementSet(Rec4, order, reduced_order));
+    refPoints.reset(new ReferenceElementSet(Point1, order, reduced_order));
+
+    out->setPoints(new ElementFile(refPoints, mpiInfo));
+    out->setContactElements(new ElementFile(refContactElements, mpiInfo));
+    out->setFaceElements(new ElementFile(refFaceElements, mpiInfo));
+    out->setElements(new ElementFile(refElements, mpiInfo));
+
+    // work out the largest dimension
+    if (N1 == std::max(N0,N1)) {
+        Nstride0 = 1;
+        Nstride1 = N0;
+        local_NE0 = NE0;
+        e_offset0 = 0;
+        mpiInfo->split(NE1, &local_NE1, &e_offset1);
     } else {
-      Nstride0=N1;
-        Nstride1=1;
-        mpi_info->split(NE0,&local_NE0,&e_offset0);
-        local_NE1=NE1;
-         e_offset1=0;
+        Nstride0 = N1;
+        Nstride1 = 1;
+        mpiInfo->split(NE0, &local_NE0, &e_offset0);
+        local_NE1 = NE1;
+        e_offset1 = 0;
     }
-    offset0=e_offset0*N_PER_E;
-    offset1=e_offset1*N_PER_E;
-    local_N0=local_NE0>0 ? local_NE0*N_PER_E+1 : 0;
-    local_N1=local_NE1>0 ? local_NE1*N_PER_E+1 : 0;
-
-    /* get the number of surface elements */
-
-    NFaceElements=0;
-    if (!periodic[0] && (local_NE0>0)) {
-      NDOF0=N0;
-        if (e_offset0 == 0) NFaceElements+=local_NE1;
-         if (local_NE0+e_offset0 == NE0) NFaceElements+=local_NE1;
+    const index_t offset0 = e_offset0*N_PER_E;
+    const index_t offset1 = e_offset1*N_PER_E;
+    const dim_t local_N0 = local_NE0>0 ? local_NE0*N_PER_E+1 : 0;
+    const dim_t local_N1 = local_NE1>0 ? local_NE1*N_PER_E+1 : 0;
+    dim_t NDOF0=0, NDOF1=0;
+
+    // get the number of surface elements
+    dim_t NFaceElements = 0;
+    if (!periodic[0] && local_NE0>0) {
+        NDOF0=N0;
+        if (e_offset0 == 0)
+            NFaceElements+=local_NE1;
+        if (local_NE0+e_offset0 == NE0)
+            NFaceElements+=local_NE1;
     } else {
         NDOF0=N0-1;
     }
-    if (!periodic[1]  && (local_NE1>0)) {
-      NDOF1=N1;
-      if (e_offset1 == 0) NFaceElements+=local_NE0;
-      if (local_NE1+e_offset1 == NE1) NFaceElements+=local_NE0;
+
+    if (!periodic[1] && local_NE1>0) {
+        NDOF1=N1;
+        if (e_offset1 == 0)
+            NFaceElements+=local_NE0;
+        if (local_NE1+e_offset1 == NE1)
+            NFaceElements+=local_NE0;
     } else {
         NDOF1=N1-1;
     }
 
-    /*  allocate tables: */
-
-
+    // allocate tables
     out->Nodes->allocTable(local_N0*local_N1);
     out->Elements->allocTable(local_NE0*local_NE1);
     out->FaceElements->allocTable(NFaceElements);
 
-  }
-  if (noError()) {
-     /* create nodes */
-     #pragma omp parallel for private(i0,i1,k,global_i0,global_i1)
-     for (i1=0;i1<local_N1;i1++) {
-       for (i0=0;i0<local_N0;i0++) {
-           k=i0+local_N0*i1;
-           global_i0=i0+offset0;
-           global_i1=i1+offset1;
-           out->Nodes->Coordinates[INDEX2(0,k,DIM)]=DBLE(global_i0)/DBLE(N0-1)*Length[0];
-           out->Nodes->Coordinates[INDEX2(1,k,DIM)]=DBLE(global_i1)/DBLE(N1-1)*Length[1];
-           out->Nodes->Id[k]=Nstride0*global_i0+Nstride1*global_i1;
-           out->Nodes->Tag[k]=0;
-           out->Nodes->globalDegreesOfFreedom[k]=Nstride0*(global_i0%NDOF0)
-                                               +Nstride1*(global_i1%NDOF1);
-       }
-     }
-     /*   set the elements: */
-     NN=out->Elements->numNodes;
-     #pragma omp parallel for private(i0,i1,k,node0)
-     for (i1=0;i1<local_NE1;i1++) {
-         for (i0=0;i0<local_NE0;i0++) {
-
-           k=i0+local_NE0*i1;
-           node0=Nstride0*N_PER_E*(i0+e_offset0)+Nstride1*N_PER_E*(i1+e_offset1);
-
-           out->Elements->Id[k]=(i0+e_offset0)+NE0*(i1+e_offset1);
-           out->Elements->Tag[k]=0;
-           out->Elements->Owner[k]=myRank;
-
-           out->Elements->Nodes[INDEX2(0,k,NN)]=node0;
-           out->Elements->Nodes[INDEX2(1,k,NN)]=node0+Nstride0;
-           out->Elements->Nodes[INDEX2(2,k,NN)]=node0+Nstride1+Nstride0;
-           out->Elements->Nodes[INDEX2(3,k,NN)]=node0+Nstride1;
-         }
-     }
-     /* face elements */
-     NN=out->FaceElements->numNodes;
-     totalNECount=NE0*NE1;
-     faceNECount=0;
-     if (!periodic[0]  && (local_NE0>0)) {
-        /* **  elements on boundary 001 (x1=0): */
+    // create nodes
+#pragma omp parallel for
+    for (index_t i1=0; i1<local_N1; i1++) {
+        for (index_t i0=0; i0<local_N0; i0++) {
+            const dim_t k = i0+local_N0*i1;
+            const index_t global_i0 = i0+offset0;
+            const index_t global_i1 = i1+offset1;
+            out->Nodes->Coordinates[INDEX2(0,k,DIM)]=DBLE(global_i0)/DBLE(N0-1)*Length[0];
+            out->Nodes->Coordinates[INDEX2(1,k,DIM)]=DBLE(global_i1)/DBLE(N1-1)*Length[1];
+            out->Nodes->Id[k] = Nstride0*global_i0 + Nstride1*global_i1;
+            out->Nodes->Tag[k]=0;
+            out->Nodes->globalDegreesOfFreedom[k] = Nstride0*(global_i0%NDOF0)
+                                                  + Nstride1*(global_i1%NDOF1);
+        }
+    }
+
+    // set the elements
+    dim_t NN = out->Elements->numNodes;
+#pragma omp parallel for
+    for (index_t i1=0; i1<local_NE1; i1++) {
+        for (index_t i0=0; i0<local_NE0; i0++) {
+            const dim_t k = i0+local_NE0*i1;
+            const index_t node0 = Nstride0*N_PER_E*(i0+e_offset0)
+                                + Nstride1*N_PER_E*(i1+e_offset1);
+
+            out->Elements->Id[k] = (i0+e_offset0) + NE0*(i1+e_offset1);
+            out->Elements->Tag[k] = 0;
+            out->Elements->Owner[k] = myRank;
+
+            out->Elements->Nodes[INDEX2(0,k,NN)]=node0;
+            out->Elements->Nodes[INDEX2(1,k,NN)]=node0+Nstride0;
+            out->Elements->Nodes[INDEX2(2,k,NN)]=node0+Nstride1+Nstride0;
+            out->Elements->Nodes[INDEX2(3,k,NN)]=node0+Nstride1;
+        }
+    }
+
+    // face elements
+    NN=out->FaceElements->numNodes;
+    dim_t totalNECount=NE0*NE1;
+    dim_t faceNECount = 0;
+    index_t* eNodes = out->FaceElements->Nodes;
 
+    if (!periodic[0] && local_NE0>0) {
+        // **  elements on boundary 001 (x1=0):
         if (e_offset0 == 0) {
-#pragma omp parallel for private(i1,k,node0)
-           for (i1=0;i1<local_NE1;i1++) {
-
-               k=i1+faceNECount;
-               node0=Nstride1*N_PER_E*(i1+e_offset1);
-
-               out->FaceElements->Id[k]=i1+e_offset1+totalNECount;
-               out->FaceElements->Tag[k]=1;
-               out->FaceElements->Owner[k]=myRank;
-               if (useElementsOnFace) {
-                  out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+Nstride1;
-                  out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0;
-                  out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+Nstride0;
-                  out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+Nstride1+Nstride0;
-               } else {
-                  out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+Nstride1;
-                  out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0;
-               }
-           }
-           faceNECount+=local_NE1;
+#pragma omp parallel for
+            for (index_t i1=0; i1<local_NE1; i1++) {
+                const dim_t k = i1+faceNECount;
+                const index_t node0 = Nstride1*N_PER_E*(i1+e_offset1);
+
+                out->FaceElements->Id[k] = i1+e_offset1+totalNECount;
+                out->FaceElements->Tag[k] = 1;
+                out->FaceElements->Owner[k] = myRank;
+                if (useElementsOnFace) {
+                    eNodes[INDEX2(0,k,NN)]=node0+Nstride1;
+                    eNodes[INDEX2(1,k,NN)]=node0;
+                    eNodes[INDEX2(2,k,NN)]=node0+Nstride0;
+                    eNodes[INDEX2(3,k,NN)]=node0+Nstride1+Nstride0;
+                } else {
+                    eNodes[INDEX2(0,k,NN)]=node0+Nstride1;
+                    eNodes[INDEX2(1,k,NN)]=node0;
+                }
+            }
+            faceNECount+=local_NE1;
         }
         totalNECount+=NE1;
-        /* **  elements on boundary 002 (x1=1): */
+
+        // **  elements on boundary 002 (x1=1):
         if (local_NE0+e_offset0 == NE0) {
-#pragma omp parallel for private(i1,k,node0)
-           for (i1=0;i1<local_NE1;i1++) {
-               k=i1+faceNECount;
-               node0=Nstride0*N_PER_E*(NE0-1)+Nstride1*N_PER_E*(i1+e_offset1);
-
-               out->FaceElements->Id[k]=(i1+e_offset1)+totalNECount;
-               out->FaceElements->Tag[k]=2;
-               out->FaceElements->Owner[k]=myRank;
-
-               if (useElementsOnFace) {
-                  out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+Nstride0;
-                  out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+Nstride1+Nstride0;
-                  out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+Nstride1;
-                  out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0;
-               } else {
-                  out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+Nstride0;
-                  out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+Nstride1+Nstride0;
-               }
-           }
-           faceNECount+=local_NE1;
-         }
-         totalNECount+=NE1;
-     }
-     if (!periodic[1]  && (local_NE1>0)) {
-        /* **  elements on boundary 010 (x2=0): */
+#pragma omp parallel for
+            for (index_t i1=0; i1<local_NE1; i1++) {
+                const dim_t k = i1+faceNECount;
+                const index_t node0 = Nstride0*N_PER_E*(NE0-1)
+                                    + Nstride1*N_PER_E*(i1+e_offset1);
+
+                out->FaceElements->Id[k] = (i1+e_offset1)+totalNECount;
+                out->FaceElements->Tag[k] = 2;
+                out->FaceElements->Owner[k] = myRank;
+                if (useElementsOnFace) {
+                    eNodes[INDEX2(0,k,NN)]=node0+Nstride0;
+                    eNodes[INDEX2(1,k,NN)]=node0+Nstride1+Nstride0;
+                    eNodes[INDEX2(2,k,NN)]=node0+Nstride1;
+                    eNodes[INDEX2(3,k,NN)]=node0;
+                } else {
+                    eNodes[INDEX2(0,k,NN)]=node0+Nstride0;
+                    eNodes[INDEX2(1,k,NN)]=node0+Nstride1+Nstride0;
+                }
+            }
+            faceNECount+=local_NE1;
+        }
+        totalNECount+=NE1;
+    }
+
+    if (!periodic[1] && local_NE1>0) {
+        // **  elements on boundary 010 (x2=0):
         if (e_offset1 == 0) {
-#pragma omp parallel for private(i0,k,node0)
-           for (i0=0;i0<local_NE0;i0++) {
-               k=i0+faceNECount;
-               node0=Nstride0*N_PER_E*(i0+e_offset0);
-               out->FaceElements->Id[k]=e_offset0+i0+totalNECount;
-               out->FaceElements->Tag[k]=10;
-               out->FaceElements->Owner[k]=myRank;
-
-               if (useElementsOnFace) {
-                   out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0;
-                   out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+Nstride0;
-                   out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+Nstride1+Nstride0;
-                   out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+Nstride1;
-               } else {
-                   out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0;
-                   out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+Nstride0;
-               }
-           }
-           faceNECount+=local_NE0;
+#pragma omp parallel for
+            for (index_t i0=0; i0<local_NE0; i0++) {
+                const dim_t k = i0+faceNECount;
+                const index_t node0 = Nstride0*N_PER_E*(i0+e_offset0);
+                out->FaceElements->Id[k] = e_offset0+i0+totalNECount;
+                out->FaceElements->Tag[k] = 10;
+                out->FaceElements->Owner[k] = myRank;
+                if (useElementsOnFace) {
+                    eNodes[INDEX2(0,k,NN)]=node0;
+                    eNodes[INDEX2(1,k,NN)]=node0+Nstride0;
+                    eNodes[INDEX2(2,k,NN)]=node0+Nstride1+Nstride0;
+                    eNodes[INDEX2(3,k,NN)]=node0+Nstride1;
+                } else {
+                    eNodes[INDEX2(0,k,NN)]=node0;
+                    eNodes[INDEX2(1,k,NN)]=node0+Nstride0;
+                }
+            }
+            faceNECount+=local_NE0;
         }
         totalNECount+=NE0;
-        /* **  elements on boundary 020 (x2=1): */
+
+        // **  elements on boundary 020 (x2=1):
         if (local_NE1+e_offset1 == NE1) {
-#pragma omp parallel for private(i0,k,node0)
-           for (i0=0;i0<local_NE0;i0++) {
-               k=i0+faceNECount;
-               node0=Nstride0*N_PER_E*(i0+e_offset0)+Nstride1*N_PER_E*(NE1-1);
-
-               out->FaceElements->Id[k]=i0+e_offset0+totalNECount;
-               out->FaceElements->Tag[k]=20;
-               out->FaceElements->Owner[k]=myRank;
-               if (useElementsOnFace) {
-                    out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+Nstride1+Nstride0;
-                    out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+Nstride1;
-                    out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0;
-                    out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+Nstride0;
-               } else {
-                    out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+Nstride1+Nstride0;
-                    out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+Nstride1;
-               }
-           }
-           faceNECount+=local_NE0;
+#pragma omp parallel for
+            for (index_t i0=0; i0<local_NE0; i0++) {
+                const dim_t k = i0+faceNECount;
+                const index_t node0 = Nstride0*N_PER_E*(i0+e_offset0)
+                                    + Nstride1*N_PER_E*(NE1-1);
+
+                out->FaceElements->Id[k] = i0+e_offset0+totalNECount;
+                out->FaceElements->Tag[k] = 20;
+                out->FaceElements->Owner[k] = myRank;
+                if (useElementsOnFace) {
+                    eNodes[INDEX2(0,k,NN)]=node0+Nstride1+Nstride0;
+                    eNodes[INDEX2(1,k,NN)]=node0+Nstride1;
+                    eNodes[INDEX2(2,k,NN)]=node0;
+                    eNodes[INDEX2(3,k,NN)]=node0+Nstride0;
+                } else {
+                    eNodes[INDEX2(0,k,NN)]=node0+Nstride1+Nstride0;
+                    eNodes[INDEX2(1,k,NN)]=node0+Nstride1;
+                }
+            }
+            faceNECount+=local_NE0;
         }
         totalNECount+=NE0;
-     }
-  }
-  if (noError()) {
-     /* add tag names */
-     out->addTagMap("top", 20);
-     out->addTagMap("bottom", 10);
-     out->addTagMap("left", 1);
-     out->addTagMap("right", 2);
-  }
-    // prepare mesh for further calculations
-    if (noError()) {
-        out->resolveNodeIds();
     }
+
+    // add tag names
+    out->addTagMap("top", 20);
+    out->addTagMap("bottom", 10);
+    out->addTagMap("left", 1);
+    out->addTagMap("right", 2);
+
+    // prepare mesh for further calculations
+    out->resolveNodeIds();
     if (noError()) {
         out->prepare(optimize);
     }
+    if (!noError()) {
+        delete out;
+        out=NULL;
+    }
 
     return out;
 }
diff --git a/finley/src/Mesh_rec8.cpp b/finley/src/Mesh_rec8.cpp
index 0f72165..25fb748 100644
--- a/finley/src/Mesh_rec8.cpp
+++ b/finley/src/Mesh_rec8.cpp
@@ -32,300 +32,289 @@
 
 namespace finley {
 
-Mesh* RectangularMesh_Rec8(const int* numElements, const double* Length,
+Mesh* RectangularMesh_Rec8(const dim_t* numElements, const double* Length,
                            const bool* periodic, int order, int reduced_order,
                            bool useElementsOnFace, bool useFullElementOrder,
                            bool useMacroElements, bool optimize,
-			   esysUtils::JMPI& mpi_info)
+                           esysUtils::JMPI& mpiInfo)
 {
-#define N_PER_E 2
-#define DIM 2
-  int N0,N1,NE0,NE1,i0,i1,k,Nstride0=0,Nstride1=0;
-  int totalNECount,faceNECount,NDOF0=0,NDOF1=0,NFaceElements,NN, local_NE0, local_NE1, local_N0=0, local_N1=0;
-  int e_offset1, e_offset0, offset0=0, offset1=0, global_i0, global_i1;
-  int node0, myRank;
-  const_ReferenceElementSet_ptr refPoints, refContactElements, refFaceElements, refElements;
-  char name[50];
-  bool generateAllNodes = useFullElementOrder || useMacroElements;
-#ifdef Finley_TRACE
-  double time0=timer();
-#endif
-
-  /* get MPI information */
-  if (!noError()) {
-        return NULL;
-  }
-  myRank=mpi_info->rank;
-
-  /* set up the global dimensions of the mesh */
-
-  NE0=MAX(1,numElements[0]);
-  NE1=MAX(1,numElements[1]);
-  N0=N_PER_E*NE0+1;
-  N1=N_PER_E*NE1+1;
-
-  /*  allocate mesh: */
-  sprintf(name,"Rectangular %d x %d mesh",N0,N1);
-  Mesh* out = new Mesh(name, DIM, mpi_info);
-  if (generateAllNodes) {
-     /* setError(SYSTEM_ERROR,"full element order for Hex elements is not supported yet."); */
-     if (useMacroElements) {
-          refElements.reset(new ReferenceElementSet(Rec9Macro, order, reduced_order));
-     } else {
-          refElements.reset(new ReferenceElementSet(Rec9, order, reduced_order));
-     }
-     if (useElementsOnFace) {
-         setError(SYSTEM_ERROR,"rich elements for Rec9 elements are not supported yet.");
-     } else {
-         if (useMacroElements) {
-             refFaceElements.reset(new ReferenceElementSet(Line3Macro, order, reduced_order));
-         } else {
-             refFaceElements.reset(new ReferenceElementSet(Line3, order, reduced_order));
-         }
-         refContactElements.reset(new ReferenceElementSet(Line3_Contact, order, reduced_order));
-     }
-
-  } else  {
-     refElements.reset(new ReferenceElementSet(Rec8, order, reduced_order));
-     if (useElementsOnFace) {
-         refFaceElements.reset(new ReferenceElementSet(Rec8Face, order, reduced_order));
-         refContactElements.reset(new ReferenceElementSet(Rec8Face_Contact, order, reduced_order));
-
-     } else {
-         refFaceElements.reset(new ReferenceElementSet(Line3, order, reduced_order));
-         refContactElements.reset(new ReferenceElementSet(Line3_Contact, order, reduced_order));
-
-     }
-  }
-  refPoints.reset(new ReferenceElementSet(Point1, order, reduced_order));
-
-
-  if (noError()) {
-
-      out->setPoints(new ElementFile(refPoints, mpi_info));
-      out->setContactElements(new ElementFile(refContactElements, mpi_info));
-      out->setFaceElements(new ElementFile(refFaceElements, mpi_info));
-      out->setElements(new ElementFile(refElements, mpi_info));
-
-      /* work out the largest dimension */
-      if (N1==MAX(N0,N1)) {
-          Nstride0=1;
-          Nstride1=N0;
-          local_NE0=NE0;
-          e_offset0=0;
-          mpi_info->split(NE1,&local_NE1,&e_offset1);
-      } else {
-          Nstride0=N1;
-          Nstride1=1;
-          mpi_info->split(NE0,&local_NE0,&e_offset0);
-          local_NE1=NE1;
-          e_offset1=0;
-      }
-      offset0=e_offset0*N_PER_E;
-      offset1=e_offset1*N_PER_E;
-      local_N0=local_NE0>0 ? local_NE0*N_PER_E+1 : 0;
-      local_N1=local_NE1>0 ? local_NE1*N_PER_E+1 : 0;
-
-      /* get the number of surface elements */
-
-      NFaceElements=0;
-      if (!periodic[0] &&  (local_NE0>0)) {
-          NDOF0=N0;
-          if (e_offset0 == 0) NFaceElements+=local_NE1;
-          if (local_NE0+e_offset0 == NE0) NFaceElements+=local_NE1;
-      } else {
-          NDOF0=N0-1;
-      }
-      if (!periodic[1] && (local_NE1>0)) {
-          NDOF1=N1;
-          if (e_offset1 == 0) NFaceElements+=local_NE0;
-          if (local_NE1+e_offset1 == NE1) NFaceElements+=local_NE0;
-      } else {
-          NDOF1=N1-1;
-      }
-
-      /*  allocate tables: */
-      out->Nodes->allocTable(local_N0*local_N1);
-      out->Elements->allocTable(local_NE0*local_NE1);
-      out->FaceElements->allocTable(NFaceElements);
-  }
-
-  if (noError()) {
-     /* create nodes */
-#pragma omp parallel for private(i0,i1,k,global_i0,global_i1)
-     for (i1=0;i1<local_N1;i1++) {
-       for (i0=0;i0<local_N0;i0++) {
-           k=i0+local_N0*i1;
-           global_i0=i0+offset0;
-           global_i1=i1+offset1;
-           out->Nodes->Coordinates[INDEX2(0,k,DIM)]=DBLE(global_i0)/DBLE(N0-1)*Length[0];
-           out->Nodes->Coordinates[INDEX2(1,k,DIM)]=DBLE(global_i1)/DBLE(N1-1)*Length[1];
-           out->Nodes->Id[k]=Nstride0*global_i0+Nstride1*global_i1;
-           out->Nodes->Tag[k]=0;
-           out->Nodes->globalDegreesOfFreedom[k]=Nstride0*(global_i0%NDOF0)
-                                               +Nstride1*(global_i1%NDOF1);
-       }
-     }
-     /*   set the elements: */
-     NN=out->Elements->numNodes;
-#pragma omp parallel for private(i0,i1,k,node0)
-     for (i1=0;i1<local_NE1;i1++) {
-         for (i0=0;i0<local_NE0;i0++) {
-
-           k=i0+local_NE0*i1;
-           node0=Nstride0*N_PER_E*(i0+e_offset0)+Nstride1*N_PER_E*(i1+e_offset1);
-
-           out->Elements->Id[k]=(i0+e_offset0)+NE0*(i1+e_offset1);
-           out->Elements->Tag[k]=0;
-           out->Elements->Owner[k]=myRank;
-
-           out->Elements->Nodes[INDEX2(0,k,NN)]=node0;
-           out->Elements->Nodes[INDEX2(1,k,NN)]=node0+2*Nstride0;
-           out->Elements->Nodes[INDEX2(2,k,NN)]=node0+2*Nstride1+2*Nstride0;
-           out->Elements->Nodes[INDEX2(3,k,NN)]=node0+2*Nstride1;
-           out->Elements->Nodes[INDEX2(4,k,NN)]=node0+1*Nstride0;
-           out->Elements->Nodes[INDEX2(5,k,NN)]=node0+Nstride1+2*Nstride0;
-           out->Elements->Nodes[INDEX2(6,k,NN)]=node0+2*Nstride1+1*Nstride0;
-           out->Elements->Nodes[INDEX2(7,k,NN)]=node0+Nstride1;
-           if (generateAllNodes) {
-              out->Elements->Nodes[INDEX2(8,k,NN)]=node0+1*Nstride1+1*Nstride0;
-           }
-         }
-     }
-     /* face elements */
-     NN=out->FaceElements->numNodes;
-     totalNECount=NE0*NE1;
-     faceNECount=0;
-     if (!periodic[0] && (local_NE0>0)) {
-        /* **  elements on boundary 001 (x1=0): */
+    const int N_PER_E = 2;
+    const int DIM = 2;
+    dim_t Nstride0=0, Nstride1=0, local_NE0, local_NE1;
+    index_t e_offset0, e_offset1;
+    const bool generateAllNodes = useFullElementOrder || useMacroElements;
+
+    const Esys_MPI_rank myRank = mpiInfo->rank;
+
+    // set up the global dimensions of the mesh
+    const dim_t NE0 = std::max(dim_t(1),numElements[0]);
+    const dim_t NE1 = std::max(dim_t(1),numElements[1]);
+    const dim_t N0 = N_PER_E*NE0+1;
+    const dim_t N1 = N_PER_E*NE1+1;
+
+    // allocate mesh
+    std::stringstream name;
+    name << "Rectangular " << N0 << " x " << N1 << " mesh";
+    Mesh* out = new Mesh(name.str(), DIM, mpiInfo);
+
+    const_ReferenceElementSet_ptr refPoints, refContactElements, refFaceElements, refElements;
+    if (generateAllNodes) {
+        if (useMacroElements) {
+            refElements.reset(new ReferenceElementSet(Rec9Macro, order, reduced_order));
+        } else {
+            refElements.reset(new ReferenceElementSet(Rec9, order, reduced_order));
+        }
+        if (useElementsOnFace) {
+            setError(SYSTEM_ERROR, "rich elements for Rec9 elements are not supported yet.");
+        } else {
+            if (useMacroElements) {
+                refFaceElements.reset(new ReferenceElementSet(Line3Macro, order, reduced_order));
+            } else {
+                refFaceElements.reset(new ReferenceElementSet(Line3, order, reduced_order));
+            }
+            refContactElements.reset(new ReferenceElementSet(Line3_Contact, order, reduced_order));
+        }
+    } else { // !generateAllNodes
+        refElements.reset(new ReferenceElementSet(Rec8, order, reduced_order));
+        if (useElementsOnFace) {
+            refFaceElements.reset(new ReferenceElementSet(Rec8Face, order, reduced_order));
+            refContactElements.reset(new ReferenceElementSet(Rec8Face_Contact, order, reduced_order));
+        } else {
+            refFaceElements.reset(new ReferenceElementSet(Line3, order, reduced_order));
+            refContactElements.reset(new ReferenceElementSet(Line3_Contact, order, reduced_order));
+        }
+    }
+    refPoints.reset(new ReferenceElementSet(Point1, order, reduced_order));
+
+    out->setPoints(new ElementFile(refPoints, mpiInfo));
+    out->setContactElements(new ElementFile(refContactElements, mpiInfo));
+    out->setFaceElements(new ElementFile(refFaceElements, mpiInfo));
+    out->setElements(new ElementFile(refElements, mpiInfo));
+
+    // work out the largest dimension
+    if (N1 == std::max(N0,N1)) {
+        Nstride0 = 1;
+        Nstride1 = N0;
+        local_NE0 = NE0;
+        e_offset0 = 0;
+        mpiInfo->split(NE1, &local_NE1, &e_offset1);
+    } else {
+        Nstride0 = N1;
+        Nstride1 = 1;
+        mpiInfo->split(NE0, &local_NE0, &e_offset0);
+        local_NE1 = NE1;
+        e_offset1 = 0;
+    }
+    const index_t offset0 = e_offset0*N_PER_E;
+    const index_t offset1 = e_offset1*N_PER_E;
+    const dim_t local_N0 = local_NE0>0 ? local_NE0*N_PER_E+1 : 0;
+    const dim_t local_N1 = local_NE1>0 ? local_NE1*N_PER_E+1 : 0;
+    dim_t NDOF0=0, NDOF1=0;
+
+    // get the number of surface elements
+    dim_t NFaceElements = 0;
+    if (!periodic[0] && local_NE0>0) {
+        NDOF0=N0;
+        if (e_offset0 == 0)
+            NFaceElements+=local_NE1;
+        if (local_NE0+e_offset0 == NE0)
+            NFaceElements+=local_NE1;
+    } else {
+        NDOF0=N0-1;
+    }
+    if (!periodic[1] && local_NE1>0) {
+        NDOF1=N1;
+        if (e_offset1 == 0)
+            NFaceElements+=local_NE0;
+        if (local_NE1+e_offset1 == NE1)
+            NFaceElements+=local_NE0;
+    } else {
+        NDOF1=N1-1;
+    }
+
+    // allocate tables
+    out->Nodes->allocTable(local_N0*local_N1);
+    out->Elements->allocTable(local_NE0*local_NE1);
+    out->FaceElements->allocTable(NFaceElements);
+
+    // create nodes
+#pragma omp parallel for
+    for (index_t i1=0; i1<local_N1; i1++) {
+        for (index_t i0=0; i0<local_N0; i0++) {
+            const dim_t k = i0+local_N0*i1;
+            const index_t global_i0 = i0+offset0;
+            const index_t global_i1 = i1+offset1;
+            out->Nodes->Coordinates[INDEX2(0,k,DIM)]=DBLE(global_i0)/DBLE(N0-1)*Length[0];
+            out->Nodes->Coordinates[INDEX2(1,k,DIM)]=DBLE(global_i1)/DBLE(N1-1)*Length[1];
+            out->Nodes->Id[k] = Nstride0*global_i0+Nstride1*global_i1;
+            out->Nodes->Tag[k]=0;
+            out->Nodes->globalDegreesOfFreedom[k] = Nstride0*(global_i0%NDOF0)
+                                                  + Nstride1*(global_i1%NDOF1);
+        }
+    }
+
+    // set the elements
+    dim_t NN = out->Elements->numNodes;
+#pragma omp parallel for
+    for (index_t i1=0; i1<local_NE1; i1++) {
+        for (index_t i0=0; i0<local_NE0; i0++) {
+            const dim_t k = i0+local_NE0*i1;
+            const index_t node0 = Nstride0*N_PER_E*(i0+e_offset0)
+                                + Nstride1*N_PER_E*(i1+e_offset1);
+
+            out->Elements->Id[k]=(i0+e_offset0)+NE0*(i1+e_offset1);
+            out->Elements->Tag[k]=0;
+            out->Elements->Owner[k]=myRank;
+
+            out->Elements->Nodes[INDEX2(0,k,NN)]=node0;
+            out->Elements->Nodes[INDEX2(1,k,NN)]=node0+2*Nstride0;
+            out->Elements->Nodes[INDEX2(2,k,NN)]=node0+2*Nstride1+2*Nstride0;
+            out->Elements->Nodes[INDEX2(3,k,NN)]=node0+2*Nstride1;
+            out->Elements->Nodes[INDEX2(4,k,NN)]=node0+1*Nstride0;
+            out->Elements->Nodes[INDEX2(5,k,NN)]=node0+Nstride1+2*Nstride0;
+            out->Elements->Nodes[INDEX2(6,k,NN)]=node0+2*Nstride1+1*Nstride0;
+            out->Elements->Nodes[INDEX2(7,k,NN)]=node0+Nstride1;
+            if (generateAllNodes) {
+                out->Elements->Nodes[INDEX2(8,k,NN)]=node0+1*Nstride1+1*Nstride0;
+            }
+        }
+    }
+
+    // face elements
+    NN=out->FaceElements->numNodes;
+    dim_t totalNECount=NE0*NE1;
+    dim_t faceNECount=0;
+    index_t* eNodes = out->FaceElements->Nodes;
+
+    if (!periodic[0] && local_NE0>0) {
+        // **  elements on boundary 001 (x1=0):
         if (e_offset0 == 0) {
-#pragma omp parallel for private(i1,k,node0)
-           for (i1=0;i1<local_NE1;i1++) {
-               k=i1+faceNECount;
-               node0=Nstride1*N_PER_E*(i1+e_offset1);
-
-               out->FaceElements->Id[k]=i1+e_offset1+totalNECount;
-               out->FaceElements->Tag[k]=1;
-               out->FaceElements->Owner[k]=myRank;
-               if (useElementsOnFace) {
-                  out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+2*Nstride1;
-                  out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0;
-                  out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+2*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(4,k,NN)]=node0+Nstride1;
-                  out->FaceElements->Nodes[INDEX2(5,k,NN)]=node0+1*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(6,k,NN)]=node0+Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(7,k,NN)]=node0+2*Nstride1+1*Nstride0;
-               } else {
-                  out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+2*Nstride1;
-                  out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0;
-                  out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+Nstride1;
-               }
-           }
-           faceNECount+=local_NE1;
+#pragma omp parallel for
+            for (index_t i1=0; i1<local_NE1; i1++) {
+                const dim_t k = i1+faceNECount;
+                const index_t node0 = Nstride1*N_PER_E*(i1+e_offset1);
+
+                out->FaceElements->Id[k]=i1+e_offset1+totalNECount;
+                out->FaceElements->Tag[k]=1;
+                out->FaceElements->Owner[k]=myRank;
+                if (useElementsOnFace) {
+                    eNodes[INDEX2(0,k,NN)]=node0+2*Nstride1;
+                    eNodes[INDEX2(1,k,NN)]=node0;
+                    eNodes[INDEX2(2,k,NN)]=node0+2*Nstride0;
+                    eNodes[INDEX2(3,k,NN)]=node0+2*Nstride1+2*Nstride0;
+                    eNodes[INDEX2(4,k,NN)]=node0+Nstride1;
+                    eNodes[INDEX2(5,k,NN)]=node0+1*Nstride0;
+                    eNodes[INDEX2(6,k,NN)]=node0+Nstride1+2*Nstride0;
+                    eNodes[INDEX2(7,k,NN)]=node0+2*Nstride1+1*Nstride0;
+                } else {
+                    eNodes[INDEX2(0,k,NN)]=node0+2*Nstride1;
+                    eNodes[INDEX2(1,k,NN)]=node0;
+                    eNodes[INDEX2(2,k,NN)]=node0+Nstride1;
+                }
+            }
+            faceNECount+=local_NE1;
         }
         totalNECount+=NE1;
-        /* **  elements on boundary 002 (x1=1): */
+
+        // **  elements on boundary 002 (x1=1):
         if (local_NE0+e_offset0 == NE0) {
-#pragma omp parallel for private(i1,k,node0)
-           for (i1=0;i1<local_NE1;i1++) {
-               k=i1+faceNECount;
-               node0=Nstride0*N_PER_E*(NE0-1)+Nstride1*N_PER_E*(i1+e_offset1);
-
-               out->FaceElements->Id[k]=(i1+e_offset1)+totalNECount;
-               out->FaceElements->Tag[k]=2;
-               out->FaceElements->Owner[k]=myRank;
-
-               if (useElementsOnFace) {
-                  out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+2*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+2*Nstride1;
-                  out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0;
-                  out->FaceElements->Nodes[INDEX2(4,k,NN)]=node0+Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(5,k,NN)]=node0+2*Nstride1+1*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(6,k,NN)]=node0+Nstride1;
-                  out->FaceElements->Nodes[INDEX2(7,k,NN)]=node0+1*Nstride0;
-               } else {
-                  out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+2*Nstride1+2*Nstride0;
-                  out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+Nstride1+2*Nstride0;
-               }
-           }
-           faceNECount+=local_NE1;
-         }
-         totalNECount+=NE1;
-     }
-     if (!periodic[1] && (local_NE1>0)) {
-        /* **  elements on boundary 010 (x2=0): */
+#pragma omp parallel for
+            for (index_t i1=0; i1<local_NE1; i1++) {
+                const dim_t k = i1+faceNECount;
+                const index_t node0 = Nstride0*N_PER_E*(NE0-1)
+                                    + Nstride1*N_PER_E*(i1+e_offset1);
+
+                out->FaceElements->Id[k] = (i1+e_offset1)+totalNECount;
+                out->FaceElements->Tag[k] = 2;
+                out->FaceElements->Owner[k] = myRank;
+                if (useElementsOnFace) {
+                    eNodes[INDEX2(0,k,NN)]=node0+2*Nstride0;
+                    eNodes[INDEX2(1,k,NN)]=node0+2*Nstride1+2*Nstride0;
+                    eNodes[INDEX2(2,k,NN)]=node0+2*Nstride1;
+                    eNodes[INDEX2(3,k,NN)]=node0;
+                    eNodes[INDEX2(4,k,NN)]=node0+Nstride1+2*Nstride0;
+                    eNodes[INDEX2(5,k,NN)]=node0+2*Nstride1+1*Nstride0;
+                    eNodes[INDEX2(6,k,NN)]=node0+Nstride1;
+                    eNodes[INDEX2(7,k,NN)]=node0+1*Nstride0;
+                } else {
+                    eNodes[INDEX2(0,k,NN)]=node0+2*Nstride0;
+                    eNodes[INDEX2(1,k,NN)]=node0+2*Nstride1+2*Nstride0;
+                    eNodes[INDEX2(2,k,NN)]=node0+Nstride1+2*Nstride0;
+                }
+            }
+            faceNECount+=local_NE1;
+        }
+        totalNECount+=NE1;
+    }
+    if (!periodic[1] && local_NE1>0) {
+        // **  elements on boundary 010 (x2=0):
         if (e_offset1 == 0) {
-#pragma omp parallel for private(i0,k,node0)
-           for (i0=0;i0<local_NE0;i0++) {
-               k=i0+faceNECount;
-               node0=Nstride0*N_PER_E*(i0+e_offset0);
-
-               out->FaceElements->Id[k]=e_offset0+i0+totalNECount;
-               out->FaceElements->Tag[k]=10;
-               out->FaceElements->Owner[k]=myRank;
-
-               if (useElementsOnFace) {
-                   out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0;
-                   out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+2*Nstride0;
-                   out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+2*Nstride1+2*Nstride0;
-                   out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+2*Nstride1;
-                   out->FaceElements->Nodes[INDEX2(4,k,NN)]=node0+1*Nstride0;
-                   out->FaceElements->Nodes[INDEX2(5,k,NN)]=node0+Nstride1+2*Nstride0;
-                   out->FaceElements->Nodes[INDEX2(6,k,NN)]=node0+2*Nstride1+1*Nstride0;
-                   out->FaceElements->Nodes[INDEX2(7,k,NN)]=node0+Nstride1;
-               } else {
-                   out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0;
-                   out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+2*Nstride0;
-                   out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+1*Nstride0;
-               }
-           }
-           faceNECount+=local_NE0;
+#pragma omp parallel for
+            for (index_t i0=0; i0<local_NE0; i0++) {
+                const dim_t k = i0+faceNECount;
+                const index_t node0 = Nstride0*N_PER_E*(i0+e_offset0);
+
+                out->FaceElements->Id[k] = e_offset0+i0+totalNECount;
+                out->FaceElements->Tag[k] = 10;
+                out->FaceElements->Owner[k] = myRank;
+                if (useElementsOnFace) {
+                    eNodes[INDEX2(0,k,NN)]=node0;
+                    eNodes[INDEX2(1,k,NN)]=node0+2*Nstride0;
+                    eNodes[INDEX2(2,k,NN)]=node0+2*Nstride1+2*Nstride0;
+                    eNodes[INDEX2(3,k,NN)]=node0+2*Nstride1;
+                    eNodes[INDEX2(4,k,NN)]=node0+1*Nstride0;
+                    eNodes[INDEX2(5,k,NN)]=node0+Nstride1+2*Nstride0;
+                    eNodes[INDEX2(6,k,NN)]=node0+2*Nstride1+1*Nstride0;
+                    eNodes[INDEX2(7,k,NN)]=node0+Nstride1;
+                } else {
+                    eNodes[INDEX2(0,k,NN)]=node0;
+                    eNodes[INDEX2(1,k,NN)]=node0+2*Nstride0;
+                    eNodes[INDEX2(2,k,NN)]=node0+1*Nstride0;
+                }
+            }
+            faceNECount+=local_NE0;
         }
         totalNECount+=NE0;
-        /* **  elements on boundary 020 (x2=1): */
+
+        // **  elements on boundary 020 (x2=1):
         if (local_NE1+e_offset1 == NE1) {
-#pragma omp parallel for private(i0,k,node0)
-           for (i0=0;i0<local_NE0;i0++) {
-               k=i0+faceNECount;
-               node0=Nstride0*N_PER_E*(i0+e_offset0)+Nstride1*N_PER_E*(NE1-1);
-
-               out->FaceElements->Id[k]=i0+e_offset0+totalNECount;
-               out->FaceElements->Tag[k]=20;
-               out->FaceElements->Owner[k]=myRank;
-               if (useElementsOnFace) {
-                    out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+2*Nstride1+2*Nstride0;
-                    out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+2*Nstride1;
-                    out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0;
-                    out->FaceElements->Nodes[INDEX2(3,k,NN)]=node0+2*Nstride0;
-                    out->FaceElements->Nodes[INDEX2(4,k,NN)]=node0+2*Nstride1+1*Nstride0;
-                    out->FaceElements->Nodes[INDEX2(5,k,NN)]=node0+Nstride1;
-                    out->FaceElements->Nodes[INDEX2(6,k,NN)]=node0+1*Nstride0;
-                    out->FaceElements->Nodes[INDEX2(7,k,NN)]=node0+Nstride1+2*Nstride0;
-               } else {
-                    out->FaceElements->Nodes[INDEX2(0,k,NN)]=node0+2*Nstride1+2*Nstride0;
-                    out->FaceElements->Nodes[INDEX2(1,k,NN)]=node0+2*Nstride1;
-                    out->FaceElements->Nodes[INDEX2(2,k,NN)]=node0+2*Nstride1+1*Nstride0;
-               }
-           }
-           faceNECount+=local_NE0;
+#pragma omp parallel for
+            for (index_t i0=0; i0<local_NE0; i0++) {
+                const dim_t k = i0+faceNECount;
+                const index_t node0 = Nstride0*N_PER_E*(i0+e_offset0)
+                                    + Nstride1*N_PER_E*(NE1-1);
+
+                out->FaceElements->Id[k] = i0+e_offset0+totalNECount;
+                out->FaceElements->Tag[k] = 20;
+                out->FaceElements->Owner[k] = myRank;
+                if (useElementsOnFace) {
+                    eNodes[INDEX2(0,k,NN)]=node0+2*Nstride1+2*Nstride0;
+                    eNodes[INDEX2(1,k,NN)]=node0+2*Nstride1;
+                    eNodes[INDEX2(2,k,NN)]=node0;
+                    eNodes[INDEX2(3,k,NN)]=node0+2*Nstride0;
+                    eNodes[INDEX2(4,k,NN)]=node0+2*Nstride1+1*Nstride0;
+                    eNodes[INDEX2(5,k,NN)]=node0+Nstride1;
+                    eNodes[INDEX2(6,k,NN)]=node0+1*Nstride0;
+                    eNodes[INDEX2(7,k,NN)]=node0+Nstride1+2*Nstride0;
+                } else {
+                    eNodes[INDEX2(0,k,NN)]=node0+2*Nstride1+2*Nstride0;
+                    eNodes[INDEX2(1,k,NN)]=node0+2*Nstride1;
+                    eNodes[INDEX2(2,k,NN)]=node0+2*Nstride1+1*Nstride0;
+                }
+            }
+            faceNECount+=local_NE0;
         }
         totalNECount+=NE0;
-     }
-  }
-  if (noError()) {
-     /* add tag names */
-     out->addTagMap("top", 20);
-     out->addTagMap("bottom", 10);
-     out->addTagMap("left", 1);
-     out->addTagMap("right", 2);
-   }
-    // prepare mesh for further calculations
-    if (noError()) {
-        out->resolveNodeIds();
     }
+
+    // add tag names
+    out->addTagMap("top", 20);
+    out->addTagMap("bottom", 10);
+    out->addTagMap("left", 1);
+    out->addTagMap("right", 2);
+
+    // prepare mesh for further calculations
+    out->resolveNodeIds();
     if (noError()) {
         out->prepare(optimize);
     }
diff --git a/finley/src/Mesh_write.cpp b/finley/src/Mesh_write.cpp
index b63e934..c3bf992 100644
--- a/finley/src/Mesh_write.cpp
+++ b/finley/src/Mesh_write.cpp
@@ -15,109 +15,113 @@
 *****************************************************************************/
 
 
-/************************************************************************************/
+/*****************************************************************************/
 
 /*   Finley: write Mesh in finley file format */
 
-/************************************************************************************/
+/*****************************************************************************/
 
 #define ESNEEDPYTHON
-#include "esysUtils/first.h"
+#include <esysUtils/first.h>
 
 #include "Mesh.h"
 
+#include <iomanip>
+
+using std::cout;
+using std::endl;
+using std::ios;
+using std::setw;
+
 namespace finley {
 
 /// writes the mesh to the external file fname using the Finley file format
 void Mesh::write(const std::string fname) const
 {
-  char error_msg[LenErrorMsg_MAX];
-  FILE *f;
-  int NN,i,j,numDim;
-
-  if (MPIInfo->size >1 ) {
-    setError(IO_ERROR,"Mesh_write: only single processor runs are supported.");
-    return;
-
-  }
-  /* open file */
-  f=fopen(fname.c_str(), "w");
-  if (f==NULL) {
-    sprintf(error_msg,"Mesh_write: Opening file %s for writing failed.",fname.c_str());
-    setError(IO_ERROR,error_msg);
-    return;
-  }
-
-  /* write header */
-
-  fprintf(f, "%s\n", m_name.c_str());
-  
-  /*  write nodes: */
-  
-  if (Nodes!=NULL) {
-    numDim=getDim();
-    fprintf(f,"%1dD-Nodes %d\n", numDim, Nodes->numNodes);
-    for (i=0;i<Nodes->numNodes;i++) {
-      fprintf(f,"%d %d %d",Nodes->Id[i],Nodes->globalDegreesOfFreedom[i],Nodes->Tag[i]);
-      for (j=0;j<numDim;j++) fprintf(f," %20.15e",Nodes->Coordinates[INDEX2(j,i,numDim)]);
-      fprintf(f,"\n");
+    if (MPIInfo->size >1) {
+        setError(IO_ERROR, "Mesh_write: only single processor runs are supported.");
+        return;
     }
-  } else {
-    fprintf(f,"0D-Nodes 0\n");
-  }
-  
-  /*  write elements: */
-
-  if (Elements!=NULL) {
-    fprintf(f, "%s %d\n",Elements->referenceElementSet->referenceElement->Type->Name,Elements->numElements);
-    NN=Elements->numNodes; 
-    for (i=0;i<Elements->numElements;i++) {
-      fprintf(f,"%d %d",Elements->Id[i],Elements->Tag[i]);
-      for (j=0;j<NN;j++) fprintf(f," %d",Nodes->Id[Elements->Nodes[INDEX2(j,i,NN)]]);
-      fprintf(f,"\n");
+    // open file
+    FILE* f=fopen(fname.c_str(), "w");
+    if (f==NULL) {
+        char error_msg[LenErrorMsg_MAX];
+        sprintf(error_msg, "Mesh_write: Opening file %s for writing failed.",fname.c_str());
+        setError(IO_ERROR,error_msg);
+        return;
     }
-  } else {
-    fprintf(f,"Tet4 0\n");
-  }
-
-  /*  write face elements: */
-  if (FaceElements!=NULL) {
-    fprintf(f, "%s %d\n", FaceElements->referenceElementSet->referenceElement->Type->Name,FaceElements->numElements);
-    NN=FaceElements->numNodes;
-    for (i=0;i<FaceElements->numElements;i++) {
-      fprintf(f,"%d %d",FaceElements->Id[i],FaceElements->Tag[i]);
-      for (j=0;j<NN;j++) fprintf(f," %d",Nodes->Id[FaceElements->Nodes[INDEX2(j,i,NN)]]);
-      fprintf(f,"\n");
+
+    // write header
+    fprintf(f, "%s\n", m_name.c_str());
+
+    // write nodes:
+    if (Nodes != NULL) {
+        const int numDim = getDim();
+        fprintf(f,"%1dD-Nodes %d\n", numDim, Nodes->numNodes);
+        for (index_t i=0; i<Nodes->numNodes; i++) {
+            fprintf(f,"%d %d %d",Nodes->Id[i],Nodes->globalDegreesOfFreedom[i],Nodes->Tag[i]);
+            for (int j=0; j<numDim; j++)
+                fprintf(f," %20.15e",Nodes->Coordinates[INDEX2(j,i,numDim)]);
+            fprintf(f,"\n");
+        }
+    } else {
+        fprintf(f,"0D-Nodes 0\n");
     }
-  } else {
-    fprintf(f,"Tri3 0\n");
-  }
-
-  /*  write Contact elements : */
-  if (ContactElements!=NULL) {
-    fprintf(f, "%s %d\n",ContactElements->referenceElementSet->referenceElement->Type->Name,ContactElements->numElements);
-    NN=ContactElements->numNodes;
-    for (i=0;i<ContactElements->numElements;i++) {
-      fprintf(f,"%d %d",ContactElements->Id[i],ContactElements->Tag[i]);
-      for (j=0;j<NN;j++) fprintf(f," %d",Nodes->Id[ContactElements->Nodes[INDEX2(j,i,NN)]]);
-      fprintf(f,"\n");
+
+    // write elements:
+    if (Elements != NULL) {
+        fprintf(f, "%s %d\n",Elements->referenceElementSet->referenceElement->Type->Name,Elements->numElements);
+        const int NN=Elements->numNodes;
+        for (index_t i=0; i<Elements->numElements; i++) {
+            fprintf(f,"%d %d",Elements->Id[i],Elements->Tag[i]);
+            for (int j=0; j<NN; j++)
+                fprintf(f," %d",Nodes->Id[Elements->Nodes[INDEX2(j,i,NN)]]);
+            fprintf(f,"\n");
+        }
+    } else {
+        fprintf(f,"Tet4 0\n");
     }
-  } else {
-    fprintf(f,"Tri3_Contact 0\n");
-  }
-  
-  /*  write points: */
-  if (Points!=NULL) {
-    fprintf(f, "%s %d\n",Points->referenceElementSet->referenceElement->Type->Name,Points->numElements);
-    for (i=0;i<Points->numElements;i++) {
-      fprintf(f,"%d %d %d\n",Points->Id[i],Points->Tag[i],Nodes->Id[Points->Nodes[INDEX2(0,i,1)]]);
+
+    // write face elements:
+    if (FaceElements != NULL) {
+        fprintf(f, "%s %d\n", FaceElements->referenceElementSet->referenceElement->Type->Name,FaceElements->numElements);
+        const int NN=FaceElements->numNodes;
+        for (index_t i=0; i<FaceElements->numElements; i++) {
+            fprintf(f,"%d %d",FaceElements->Id[i],FaceElements->Tag[i]);
+            for (int j=0; j<NN; j++)
+                fprintf(f," %d",Nodes->Id[FaceElements->Nodes[INDEX2(j,i,NN)]]);
+            fprintf(f,"\n");
+        }
+    } else {
+        fprintf(f,"Tri3 0\n");
     }
-  } else {
-    fprintf(f,"Point1 0\n");
-  }
 
-    /*  write tags:*/
-    if (tagMap.size()>0) {
+    // write contact elements:
+    if (ContactElements != NULL) {
+        fprintf(f, "%s %d\n",ContactElements->referenceElementSet->referenceElement->Type->Name,ContactElements->numElements);
+        const int NN=ContactElements->numNodes;
+        for (index_t i=0; i<ContactElements->numElements; i++) {
+            fprintf(f,"%d %d",ContactElements->Id[i],ContactElements->Tag[i]);
+            for (int j=0; j<NN; j++)
+                fprintf(f," %d",Nodes->Id[ContactElements->Nodes[INDEX2(j,i,NN)]]);
+            fprintf(f,"\n");
+        }
+    } else {
+        fprintf(f,"Tri3_Contact 0\n");
+    }
+
+    // write points:
+    if (Points != NULL) {
+        fprintf(f, "%s %d\n",Points->referenceElementSet->referenceElement->Type->Name,Points->numElements);
+        for (index_t i=0; i<Points->numElements; i++) {
+            fprintf(f,"%d %d %d\n",Points->Id[i],Points->Tag[i],Nodes->Id[Points->Nodes[INDEX2(0,i,1)]]);
+        }
+    } else {
+        fprintf(f,"Point1 0\n");
+    }
+
+    // write tags:
+    if (tagMap.size() > 0) {
         fprintf(f, "Tags\n");
         TagMap::const_iterator it;
         for (it=tagMap.begin(); it!=tagMap.end(); it++) {
@@ -126,124 +130,172 @@ void Mesh::write(const std::string fname) const
     }
     fclose(f);
 #ifdef Finley_TRACE
-    printf("mesh %s has been written to file %s\n", m_name, fname.c_str());
+    cout << "mesh " << m_name << " has been written to file " << fname << endl;
 #endif
 }
 
 void Mesh::printInfo(bool full)
 {
-  int NN,i,j,numDim;
-
-  fprintf(stdout, "PrintMesh_Info running on CPU %d of %d\n",MPIInfo->rank, MPIInfo->size);
-  fprintf(stdout, "\tMesh name '%s'\n", m_name.c_str());
-  fprintf(stdout, "\tApproximation order %d\n",approximationOrder);
-  fprintf(stdout, "\tReduced Approximation order %d\n",reducedApproximationOrder);
-  fprintf(stdout, "\tIntegration order %d\n",integrationOrder);
-  fprintf(stdout, "\tReduced Integration order %d\n",reducedIntegrationOrder);
-
-  /* write nodes: */
-  if (Nodes!=NULL) {
-    numDim=getDim();
-    fprintf(stdout, "\tNodes: %1dD-Nodes %d\n", numDim, Nodes->numNodes);
-    if (full) {
-      fprintf(stdout, "\t     Id   Tag  gDOF   gNI grDfI  grNI:  Coordinates\n");
-      for (i=0;i<Nodes->numNodes;i++) {
-        fprintf(stdout, "\t  %5d %5d %5d %5d %5d %5d: ", Nodes->Id[i], Nodes->Tag[i], Nodes->globalDegreesOfFreedom[i], Nodes->globalNodesIndex[i], Nodes->globalReducedDOFIndex[i], Nodes->globalReducedNodesIndex[i]);
-        for (j=0;j<numDim;j++) fprintf(stdout," %20.15e",Nodes->Coordinates[INDEX2(j,i,numDim)]);
-        fprintf(stdout,"\n");
-      }
-    }
-  } else {
-    fprintf(stdout, "\tNodes: 0D-Nodes 0\n");
-  }
-
-  /* write elements: */
-  if (Elements!=NULL) {
-    int mine=0, overlap=0;
-    for (i=0;i<Elements->numElements;i++) {
-      if (Elements->Owner[i] == MPIInfo->rank) mine++;
-      else overlap++;
-    }
-    fprintf(stdout, "\tElements: %s %d (TypeId=%d) owner=%d overlap=%d\n",Elements->referenceElementSet->referenceElement->Type->Name,Elements->numElements,Elements->referenceElementSet->referenceElement->Type->TypeId, mine, overlap);
-    NN=Elements->numNodes;
-    if (full) {
-      fprintf(stdout, "\t     Id   Tag Owner Color:  Nodes\n");
-      for (i=0;i<Elements->numElements;i++) {
-        fprintf(stdout, "\t  %5d %5d %5d %5d: ",Elements->Id[i],Elements->Tag[i],Elements->Owner[i],Elements->Color[i]);
-        for (j=0;j<NN;j++) fprintf(stdout," %5d",Nodes->Id[Elements->Nodes[INDEX2(j,i,NN)]]);
-        fprintf(stdout,"\n");
-      }
-    }
-  } else {
-    fprintf(stdout, "\tElements: Tet4 0\n");
-  }
-
-  /* write face elements: */
-  if (FaceElements!=NULL) {
-    int mine=0, overlap=0;
-    for (i=0;i<FaceElements->numElements;i++) {
-      if (FaceElements->Owner[i] == MPIInfo->rank) mine++;
-      else overlap++;
-    }
-    fprintf(stdout, "\tFace elements: %s %d (TypeId=%d) owner=%d overlap=%d\n", FaceElements->referenceElementSet->referenceElement->Type->Name,FaceElements->numElements,FaceElements->referenceElementSet->referenceElement->Type->TypeId, mine, overlap);
-    NN=FaceElements->numNodes;
-    if (full) {
-      fprintf(stdout, "\t     Id   Tag Owner Color:  Nodes\n");
-      for (i=0;i<FaceElements->numElements;i++) {
-        fprintf(stdout, "\t  %5d %5d %5d %5d: ",FaceElements->Id[i],FaceElements->Tag[i],FaceElements->Owner[i],FaceElements->Color[i]);
-        for (j=0;j<NN;j++) fprintf(stdout," %5d",Nodes->Id[FaceElements->Nodes[INDEX2(j,i,NN)]]);
-        fprintf(stdout,"\n");
-      }
+    cout << "PrintMesh_Info running on CPU " << MPIInfo->rank << " of "
+              << MPIInfo->size << endl;
+    cout << "\tMesh name '" << m_name << "'\n";
+    cout << "\tApproximation order " << approximationOrder << endl;
+    cout << "\tReduced Approximation order " <<reducedApproximationOrder << endl;
+    cout << "\tIntegration order " << integrationOrder << endl;
+    cout << "\tReduced Integration order " << reducedIntegrationOrder << endl;
+
+    // write nodes:
+    if (Nodes != NULL) {
+        const int numDim = getDim();
+        cout << "\tNodes: " << numDim << "D-Nodes " << Nodes->numNodes << endl;
+        if (full) {
+            cout << "\t     Id   Tag  gDOF   gNI grDfI  grNI:  Coordinates\n";
+            for (index_t i=0; i < Nodes->numNodes; i++) {
+                cout << "\t" << setw(7) << Nodes->Id[i]
+                     << setw(6) << Nodes->Tag[i]
+                     << setw(6) << Nodes->globalDegreesOfFreedom[i]
+                     << setw(6) << Nodes->globalNodesIndex[i]
+                     << setw(6) << Nodes->globalReducedDOFIndex[i]
+                     << setw(6) << Nodes->globalReducedNodesIndex[i] << ": ";
+                cout.setf(ios::scientific, ios::floatfield);
+                cout.precision(15);
+                for (int j=0; j<numDim; j++)
+                    cout << " " << Nodes->Coordinates[INDEX2(j,i,numDim)];
+                cout << endl;
+            }
+        }
+    } else {
+        cout << "\tNodes: 0D-Nodes 0\n";
     }
-  } else {
-    fprintf(stdout, "\tFace elements: Tri3 0\n");
-  }
-
-  /* write Contact elements : */
-  if (ContactElements!=NULL) {
-    int mine=0, overlap=0;
-    for (i=0;i<ContactElements->numElements;i++) {
-      if (ContactElements->Owner[i] == MPIInfo->rank) mine++;
-      else overlap++;
+
+    // write elements:
+    if (Elements != NULL) {
+        dim_t mine=0, overlap=0;
+        for (index_t i=0; i<Elements->numElements; i++) {
+            if (Elements->Owner[i] == MPIInfo->rank)
+                mine++;
+            else
+                overlap++;
+        }
+        cout << "\tElements: "
+            << Elements->referenceElementSet->referenceElement->Type->Name
+            << " " << Elements->numElements << " (TypeId="
+            << Elements->referenceElementSet->referenceElement->Type->TypeId
+            << ") owner=" << mine << " overlap=" << overlap << endl;
+        if (full) {
+            const int NN=Elements->numNodes;
+            cout << "\t     Id   Tag Owner Color:  Nodes\n";
+            for (index_t i=0; i<Elements->numElements; i++) {
+                cout << "\t" << setw(7) << Elements->Id[i]
+                     << setw(6) << Elements->Tag[i]
+                     << setw(6) << Elements->Owner[i]
+                     << setw(6) << Elements->Color[i] << ": ";
+                for (int j=0; j<NN; j++)
+                    cout << setw(6) << Nodes->Id[Elements->Nodes[INDEX2(j,i,NN)]];
+                cout << endl;
+            }
+        }
+    } else {
+        cout << "\tElements: Tet4 0\n";
     }
-    fprintf(stdout, "\tContact elements: %s %d (TypeId=%d) owner=%d overlap=%d\n",ContactElements->referenceElementSet->referenceElement->Type->Name,ContactElements->numElements,ContactElements->referenceElementSet->referenceElement->Type->TypeId, mine, overlap);
-    NN=ContactElements->numNodes;
-    if (full) {
-      fprintf(stdout, "\t     Id   Tag Owner Color:  Nodes\n");
-      for (i=0;i<ContactElements->numElements;i++) {
-        fprintf(stdout, "\t  %5d %5d %5d %5d: ",ContactElements->Id[i],ContactElements->Tag[i],ContactElements->Owner[i],ContactElements->Color[i]);
-        for (j=0;j<NN;j++) fprintf(stdout," %5d",Nodes->Id[ContactElements->Nodes[INDEX2(j,i,NN)]]);
-        fprintf(stdout,"\n");
-      }
+
+    // write face elements:
+    if (FaceElements != NULL) {
+        dim_t mine=0, overlap=0;
+        for (index_t i=0; i < FaceElements->numElements; i++) {
+            if (FaceElements->Owner[i] == MPIInfo->rank)
+                mine++;
+            else
+                overlap++;
+        }
+        cout << "\tFace elements: "
+            << FaceElements->referenceElementSet->referenceElement->Type->Name
+            << " " << FaceElements->numElements << " (TypeId="
+            << FaceElements->referenceElementSet->referenceElement->Type->TypeId
+            << ") owner=" << mine << " overlap=" << overlap << endl;
+        if (full) {
+            const int NN=FaceElements->numNodes;
+            cout << "\t     Id   Tag Owner Color:  Nodes\n";
+            for (index_t i=0; i<FaceElements->numElements; i++) {
+                cout << "\t" << setw(7) << FaceElements->Id[i]
+                     << setw(6) << FaceElements->Tag[i]
+                     << setw(6) << FaceElements->Owner[i]
+                     << setw(6) << FaceElements->Color[i] << ": ";
+                for (int j=0; j<NN; j++)
+                    cout << setw(6) << Nodes->Id[FaceElements->Nodes[INDEX2(j,i,NN)]];
+                cout << endl;
+            }
+        }
+    } else {
+        cout << "\tFace elements: Tri3 0\n";
     }
-  } else {
-    fprintf(stdout, "\tContact elements: Tri3_Contact 0\n");
-  }
-
-  /* write points: */
-  if (Points!=NULL) {
-    int mine=0, overlap=0;
-    for (i=0;i<Points->numElements;i++) {
-      if (Points->Owner[i] == MPIInfo->rank) mine++;
-      else overlap++;
+
+    // write Contact elements:
+    if (ContactElements != NULL) {
+        dim_t mine=0, overlap=0;
+        for (index_t i=0; i<ContactElements->numElements; i++) {
+            if (ContactElements->Owner[i] == MPIInfo->rank)
+                mine++;
+            else
+                overlap++;
+        }
+        cout << "\tContact elements: "
+            << ContactElements->referenceElementSet->referenceElement->Type->Name
+            << " " << ContactElements->numElements << " (TypeId="
+            << ContactElements->referenceElementSet->referenceElement->Type->TypeId
+            << ") owner=" << mine << " overlap=" << overlap << endl;
+        if (full) {
+            const int NN=ContactElements->numNodes;
+            cout << "\t     Id   Tag Owner Color:  Nodes\n";
+            for (index_t i=0; i<ContactElements->numElements; i++) {
+                cout << "\t" << setw(7) << ContactElements->Id[i]
+                     << setw(6) << ContactElements->Tag[i]
+                     << setw(6) << ContactElements->Owner[i]
+                     << setw(6) << ContactElements->Color[i] << ": ";
+                for (int j=0; j<NN; j++)
+                    cout << setw(6) << Nodes->Id[ContactElements->Nodes[INDEX2(j,i,NN)]];
+                cout << endl;
+            }
+        }
+    } else {
+        cout << "\tContact elements: Tri3_Contact 0\n";
     }
-    fprintf(stdout, "\tPoints: %s %d (TypeId=%d) owner=%d overlap=%d\n",Points->referenceElementSet->referenceElement->Type->Name,Points->numElements,Points->referenceElementSet->referenceElement->Type->TypeId, mine, overlap);
-    if (full) {
-      fprintf(stdout, "\t     Id   Tag Owner Color:  Nodes\n");
-      for (i=0;i<Points->numElements;i++) {
-        fprintf(stdout, "\t  %5d %5d %5d %5d %5d\n",Points->Id[i],Points->Tag[i],Points->Owner[i],Points->Color[i],Nodes->Id[Points->Nodes[INDEX2(0,i,1)]]);
-      }
+
+    // write points:
+    if (Points != NULL) {
+        dim_t mine=0, overlap=0;
+        for (index_t i=0; i<Points->numElements; i++) {
+            if (Points->Owner[i] == MPIInfo->rank)
+                mine++;
+            else
+                overlap++;
+        }
+        cout << "\tPoints: "
+            << Points->referenceElementSet->referenceElement->Type->Name
+            << " " << Points->numElements << " (TypeId="
+            << Points->referenceElementSet->referenceElement->Type->TypeId
+            << ") owner=" << mine << " overlap=" << overlap << endl;
+        if (full) {
+            cout << "\t     Id   Tag Owner Color:  Nodes\n";
+            for (index_t i=0; i<Points->numElements; i++) {
+                cout << "\t" << setw(7) << Points->Id[i]
+                     << setw(6) << Points->Tag[i]
+                     << setw(6) << Points->Owner[i]
+                     << setw(6) << Points->Color[i]
+                     << setw(8) << Nodes->Id[Points->Nodes[INDEX2(0,i,1)]]
+                     << endl;
+            }
+        }
+    } else {
+        cout << "\tPoints: Point1 0\n";
     }
-  } else {
-    fprintf(stdout, "\tPoints: Point1 0\n");
-  }
 
     // write tags
-    if (tagMap.size()>0) {
-        fprintf(stdout, "\tTags:\n");
+    if (tagMap.size() > 0) {
+        cout << "\tTags:\n";
         TagMap::const_iterator it;
         for (it=tagMap.begin(); it!=tagMap.end(); it++) {
-            fprintf(stdout, "\t  %5d %s\n", it->second, it->first.c_str());
+            cout << "\t" << setw(7) << it->second << " " << it->first << endl;
         }
     }
 }
diff --git a/finley/src/NodeFile.cpp b/finley/src/NodeFile.cpp
index e0c3246..e80b435 100644
--- a/finley/src/NodeFile.cpp
+++ b/finley/src/NodeFile.cpp
@@ -33,49 +33,53 @@
 namespace finley {
 
 // helper function
-static void scatterEntries(int n, int* index, int min_index, int max_index,
-                           int* Id_out, int* Id_in, int* Tag_out, int* Tag_in,
-                           int* globalDegreesOfFreedom_out,
-                           int* globalDegreesOfFreedom_in,
+static void scatterEntries(dim_t n, const index_t* index, index_t min_index,
+                           index_t max_index, index_t* Id_out,
+                           const index_t* Id_in,
+                           int* Tag_out, const int* Tag_in,
+                           index_t* globalDegreesOfFreedom_out,
+                           const index_t* globalDegreesOfFreedom_in,
                            int numDim, double* Coordinates_out,
-                           double* Coordinates_in)
+                           const double* Coordinates_in)
 {
-    const int range = max_index-min_index;
+    const index_t range = max_index-min_index;
     const size_t numDim_size = numDim*sizeof(double);
 
 #pragma omp parallel for
-    for (int i=0; i<n; i++) {
-        const int k=index[i]-min_index;
-        if ((k>=0) && (k<range)) {
-            Id_out[k]=Id_in[i];
-            Tag_out[k]=Tag_in[i];
-            globalDegreesOfFreedom_out[k]=globalDegreesOfFreedom_in[i];
-            memcpy(&(Coordinates_out[INDEX2(0,k,numDim)]),
-                    &(Coordinates_in[INDEX2(0,i,numDim)]), numDim_size);
+    for (index_t i=0; i<n; i++) {
+        const index_t k = index[i]-min_index;
+        if (k>=0 && k<range) {
+            Id_out[k] = Id_in[i];
+            Tag_out[k] = Tag_in[i];
+            globalDegreesOfFreedom_out[k] = globalDegreesOfFreedom_in[i];
+            memcpy(&Coordinates_out[INDEX2(0,k,numDim)],
+                    &Coordinates_in[INDEX2(0,i,numDim)], numDim_size);
         }
     }
 }
 
 // helper function
-static void gatherEntries(int n, const int* index, int min_index, int max_index,
-                          int* Id_out, int* Id_in, int* Tag_out, int* Tag_in,
-                          int* globalDegreesOfFreedom_out,
-                          int* globalDegreesOfFreedom_in,
+static void gatherEntries(dim_t n, const index_t* index,
+                          index_t min_index, index_t max_index,
+                          index_t* Id_out, const index_t* Id_in,
+                          int* Tag_out, const int* Tag_in,
+                          index_t* globalDegreesOfFreedom_out,
+                          const index_t* globalDegreesOfFreedom_in,
                           int numDim, double* Coordinates_out,
-                          double* Coordinates_in)
+                          const double* Coordinates_in)
 {
-    const int range = max_index-min_index;
+    const index_t range = max_index-min_index;
     const size_t numDim_size = numDim*sizeof(double);
 
 #pragma omp parallel for
-    for (int i=0; i<n; i++) {
-        const int k=index[i]-min_index;
-        if ((k>=0) && (k<range)) {
-            Id_out[i]=Id_in[k];
-            Tag_out[i]=Tag_in[k];
-            globalDegreesOfFreedom_out[i]=globalDegreesOfFreedom_in[k];
-            memcpy(&(Coordinates_out[INDEX2(0,i,numDim)]),
-                    &(Coordinates_in[INDEX2(0,k,numDim)]), numDim_size);
+    for (index_t i=0; i<n; i++) {
+        const index_t k = index[i]-min_index;
+        if (k>=0 && k<range) {
+            Id_out[i] = Id_in[k];
+            Tag_out[i] = Tag_in[k];
+            globalDegreesOfFreedom_out[i] = globalDegreesOfFreedom_in[k];
+            memcpy(&Coordinates_out[INDEX2(0,i,numDim)],
+                    &Coordinates_in[INDEX2(0,k,numDim)], numDim_size);
         }
     }
 }
@@ -107,38 +111,38 @@ NodeFile::~NodeFile()
 }
 
 /// allocates the node table within this node file to hold NN nodes.
-void NodeFile::allocTable(int NN)
+void NodeFile::allocTable(dim_t NN)
 {
-    if (numNodes>0)
+    if (numNodes > 0)
         freeTable();
 
-    Id=new int[NN];
-    Coordinates=new double[NN*numDim];
-    Tag=new int[NN];
-    globalDegreesOfFreedom=new int[NN];
-    globalReducedDOFIndex=new int[NN];
-    globalReducedNodesIndex=new int[NN];
-    globalNodesIndex=new int[NN];
-    reducedNodesId=new int[NN];
-    degreesOfFreedomId=new int[NN];
-    reducedDegreesOfFreedomId=new int[NN];
+    Id = new index_t[NN];
+    Coordinates = new double[NN*numDim];
+    Tag = new int[NN];
+    globalDegreesOfFreedom = new index_t[NN];
+    globalReducedDOFIndex = new index_t[NN];
+    globalReducedNodesIndex = new index_t[NN];
+    globalNodesIndex = new index_t[NN];
+    reducedNodesId = new index_t[NN];
+    degreesOfFreedomId = new index_t[NN];
+    reducedDegreesOfFreedomId = new index_t[NN];
     numNodes=NN;
 
     // this initialization makes sure that data are located on the right
     // processor
 #pragma omp parallel for
-    for (int n=0; n<numNodes; n++) {
-        Id[n]=-1;
+    for (index_t n=0; n<numNodes; n++) {
+        Id[n] = -1;
         for (int i=0; i<numDim; i++)
             Coordinates[INDEX2(i,n,numDim)]=0.;
-        Tag[n]=-1;
-        globalDegreesOfFreedom[n]=-1;
-        globalReducedDOFIndex[n]=-1;
-        globalReducedNodesIndex[n]=-1;
-        globalNodesIndex[n]=-1;
-        reducedNodesId[n]=-1;
-        degreesOfFreedomId[n]=-1;
-        reducedDegreesOfFreedomId[n]=-1;
+        Tag[n] = -1;
+        globalDegreesOfFreedom[n] = -1;
+        globalReducedDOFIndex[n] = -1;
+        globalReducedNodesIndex[n] = -1;
+        globalNodesIndex[n] = -1;
+        reducedNodesId[n] = -1;
+        degreesOfFreedomId[n] = -1;
+        reducedDegreesOfFreedomId[n] = -1;
     }
 }
 
@@ -209,7 +213,7 @@ void NodeFile::setCoordinates(const escript::Data& newX)
         const size_t numDim_size=numDim*sizeof(double);
         ++status;
 #pragma omp parallel for
-        for (int n=0; n<numNodes; n++) {
+        for (index_t n=0; n<numNodes; n++) {
             memcpy(&(Coordinates[INDEX2(0,n,numDim)]), newX.getSampleDataRO(n), numDim_size);
         }
     }
@@ -230,16 +234,16 @@ void NodeFile::setTags(const int newTag, const escript::Data& mask)
     }
 
 #pragma omp parallel for
-    for (int n=0; n<numNodes; n++) {
+    for (index_t n=0; n<numNodes; n++) {
          if (mask.getSampleDataRO(n)[0] > 0)
              Tag[n]=newTag;
     }
     updateTagList();
 }
 
-std::pair<int,int> NodeFile::getDOFRange() const
+std::pair<index_t,index_t> NodeFile::getDOFRange() const
 {
-    std::pair<int,int> result(util::getMinMaxInt(
+    std::pair<index_t,index_t> result(util::getMinMaxInt(
                                         1, numNodes, globalDegreesOfFreedom));
     if (result.second < result.first) {
         result.first = -1;
@@ -248,14 +252,14 @@ std::pair<int,int> NodeFile::getDOFRange() const
     return result;
 }
 
-std::pair<int,int> NodeFile::getGlobalIdRange() const
+std::pair<index_t,index_t> NodeFile::getGlobalIdRange() const
 {
-    std::pair<int,int> result(util::getMinMaxInt(1, numNodes, Id));
+    std::pair<index_t,index_t> result(util::getMinMaxInt(1, numNodes, Id));
 
 #ifdef ESYS_MPI
-    int global_id_range[2];
-    int id_range[2] = { -result.first, result.second };
-    MPI_Allreduce(id_range, global_id_range, 2, MPI_INT, MPI_MAX, MPIInfo->comm);
+    index_t global_id_range[2];
+    index_t id_range[2] = { -result.first, result.second };
+    MPI_Allreduce(id_range, global_id_range, 2, MPI_DIM_T, MPI_MAX, MPIInfo->comm);
     result.first = -global_id_range[0];
     result.second = global_id_range[1];
 #endif
@@ -266,15 +270,15 @@ std::pair<int,int> NodeFile::getGlobalIdRange() const
     return result;
 }
 
-std::pair<int,int> NodeFile::getGlobalDOFRange() const
+std::pair<index_t,index_t> NodeFile::getGlobalDOFRange() const
 {
-    std::pair<int,int> result(util::getMinMaxInt(
+    std::pair<index_t,index_t> result(util::getMinMaxInt(
                                         1, numNodes, globalDegreesOfFreedom));
 
 #ifdef ESYS_MPI
-    int global_id_range[2];
-    int id_range[2] = { -result.first, result.second };
-    MPI_Allreduce(id_range, global_id_range, 2, MPI_INT, MPI_MAX, MPIInfo->comm);
+    index_t global_id_range[2];
+    index_t id_range[2] = { -result.first, result.second };
+    MPI_Allreduce(id_range, global_id_range, 2, MPI_DIM_T, MPI_MAX, MPIInfo->comm);
     result.first = -global_id_range[0];
     result.second = global_id_range[1];
 #endif
@@ -285,14 +289,14 @@ std::pair<int,int> NodeFile::getGlobalDOFRange() const
     return result;
 }
 
-std::pair<int,int> NodeFile::getGlobalNodeIDIndexRange() const
+std::pair<index_t,index_t> NodeFile::getGlobalNodeIDIndexRange() const
 {
-    std::pair<int,int> result(util::getMinMaxInt(1, numNodes, globalNodesIndex));
+    std::pair<index_t,index_t> result(util::getMinMaxInt(1, numNodes, globalNodesIndex));
 
 #ifdef ESYS_MPI
-    int global_id_range[2];
-    int id_range[2] = { -result.first, result.second };
-    MPI_Allreduce(id_range, global_id_range, 2, MPI_INT, MPI_MAX, MPIInfo->comm);
+    index_t global_id_range[2];
+    index_t id_range[2] = { -result.first, result.second };
+    MPI_Allreduce(id_range, global_id_range, 2, MPI_DIM_T, MPI_MAX, MPIInfo->comm);
     result.first = -global_id_range[0];
     result.second = global_id_range[1];
 #endif
@@ -317,7 +321,7 @@ void NodeFile::copyTable(int offset, int idOffset, int dofOffset,
     }
 
 #pragma omp parallel for
-    for (int n=0; n<in->numNodes; n++) {
+    for (index_t n=0; n<in->numNodes; n++) {
         Id[offset+n]=in->Id[n]+idOffset;
         Tag[offset+n]=in->Tag[n];
         globalDegreesOfFreedom[offset+n]=in->globalDegreesOfFreedom[n]+dofOffset;
@@ -330,7 +334,7 @@ void NodeFile::copyTable(int offset, int idOffset, int dofOffset,
 /// scatters the NodeFile in into this NodeFile using index[0:in->numNodes-1].
 /// index has to be between 0 and numNodes-1.
 /// colouring is chosen for the worst case
-void NodeFile::scatter(int* index, const NodeFile* in)
+void NodeFile::scatter(const index_t* index, const NodeFile* in)
 {
     scatterEntries(numNodes, index, 0, in->numNodes, Id, in->Id, Tag, in->Tag,
                    globalDegreesOfFreedom, in->globalDegreesOfFreedom,
@@ -340,34 +344,34 @@ void NodeFile::scatter(int* index, const NodeFile* in)
 /// gathers this NodeFile from the NodeFile 'in' using the entries in
 /// index[0:out->numNodes-1] which are between min_index and max_index
 /// (exclusive)
-void NodeFile::gather(int* index, const NodeFile* in)
+void NodeFile::gather(const index_t* index, const NodeFile* in)
 {
-    const std::pair<int,int> id_range(in->getGlobalIdRange());
+    const std::pair<index_t,index_t> id_range(in->getGlobalIdRange());
     gatherEntries(numNodes, index, id_range.first, id_range.second, Id, in->Id,
             Tag, in->Tag, globalDegreesOfFreedom, in->globalDegreesOfFreedom,
             numDim, Coordinates, in->Coordinates);
 }
 
-void NodeFile::gather_global(int *index, const NodeFile* in)
+void NodeFile::gather_global(const index_t *index, const NodeFile* in)
 {
     // get the global range of node ids
-    const std::pair<int,int> id_range(in->getGlobalIdRange());
-    const int undefined_node=id_range.first-1;
-    std::vector<int> distribution(in->MPIInfo->size+1);
+    const std::pair<index_t,index_t> id_range(in->getGlobalIdRange());
+    const index_t undefined_node=id_range.first-1;
+    std::vector<index_t> distribution(in->MPIInfo->size+1);
 
     // distribute the range of node ids
-    int buffer_len=in->MPIInfo->setDistribution(id_range.first, id_range.second, &distribution[0]);
+    index_t buffer_len=in->MPIInfo->setDistribution(id_range.first, id_range.second, &distribution[0]);
 
     // allocate buffers
-    int *Id_buffer=new int[buffer_len];
+    index_t *Id_buffer=new index_t[buffer_len];
     int *Tag_buffer=new int[buffer_len];
-    int *globalDegreesOfFreedom_buffer=new int[buffer_len];
+    index_t *globalDegreesOfFreedom_buffer=new index_t[buffer_len];
     double *Coordinates_buffer=new double[buffer_len*numDim];
 
     // fill Id_buffer by the undefined_node marker to check if nodes
     // are defined
 #pragma omp parallel for
-    for (int n=0; n<buffer_len; n++)
+    for (index_t n=0; n<buffer_len; n++)
         Id_buffer[n]=undefined_node;
 
     // fill the buffer by sending portions around in a circle
@@ -380,14 +384,14 @@ void NodeFile::gather_global(int *index, const NodeFile* in)
     for (int p=0; p<in->MPIInfo->size; ++p) {
         if (p>0) { // the initial send can be skipped
 #ifdef ESYS_MPI
-            MPI_Sendrecv_replace(Id_buffer, buffer_len, MPI_INT, dest,
+            MPI_Sendrecv_replace(Id_buffer, buffer_len, MPI_DIM_T, dest,
                     in->MPIInfo->msg_tag_counter, source,
                     in->MPIInfo->msg_tag_counter, in->MPIInfo->comm, &status);
             MPI_Sendrecv_replace(Tag_buffer, buffer_len, MPI_INT, dest,
                     in->MPIInfo->msg_tag_counter+1, source,
                     in->MPIInfo->msg_tag_counter+1, in->MPIInfo->comm, &status);
             MPI_Sendrecv_replace(globalDegreesOfFreedom_buffer, buffer_len,
-                    MPI_INT, dest, in->MPIInfo->msg_tag_counter+2, source,
+                    MPI_DIM_T, dest, in->MPIInfo->msg_tag_counter+2, source,
                     in->MPIInfo->msg_tag_counter+2, in->MPIInfo->comm, &status);
             MPI_Sendrecv_replace(Coordinates_buffer, buffer_len*numDim,
                     MPI_DOUBLE, dest, in->MPIInfo->msg_tag_counter+3, source,
@@ -416,14 +420,14 @@ void NodeFile::gather_global(int *index, const NodeFile* in)
                 Coordinates, Coordinates_buffer);
         if (p < in->MPIInfo->size-1) { // the last send can be skipped
 #ifdef ESYS_MPI
-            MPI_Sendrecv_replace(Id_buffer, buffer_len, MPI_INT, dest,
+            MPI_Sendrecv_replace(Id_buffer, buffer_len, MPI_DIM_T, dest,
                     in->MPIInfo->msg_tag_counter, source,
                     in->MPIInfo->msg_tag_counter, in->MPIInfo->comm, &status);
             MPI_Sendrecv_replace(Tag_buffer, buffer_len, MPI_INT, dest,
                     in->MPIInfo->msg_tag_counter+1, source,
                     in->MPIInfo->msg_tag_counter+1, in->MPIInfo->comm, &status);
             MPI_Sendrecv_replace(globalDegreesOfFreedom_buffer, buffer_len,
-                    MPI_INT, dest, in->MPIInfo->msg_tag_counter+2, source,
+                    MPI_DIM_T, dest, in->MPIInfo->msg_tag_counter+2, source,
                     in->MPIInfo->msg_tag_counter+2, in->MPIInfo->comm, &status);
             MPI_Sendrecv_replace(Coordinates_buffer, buffer_len*numDim,
                     MPI_DOUBLE, dest, in->MPIInfo->msg_tag_counter+3, source,
@@ -435,7 +439,7 @@ void NodeFile::gather_global(int *index, const NodeFile* in)
     }
     // check if all nodes are set:
 #pragma omp parallel for
-    for (int n=0; n<numNodes; ++n) {
+    for (index_t n=0; n<numNodes; ++n) {
         if (Id[n] == undefined_node) {
             std::stringstream ss;
             ss << "NodeFile::gather_global: Node id " << Id[n]
@@ -453,20 +457,20 @@ void NodeFile::gather_global(int *index, const NodeFile* in)
 }
 
 void NodeFile::assignMPIRankToDOFs(std::vector<int>& mpiRankOfDOF,
-                                   const std::vector<int>& distribution)
+                                   const std::vector<index_t>& distribution)
 {
     Esys_MPI_rank p_min=MPIInfo->size, p_max=-1;
     // first we retrieve the min and max DOF on this processor to reduce
     // costs for searching
-    const std::pair<int,int> dof_range(getDOFRange());
+    const std::pair<index_t,index_t> dof_range(getDOFRange());
 
     for (int p=0; p<MPIInfo->size; ++p) {
         if (distribution[p]<=dof_range.first) p_min=p;
         if (distribution[p]<=dof_range.second) p_max=p;
     }
 #pragma omp parallel for
-    for (int n=0; n<numNodes; ++n) {
-        const int k=globalDegreesOfFreedom[n];
+    for (index_t n=0; n<numNodes; ++n) {
+        const index_t k=globalDegreesOfFreedom[n];
         for (int p=p_min; p<=p_max; ++p) {
             if (k < distribution[p+1]) {
                 mpiRankOfDOF[n]=p;
@@ -477,20 +481,20 @@ void NodeFile::assignMPIRankToDOFs(std::vector<int>& mpiRankOfDOF,
 }
 
 int NodeFile::prepareLabeling(const std::vector<short>& mask,
-                              std::vector<int>& buffer,
-                              std::vector<int>& distribution, bool useNodes)
+                              std::vector<index_t>& buffer,
+                              std::vector<index_t>& distribution, bool useNodes)
 {
-    const int UNSET_ID=-1,SET_ID=1;
+    const index_t UNSET_ID=-1,SET_ID=1;
 
     // get the global range of DOF/node ids
-    std::pair<int,int> idRange(useNodes ?
+    std::pair<index_t,index_t> idRange(useNodes ?
             getGlobalNodeIDIndexRange() : getGlobalDOFRange());
-    const int* indexArray = (useNodes ? globalNodesIndex : globalDegreesOfFreedom);
+    const index_t* indexArray = (useNodes ? globalNodesIndex : globalDegreesOfFreedom);
     // distribute the range of node ids
     distribution.assign(MPIInfo->size+1, 0);
     int buffer_len=MPIInfo->setDistribution(idRange.first,
             idRange.second, &distribution[0]);
-    const int myCount=distribution[MPIInfo->rank+1]-distribution[MPIInfo->rank];
+    const dim_t myCount=distribution[MPIInfo->rank+1]-distribution[MPIInfo->rank];
 
     // fill buffer by the UNSET_ID marker to check if nodes are defined
     buffer.assign(buffer_len, UNSET_ID);
@@ -512,10 +516,10 @@ int NodeFile::prepareLabeling(const std::vector<short>& mask,
             MPIInfo->msg_tag_counter++;
         }
         buffer_rank=esysUtils::mod_rank(MPIInfo->size, buffer_rank-1);
-        const int id0=distribution[buffer_rank];
-        const int id1=distribution[buffer_rank+1];
+        const index_t id0=distribution[buffer_rank];
+        const index_t id1=distribution[buffer_rank+1];
 #pragma omp parallel for
-        for (int n=0; n<numNodes; n++) {
+        for (index_t n=0; n<numNodes; n++) {
             if (mask.size()<numNodes || mask[n]>-1) {
                 const int k=indexArray[n];
                 if (id0<=k && k<id1) {
@@ -526,8 +530,8 @@ int NodeFile::prepareLabeling(const std::vector<short>& mask,
     }
     // count the entries in the buffer
     // TODO: OMP parallel
-    int myNewCount=0;
-    for (int n=0; n<myCount; ++n) {
+    index_t myNewCount=0;
+    for (index_t n=0; n<myCount; ++n) {
         if (buffer[n] == SET_ID) {
             buffer[n]=myNewCount;
             myNewCount++;
@@ -538,17 +542,17 @@ int NodeFile::prepareLabeling(const std::vector<short>& mask,
 
 int NodeFile::createDenseDOFLabeling()
 {
-    std::vector<int> DOF_buffer;
-    std::vector<int> distribution;
-    std::vector<int> loc_offsets(MPIInfo->size);
-    std::vector<int> offsets(MPIInfo->size);
-    int new_numGlobalDOFs=0;
+    std::vector<index_t> DOF_buffer;
+    std::vector<index_t> distribution;
+    std::vector<index_t> loc_offsets(MPIInfo->size);
+    std::vector<index_t> offsets(MPIInfo->size);
+    index_t new_numGlobalDOFs=0;
 
     // retrieve the number of own DOFs and fill buffer
     loc_offsets[MPIInfo->rank]=prepareLabeling(std::vector<short>(),
             DOF_buffer, distribution, false);
 #ifdef ESYS_MPI
-    MPI_Allreduce(&loc_offsets[0], &offsets[0], MPIInfo->size, MPI_INT,
+    MPI_Allreduce(&loc_offsets[0], &offsets[0], MPIInfo->size, MPI_DIM_T,
                   MPI_SUM, MPIInfo->comm);
     for (int n=0; n<MPIInfo->size; ++n) {
         loc_offsets[n]=new_numGlobalDOFs;
@@ -559,9 +563,9 @@ int NodeFile::createDenseDOFLabeling()
     loc_offsets[0]=0;
 #endif
 
-    const int myDOFs=distribution[MPIInfo->rank+1]-distribution[MPIInfo->rank];
+    const dim_t myDOFs=distribution[MPIInfo->rank+1]-distribution[MPIInfo->rank];
 #pragma omp parallel for
-    for (int n=0; n<myDOFs; ++n)
+    for (index_t n=0; n<myDOFs; ++n)
         DOF_buffer[n]+=loc_offsets[MPIInfo->rank];
 
     std::vector<unsigned char> set_new_DOF(numNodes, true);
@@ -574,11 +578,11 @@ int NodeFile::createDenseDOFLabeling()
 #endif
     int buffer_rank=MPIInfo->rank;
     for (int p=0; p<MPIInfo->size; ++p) {
-        const int dof0=distribution[buffer_rank];
-        const int dof1=distribution[buffer_rank+1];
+        const index_t dof0=distribution[buffer_rank];
+        const index_t dof1=distribution[buffer_rank+1];
 #pragma omp parallel for
-        for (int n=0; n<numNodes; n++) {
-            const int k=globalDegreesOfFreedom[n];
+        for (index_t n=0; n<numNodes; n++) {
+            const index_t k=globalDegreesOfFreedom[n];
             if (set_new_DOF[n] && dof0<=k && k<dof1) {
                 globalDegreesOfFreedom[n]=DOF_buffer[k-dof0];
                 set_new_DOF[n]=false;
@@ -587,7 +591,7 @@ int NodeFile::createDenseDOFLabeling()
         if (p<MPIInfo->size-1) { // the last send can be skipped
 #ifdef ESYS_MPI
             MPI_Status status;
-            MPI_Sendrecv_replace(&DOF_buffer[0], DOF_buffer.size(), MPI_INT,
+            MPI_Sendrecv_replace(&DOF_buffer[0], DOF_buffer.size(), MPI_DIM_T,
                     dest, MPIInfo->msg_tag_counter, source,
                     MPIInfo->msg_tag_counter, MPIInfo->comm, &status);
 #endif
@@ -599,22 +603,22 @@ int NodeFile::createDenseDOFLabeling()
     return new_numGlobalDOFs;
 }
 
-int NodeFile::createDenseNodeLabeling(std::vector<int>& nodeDistribution,
-                                      const std::vector<int>& dofDistribution)
+int NodeFile::createDenseNodeLabeling(std::vector<index_t>& nodeDistribution,
+                                      const std::vector<index_t>& dofDistribution)
 {
     const int UNSET_ID=-1, SET_ID=1;
-    const int myFirstDOF=dofDistribution[MPIInfo->rank];
-    const int myLastDOF=dofDistribution[MPIInfo->rank+1];
+    const index_t myFirstDOF=dofDistribution[MPIInfo->rank];
+    const index_t myLastDOF=dofDistribution[MPIInfo->rank+1];
 
     // find the range of node ids controlled by me
-    int min_id=std::numeric_limits<int>::max();
-    int max_id=std::numeric_limits<int>::min();
+    index_t min_id=std::numeric_limits<index_t>::max();
+    index_t max_id=std::numeric_limits<index_t>::min();
 #pragma omp parallel
     {
-        int loc_max_id=max_id;
-        int loc_min_id=min_id;
+        index_t loc_max_id=max_id;
+        index_t loc_min_id=min_id;
 #pragma omp for
-        for (int n=0; n<numNodes; n++) {
+        for (index_t n=0; n<numNodes; n++) {
             const int dof=globalDegreesOfFreedom[n];
             if (myFirstDOF<=dof && dof<myLastDOF) {
                 loc_max_id=std::max(loc_max_id, Id[n]);
@@ -627,32 +631,32 @@ int NodeFile::createDenseNodeLabeling(std::vector<int>& nodeDistribution,
             min_id=std::min(loc_min_id, min_id);
         }
     }
-    int my_buffer_len = (max_id>=min_id ? max_id-min_id+1 : 0);
-    int buffer_len;
+    index_t my_buffer_len = (max_id>=min_id ? max_id-min_id+1 : 0);
+    index_t buffer_len;
 
 #ifdef ESYS_MPI
-    MPI_Allreduce(&my_buffer_len, &buffer_len, 1, MPI_INT, MPI_MAX,
+    MPI_Allreduce(&my_buffer_len, &buffer_len, 1, MPI_DIM_T, MPI_MAX,
                   MPIInfo->comm);
 #else
     buffer_len=my_buffer_len;
 #endif
 
     const int header_len=2;
-    std::vector<int> Node_buffer(buffer_len+header_len, UNSET_ID);
+    std::vector<index_t> Node_buffer(buffer_len+header_len, UNSET_ID);
     // extra storage for these IDs
     Node_buffer[0]=min_id;
     Node_buffer[1]=max_id;
 
     // mark and count the nodes in use
 #pragma omp parallel for
-    for (int n=0; n<numNodes; n++) {
+    for (index_t n=0; n<numNodes; n++) {
         globalNodesIndex[n]=-1;
-        const int dof=globalDegreesOfFreedom[n];
+        const index_t dof=globalDegreesOfFreedom[n];
         if (myFirstDOF<=dof && dof<myLastDOF)
             Node_buffer[Id[n]-min_id+header_len]=SET_ID;
     }
-    int myNewNumNodes=0;
-    for (int n=0; n<my_buffer_len; n++) {
+    index_t myNewNumNodes=0;
+    for (index_t n=0; n<my_buffer_len; n++) {
         if (Node_buffer[header_len+n]==SET_ID) {
             Node_buffer[header_len+n]=myNewNumNodes;
             myNewNumNodes++;
@@ -660,13 +664,13 @@ int NodeFile::createDenseNodeLabeling(std::vector<int>& nodeDistribution,
     }
     // make the local number of nodes globally available
 #ifdef ESYS_MPI
-    MPI_Allgather(&myNewNumNodes, 1, MPI_INT, &nodeDistribution[0], 1, MPI_INT,
-                  MPIInfo->comm);
+    MPI_Allgather(&myNewNumNodes, 1, MPI_DIM_T, &nodeDistribution[0], 1,
+                  MPI_DIM_T, MPIInfo->comm);
 #else
     nodeDistribution[0]=myNewNumNodes;
 #endif
 
-    int globalNumNodes=0;
+    dim_t globalNumNodes=0;
     for (int p=0; p<MPIInfo->size; ++p) {
         const int itmp=nodeDistribution[p];
         nodeDistribution[p]=globalNumNodes;
@@ -676,7 +680,7 @@ int NodeFile::createDenseNodeLabeling(std::vector<int>& nodeDistribution,
 
     // offset node buffer
 #pragma omp parallel for
-    for (int n=0; n<my_buffer_len; n++)
+    for (index_t n=0; n<my_buffer_len; n++)
         Node_buffer[n+header_len]+=nodeDistribution[MPIInfo->rank];
 
     // now we send this buffer around to assign global node index
@@ -686,15 +690,15 @@ int NodeFile::createDenseNodeLabeling(std::vector<int>& nodeDistribution,
 #endif
     int buffer_rank=MPIInfo->rank;
     for (int p=0; p<MPIInfo->size; ++p) {
-        const int nodeID_0=Node_buffer[0];
-        const int nodeID_1=Node_buffer[1];
-        const int dof0=dofDistribution[buffer_rank];
-        const int dof1=dofDistribution[buffer_rank+1];
+        const index_t nodeID_0=Node_buffer[0];
+        const index_t nodeID_1=Node_buffer[1];
+        const index_t dof0=dofDistribution[buffer_rank];
+        const index_t dof1=dofDistribution[buffer_rank+1];
         if (nodeID_0 <= nodeID_1) {
 #pragma omp parallel for
-            for (int n=0; n<numNodes; n++) {
-                const int dof=globalDegreesOfFreedom[n];
-                const int id=Id[n]-nodeID_0;
+            for (index_t n=0; n<numNodes; n++) {
+                const index_t dof=globalDegreesOfFreedom[n];
+                const index_t id=Id[n]-nodeID_0;
                 if (dof0<=dof && dof<dof1 && id>=0 && id<=nodeID_1-nodeID_0)
                     globalNodesIndex[n]=Node_buffer[id+header_len];
             }
@@ -702,11 +706,11 @@ int NodeFile::createDenseNodeLabeling(std::vector<int>& nodeDistribution,
         if (p<MPIInfo->size-1) { // the last send can be skipped
 #ifdef ESYS_MPI
             MPI_Status status;
-            MPI_Sendrecv_replace(&Node_buffer[0], Node_buffer.size(), MPI_INT,
+            MPI_Sendrecv_replace(&Node_buffer[0], Node_buffer.size(), MPI_DIM_T,
                     dest, MPIInfo->msg_tag_counter, source,
                     MPIInfo->msg_tag_counter, MPIInfo->comm, &status);
 #endif
-	    ESYS_MPI_INC_COUNTER(*MPIInfo, 1)
+	        ESYS_MPI_INC_COUNTER(*MPIInfo, 1)
         }
         buffer_rank=esysUtils::mod_rank(MPIInfo->size, buffer_rank-1);
     }
@@ -716,17 +720,17 @@ int NodeFile::createDenseNodeLabeling(std::vector<int>& nodeDistribution,
 int NodeFile::createDenseReducedLabeling(const std::vector<short>& reducedMask,
                                          bool useNodes)
 {
-    std::vector<int> buffer;
-    std::vector<int> distribution;
-    std::vector<int> loc_offsets(MPIInfo->size);
-    std::vector<int> offsets(MPIInfo->size);
-    int new_numGlobalReduced=0;
+    std::vector<index_t> buffer;
+    std::vector<index_t> distribution;
+    std::vector<index_t> loc_offsets(MPIInfo->size);
+    std::vector<index_t> offsets(MPIInfo->size);
+    dim_t new_numGlobalReduced=0;
 
     // retrieve the number of own DOFs/nodes and fill buffer
     loc_offsets[MPIInfo->rank]=prepareLabeling(reducedMask, buffer,
                                                distribution, useNodes);
 #ifdef ESYS_MPI
-    MPI_Allreduce(&loc_offsets[0], &offsets[0], MPIInfo->size, MPI_INT,
+    MPI_Allreduce(&loc_offsets[0], &offsets[0], MPIInfo->size, MPI_DIM_T,
                   MPI_SUM, MPIInfo->comm);
     for (int n=0; n<MPIInfo->size; ++n) {
         loc_offsets[n]=new_numGlobalReduced;
@@ -737,18 +741,18 @@ int NodeFile::createDenseReducedLabeling(const std::vector<short>& reducedMask,
     loc_offsets[0]=0;
 #endif
 
-    const int myCount=distribution[MPIInfo->rank+1]-distribution[MPIInfo->rank];
+    const dim_t myCount=distribution[MPIInfo->rank+1]-distribution[MPIInfo->rank];
 #pragma omp parallel for
-    for (int n=0; n<myCount; ++n)
+    for (index_t n=0; n<myCount; ++n)
         buffer[n]+=loc_offsets[MPIInfo->rank];
 
-    const int* denseArray =
+    const index_t* denseArray =
         (useNodes ? globalNodesIndex : globalDegreesOfFreedom);
-    int* reducedArray =
+    index_t* reducedArray =
         (useNodes ? globalReducedNodesIndex : globalReducedDOFIndex);
 
 #pragma omp parallel for
-    for (int n=0; n<numNodes; ++n)
+    for (index_t n=0; n<numNodes; ++n)
         reducedArray[n]=loc_offsets[0]-1;
 
     // now entries are collected from the buffer by sending them around
@@ -759,12 +763,12 @@ int NodeFile::createDenseReducedLabeling(const std::vector<short>& reducedMask,
 #endif
     int buffer_rank=MPIInfo->rank;
     for (int p=0; p<MPIInfo->size; ++p) {
-        const int id0=distribution[buffer_rank];
-        const int id1=distribution[buffer_rank+1];
+        const index_t id0=distribution[buffer_rank];
+        const index_t id1=distribution[buffer_rank+1];
 #pragma omp parallel for
-        for (int n=0; n<numNodes; n++) {
+        for (index_t n=0; n<numNodes; n++) {
             if (reducedMask[n] > -1) {
-                const int k=denseArray[n];
+                const index_t k=denseArray[n];
                 if (id0<=k && k<id1)
                     reducedArray[n]=buffer[k-id0];
             }
@@ -772,11 +776,11 @@ int NodeFile::createDenseReducedLabeling(const std::vector<short>& reducedMask,
         if (p<MPIInfo->size-1) { // the last send can be skipped
 #ifdef ESYS_MPI
             MPI_Status status;
-            MPI_Sendrecv_replace(&buffer[0], buffer.size(), MPI_INT, dest,
+            MPI_Sendrecv_replace(&buffer[0], buffer.size(), MPI_DIM_T, dest,
                     MPIInfo->msg_tag_counter, source,
                     MPIInfo->msg_tag_counter, MPIInfo->comm, &status);
 #endif
-	    ESYS_MPI_INC_COUNTER(*MPIInfo, 1)
+	        ESYS_MPI_INC_COUNTER(*MPIInfo, 1)
         }
         buffer_rank=esysUtils::mod_rank(MPIInfo->size, buffer_rank-1);
     }
@@ -786,7 +790,7 @@ int NodeFile::createDenseReducedLabeling(const std::vector<short>& reducedMask,
 void NodeFile::createDOFMappingAndCoupling(bool use_reduced_elements) 
 {
     paso::Distribution_ptr dof_distribution;
-    const int* globalDOFIndex;
+    const index_t* globalDOFIndex;
     if (use_reduced_elements) {
         dof_distribution=reducedDegreesOfFreedomDistribution;
         globalDOFIndex=globalReducedDOFIndex;
@@ -794,13 +798,13 @@ void NodeFile::createDOFMappingAndCoupling(bool use_reduced_elements)
         dof_distribution=degreesOfFreedomDistribution;
         globalDOFIndex=globalDegreesOfFreedom;
     }
-    const int myFirstDOF=dof_distribution->getFirstComponent();
-    const int myLastDOF=dof_distribution->getLastComponent();
+    const index_t myFirstDOF=dof_distribution->getFirstComponent();
+    const index_t myLastDOF=dof_distribution->getLastComponent();
     const int mpiSize=MPIInfo->size;
     const int myRank=MPIInfo->rank;
 
-    int min_DOF, max_DOF;
-    std::pair<int,int> DOF_range(util::getFlaggedMinMaxInt(
+    index_t min_DOF, max_DOF;
+    std::pair<index_t,index_t> DOF_range(util::getFlaggedMinMaxInt(
                                             numNodes, globalDOFIndex, -1));
 
     if (DOF_range.second < DOF_range.first) {
@@ -825,16 +829,16 @@ void NodeFile::createDOFMappingAndCoupling(bool use_reduced_elements)
         return;
     }
     const int UNUSED = -1;
-    const int len_loc_dof=max_DOF-min_DOF+1;
-    std::vector<int> shared(numNodes*(p_max-p_min+1));
-    std::vector<int> offsetInShared(mpiSize+1);
-    std::vector<int> locDOFMask(len_loc_dof, UNUSED);
+    const index_t len_loc_dof=max_DOF-min_DOF+1;
+    std::vector<index_t> shared(numNodes*(p_max-p_min+1));
+    std::vector<index_t> offsetInShared(mpiSize+1);
+    std::vector<index_t> locDOFMask(len_loc_dof, UNUSED);
 
 #pragma omp parallel 
     {
 #pragma omp for
-        for (int i=0;i<numNodes;++i) {
-            const int k=globalDOFIndex[i];
+        for (index_t i=0;i<numNodes;++i) {
+            const index_t k=globalDOFIndex[i];
             if (k > -1) {
 #ifdef BOUNDS_CHECK
                 if ((k-min_DOF)>=len_loc_dof) {
@@ -852,12 +856,12 @@ void NodeFile::createDOFMappingAndCoupling(bool use_reduced_elements)
        }
 #endif
 #pragma omp for
-       for (int i=myFirstDOF-min_DOF; i<myLastDOF-min_DOF; ++i) {
+       for (index_t i=myFirstDOF-min_DOF; i<myLastDOF-min_DOF; ++i) {
             locDOFMask[i]=i-myFirstDOF+min_DOF;
         }
     }
 
-    std::vector<int> wanted_DOFs(numNodes);
+    std::vector<index_t> wanted_DOFs(numNodes);
     std::vector<int> rcv_len(mpiSize);
     std::vector<int> snd_len(mpiSize);
     std::vector<int> neighbor(mpiSize);
@@ -866,15 +870,15 @@ void NodeFile::createDOFMappingAndCoupling(bool use_reduced_elements)
     int lastn=n;
     for (int p=p_min; p<=p_max; ++p) {
         if (p != myRank) {
-            const int firstDOF=std::max(min_DOF, dof_distribution->first_component[p]);
-            const int lastDOF=std::min(max_DOF+1, dof_distribution->first_component[p+1]);
+            const index_t firstDOF=std::max(min_DOF, dof_distribution->first_component[p]);
+            const index_t lastDOF=std::min(max_DOF+1, dof_distribution->first_component[p+1]);
 #ifdef BOUNDS_CHECK
             if (firstDOF-min_DOF<0 || lastDOF-min_DOF>len_loc_dof) {
                 printf("BOUNDS_CHECK %s %d p=%d\n", __FILE__, __LINE__, p);
                 exit(1);
             }
 #endif
-            for (int i=firstDOF-min_DOF; i<lastDOF-min_DOF; ++i) {
+            for (index_t i=firstDOF-min_DOF; i<lastDOF-min_DOF; ++i) {
                 if (locDOFMask[i] == UNUSED-1) {
                    locDOFMask[i]=myLastDOF-myFirstDOF+n;
                    wanted_DOFs[n]=i+min_DOF;
@@ -906,10 +910,10 @@ void NodeFile::createDOFMappingAndCoupling(bool use_reduced_elements)
     offsetInShared[numNeighbors]=lastn;
 
     // assign new DOF labels to nodes
-    std::vector<int> nodeMask(numNodes, UNUSED);
+    std::vector<index_t> nodeMask(numNodes, UNUSED);
 #pragma omp parallel for
-    for (int i=0; i<numNodes; ++i) {
-        const int k=globalDOFIndex[i];
+    for (index_t i=0; i<numNodes; ++i) {
+        const index_t k=globalDOFIndex[i];
         if (k > -1)
             nodeMask[i]=locDOFMask[k-min_DOF];
     }
@@ -929,10 +933,10 @@ void NodeFile::createDOFMappingAndCoupling(bool use_reduced_elements)
     }
 #endif
 #pragma omp parallel for
-    for (int i=0; i<lastn; ++i)
+    for (index_t i=0; i<lastn; ++i)
         shared[i]=myLastDOF-myFirstDOF+i;
 
-    int *p = shared.empty() ? NULL : &shared[0];
+    index_t *p = shared.empty() ? NULL : &shared[0];
     paso::SharedComponents_ptr rcv_shcomp(new paso::SharedComponents(
             myLastDOF-myFirstDOF, numNeighbors, &neighbor[0], p,
             &offsetInShared[0], 1, 0, MPIInfo));
@@ -953,7 +957,7 @@ void NodeFile::createDOFMappingAndCoupling(bool use_reduced_elements)
 #ifdef ESYS_MPI
         MPI_Isend(&(wanted_DOFs[rcv_shcomp->offsetInShared[p]]),
                 rcv_shcomp->offsetInShared[p+1]-rcv_shcomp->offsetInShared[p],
-                MPI_INT, rcv_shcomp->neighbor[p],
+                MPI_DIM_T, rcv_shcomp->neighbor[p],
                 MPIInfo->msg_tag_counter+myRank, MPIInfo->comm,
                 &mpi_requests[count]);
         count++;
@@ -964,7 +968,7 @@ void NodeFile::createDOFMappingAndCoupling(bool use_reduced_elements)
     for (int p=0; p<mpiSize; p++) {
         if (snd_len[p] > 0) {
 #ifdef ESYS_MPI
-            MPI_Irecv(&shared[n], snd_len[p], MPI_INT, p,
+            MPI_Irecv(&shared[n], snd_len[p], MPI_DIM_T, p,
                     MPIInfo->msg_tag_counter+p, MPIInfo->comm,
                     &mpi_requests[count]);
             count++;
@@ -982,7 +986,7 @@ void NodeFile::createDOFMappingAndCoupling(bool use_reduced_elements)
 #endif
     // map global ids to local id's
 #pragma omp parallel for
-    for (int i=0; i<n; ++i) {
+    for (index_t i=0; i<n; ++i) {
         shared[i]=locDOFMask[shared[i]-min_DOF];
     }
 
@@ -999,29 +1003,29 @@ void NodeFile::createDOFMappingAndCoupling(bool use_reduced_elements)
     }
 }
 
-void NodeFile::createNodeMappings(const std::vector<int>& indexReducedNodes,
-                                  const std::vector<int>& dofDist,
-                                  const std::vector<int>& nodeDist)
+void NodeFile::createNodeMappings(const std::vector<index_t>& indexReducedNodes,
+                                  const std::vector<index_t>& dofDist,
+                                  const std::vector<index_t>& nodeDist)
 {
     const int mpiSize=MPIInfo->size;
     const int myRank=MPIInfo->rank;
 
-    const int myFirstDOF=dofDist[myRank];
-    const int myLastDOF=dofDist[myRank+1];
-    const int myNumDOF=myLastDOF-myFirstDOF;
+    const index_t myFirstDOF=dofDist[myRank];
+    const index_t myLastDOF=dofDist[myRank+1];
+    const index_t myNumDOF=myLastDOF-myFirstDOF;
 
-    const int myFirstNode=nodeDist[myRank];
-    const int myLastNode=nodeDist[myRank+1];
-    const int myNumNodes=myLastNode-myFirstNode;
+    const index_t myFirstNode=nodeDist[myRank];
+    const index_t myLastNode=nodeDist[myRank+1];
+    const index_t myNumNodes=myLastNode-myFirstNode;
 
     std::vector<short> maskMyReducedDOF(myNumDOF, -1);
     std::vector<short> maskMyReducedNodes(myNumNodes, -1);
-    const int iRNsize = indexReducedNodes.size();
+    const index_t iRNsize = indexReducedNodes.size();
 
     // mark the nodes used by the reduced mesh
 #pragma omp parallel for
-    for (int i=0; i<iRNsize; ++i) {
-        int k=globalNodesIndex[indexReducedNodes[i]];
+    for (index_t i=0; i<iRNsize; ++i) {
+        index_t k=globalNodesIndex[indexReducedNodes[i]];
         if (k>=myFirstNode && myLastNode>k)
             maskMyReducedNodes[k-myFirstNode]=1;
         k=globalDegreesOfFreedom[indexReducedNodes[i]];
@@ -1029,24 +1033,24 @@ void NodeFile::createNodeMappings(const std::vector<int>& indexReducedNodes,
             maskMyReducedDOF[k-myFirstDOF]=1;
         }
     }
-    std::vector<int> indexMyReducedDOF = util::packMask(maskMyReducedDOF);
-    int myNumReducedDOF=indexMyReducedDOF.size();
-    std::vector<int> indexMyReducedNodes = util::packMask(maskMyReducedNodes);
-    int myNumReducedNodes=indexMyReducedNodes.size();
+    std::vector<index_t> indexMyReducedDOF = util::packMask(maskMyReducedDOF);
+    index_t myNumReducedDOF=indexMyReducedDOF.size();
+    std::vector<index_t> indexMyReducedNodes = util::packMask(maskMyReducedNodes);
+    index_t myNumReducedNodes=indexMyReducedNodes.size();
 
-    std::vector<int> rdofDist(mpiSize+1);
-    std::vector<int> rnodeDist(mpiSize+1);
+    std::vector<index_t> rdofDist(mpiSize+1);
+    std::vector<index_t> rnodeDist(mpiSize+1);
 #ifdef ESYS_MPI
-    MPI_Allgather(&myNumReducedNodes, 1, MPI_INT, &rnodeDist[0], 1, MPI_INT, MPIInfo->comm);
-    MPI_Allgather(&myNumReducedDOF, 1, MPI_INT, &rdofDist[0], 1, MPI_INT, MPIInfo->comm);
+    MPI_Allgather(&myNumReducedNodes, 1, MPI_DIM_T, &rnodeDist[0], 1, MPI_DIM_T, MPIInfo->comm);
+    MPI_Allgather(&myNumReducedDOF, 1, MPI_DIM_T, &rdofDist[0], 1, MPI_DIM_T, MPIInfo->comm);
 #else
     rnodeDist[0]=myNumReducedNodes;
     rdofDist[0]=myNumReducedDOF;
 #endif
-    int globalNumReducedNodes=0;
-    int globalNumReducedDOF=0;
+    index_t globalNumReducedNodes=0;
+    index_t globalNumReducedDOF=0;
     for (int i=0; i<mpiSize;++i) {
-        int k=rnodeDist[i];
+        index_t k=rnodeDist[i];
         rnodeDist[i]=globalNumReducedNodes;
         globalNumReducedNodes+=k;
 
@@ -1067,22 +1071,22 @@ void NodeFile::createNodeMappings(const std::vector<int>& indexReducedNodes,
     reducedDegreesOfFreedomDistribution.reset(new paso::Distribution(
                                                 MPIInfo, &rdofDist[0], 1, 0));
 
-    std::vector<int> nodeMask(numNodes);
+    std::vector<index_t> nodeMask(numNodes);
 
     if (noError()) {
         const int UNUSED = -1;
         // ==== nodes mapping which is a dummy structure ========
 #pragma omp parallel for
-        for (int i=0; i<numNodes; ++i)
+        for (index_t i=0; i<numNodes; ++i)
             nodeMask[i]=i;
         nodesMapping.assign(nodeMask, UNUSED);
 
         // ==== mapping between nodes and reduced nodes ==========
 #pragma omp parallel for
-        for (int i=0; i<numNodes; ++i)
+        for (index_t i=0; i<numNodes; ++i)
             nodeMask[i]=UNUSED;
 #pragma omp parallel for
-        for (int i=0; i<iRNsize; ++i)
+        for (index_t i=0; i<iRNsize; ++i)
             nodeMask[indexReducedNodes[i]]=i;
         reducedNodesMapping.assign(nodeMask, UNUSED);
     }
@@ -1095,19 +1099,19 @@ void NodeFile::createNodeMappings(const std::vector<int>& indexReducedNodes,
 
     // get the Ids for DOFs and reduced nodes
     if (noError()) {
-        const int rnTargets = reducedNodesMapping.getNumTargets();
-        const int dofTargets = degreesOfFreedomMapping.getNumTargets();
-        const int rdofTargets = reducedDegreesOfFreedomMapping.getNumTargets();
+        const index_t rnTargets = reducedNodesMapping.getNumTargets();
+        const index_t dofTargets = degreesOfFreedomMapping.getNumTargets();
+        const index_t rdofTargets = reducedDegreesOfFreedomMapping.getNumTargets();
 #pragma omp parallel
         {
 #pragma omp for
-         for (int i=0; i<rnTargets; ++i)
+         for (index_t i=0; i<rnTargets; ++i)
              reducedNodesId[i]=Id[reducedNodesMapping.map[i]];
 #pragma omp for
-         for (int i=0; i<dofTargets; ++i)
+         for (index_t i=0; i<dofTargets; ++i)
              degreesOfFreedomId[i]=Id[degreesOfFreedomMapping.map[i]];
 #pragma omp for
-         for (int i=0; i<rdofTargets; ++i)
+         for (index_t i=0; i<rdofTargets; ++i)
              reducedDegreesOfFreedomId[i]=Id[reducedDegreesOfFreedomMapping.map[i]];
         }
     } else {
diff --git a/finley/src/NodeFile.h b/finley/src/NodeFile.h
index e256d23..ad1102a 100644
--- a/finley/src/NodeFile.h
+++ b/finley/src/NodeFile.h
@@ -33,64 +33,64 @@ public:
     NodeFile(int nDim, esysUtils::JMPI& mpiInfo);
     ~NodeFile();
 
-    void allocTable(int numNodes);
+    void allocTable(dim_t numNodes);
     void freeTable();
 
     void print() const;
-    inline int getFirstNode() const;
-    inline int getLastNode() const;
-    inline int getGlobalNumNodes() const;
-    inline int* borrowGlobalNodesIndex() const;
+    inline index_t getFirstNode() const;
+    inline index_t getLastNode() const;
+    inline index_t getGlobalNumNodes() const;
+    inline index_t* borrowGlobalNodesIndex() const;
 
-    inline int getFirstReducedNode() const;
-    inline int getLastReducedNode() const;
-    inline int getGlobalNumReducedNodes() const;
-    inline int* borrowGlobalReducedNodesIndex() const;
+    inline index_t getFirstReducedNode() const;
+    inline index_t getLastReducedNode() const;
+    inline index_t getGlobalNumReducedNodes() const;
+    inline index_t* borrowGlobalReducedNodesIndex() const;
 
     /// returns the number of FEM nodes
-    inline int getNumNodes() const;
-    inline int getNumReducedNodes() const;
-    inline int getNumDegreesOfFreedom() const;
-    inline int getNumReducedDegreesOfFreedom() const;
-
-    inline const std::vector<int>& borrowReducedNodesTarget() const;
-    inline const std::vector<int>& borrowDegreesOfFreedomTarget() const;
-    inline const std::vector<int>& borrowNodesTarget() const;
-    inline const std::vector<int>& borrowReducedDegreesOfFreedomTarget() const;
-
-    inline const int* borrowTargetReducedNodes() const;
-    inline const int* borrowTargetDegreesOfFreedom() const;
-    inline const int* borrowTargetNodes() const;
-    inline const int* borrowTargetReducedDegreesOfFreedom() const;
-
-    void createNodeMappings(const std::vector<int>& indexReducedNodes,
-                            const std::vector<int>& dofDistribution,
-                            const std::vector<int>& nodeDistribution);
+    inline dim_t getNumNodes() const;
+    inline dim_t getNumReducedNodes() const;
+    inline dim_t getNumDegreesOfFreedom() const;
+    inline dim_t getNumReducedDegreesOfFreedom() const;
+
+    inline const std::vector<index_t>& borrowReducedNodesTarget() const;
+    inline const std::vector<index_t>& borrowDegreesOfFreedomTarget() const;
+    inline const std::vector<index_t>& borrowNodesTarget() const;
+    inline const std::vector<index_t>& borrowReducedDegreesOfFreedomTarget() const;
+
+    inline const index_t* borrowTargetReducedNodes() const;
+    inline const index_t* borrowTargetDegreesOfFreedom() const;
+    inline const index_t* borrowTargetNodes() const;
+    inline const index_t* borrowTargetReducedDegreesOfFreedom() const;
+
+    void createNodeMappings(const std::vector<index_t>& indexReducedNodes,
+                            const std::vector<index_t>& dofDistribution,
+                            const std::vector<index_t>& nodeDistribution);
     int createDenseDOFLabeling();
-    int createDenseNodeLabeling(std::vector<int>& nodeDistribution,
-                                const std::vector<int>& dofDistribution);
+    int createDenseNodeLabeling(std::vector<index_t>& nodeDistribution,
+                                const std::vector<index_t>& dofDistribution);
     int createDenseReducedLabeling(const std::vector<short>& reducedMask,
                                    bool useNodes);
-    void assignMPIRankToDOFs(std::vector<int>& mpiRankOfDOF, const std::vector<int>& distribution);
+    void assignMPIRankToDOFs(std::vector<int>& mpiRankOfDOF, const std::vector<index_t>& distribution);
 
     void copyTable(int offset, int idOffset, int dofOffset, const NodeFile* in);
-    void gather(int* index, const NodeFile* in);
-    void gather_global(int* index, const NodeFile* in);
-    void scatter(int* index, const NodeFile* in);
+    void gather(const index_t* index, const NodeFile* in);
+    void gather_global(const index_t* index, const NodeFile* in);
+    void scatter(const index_t* index, const NodeFile* in);
 
     void setCoordinates(const escript::Data& newX);
     void setTags(const int newTag, const escript::Data& mask);
     inline void updateTagList();
 
-    std::pair<int,int> getDOFRange() const;
+    std::pair<index_t,index_t> getDOFRange() const;
 
 private:
-    std::pair<int,int> getGlobalIdRange() const;
-    std::pair<int,int> getGlobalDOFRange() const;
-    std::pair<int,int> getGlobalNodeIDIndexRange() const;
+    std::pair<index_t,index_t> getGlobalIdRange() const;
+    std::pair<index_t,index_t> getGlobalDOFRange() const;
+    std::pair<index_t,index_t> getGlobalNodeIDIndexRange() const;
     int prepareLabeling(const std::vector<short>& mask,
-                        std::vector<int>& buffer,
-                        std::vector<int>& distribution, bool useNodes);
+                        std::vector<index_t>& buffer,
+                        std::vector<index_t>& distribution, bool useNodes);
     void createDOFMappingAndCoupling(bool reduced);
 
     NodeMapping nodesMapping;
@@ -106,11 +106,11 @@ public:
     /// MPI information
     esysUtils::JMPI MPIInfo;
     /// number of nodes
-    int numNodes;
+    dim_t numNodes;
     /// number of spatial dimensions
     int numDim;
     /// Id[i] is the id number of node i. It needs to be unique.
-    int *Id;
+    index_t *Id;
     /// Tag[i] is the tag of node i.
     int *Tag;
     /// vector of tags which are actually used
@@ -118,17 +118,17 @@ public:
     /// globalDegreesOfFreedom[i] is the global degree of freedom assigned
     /// to node i. This index is used to consider periodic boundary conditions
     /// by assigning the same degreesOfFreedom to the same node.
-    int* globalDegreesOfFreedom;
+    index_t* globalDegreesOfFreedom;
     /// Coordinates[INDEX2(k,i,numDim)] is the k-th coordinate of node i
     double *Coordinates;
     /// assigns each local node a global unique Id in a dense labeling of
     /// reduced DOF. Value <0 indicates that the DOF is not used.
-    int *globalReducedDOFIndex;
+    index_t *globalReducedDOFIndex;
     /// assigns each local node a global unique Id in a dense labeling.
     /// Value <0 indicates that the DOF is not used
-    int *globalReducedNodesIndex;
+    index_t *globalReducedNodesIndex;
     /// assigns each local reduced node a global unique Id in a dense labeling
-    int *globalNodesIndex;
+    index_t *globalNodesIndex;
 
     paso::Distribution_ptr nodesDistribution;
     paso::Distribution_ptr reducedNodesDistribution;
@@ -139,9 +139,9 @@ public:
     paso::Connector_ptr reducedDegreesOfFreedomConnector;
   
     /// these are the packed versions of Id
-    int *reducedNodesId;        
-    int *degreesOfFreedomId;
-    int *reducedDegreesOfFreedomId;
+    index_t *reducedNodesId;        
+    index_t *degreesOfFreedomId;
+    index_t *reducedDegreesOfFreedomId;
 
     /// the status counts the updates done on the node coordinates.
     /// The value is increased by 1 when the node coordinates are updated.
@@ -152,102 +152,102 @@ public:
 // implementation of inline methods
 //
 
-inline int NodeFile::getFirstNode() const
+inline index_t NodeFile::getFirstNode() const
 {
     return nodesDistribution->getFirstComponent();
 }
 
-inline int NodeFile::getLastNode() const
+inline index_t NodeFile::getLastNode() const
 {
     return nodesDistribution->getLastComponent();
 }
 
-inline int NodeFile::getGlobalNumNodes() const
+inline index_t NodeFile::getGlobalNumNodes() const
 {
     return nodesDistribution->getGlobalNumComponents();
 }
 
-inline int* NodeFile::borrowGlobalNodesIndex() const
+inline index_t* NodeFile::borrowGlobalNodesIndex() const
 {
     return globalNodesIndex;
 }
 
-inline int NodeFile::getFirstReducedNode() const
+inline index_t NodeFile::getFirstReducedNode() const
 {
     return reducedNodesDistribution->getFirstComponent();
 }
 
-inline int NodeFile::getLastReducedNode() const
+inline index_t NodeFile::getLastReducedNode() const
 {
     return reducedNodesDistribution->getLastComponent();
 }
 
-inline int NodeFile::getGlobalNumReducedNodes() const
+inline dim_t NodeFile::getGlobalNumReducedNodes() const
 {
     return reducedNodesDistribution->getGlobalNumComponents();
 }
 
-inline int* NodeFile::borrowGlobalReducedNodesIndex() const
+inline index_t* NodeFile::borrowGlobalReducedNodesIndex() const
 {
     return globalReducedNodesIndex;
 }
 
-inline int NodeFile::getNumNodes() const
+inline dim_t NodeFile::getNumNodes() const
 {
     return numNodes;
 }
 
-inline int NodeFile::getNumReducedNodes() const
+inline dim_t NodeFile::getNumReducedNodes() const
 {
     return reducedNodesMapping.getNumTargets();
 }
 
-inline int NodeFile::getNumDegreesOfFreedom() const
+inline dim_t NodeFile::getNumDegreesOfFreedom() const
 {
     return degreesOfFreedomDistribution->getMyNumComponents();
 }
 
-inline int NodeFile::getNumReducedDegreesOfFreedom() const
+inline dim_t NodeFile::getNumReducedDegreesOfFreedom() const
 {
     return reducedDegreesOfFreedomDistribution->getMyNumComponents();
 }
 
-inline const std::vector<int>& NodeFile::borrowNodesTarget() const
+inline const std::vector<index_t>& NodeFile::borrowNodesTarget() const
 {
     return nodesMapping.map;
 }
 
-inline const std::vector<int>& NodeFile::borrowReducedNodesTarget() const
+inline const std::vector<index_t>& NodeFile::borrowReducedNodesTarget() const
 {
     return reducedNodesMapping.map;
 }
 
-inline const std::vector<int>& NodeFile::borrowDegreesOfFreedomTarget() const
+inline const std::vector<index_t>& NodeFile::borrowDegreesOfFreedomTarget() const
 {
     return degreesOfFreedomMapping.map;
 }
 
-inline const std::vector<int>& NodeFile::borrowReducedDegreesOfFreedomTarget() const
+inline const std::vector<index_t>& NodeFile::borrowReducedDegreesOfFreedomTarget() const
 {
     return reducedDegreesOfFreedomMapping.map;
 }
 
-inline const int* NodeFile::borrowTargetNodes() const
+inline const index_t* NodeFile::borrowTargetNodes() const
 {
     return &nodesMapping.target[0];
 }
 
-inline const int* NodeFile::borrowTargetReducedNodes() const
+inline const index_t* NodeFile::borrowTargetReducedNodes() const
 {
     return &reducedNodesMapping.target[0];
 }
 
-inline const int* NodeFile::borrowTargetDegreesOfFreedom() const
+inline const index_t* NodeFile::borrowTargetDegreesOfFreedom() const
 {
     return &degreesOfFreedomMapping.target[0];
 }
 
-inline const int* NodeFile::borrowTargetReducedDegreesOfFreedom() const
+inline const index_t* NodeFile::borrowTargetReducedDegreesOfFreedom() const
 {
     return &reducedDegreesOfFreedomMapping.target[0];
 }
diff --git a/finley/src/NodeMapping.h b/finley/src/NodeMapping.h
index 6b8a937..9d8bd37 100644
--- a/finley/src/NodeMapping.h
+++ b/finley/src/NodeMapping.h
@@ -37,33 +37,33 @@ struct NodeMapping {
     /// initializes a node mapping. The target array is copied and a reverse
     /// map created.
     /// theTarget[i]=unused means that no target is defined for FEM node i.
-    void assign(const std::vector<int>& theTarget, int unused)
+    void assign(const std::vector<index_t>& theTarget, index_t unused)
     {
         if (theTarget.empty())
             return;
 
-        std::pair<int,int> range(
+        std::pair<index_t,index_t> range(
             util::getFlaggedMinMaxInt(theTarget.size(), &theTarget[0], unused));
         if (range.first < 0) {
             setError(VALUE_ERROR, "NodeMapping: target has negative entry.");
             return;
         }
         // now we assume min(target)=0!
-        const int numTargets = range.first<=range.second ? range.second+1 : 0;
+        const dim_t numTargets = range.first<=range.second ? range.second+1 : 0;
         target.assign(theTarget.begin(), theTarget.end());
-        const int targetSize = target.size();
+        const index_t targetSize = target.size();
         map.assign(numTargets, -1);
 
 #pragma omp parallel
         {
 #pragma omp for
-            for (int i=0; i<targetSize; ++i) {
+            for (index_t i=0; i<targetSize; ++i) {
                 if (target[i] != unused)
                     map[target[i]]=i;
             }
             // sanity check
 #pragma omp for
-            for (int i=0; i<numTargets; ++i) {
+            for (index_t i=0; i<numTargets; ++i) {
                 if (map[i]==-1) {
                     setError(VALUE_ERROR, "NodeMapping: target does not define a continuous labeling.");
                 }
@@ -72,12 +72,12 @@ struct NodeMapping {
     }
 
     /// returns the number of target nodes (number of items in the map array)
-    int getNumTargets() const { return map.size(); }
+    dim_t getNumTargets() const { return map.size(); }
 
     /// target[i] defines the target of FEM node i=0,...,numNodes-1
-    std::vector<int> target;
+    std::vector<index_t> target;
     /// maps the target nodes back to the FEM nodes: target[map[i]]=i
-    std::vector<int> map;
+    std::vector<index_t> map;
 };
 
 } // namespace finley
diff --git a/finley/src/RectangularMesh.h b/finley/src/RectangularMesh.h
index 46c8e2d..c8c625f 100644
--- a/finley/src/RectangularMesh.h
+++ b/finley/src/RectangularMesh.h
@@ -28,10 +28,27 @@
 
 namespace finley {
 
-Mesh* RectangularMesh_Hex20(const int*, const double*, const bool*, int, int, bool, bool, bool, bool, esysUtils::JMPI& info);
-Mesh* RectangularMesh_Hex8(const int*, const double*, const bool*, int, int, bool, bool, bool, esysUtils::JMPI& info);
-Mesh* RectangularMesh_Rec8(const int*, const double*, const bool*, int, int, bool, bool, bool, bool, esysUtils::JMPI& info);
-Mesh* RectangularMesh_Rec4(const int*, const double*, const bool*, int, int, bool, bool, bool, esysUtils::JMPI& info);
+Mesh* RectangularMesh_Hex20(const dim_t* numElements, const double* length,
+                            const bool* periodic, int order, int reducedOrder,
+                            bool useElementsOnFace, bool useFullElementOrder,
+                            bool useMacroElements, bool optimize,
+                            esysUtils::JMPI& mpi_info);
+
+Mesh* RectangularMesh_Hex8(const dim_t* numElements, const double* length,
+                           const bool* periodic, int order, int reducedOrder,
+                           bool useElementsOnFace, bool useFullElementOrder,
+                           bool useMacroElements, esysUtils::JMPI& mpi_info);
+
+Mesh* RectangularMesh_Rec8(const dim_t* numElements, const double* length,
+                           const bool* periodic, int order, int reducedOrder,
+                           bool useElementsOnFace, bool useFullElementOrder,
+                           bool useMacroElements, bool optimize,
+                           esysUtils::JMPI& mpi_info);
+
+Mesh* RectangularMesh_Rec4(const dim_t* numElements, const double* length,
+                           const bool* periodic, int order, int reducedOrder,
+                           bool useElementsOnFace, bool useFullElementOrder,
+                           bool useMacroElements, esysUtils::JMPI& mpi_info);
 
 }
 
diff --git a/finley/src/Util.cpp b/finley/src/Util.cpp
index fac9e69..cd6981d 100644
--- a/finley/src/Util.cpp
+++ b/finley/src/Util.cpp
@@ -51,10 +51,10 @@ void sortValueAndIndex(ValueAndIndexList& array)
 
 /// gathers values into vector out from vector in using index:
 ///   out(1:numData, 1:len) := in(1:numData, index(1:len))
-void gather(int len, const int* index, int numData, const double* in, double* out)
+void gather(dim_t len, const index_t* index, dim_t numData, const double* in, double* out)
 {
-    for (int s=0; s<len; s++) {
-        for (int i=0; i<numData; i++) {
+    for (index_t s=0; s<len; s++) {
+        for (index_t i=0; i<numData; i++) {
             out[INDEX2(i,s,numData)] = in[INDEX2(i,index[s],numData)];
         }
     }
@@ -63,10 +63,11 @@ void gather(int len, const int* index, int numData, const double* in, double* ou
 /// adds a vector in into out using an index:
 ///   out(1:numData,index[p])+=in(1:numData,p) where
 ///   p={k=1...len, index[k]<upperBound}
-void addScatter(const int len, const int* index, const int numData, const double* in, double* out, const int upperBound)
+void addScatter(dim_t len, const index_t* index, dim_t numData,
+                const double* in, double* out, index_t upperBound)
 {
-    for (int s=0; s<len; s++) {
-        for (int i=0; i<numData; i++) {
+    for (index_t s=0; s<len; s++) {
+        for (index_t i=0; i<numData; i++) {
             if (index[s] < upperBound) {
                 out[INDEX2(i,index[s],numData)]+=in[INDEX2(i,s,numData)];
             }
@@ -236,16 +237,16 @@ void normalVector(int len, int dim, int dim1, const double* A, double* Normal)
 }
 
 /// calculates the minimum value from a dim X N integer array
-int getMinInt(int dim, int N, const int* values)
+index_t getMinInt(int dim, dim_t N, const index_t* values)
 {
-    int out = std::numeric_limits<int>::max();
+    index_t out = std::numeric_limits<index_t>::max();
     if (values && dim*N > 0) {
         out=values[0];
 #pragma omp parallel
         {
-            int out_local=out;
+            index_t out_local=out;
 #pragma omp for
-            for (int j=0; j<N; j++) {
+            for (index_t j=0; j<N; j++) {
                 for (int i=0; i<dim; i++)
                     out_local=std::min(out_local, values[INDEX2(i,j,dim)]);
             }
@@ -257,16 +258,16 @@ int getMinInt(int dim, int N, const int* values)
 }
 
 /// calculates the maximum value from a dim X N integer array
-int getMaxInt(int dim, int N, const int* values)
+index_t getMaxInt(int dim, dim_t N, const index_t* values)
 {
-    int out = std::numeric_limits<int>::min();
+    index_t out = std::numeric_limits<index_t>::min();
     if (values && dim*N > 0) {
         out=values[0];
 #pragma omp parallel
         {
-            int out_local=out;
+            index_t out_local=out;
 #pragma omp for
-            for (int j=0; j<N; j++) {
+            for (index_t j=0; j<N; j++) {
                 for (int i=0; i<dim; i++)
                     out_local=std::max(out_local, values[INDEX2(i,j,dim)]);
             }
@@ -277,18 +278,18 @@ int getMaxInt(int dim, int N, const int* values)
     return out;
 }
 
-std::pair<int,int> getMinMaxInt(int dim, int N, const int* values)
+std::pair<index_t,index_t> getMinMaxInt(int dim, dim_t N, const index_t* values)
 {
-    int vmin = std::numeric_limits<int>::max();
-    int vmax = std::numeric_limits<int>::min();
+    index_t vmin = std::numeric_limits<index_t>::max();
+    index_t vmax = std::numeric_limits<index_t>::min();
     if (values && dim*N > 0) {
         vmin = vmax = values[0];
 #pragma omp parallel
         {
-            int vmin_local=vmin;
-            int vmax_local=vmax;
+            index_t vmin_local=vmin;
+            index_t vmax_local=vmax;
 #pragma omp for
-            for (int j=0; j<N; j++) {
+            for (index_t j=0; j<N; j++) {
                 for (int i=0; i<dim; i++) {
                     vmin_local=std::min(vmin_local, values[INDEX2(i,j,dim)]);
                     vmax_local=std::max(vmax_local, values[INDEX2(i,j,dim)]);
@@ -301,23 +302,23 @@ std::pair<int,int> getMinMaxInt(int dim, int N, const int* values)
             }
         }
     }
-    return std::pair<int,int>(vmin,vmax);
+    return std::pair<index_t,index_t>(vmin,vmax);
 }
 
 /// calculates the minimum and maximum value from an integer array of length N
 /// disregarding the value 'ignore'
-std::pair<int,int> getFlaggedMinMaxInt(int N, const int* values, int ignore)
+std::pair<index_t,index_t> getFlaggedMinMaxInt(dim_t N, const index_t* values, index_t ignore)
 {
-    int vmin = std::numeric_limits<int>::max();
-    int vmax = std::numeric_limits<int>::min();
+    index_t vmin = std::numeric_limits<index_t>::max();
+    index_t vmax = std::numeric_limits<index_t>::min();
     if (values && N > 0) {
         vmin = vmax = values[0];
 #pragma omp parallel
         {
-            int vmin_local=vmin;
-            int vmax_local=vmax;
+            index_t vmin_local=vmin;
+            index_t vmax_local=vmax;
 #pragma omp for
-            for (int i=0; i<N; i++) {
+            for (index_t i=0; i<N; i++) {
                 if (values[i] != ignore) {
                     vmin_local=std::min(vmin_local, values[i]);
                     vmax_local=std::max(vmax_local, values[i]);
@@ -330,15 +331,15 @@ std::pair<int,int> getFlaggedMinMaxInt(int N, const int* values, int ignore)
             }
         }
     }
-    return std::pair<int,int>(vmin,vmax);
+    return std::pair<index_t,index_t>(vmin,vmax);
 }
 
 /// determines the indices of the positive entries in mask returning the
 /// length of index.
-std::vector<int> packMask(const std::vector<short>& mask)
+std::vector<index_t> packMask(const std::vector<short>& mask)
 {
-    std::vector<int> index;
-    for (int k=0; k<mask.size(); k++) {
+    std::vector<index_t> index;
+    for (index_t k=0; k<mask.size(); k++) {
         if (mask[k] >= 0) {
             index.push_back(k);
         }
diff --git a/finley/src/Util.h b/finley/src/Util.h
index c2ecd5a..4657e16 100644
--- a/finley/src/Util.h
+++ b/finley/src/Util.h
@@ -47,11 +47,11 @@ inline bool hasReducedIntegrationOrder(const escript::Data& in)
                 || fs == FINLEY_REDUCED_CONTACT_ELEMENTS_2);
 }
 
-void gather(int len, const int* index, int numData, const double* in,
+void gather(dim_t len, const index_t* index, dim_t numData, const double* in,
             double* out);
 
-void addScatter(int len, const int* index, int numData, const double* in,
-                double* out, int upperBound);
+void addScatter(dim_t len, const index_t* index, dim_t numData,
+                const double* in, double* out, index_t upperBound);
 
 void smallMatMult(int A1, int A2, double* A, int B2,
                   const std::vector<double>& B,
@@ -66,15 +66,15 @@ void invertSmallMat(int len, int dim, const double* A, double *invA,
 
 void normalVector(int len, int dim, int dim1, const double* A, double* Normal);
 
-int getMinInt(int dim, int N, const int* values);
+index_t getMinInt(int dim, dim_t N, const index_t* values);
 
-int getMaxInt(int dim, int N, const int* values);
+index_t getMaxInt(int dim, dim_t N, const index_t* values);
 
-std::pair<int,int> getMinMaxInt(int dim, int N, const int* values);
+std::pair<index_t,index_t> getMinMaxInt(int dim, dim_t N, const index_t* values);
 
-std::pair<int,int> getFlaggedMinMaxInt(int N, const int* values, int ignore);
+std::pair<index_t,index_t> getFlaggedMinMaxInt(dim_t N, const index_t* values, index_t ignore);
 
-std::vector<int> packMask(const std::vector<short>& mask);
+std::vector<index_t> packMask(const std::vector<short>& mask);
 
 void setValuesInUse(const int *values, const int numValues,
                     std::vector<int>& valuesInUse, esysUtils::JMPI& mpiinfo);
diff --git a/finley/src/generateReferenceElementList.py b/finley/src/generateReferenceElementList.py
index 9f7bdea..02592ca 100644
--- a/finley/src/generateReferenceElementList.py
+++ b/finley/src/generateReferenceElementList.py
@@ -15,6 +15,8 @@
 #
 #  this code generates the ReferenceElement_InfoList in ReferenceElements.cpp
 #
+from __future__ import print_function, division
+
 GEOBASE = {
 "Point": (1, 1, "Point", [0] ),
 "Line":  (2, 2, "Point", [0,2, 2,1]),
diff --git a/finley/test/python/FCT_benchmark.py b/finley/test/python/FCT_benchmark.py
index 66ea6d9..53ba0d7 100755
--- a/finley/test/python/FCT_benchmark.py
+++ b/finley/test/python/FCT_benchmark.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/FCT_test0.py b/finley/test/python/FCT_test0.py
index 0d33d0d..8e62b29 100644
--- a/finley/test/python/FCT_test0.py
+++ b/finley/test/python/FCT_test0.py
@@ -13,7 +13,8 @@
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
-from __future__ import print_function
+
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
@@ -53,7 +54,7 @@ print("u0 =",u0)
 T_END=dt
 print("dt = ",dt)
 while t<T_END:
-    print("time step t=",t+dt)	
+    print("time step t=",t+dt)
     u=fc.getSolution(dt)
     saveVTK("u.%s.vtu"%(c+1,),u=u)
     print("u =",u)
diff --git a/finley/test/python/FCT_test1.py b/finley/test/python/FCT_test1.py
index aa2a9a4..09ddb64 100644
--- a/finley/test/python/FCT_test1.py
+++ b/finley/test/python/FCT_test1.py
@@ -13,7 +13,8 @@
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
-from __future__ import print_function
+
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
@@ -122,7 +123,7 @@ print("QUALITY FCT: time = %s pi"%(t/pi),inf(u0),sup(u0),integrate(u0))
 #T_END=200*dt
 while t<T_END:
    
-    print("time step t=",t+dt)	
+    print("time step t=",t+dt)
     u=fc.getSolution(dt)
     print("QUALITY FCT: time = %s pi"%(t+dt/pi),inf(u),sup(u),integrate(u))
     saveVTK("u.%s.vtu"%(c+1,),u=u)
diff --git a/finley/test/python/FCT_test2.py b/finley/test/python/FCT_test2.py
index 834fc86..e972458 100644
--- a/finley/test/python/FCT_test2.py
+++ b/finley/test/python/FCT_test2.py
@@ -14,7 +14,7 @@
 #
 ##############################################################################
 
-from __future__ import print_function
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
@@ -123,8 +123,8 @@ saveVTK("u.%s.vtu"%c,u=u0)
 fc.setInitialSolution(u0)
 t=T0
 while t<T_END:
-    print("time step t=",t+dt)	
-    u=fc.solve(dt)	
+    print("time step t=",t+dt)
+    u=fc.solve(dt)
     if TEST_SUPG:
         #========== supg tests ================
         nn=max(ceil(dt/dt_supg),1.)
diff --git a/finley/test/python/OutTest.py b/finley/test/python/OutTest.py
index d1cab6c..52d08c6 100644
--- a/finley/test/python/OutTest.py
+++ b/finley/test/python/OutTest.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/PoissonSolverTest.py b/finley/test/python/PoissonSolverTest.py
index 787fff4..bffe6d2 100644
--- a/finley/test/python/PoissonSolverTest.py
+++ b/finley/test/python/PoissonSolverTest.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/RT2D.py b/finley/test/python/RT2D.py
index f9b7c37..7bfc893 100644
--- a/finley/test/python/RT2D.py
+++ b/finley/test/python/RT2D.py
@@ -1,3 +1,21 @@
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+from __future__ import print_function, division
+
 from esys.escript import *
 import esys.finley
 from esys.escript.models import StokesProblemCartesian
@@ -7,10 +25,10 @@ from esys.weipa import saveVTK
 
 
 #physical properties
-rho1 = 1000		#fluid density on bottom
-rho2 = 1010		#fluid density on top
-eta1 = 100.0		#fluid viscosity on bottom
-eta2 = 100.0		#fluid viscosity on top
+rho1 = 1000             #fluid density on bottom
+rho2 = 1010             #fluid density on top
+eta1 = 100.0            #fluid viscosity on bottom
+eta2 = 100.0            #fluid viscosity on top
 g=10.0
 
 #solver settings
diff --git a/finley/test/python/RecTest.py b/finley/test/python/RecTest.py
index a3c8948..8a68dba 100644
--- a/finley/test/python/RecTest.py
+++ b/finley/test/python/RecTest.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/axisymm-splitB.py b/finley/test/python/axisymm-splitB.py
index f250954..68cf120 100755
--- a/finley/test/python/axisymm-splitB.py
+++ b/finley/test/python/axisymm-splitB.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -90,7 +92,7 @@ while istep < nstep:
     print("time step :",istep," t = ",t)
     r=Function(dom).getX()[0]
     r_b=FunctionOnBoundary(dom).getX()[0]
-    print("	volume : ",integrate(r))
+    print("volume : ",integrate(r))
     #
     #  step 1:
     #
@@ -98,7 +100,7 @@ while istep < nstep:
     n_d=dom.getNormal()
     t_d=matrixmult(numpy.array([[0.,-1.],[1.,0]]),n_d)
     sigma_d=(sign(inner(t_d,U))*alpha_w*t_d-n_d)*Pen*clip(inner(n_d,U),0.)
-    print("	sigma_d =",inf(sigma_d),sup(sigma_d))
+    print("sigma_d =",inf(sigma_d),sup(sigma_d))
 
     momentumStep1.setValue(D=r*ro*kronecker(dom),
                            Y=r*ro*U+dt*r*[0.,-ro*g], 
@@ -134,8 +136,8 @@ while istep < nstep:
     #
     p+=dp         
     U=U_new
-    print("	U:",inf(U),sup(U))
-    print("	P:",inf(p),sup(p)) 
+    print("U:",inf(U),sup(U))
+    print("P:",inf(p),sup(p)) 
 
 
     p_pos=clip(p,small)
@@ -144,7 +146,7 @@ while istep < nstep:
     gamma=sqrt(2*((gg[0,0]-vol/3)**2+(gg[1,1]-vol/3)**2+(U[0]/r-vol/3)**2+(gg[1,0]+gg[0,1])**2/2))
     m=whereNegative(eta*gamma-alpha*p_pos) 
     eta_d=m*eta+(1.-m)*alpha*p_pos/(gamma+small)  
-    print("	viscosity =",inf(eta_d),sup(eta_d)) 
+    print("viscosity =",inf(eta_d),sup(eta_d)) 
     dev_stress=eta_d*(symmetric(gg)-2./3.*vol*kronecker(dom))
     #
     # step size control:
@@ -153,7 +155,7 @@ while istep < nstep:
     dt1=inf(dom.getSize()/(length(U)+small))
     dt2=inf(0.5*ro*(len**2)/eta_d)
     dt=dt1*dt2/(dt1+dt2)
-    print("	new step size = ",dt)
+    print("new step size = ",dt)
     #
     #  update geometry
     #
diff --git a/finley/test/python/blocktest.py b/finley/test/python/blocktest.py
index 4addb0a..7714328 100755
--- a/finley/test/python/blocktest.py
+++ b/finley/test/python/blocktest.py
@@ -25,6 +25,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/brick.py b/finley/test/python/brick.py
index 9338693..5dbb747 100644
--- a/finley/test/python/brick.py
+++ b/finley/test/python/brick.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/coalgas.py b/finley/test/python/coalgas.py
index 89075bf..4cd4da8 100755
--- a/finley/test/python/coalgas.py
+++ b/finley/test/python/coalgas.py
@@ -15,6 +15,9 @@
 """
 Gas in Coal Seam (fully coupled version)
 """
+
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/convection.py b/finley/test/python/convection.py
index 573428e..2beebd6 100644
--- a/finley/test/python/convection.py
+++ b/finley/test/python/convection.py
@@ -18,6 +18,9 @@ this is a convection simulation over a domain [0,L] X [0,L] x [0,H]
 It is solved in dimensionless form
 
 """
+
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/damage.py b/finley/test/python/damage.py
index 7b64803..e92c5f6 100644
--- a/finley/test/python/damage.py
+++ b/finley/test/python/damage.py
@@ -15,6 +15,9 @@
 """
 Damage mechanics 
 """
+
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -39,14 +42,14 @@ NE_L=int(ceil(L*NE_H/H))
 CASE=1
 
 #Boundary conditions: 
-#	axial loading: they applied a stress inversely proportional to the acoustic emission rate. We could have the axial forcing a stress or velocity inversely proportional to dalpha/dt (only when it is positive, and with the applied forcing rate going to zero when damage accumulation rate goes to a value we can determine in a test run with constant forcing). If this is to challenging or time consuming we could have a constant axial strain rate with very short time steps (at least when alpha [...]
+#   axial loading: they applied a stress inversely proportional to the acoustic emission rate. We could have the axial forcing a stress or velocity inversely proportional to dalpha/dt (only when it is positive, and with the applied forcing rate going to zero when damage accumulation rate goes to a value we can determine in a test run with constant forcing). If this is to challenging or time consuming we could have a constant axial strain rate with very short time steps (at least when alp [...]
 
 #Variables calculated and written to an output file:
-#	time
-#	differential stress (S_33-S_11)
-#	deviatoric stress (S_33 - p)
-#	Axial and transverse strain
-#	damage and damage rate
+#   time
+#   differential stress (S_33-S_11)
+#   deviatoric stress (S_33 - p)
+#   Axial and transverse strain
+#   damage and damage rate
 
 
 T_END=60000000.0*U.sec                       # end time
diff --git a/finley/test/python/generate_dumps.py b/finley/test/python/generate_dumps.py
index d2ef522..fd23d48 100644
--- a/finley/test/python/generate_dumps.py
+++ b/finley/test/python/generate_dumps.py
@@ -13,6 +13,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/generate_meshes.py b/finley/test/python/generate_meshes.py
index 3aacb1c..d3308cd 100644
--- a/finley/test/python/generate_meshes.py
+++ b/finley/test/python/generate_meshes.py
@@ -13,6 +13,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/linearElastic.py b/finley/test/python/linearElastic.py
index 205994e..16d535f 100755
--- a/finley/test/python/linearElastic.py
+++ b/finley/test/python/linearElastic.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/localization.py b/finley/test/python/localization.py
index 4d2433b..8290113 100644
--- a/finley/test/python/localization.py
+++ b/finley/test/python/localization.py
@@ -18,6 +18,9 @@ with a plastic layer above a viscous layer of thickness H_VISC.
 The yield condition is perturbed along a line at the boundary between
 viscous and plastic layer to trigger localization.
 """
+
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/lumping_advection_test.py b/finley/test/python/lumping_advection_test.py
index e0d0c72..a9505fe 100755
--- a/finley/test/python/lumping_advection_test.py
+++ b/finley/test/python/lumping_advection_test.py
@@ -1,8 +1,4 @@
-"""
-
-   a simple comparison for row-sum and HRZ lumping in case of the advection equation
 
-"""
 
 ##############################################################################
 #
@@ -19,6 +15,14 @@
 #
 ##############################################################################
 
+"""
+
+   a simple comparison for row-sum and HRZ lumping in case of the advection equation
+
+"""
+
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/lumping_wave_test.py b/finley/test/python/lumping_wave_test.py
index 7817c74..4a9c0b7 100755
--- a/finley/test/python/lumping_wave_test.py
+++ b/finley/test/python/lumping_wave_test.py
@@ -1,8 +1,3 @@
-"""
-
-   a simple comparison for row-sum and HRZ lumping in case of the wave equation
-
-"""
 
 ##############################################################################
 #
@@ -19,6 +14,12 @@
 #
 ##############################################################################
 
+"""
+   a simple comparison for row-sum and HRZ lumping in case of the wave equation
+"""
+
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/rayleigh_taylor_instabilty.py b/finley/test/python/rayleigh_taylor_instabilty.py
index 42fad15..c7133a6 100644
--- a/finley/test/python/rayleigh_taylor_instabilty.py
+++ b/finley/test/python/rayleigh_taylor_instabilty.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/rectangle.py b/finley/test/python/rectangle.py
index 41a5075..681152e 100644
--- a/finley/test/python/rectangle.py
+++ b/finley/test/python/rectangle.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/run_amg.py b/finley/test/python/run_amg.py
index ac2b80c..6efbfae 100644
--- a/finley/test/python/run_amg.py
+++ b/finley/test/python/run_amg.py
@@ -15,6 +15,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/run_darcy.py b/finley/test/python/run_darcy.py
index e042041..e03814a 100644
--- a/finley/test/python/run_darcy.py
+++ b/finley/test/python/run_darcy.py
@@ -15,6 +15,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/run_escriptOnFinley.py b/finley/test/python/run_escriptOnFinley.py
index cdddc6e..6b7b0a6 100644
--- a/finley/test/python/run_escriptOnFinley.py
+++ b/finley/test/python/run_escriptOnFinley.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -272,7 +274,7 @@ class Test_DiracOnFinley(unittest.TestCase):
     if mpisize==1:
       self.assertEquals(inf(v[0]), -10)
       self.assertEquals(inf(v[1]), 0.5)
-    v.setTaggedValue(500,(-100,-100))	# non-existant tag
+    v.setTaggedValue(500,(-100,-100))   # non-existant tag
     if mpisize==1:
       self.assertEquals(inf(v[0]), -10)
       self.assertEquals(inf(v[1]), 0.5)
@@ -300,7 +302,7 @@ class Test_DiracOnFinley(unittest.TestCase):
       self.assertEquals(inf(v[0]), -10)
       self.assertEquals(inf(v[1]), 0.5)
       self.assertEquals(inf(v[2]),-500)
-    v.setTaggedValue(500,(-100,-100, -100))	# non-existant tag
+    v.setTaggedValue(500,(-100,-100, -100))     # non-existant tag
     if mpisize==1:
       self.assertEquals(inf(v[0]), -10)
       self.assertEquals(inf(v[1]), 0.5)
@@ -328,7 +330,7 @@ class Test_DiracOnFinley(unittest.TestCase):
     if mpisize==1:
       self.assertEquals(inf(v[0]), -10)
       self.assertEquals(inf(v[1]), 0.5)
-    v.setTaggedValue(500,(-100,-100))	# non-existant tag
+    v.setTaggedValue(500,(-100,-100))   # non-existant tag
     if mpisize==1:
       self.assertEquals(inf(v[0]), -10)
       self.assertEquals(inf(v[1]), 0.5)
@@ -357,7 +359,7 @@ class Test_DiracOnFinley(unittest.TestCase):
       self.assertEquals(inf(v[0]), -10)
       self.assertEquals(inf(v[1]), 0.5)
       self.assertEquals(inf(v[2]), -0.5)
-    v.setTaggedValue(500,(-100,-100, -100))	# non-existant tag
+    v.setTaggedValue(500,(-100,-100, -100))     # non-existant tag
     if mpisize==1:
       self.assertEquals(inf(v[0]), -10)
       self.assertEquals(inf(v[1]), 0.5)
diff --git a/finley/test/python/run_generators.py b/finley/test/python/run_generators.py
index 2671ff5..980530a 100644
--- a/finley/test/python/run_generators.py
+++ b/finley/test/python/run_generators.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/run_inputOutput.py b/finley/test/python/run_inputOutput.py
index 29555cb..65e5ac3 100644
--- a/finley/test/python/run_inputOutput.py
+++ b/finley/test/python/run_inputOutput.py
@@ -14,7 +14,7 @@
 #
 ##############################################################################
 
-from __future__ import print_function
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/finley/test/python/run_linearPDEsOnFinley1_2D1.py b/finley/test/python/run_linearPDEsOnFinley1_2D1.py
index b1a1a31..1aef1ef 100644
--- a/finley/test/python/run_linearPDEsOnFinley1_2D1.py
+++ b/finley/test/python/run_linearPDEsOnFinley1_2D1.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
diff --git a/finley/test/python/run_linearPDEsOnFinley1_2D2.py b/finley/test/python/run_linearPDEsOnFinley1_2D2.py
index 4b83102..8113ebd 100644
--- a/finley/test/python/run_linearPDEsOnFinley1_2D2.py
+++ b/finley/test/python/run_linearPDEsOnFinley1_2D2.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
diff --git a/finley/test/python/run_linearPDEsOnFinley1_3D1.py b/finley/test/python/run_linearPDEsOnFinley1_3D1.py
index 6626d3b..eb1a0f4 100644
--- a/finley/test/python/run_linearPDEsOnFinley1_3D1.py
+++ b/finley/test/python/run_linearPDEsOnFinley1_3D1.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
diff --git a/finley/test/python/run_linearPDEsOnFinley1_3D2_part1.py b/finley/test/python/run_linearPDEsOnFinley1_3D2_part1.py
index d2e3d57..18b92c1 100644
--- a/finley/test/python/run_linearPDEsOnFinley1_3D2_part1.py
+++ b/finley/test/python/run_linearPDEsOnFinley1_3D2_part1.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
@@ -20,15 +22,7 @@ http://www.opensource.org/licenses/osl-3.0.php"""
 __url__="https://launchpad.net/escript-finley"
 
 """
-Test suite for the linearPDE  and pdetools test on finley
-
-:remark:
-
-:var __author__: name of author
-:var __licence__: licence agreement
-:var __url__: url entry point on documentation
-:var __version__: version
-:var __date__: date of the version
+Test suite for the linearPDE and pdetools test on finley
 """
 
 __author__="Lutz Gross, l.gross at uq.edu.au"
@@ -37,14 +31,9 @@ import os
 
 import esys.escriptcore.utestselect as unittest
 from esys.escriptcore.testing import *
-from test_linearPDEs import Test_Poisson,Test_LinearPDE, Test_LinearPDE_noLumping, Test_TransportPDE
-from test_assemblage import Test_assemblage_2Do1, Test_assemblage_2Do2, Test_assemblage_3Do1, Test_assemblage_3Do2, \
-                            Test_assemblage_2Do1_Contact,Test_assemblage_2Do2_Contact, Test_assemblage_3Do1_Contact, Test_assemblage_3Do2_Contact
-from test_pdetools import Test_pdetools, Test_pdetools_noLumping
+from test_linearPDEs import Test_LinearPDE
 from esys.escript import *
-from esys.finley import Rectangle,Brick,JoinFaces, ReadMesh
-import sys
-
+from esys.finley import Brick
 
 try:
      FINLEY_TEST_DATA=os.environ['FINLEY_TEST_DATA']
diff --git a/finley/test/python/run_linearPDEsOnFinley1_3D2_part2.py b/finley/test/python/run_linearPDEsOnFinley1_3D2_part2.py
index a179998..8a05092 100644
--- a/finley/test/python/run_linearPDEsOnFinley1_3D2_part2.py
+++ b/finley/test/python/run_linearPDEsOnFinley1_3D2_part2.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
@@ -20,15 +22,7 @@ http://www.opensource.org/licenses/osl-3.0.php"""
 __url__="https://launchpad.net/escript-finley"
 
 """
-Test suite for the linearPDE  and pdetools test on finley
-
-:remark:
-
-:var __author__: name of author
-:var __licence__: licence agreement
-:var __url__: url entry point on documentation
-:var __version__: version
-:var __date__: date of the version
+Test suite for the linearPDE and pdetools test on finley
 """
 
 __author__="Lutz Gross, l.gross at uq.edu.au"
@@ -37,15 +31,9 @@ import os
 
 import esys.escriptcore.utestselect as unittest
 from esys.escriptcore.testing import *
-from test_linearPDEs import Test_Poisson,Test_LinearPDE, \
-    Test_LinearPDE_noLumping, Test_TransportPDE
-from test_assemblage import Test_assemblage_2Do1, Test_assemblage_2Do2, \
-    Test_assemblage_3Do1, Test_assemblage_3Do2, Test_assemblage_2Do1_Contact, \
-    Test_assemblage_2Do2_Contact, Test_assemblage_3Do1_Contact, \
-    Test_assemblage_3Do2_Contact
-from test_pdetools import Test_pdetools, Test_pdetools_noLumping
+from test_pdetools import Test_pdetools
 from esys.escript import *
-from esys.finley import Rectangle,Brick,JoinFaces, ReadMesh
+from esys.finley import Brick
 
 
 try:
diff --git a/finley/test/python/run_linearPDEsOnFinley1_3D2_part3-1.py b/finley/test/python/run_linearPDEsOnFinley1_3D2_part3-1.py
index ef201ec..35900a6 100644
--- a/finley/test/python/run_linearPDEsOnFinley1_3D2_part3-1.py
+++ b/finley/test/python/run_linearPDEsOnFinley1_3D2_part3-1.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
@@ -20,15 +22,7 @@ http://www.opensource.org/licenses/osl-3.0.php"""
 __url__="https://launchpad.net/escript-finley"
 
 """
-Test suite for the linearPDE  and pdetools test on finley
-
-:remark:
-
-:var __author__: name of author
-:var __licence__: licence agreement
-:var __url__: url entry point on documentation
-:var __version__: version
-:var __date__: date of the version
+Test suite for the linearPDE and pdetools test on finley
 """
 
 __author__="Lutz Gross, l.gross at uq.edu.au"
@@ -37,12 +31,8 @@ import os
 
 import esys.escriptcore.utestselect as unittest
 from esys.escriptcore.testing import *
-#from test_linearPDEs import Test_Poisson,Test_LinearPDE, Test_LinearPDE_noLumping, Test_TransportPDE
 from test_assemblage_3Do2 import Test_assemblage_3Do2
-#from test_pdetools import Test_pdetools, Test_pdetools_noLumping
-#from esys.escript import *
-from esys.finley import Rectangle,Brick,JoinFaces, ReadMesh
-import sys
+from esys.finley import Brick
 
 try:
      FINLEY_TEST_DATA=os.environ['FINLEY_TEST_DATA']
diff --git a/finley/test/python/run_linearPDEsOnFinley1_3D2_part3-2.py b/finley/test/python/run_linearPDEsOnFinley1_3D2_part3-2.py
index f4a52a3..ef0ac2c 100644
--- a/finley/test/python/run_linearPDEsOnFinley1_3D2_part3-2.py
+++ b/finley/test/python/run_linearPDEsOnFinley1_3D2_part3-2.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
@@ -20,15 +22,7 @@ http://www.opensource.org/licenses/osl-3.0.php"""
 __url__="https://launchpad.net/escript-finley"
 
 """
-Test suite for the linearPDE  and pdetools test on finley
-
-:remark:
-
-:var __author__: name of author
-:var __licence__: licence agreement
-:var __url__: url entry point on documentation
-:var __version__: version
-:var __date__: date of the version
+Test suite for the linearPDE and pdetools test on finley
 """
 
 __author__="Lutz Gross, l.gross at uq.edu.au"
@@ -37,12 +31,8 @@ import os
 
 import esys.escriptcore.utestselect as unittest
 from esys.escriptcore.testing import *
-#from test_linearPDEs import Test_Poisson,Test_LinearPDE, Test_LinearPDE_noLumping, Test_TransportPDE
 from test_assemblage_3Do2 import Test_assemblage_3Do2_cont
-#from test_pdetools import Test_pdetools, Test_pdetools_noLumping
-#from esys.escript import *
-from esys.finley import Rectangle,Brick,JoinFaces, ReadMesh
-import sys
+from esys.finley import Brick
 
 
 try:
diff --git a/finley/test/python/run_linearPDEsOnFinley1_3D2_part3-3.py b/finley/test/python/run_linearPDEsOnFinley1_3D2_part3-3.py
index 8a566d9..b3fd53e 100644
--- a/finley/test/python/run_linearPDEsOnFinley1_3D2_part3-3.py
+++ b/finley/test/python/run_linearPDEsOnFinley1_3D2_part3-3.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
@@ -20,15 +22,7 @@ http://www.opensource.org/licenses/osl-3.0.php"""
 __url__="https://launchpad.net/escript-finley"
 
 """
-Test suite for the linearPDE  and pdetools test on finley
-
-:remark:
-
-:var __author__: name of author
-:var __licence__: licence agreement
-:var __url__: url entry point on documentation
-:var __version__: version
-:var __date__: date of the version
+Test suite for the linearPDE and pdetools test on finley
 """
 
 __author__="Lutz Gross, l.gross at uq.edu.au"
@@ -37,13 +31,8 @@ import os
 
 import esys.escriptcore.utestselect as unittest
 from esys.escriptcore.testing import *
-#from test_linearPDEs import Test_Poisson,Test_LinearPDE, Test_LinearPDE_noLumping, Test_TransportPDE
 from test_assemblage_3Do2 import Test_assemblage_3Do2_cont2
-#from test_pdetools import Test_pdetools, Test_pdetools_noLumping
-#from esys.escript import *
-from esys.finley import Rectangle,Brick,JoinFaces, ReadMesh
-import sys
-
+from esys.finley import Brick
 
 try:
      FINLEY_TEST_DATA=os.environ['FINLEY_TEST_DATA']
diff --git a/finley/test/python/run_linearPDEsOnFinley1_3D2_part3-4.py b/finley/test/python/run_linearPDEsOnFinley1_3D2_part3-4.py
index b5a92b1..5310c7a 100644
--- a/finley/test/python/run_linearPDEsOnFinley1_3D2_part3-4.py
+++ b/finley/test/python/run_linearPDEsOnFinley1_3D2_part3-4.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
@@ -20,15 +22,7 @@ http://www.opensource.org/licenses/osl-3.0.php"""
 __url__="https://launchpad.net/escript-finley"
 
 """
-Test suite for the linearPDE  and pdetools test on finley
-
-:remark:
-
-:var __author__: name of author
-:var __licence__: licence agreement
-:var __url__: url entry point on documentation
-:var __version__: version
-:var __date__: date of the version
+Test suite for the linearPDE and pdetools test on finley
 """
 
 __author__="Lutz Gross, l.gross at uq.edu.au"
@@ -37,12 +31,8 @@ import os
 
 import esys.escriptcore.utestselect as unittest
 from esys.escriptcore.testing import *
-#from test_linearPDEs import Test_Poisson,Test_LinearPDE, Test_LinearPDE_noLumping, Test_TransportPDE
 from test_assemblage_3Do2 import Test_assemblage_3Do2_cont3 
-#from test_pdetools import Test_pdetools, Test_pdetools_noLumping
-#from esys.escript import *
-from esys.finley import Rectangle,Brick,JoinFaces, ReadMesh
-import sys
+from esys.finley import Brick
 
 
 try:
diff --git a/finley/test/python/run_linearPDEsOnFinley1_3D2_part4.py b/finley/test/python/run_linearPDEsOnFinley1_3D2_part4.py
index 866681e..7f486c2 100644
--- a/finley/test/python/run_linearPDEsOnFinley1_3D2_part4.py
+++ b/finley/test/python/run_linearPDEsOnFinley1_3D2_part4.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
@@ -20,15 +22,7 @@ http://www.opensource.org/licenses/osl-3.0.php"""
 __url__="https://launchpad.net/escript-finley"
 
 """
-Test suite for the linearPDE  and pdetools test on finley
-
-:remark:
-
-:var __author__: name of author
-:var __licence__: licence agreement
-:var __url__: url entry point on documentation
-:var __version__: version
-:var __date__: date of the version
+Test suite for the linearPDE and pdetools test on finley
 """
 
 __author__="Lutz Gross, l.gross at uq.edu.au"
@@ -37,13 +31,9 @@ import os
 
 import esys.escriptcore.utestselect as unittest
 from esys.escriptcore.testing import *
-from test_linearPDEs import Test_Poisson,Test_LinearPDE, Test_LinearPDE_noLumping, Test_TransportPDE
-from test_assemblage import Test_assemblage_2Do1, Test_assemblage_2Do2, Test_assemblage_3Do1, Test_assemblage_3Do2, \
-                            Test_assemblage_2Do1_Contact,Test_assemblage_2Do2_Contact, Test_assemblage_3Do1_Contact, Test_assemblage_3Do2_Contact
-from test_pdetools import Test_pdetools, Test_pdetools_noLumping
+from test_linearPDEs import Test_TransportPDE
 from esys.escript import *
-from esys.finley import Rectangle,Brick,JoinFaces, ReadMesh
-import sys
+from esys.finley import Brick
 
 
 try:
diff --git a/finley/test/python/run_linearPDEsOnFinley2.py b/finley/test/python/run_linearPDEsOnFinley2.py
index e95d177..c30d1eb 100644
--- a/finley/test/python/run_linearPDEsOnFinley2.py
+++ b/finley/test/python/run_linearPDEsOnFinley2.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
diff --git a/finley/test/python/run_linearPDEsOnFinley3.py b/finley/test/python/run_linearPDEsOnFinley3.py
index 489fff4..0bf705e 100644
--- a/finley/test/python/run_linearPDEsOnFinley3.py
+++ b/finley/test/python/run_linearPDEsOnFinley3.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
diff --git a/finley/test/python/run_linearPDEsOnFinleyMacro.py b/finley/test/python/run_linearPDEsOnFinleyMacro.py
index 680277f..8e0bb9b 100644
--- a/finley/test/python/run_linearPDEsOnFinleyMacro.py
+++ b/finley/test/python/run_linearPDEsOnFinleyMacro.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
diff --git a/finley/test/python/run_models.py b/finley/test/python/run_models.py
index df14aa8..afc0914 100644
--- a/finley/test/python/run_models.py
+++ b/finley/test/python/run_models.py
@@ -14,6 +14,7 @@
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
@@ -25,19 +26,15 @@ __url__="https://launchpad.net/escript-finley"
 import esys.escriptcore.utestselect as unittest
 from esys.escriptcore.testing import *
       
-
-
-VERBOSE=False  and True
+VERBOSE = False
 
 from esys.escript import *
-from esys.escript.models import StokesProblemCartesian, PowerLaw, IncompressibleIsotropicFlowCartesian, FaultSystem, DarcyFlow
+from esys.escript.models import StokesProblemCartesian, PowerLaw, IncompressibleIsotropicFlowCartesian, FaultSystem
 from esys.escript.models import Mountains
 from esys.finley import Rectangle, Brick
 
 from math import pi
-import numpy
-import sys
-import os
+import numpy, os, sys, tempfile
 #======================================================================
 try:
      FINLEY_WORKDIR=os.environ['FINLEY_WORKDIR']
@@ -1459,7 +1456,7 @@ class Test_FaultSystem(unittest.TestCase):
       s,d=f.getSideAndDistance([5.,12.,-4], tag=2)
       self.assertTrue( s<0, "wrong side.")
       self.assertTrue( abs(d-2.*0.70710678118654757)<self.EPS, "wrong distance.")
- 
+
 if __name__ == '__main__':
     run_tests(__name__, exit_on_failure=True)
 
diff --git a/finley/test/python/run_nlpde2dOnFinley.py b/finley/test/python/run_nlpde2dOnFinley.py
index be96595..4002d21 100644
--- a/finley/test/python/run_nlpde2dOnFinley.py
+++ b/finley/test/python/run_nlpde2dOnFinley.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
diff --git a/finley/test/python/run_nlpde3dOnFinley.py b/finley/test/python/run_nlpde3dOnFinley.py
index 277b409..00c84b3 100644
--- a/finley/test/python/run_nlpde3dOnFinley.py
+++ b/finley/test/python/run_nlpde3dOnFinley.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
diff --git a/finley/test/python/run_simplesolve.py b/finley/test/python/run_simplesolve.py
index 95a6d78..70975e6 100644
--- a/finley/test/python/run_simplesolve.py
+++ b/finley/test/python/run_simplesolve.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/run_splitworldOnFinley.py b/finley/test/python/run_splitworldOnFinley.py
index 095f581..1c14474 100644
--- a/finley/test/python/run_splitworldOnFinley.py
+++ b/finley/test/python/run_splitworldOnFinley.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -25,7 +27,7 @@ import esys.escriptcore.utestselect as unittest
 from esys.escriptcore.testing import *
 from esys.escript import *
 from esys.finley import Rectangle, Brick, ReadMesh, ReadGmsh
-from test_splitworld import Test_SplitWorld
+from test_splitworld import Test_SplitWorld, sw_testing
 
 
 mpisize=getMPISizeWorld()
@@ -37,6 +39,30 @@ class Test_SplitOnFinley(Test_SplitWorld):
     
   def tearDown(self):
     del self.domainpars
+    
+class Test_finley_sw_2D(sw_testing):
+    def setUp(self):
+        from esys.finley import Rectangle
+        self.domain_ctr=Rectangle
+        self.domain_vec=(6,6)
+        self.domain_dict={}
+
+    def tearDown(self):
+        del self.domain_ctr
+        del self.domain_vec
+
+
+class Test_finley_sw_3D(sw_testing):
+    def setUp(self):
+        from esys.finley import Brick
+        self.domain_ctr=Brick
+        self.domain_vec=(6,6,6)
+        self.domain_dict={}
+        
+    def tearDown(self):
+        del self.domain_ctr
+        del self.domain_vec
+    
 
 
 
diff --git a/finley/test/python/run_utilOnFinley.py b/finley/test/python/run_utilOnFinley.py
index dfb2b52..3acc164 100644
--- a/finley/test/python/run_utilOnFinley.py
+++ b/finley/test/python/run_utilOnFinley.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -47,6 +49,7 @@ except KeyError:
 
 FINLEY_TEST_MESH_PATH=os.path.join(FINLEY_TEST_DATA,"data_meshes")
 
+FINLEY_MERGE_ERROR = "FinleyAdapterException: Mesh_merge: more than 1 processor is not supported yet."
 
 NE=4 # number elements, must be even
 
@@ -158,98 +161,154 @@ class Test_Util_SpatialFunctionsOnFinleyHex3DMacro(Test_Util_SpatialFunctions_no
         del self.order
         del self.domain
 
- at unittest.skip("Test never added to test list")
 class Test_Util_SpatialFunctionsOnFinleyHex2DOrder1withContact(Test_Util_SpatialFunctions_noGradOnBoundary):
     def setUp(self):
         self.order=1
-        d1 = Rectangle(n0=NE/2+1,n1=NE,l0=0.5,order=1,useElementsOnFace=0)
-        d2 = Rectangle(n0=NE/2,n1=NE,l0=0.5,order=1,useElementsOnFace=0)
+        d1 = Rectangle(n0=NE//2+1,n1=NE,l0=0.5,order=1,useElementsOnFace=0)
+        d2 = Rectangle(n0=NE//2,n1=NE,l0=0.5,order=1,useElementsOnFace=0)
         d2.setX(d2.getX()+[0.5,0.])
-        self.domain = JoinFaces([d1,d2],optimize=False)
+        if getMPISizeWorld() > 1:
+            with self.assertRaises(RuntimeError) as pkg:
+                self.domain = JoinFaces([d1,d2],optimize=False)
+            e = pkg.exception
+            if FINLEY_MERGE_ERROR not in str(e):
+                raise e
+            raise unittest.SkipTest(FINLEY_MERGE_ERROR)
+        else:
+            self.domain = JoinFaces([d1,d2],optimize=False)
     def tearDown(self):
         del self.order
         del self.domain
 
- at unittest.skip("Test never added to test list")
 class Test_Util_SpatialFunctionsOnFinleyHex2DOrder2withContact(Test_Util_SpatialFunctions_noGradOnBoundary):
     def setUp(self):
         self.order=2
-        d1 = Rectangle(n0=NE/2,n1=NE,l0=0.5,order=2,useElementsOnFace=0)
-        d2 = Rectangle(n0=NE/2,n1=NE,l0=0.5,order=2,useElementsOnFace=0)
+        d1 = Rectangle(n0=NE//2,n1=NE,l0=0.5,order=2,useElementsOnFace=0)
+        d2 = Rectangle(n0=NE//2,n1=NE,l0=0.5,order=2,useElementsOnFace=0)
         d2.setX(d2.getX()+[0.5,0.])
-        self.domain = JoinFaces([d1,d2],optimize=False)
+        if getMPISizeWorld() > 1:
+            with self.assertRaises(RuntimeError) as pkg:
+                self.domain = JoinFaces([d1,d2],optimize=False)
+            e = pkg.exception
+            if FINLEY_MERGE_ERROR not in str(e):
+                raise e
+            raise unittest.SkipTest(FINLEY_MERGE_ERROR)
+        else:
+            self.domain = JoinFaces([d1,d2],optimize=False)
     def tearDown(self):
         del self.order
         del self.domain
 
- at unittest.skip("Test never added to test list")
 class Test_Util_SpatialFunctionsOnFinleyHex3DOrder1withContact(Test_Util_SpatialFunctions_noGradOnBoundary):
     def setUp(self):
         self.order=1
-        d1 = Brick(n0=NE/2+1,n1=NE,n2=NE,l0=0.5,order=1,useElementsOnFace=0)
-        d2 = Brick(n0=NE/2,n1=NE,n2=NE,l0=0.5,order=1,useElementsOnFace=0)
+        d1 = Brick(n0=NE//2+1,n1=NE,n2=NE,l0=0.5,order=1,useElementsOnFace=0)
+        d2 = Brick(n0=NE//2,n1=NE,n2=NE,l0=0.5,order=1,useElementsOnFace=0)
         d2.setX(d2.getX()+[0.5,0.,0.])
-        self.domain = JoinFaces([d1,d2],optimize=False)
+        if getMPISizeWorld() > 1:
+            with self.assertRaises(RuntimeError) as pkg:
+                self.domain = JoinFaces([d1,d2],optimize=False)
+            e = pkg.exception
+            if FINLEY_MERGE_ERROR not in str(e):
+                raise e
+            raise unittest.SkipTest(FINLEY_MERGE_ERROR)
+        else:
+            self.domain = JoinFaces([d1,d2],optimize=False)
     def tearDown(self):
         del self.order
         del self.domain
 
- at unittest.skip("Test never added to test list")
 class Test_Util_SpatialFunctionsOnFinleyHex3DOrder2withContact(Test_Util_SpatialFunctions_noGradOnBoundary):
     def setUp(self):
         self.order=2
-        d1 = Brick(n0=NE/2+1,n1=NE,n2=NE,l0=0.5,order=2,useElementsOnFace=0)
-        d2 = Brick(n0=NE/2,n1=NE,n2=NE,l0=0.5,order=2,useElementsOnFace=0)
+        d1 = Brick(n0=NE//2+1,n1=NE,n2=NE,l0=0.5,order=2,useElementsOnFace=0)
+        d2 = Brick(n0=NE//2,n1=NE,n2=NE,l0=0.5,order=2,useElementsOnFace=0)
         d2.setX(d2.getX()+[0.5,0.,0.])
-        self.domain = JoinFaces([d1,d2],optimize=False)
+        if getMPISizeWorld() > 1:
+            with self.assertRaises(RuntimeError) as pkg:
+                self.domain = JoinFaces([d1,d2],optimize=False)
+            e = pkg.exception
+            if FINLEY_MERGE_ERROR not in str(e):
+                raise e
+            raise unittest.SkipTest(FINLEY_MERGE_ERROR)
+        else:
+            self.domain = JoinFaces([d1,d2],optimize=False)
     def tearDown(self):
         del self.order
         del self.domain
 
- at unittest.skip("Test never added to test list")
 class Test_Util_SpatialFunctionsOnFinleyHex2DOrder1useElementsOnFacewithContact(Test_Util_SpatialFunctions):
     def setUp(self):
         self.order=1
-        d1 = Rectangle(n0=NE/2+1,n1=NE,l0=0.5,order=1,useElementsOnFace=True)
-        d2 = Rectangle(n0=NE/2,n1=NE,l0=0.5,order=1,useElementsOnFace=True)
+        d1 = Rectangle(n0=NE//2+1,n1=NE,l0=0.5,order=1,useElementsOnFace=True)
+        d2 = Rectangle(n0=NE//2,n1=NE,l0=0.5,order=1,useElementsOnFace=True)
         d2.setX(d2.getX()+[0.5,0.])
-        self.domain = JoinFaces([d1,d2],optimize=False)
+        if getMPISizeWorld() > 1:
+            with self.assertRaises(RuntimeError) as pkg:
+                self.domain = JoinFaces([d1,d2],optimize=False)
+            e = pkg.exception
+            if FINLEY_MERGE_ERROR not in str(e):
+                raise e
+            raise unittest.SkipTest(FINLEY_MERGE_ERROR)
+        else:
+            self.domain = JoinFaces([d1,d2],optimize=False)
     def tearDown(self):
         del self.order
         del self.domain
 
- at unittest.skip("Test never added to test list")
 class Test_Util_SpatialFunctionsOnFinleyHex2DOrder2useElementsOnFacewithContact(Test_Util_SpatialFunctions):
     def setUp(self):
         self.order=2
-        d1 = Rectangle(n0=NE/2+1,n1=NE,l0=0.5,order=2,useElementsOnFace=True)
-        d2 = Rectangle(n0=NE/2,n1=NE,l0=0.5,order=2,useElementsOnFace=True)
+        d1 = Rectangle(n0=NE//2+1,n1=NE,l0=0.5,order=2,useElementsOnFace=True)
+        d2 = Rectangle(n0=NE//2,n1=NE,l0=0.5,order=2,useElementsOnFace=True)
         d2.setX(d2.getX()+[0.5,0.])
-        self.domain = JoinFaces([d1,d2],optimize=False)
+        if getMPISizeWorld() > 1:
+            with self.assertRaises(RuntimeError) as pkg:
+                self.domain = JoinFaces([d1,d2],optimize=False)
+            e = pkg.exception
+            if FINLEY_MERGE_ERROR not in str(e):
+                raise e
+            raise unittest.SkipTest(FINLEY_MERGE_ERROR)
+        else:
+            self.domain = JoinFaces([d1,d2],optimize=False)
     def tearDown(self):
         del self.order
         del self.domain
 
- at unittest.skip("Test never added to test list")
 class Test_Util_SpatialFunctionsOnFinleyHex3DOrder1useElementsOnFacewithContact(Test_Util_SpatialFunctions):
     def setUp(self):
         self.order=1
-        d1 = Brick(n0=NE/2,n1=NE,n2=NE,l0=0.5,order=1,useElementsOnFace=True)
-        d2 = Brick(n0=NE/2+1,n1=NE,n2=NE,l0=0.5,order=1,useElementsOnFace=True)
+        d1 = Brick(n0=NE//2,n1=NE,n2=NE,l0=0.5,order=1,useElementsOnFace=True)
+        d2 = Brick(n0=NE//2+1,n1=NE,n2=NE,l0=0.5,order=1,useElementsOnFace=True)
         d2.setX(d2.getX()+[0.5,0.,0.])
-        self.domain = JoinFaces([d1,d2],optimize=False)
+        if getMPISizeWorld() > 1:
+            with self.assertRaises(RuntimeError) as pkg:
+                self.domain = JoinFaces([d1,d2],optimize=False)
+            e = pkg.exception
+            if FINLEY_MERGE_ERROR not in str(e):
+                raise e
+            raise unittest.SkipTest(FINLEY_MERGE_ERROR)
+        else:
+            self.domain = JoinFaces([d1,d2],optimize=False)
     def tearDown(self):
         del self.order
         del self.domain
 
- at unittest.skip("Test never added to test list")
 class Test_Util_SpatialFunctionsOnFinleyHex3DOrder2useElementsOnFacewithContact(Test_Util_SpatialFunctions):
     def setUp(self):
         self.order=2
-        d1 = Brick(n0=NE/2,n1=NE,n2=NE,l0=0.5,order=2,useElementsOnFace=True)
-        d2 = Brick(n0=NE/2+1,n1=NE,n2=NE,l0=0.5,order=2,useElementsOnFace=True)
+        d1 = Brick(n0=NE//2,n1=NE,n2=NE,l0=0.5,order=2,useElementsOnFace=True)
+        d2 = Brick(n0=NE//2+1,n1=NE,n2=NE,l0=0.5,order=2,useElementsOnFace=True)
         d2.setX(d2.getX()+[0.5,0.,0.])
-        self.domain = JoinFaces([d1,d2],optimize=False)
+        if getMPISizeWorld() > 1:
+            with self.assertRaises(RuntimeError) as pkg:
+                self.domain = JoinFaces([d1,d2],optimize=False)
+            e = pkg.exception
+            if FINLEY_MERGE_ERROR not in str(e):
+                raise e
+            raise unittest.SkipTest(FINLEY_MERGE_ERROR)
+        else:
+            self.domain = JoinFaces([d1,d2],optimize=False)
     def tearDown(self):
         del self.order
         del self.domain
diff --git a/finley/test/python/run_visualization_interface.py b/finley/test/python/run_visualization_interface.py
index 387e031..b2c6f59 100644
--- a/finley/test/python/run_visualization_interface.py
+++ b/finley/test/python/run_visualization_interface.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/runcoalgas.py b/finley/test/python/runcoalgas.py
index b2aca86..40c20ed 100755
--- a/finley/test/python/runcoalgas.py
+++ b/finley/test/python/runcoalgas.py
@@ -15,6 +15,9 @@
 """
 Coal Seam gasL ECLIPSE test case
 """
+
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/seismic_wave.py b/finley/test/python/seismic_wave.py
index 8d999b6..8fbd9cb 100644
--- a/finley/test/python/seismic_wave.py
+++ b/finley/test/python/seismic_wave.py
@@ -13,7 +13,8 @@
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
-from __future__ import print_function
+
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
@@ -155,7 +156,7 @@ def getDomain():
     global netotal
     
     v_p={}
-    for tag in list(rho_tab.keys()):
+    for tag in sorted(rho_tab.keys()):
        v_p[tag]=sqrt((2*mu_tab[tag]+lmbd_tab[tag])/rho_tab[tag])
     v_p_ref=min(v_p.values())
     print("velocities: bedrock = %s, sand = %s, water =%s, absorber =%s, reference =%s"%(v_p[bedrock],v_p[sand],v_p[water],v_p[absorber],v_p_ref))
@@ -283,7 +284,7 @@ def getMaterialProperties(dom):
    lmbd=Scalar(lmbd_tab[bedrock],Function(dom))
    tags=Scalar(bedrock,Function(dom))
    
-   for tag in list(rho_tab.keys()):
+   for tag in sorted(rho_tab.keys()):
       rho.setTaggedValue(tag,rho_tab[tag])
       eta.setTaggedValue(tag,eta_tab[tag])
       mu.setTaggedValue(tag,mu_tab[tag])
diff --git a/finley/test/python/slip_stress_mesh_old.py b/finley/test/python/slip_stress_mesh_old.py
index 940b1b1..1ef10fe 100644
--- a/finley/test/python/slip_stress_mesh_old.py
+++ b/finley/test/python/slip_stress_mesh_old.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/slip_stress_old.py b/finley/test/python/slip_stress_old.py
index 9c7b56d..3e4c829 100644
--- a/finley/test/python/slip_stress_old.py
+++ b/finley/test/python/slip_stress_old.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/stokes_problems.py b/finley/test/python/stokes_problems.py
index 4848e59..db90375 100644
--- a/finley/test/python/stokes_problems.py
+++ b/finley/test/python/stokes_problems.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/subduction1.py b/finley/test/python/subduction1.py
index 5cf4f60..f48460c 100644
--- a/finley/test/python/subduction1.py
+++ b/finley/test/python/subduction1.py
@@ -13,6 +13,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/subduction1_gen.py b/finley/test/python/subduction1_gen.py
index f404d08..1870fcf 100644
--- a/finley/test/python/subduction1_gen.py
+++ b/finley/test/python/subduction1_gen.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/finley/test/python/time_chunks.py b/finley/test/python/time_chunks.py
index 6d5a872..21b1784 100644
--- a/finley/test/python/time_chunks.py
+++ b/finley/test/python/time_chunks.py
@@ -14,7 +14,7 @@
 #
 ##############################################################################
 
-from __future__ import print_function
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/finley/test/python/tp.py b/finley/test/python/tp.py
index 926c565..1ac6b10 100644
--- a/finley/test/python/tp.py
+++ b/finley/test/python/tp.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/modellib/py_src/__init__.py b/modellib/py_src/__init__.py
index e69de29..f4da971 100644
--- a/modellib/py_src/__init__.py
+++ b/modellib/py_src/__init__.py
@@ -0,0 +1,18 @@
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+
+from __future__ import division, print_function
diff --git a/modellib/py_src/flow.py b/modellib/py_src/flow.py
index ee4553e..a641041 100644
--- a/modellib/py_src/flow.py
+++ b/modellib/py_src/flow.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/modellib/py_src/geometry.py b/modellib/py_src/geometry.py
index ad50c2f..77cc2a0 100644
--- a/modellib/py_src/geometry.py
+++ b/modellib/py_src/geometry.py
@@ -1,4 +1,4 @@
-from __future__ import print_function
+
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -14,6 +14,8 @@ from __future__ import print_function
 #
 ##############################################################################
 
+from __future__ import division, print_function
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/modellib/py_src/input.py b/modellib/py_src/input.py
index 75db419..397107f 100644
--- a/modellib/py_src/input.py
+++ b/modellib/py_src/input.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import division, print_function
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/modellib/py_src/materials.py b/modellib/py_src/materials.py
index 6e54120..be1571d 100644
--- a/modellib/py_src/materials.py
+++ b/modellib/py_src/materials.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/modellib/py_src/mechanics.py b/modellib/py_src/mechanics.py
index e729f0d..c077413 100644
--- a/modellib/py_src/mechanics.py
+++ b/modellib/py_src/mechanics.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/modellib/py_src/probe.py b/modellib/py_src/probe.py
index 1767e33..5a97c06 100644
--- a/modellib/py_src/probe.py
+++ b/modellib/py_src/probe.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/modellib/py_src/temperature.py b/modellib/py_src/temperature.py
index 51ec3cd..da5efea 100644
--- a/modellib/py_src/temperature.py
+++ b/modellib/py_src/temperature.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import division, print_function
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/modellib/py_src/visualization.py b/modellib/py_src/visualization.py
index a631fb4..ecb4618 100644
--- a/modellib/py_src/visualization.py
+++ b/modellib/py_src/visualization.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/modellib/test/python/drucker_prager.py b/modellib/test/python/drucker_prager.py
index d041961..69c49a7 100644
--- a/modellib/test/python/drucker_prager.py
+++ b/modellib/test/python/drucker_prager.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import division, print_function
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/modellib/test/python/run_convection.py b/modellib/test/python/run_convection.py
index f1fa172..160494a 100644
--- a/modellib/test/python/run_convection.py
+++ b/modellib/test/python/run_convection.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -13,6 +12,7 @@ from __future__ import print_function
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
@@ -51,6 +51,7 @@ except ImportError:
 
 import esys.escriptcore.utestselect as unittest
 from esys.escriptcore.testing import *
+from esys.escript import getEscriptParamInt
 from esys.escript.modelframe import Link,Simulation
 from esys.modellib.input import Sequencer,InterpolateOverBox,GaussianProfile,LinearCombination
 from esys.modellib.flow import SteadyIncompressibleFlow
@@ -148,6 +149,7 @@ def run(dom, stream):
     s.writeXML(stream)
     s.run()
 
+@unittest.skipIf(not getEscriptParamInt("PASO_DIRECT"), "Direct solver not available")
 class Test_Convection(unittest.TestCase):
     def setUp(self):
         import sys
diff --git a/modellib/test/python/run_domainreaders.py b/modellib/test/python/run_domainreaders.py
index 0932358..00c57f2 100644
--- a/modellib/test/python/run_domainreaders.py
+++ b/modellib/test/python/run_domainreaders.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -14,7 +13,7 @@ from __future__ import print_function
 #
 ##############################################################################
 
-from __future__ import print_function
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/modellib/test/python/run_flow.py b/modellib/test/python/run_flow.py
index e908a78..3bed69a 100644
--- a/modellib/test/python/run_flow.py
+++ b/modellib/test/python/run_flow.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -14,6 +13,8 @@ from __future__ import print_function
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -38,6 +39,7 @@ else:
 import esys.escriptcore.utestselect as unittest
 from esys.escriptcore.testing import *
 from esys.escript.modelframe import Link,Simulation
+from esys.escript import getEscriptParamInt
 from esys.modellib.input import Sequencer
 from esys.modellib.probe import Probe,EvaluateExpression
 from esys.modellib.flow import SteadyIncompressibleFlow
@@ -49,6 +51,8 @@ try:
 except ImportError:
     HAVE_FINLEY = False
 
+have_direct=getEscriptParamInt("PASO_DIRECT")
+
 #Link() behaves badly inside a TestCase class
 def run(dom, stream):
     constraints=VectorConstrainerOverBox()
@@ -95,6 +99,7 @@ class Test_RunFlow(unittest.TestCase):
         sys.stdout = self.old
 
     @unittest.skipIf(not HAVE_FINLEY, "Finley module not available")
+    @unittest.skipIf(not have_direct, "Direct solver not available")
     def test_order2(self):
         dom=RectangularDomain()
         dom.order=2
diff --git a/modellib/test/python/run_temp.py b/modellib/test/python/run_temp.py
index 75f500d..35b8df4 100644
--- a/modellib/test/python/run_temp.py
+++ b/modellib/test/python/run_temp.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import division, print_function
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/paso/src/Options.cpp b/paso/src/Options.cpp
index 6b88612..3b5a693 100644
--- a/paso/src/Options.cpp
+++ b/paso/src/Options.cpp
@@ -344,6 +344,12 @@ int Options::getPackage(int solver, int pack, bool symmetry,
 #elif defined PASTIX
                     out = PASO_PASTIX
 #endif
+                } else{
+#if defined MKL
+                    Esys_setError(VALUE_ERROR, "MKL does not currently support MPI");
+#elif defined USE_UMFPACK
+                    Esys_setError(VALUE_ERROR, "UMFPACK does not currently support MPI");
+#endif
                 }
             }
             break;
diff --git a/paso/src/ReactiveSolver.cpp b/paso/src/ReactiveSolver.cpp
index c441e10..a26e562 100644
--- a/paso/src/ReactiveSolver.cpp
+++ b/paso/src/ReactiveSolver.cpp
@@ -57,7 +57,7 @@ err_t ReactiveSolver::solve(double* u, double* u_old, const double* source,
                 const double F_i = source[i];
                 const double e_i = exp(x_i);
                 double u_i = e_i*u_old[i];
-                if (abs(x_i) > EXP_LIM_MIN) {
+                if (std::abs(x_i) > EXP_LIM_MIN) {
                     u_i += F_i/d_ii*(e_i-1.);
                 } else {
                     // second order approximation of (exp(x_i)-1)/x_i
diff --git a/pasowrap/py_src/__init__.py b/pasowrap/py_src/__init__.py
index 01e7b78..9edb172 100644
--- a/pasowrap/py_src/__init__.py
+++ b/pasowrap/py_src/__init__.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/pasowrap/py_src/pasowrap.py b/pasowrap/py_src/pasowrap.py
index 1c835ab..9b36a4a 100644
--- a/pasowrap/py_src/pasowrap.py
+++ b/pasowrap/py_src/pasowrap.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2011-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -21,5 +23,5 @@ __license__="""Licensed under the Open Software License version 3.0
 http://www.opensource.org/licenses/osl-3.0.php"""
 __url__="https://launchpad.net/escript-finley"
 
-import esys.escript		# This is just to ensure required libraries are loaded
+import esys.escript     # This is just to ensure required libraries are loaded
 from .pasowrapcpp import *
diff --git a/pasowrap/src/SystemMatrixAdapter.cpp b/pasowrap/src/SystemMatrixAdapter.cpp
index 02f7ec6..dfbe91e 100644
--- a/pasowrap/src/SystemMatrixAdapter.cpp
+++ b/pasowrap/src/SystemMatrixAdapter.cpp
@@ -277,6 +277,8 @@ void SystemMatrixAdapter::setToSolution(escript::Data& out, escript::Data& in,
    }
    out.expand();
    in.expand();
+   out.requireWrite();
+   in.requireWrite();
    double* out_dp=out.getSampleDataRW(0);        
    double* in_dp=in.getSampleDataRW(0);                
    paso::solve(m_system_matrix, out_dp, in_dp, &paso_options);
diff --git a/pasowrap/src/TransportProblemAdapter.cpp b/pasowrap/src/TransportProblemAdapter.cpp
index c80dbf5..e6caabf 100644
--- a/pasowrap/src/TransportProblemAdapter.cpp
+++ b/pasowrap/src/TransportProblemAdapter.cpp
@@ -65,6 +65,7 @@ void TransportProblemAdapter::setToSolution(escript::Data& out,
     u0.expand();
     out.requireWrite();
     source.requireWrite();
+    u0.requireWrite();
     double* out_dp = out.getSampleDataRW(0);
     double* u0_dp = u0.getSampleDataRW(0);
     double* source_dp = source.getSampleDataRW(0);
diff --git a/pycad/py_src/Triangle.py b/pycad/py_src/Triangle.py
index caf686f..48c41cf 100644
--- a/pycad/py_src/Triangle.py
+++ b/pycad/py_src/Triangle.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -264,11 +266,11 @@ class Design(design.AbstractDesign):
                                                                   p.getID())
                                    if pt not in holePts:
                                        holePts.append(pt)
-                           vectors={} # the key corresponds to the ctrlPts index
+                           vectors=[] # the index corresponds to the ctrlPts index
                            # create vectors
                            for i in range(len(holePts)):
                                A=holePts[i]
-                               vectors[i]=[]
+                               vectors.append([])
                                if i == 0:
                                    B=holePts[1]
                                    C=holePts[-1]
@@ -281,13 +283,13 @@ class Design(design.AbstractDesign):
                                vectors[i].append(self.__getVector(A,B))
                                vectors[i].append(self.__getVector(A,C))
                            # get angle between vectors at each vertex
-                           for i in list(vectors.keys()):
+                           for i in range(len(vectors)):
                                angle=self.__getAngle(vectors[i][0],vectors[i][1])
                                vectors[i].append(angle)
                            # find the vertex with the smallest angle
                            minAngle=360.
                            indx=0
-                           for i in list(vectors.keys()):
+                           for i in range(len(vectors)):
                                if vectors[i][2] < minAngle:
                                    indx=i
                                    minAngle=vectors[i][2]
diff --git a/pycad/py_src/__init__.py b/pycad/py_src/__init__.py
index 4005d40..9cb11c8 100644
--- a/pycad/py_src/__init__.py
+++ b/pycad/py_src/__init__.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/pycad/py_src/design.py b/pycad/py_src/design.py
index aa89bd7..c615e8d 100644
--- a/pycad/py_src/design.py
+++ b/pycad/py_src/design.py
@@ -14,7 +14,7 @@
 #
 ##############################################################################
 
-from __future__ import print_function
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
@@ -60,7 +60,7 @@ class TagMap(object):
       to a name.
       """
       self.__mapping={}
-      for tag, name in list(mapping.items()):
+      for tag, name in sorted(mapping.items(), key=lambda x: x[1]):
           if not isinstance(tag, int):
               raise TypeError("tag needs to be an int")
           if not isinstance(name, str):
@@ -74,7 +74,7 @@ class TagMap(object):
       the mapping will be overwritten. Otherwise a new mapping <tag> -> <name>
       is set. Notice that a single name can be assigned to different tags.
       """
-      for name, tag in list(kwargs.items()):
+      for name, tag in sorted(kwargs.items(), key=lambda x: x[0]):
           if not isinstance(tag, int):
              raise TypeError("tag needs to be an int")
           self.__mapping[tag]=name
@@ -85,10 +85,10 @@ class TagMap(object):
         a list of all tags is returned.
         """
         if name == None:
-           out=list(self.__mapping.keys())
+           out=sorted(self.__mapping.keys())
         else:
            out=[]
-           for tag, arg in list(self.__mapping.items()):
+           for tag, arg in sorted(self.__mapping.items(), key=lambda x: x[0]):
              if arg == name: out.append(tag)
         return out
 
@@ -98,7 +98,7 @@ class TagMap(object):
         is returned.
         """
         if tag == None:
-           return list(set(self.__mapping.values()))
+           return sorted(list(set(self.__mapping.values())))
         else:
             return self.__mapping[tag]
 
@@ -137,14 +137,14 @@ class TagMap(object):
         which map onto name with unspecified values.
         """
         d=self.map(default=default,**kwargs)
-        for t,v in list(d.items()):
+        for t,v in sorted(d.items(), key=lambda x: x[0]):
              data.setTaggedValue(t,v)
 
     def passToDomain(self,domain):
         """
         Passes the tag map to the `esys.escript.Domain` ``domain``.
         """
-        for tag, name in list(self.__mapping.items()):
+        for tag, name in sorted(self.__mapping.items(), key=lambda x: x[1]):
           print("Tag",name, "is mapped to id ", tag)
           domain.setTagMap(name,tag)
 
@@ -154,7 +154,7 @@ class TagMap(object):
          """
          tm=dom.createElement("TagMap")
          dom.appendChild(tm)
-         for tag,name in list(self.getMapping().items()):
+         for tag,name in sorted(self.getMapping().items(), key=lambda x: x[1]):
              item_dom=dom.createElement("map")
              tag_dom=dom.createElement("tag")
              name_dom=dom.createElement("name")
diff --git a/pycad/py_src/extras.py b/pycad/py_src/extras.py
index 560bd28..1d28b52 100644
--- a/pycad/py_src/extras.py
+++ b/pycad/py_src/extras.py
@@ -15,6 +15,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/pycad/py_src/gmsh.py b/pycad/py_src/gmsh.py
index 264c45a..8bdd905 100644
--- a/pycad/py_src/gmsh.py
+++ b/pycad/py_src/gmsh.py
@@ -15,6 +15,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/pycad/py_src/primitives.py b/pycad/py_src/primitives.py
index ebb29c7..3427464 100644
--- a/pycad/py_src/primitives.py
+++ b/pycad/py_src/primitives.py
@@ -15,6 +15,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/pycad/py_src/shapes.py b/pycad/py_src/shapes.py
index b16453f..ee20a3a 100644
--- a/pycad/py_src/shapes.py
+++ b/pycad/py_src/shapes.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/pycad/py_src/transformations.py b/pycad/py_src/transformations.py
index a7747a1..cd73ac9 100644
--- a/pycad/py_src/transformations.py
+++ b/pycad/py_src/transformations.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/pycad/test/python/run_pycad_test.py b/pycad/test/python/run_pycad_test.py
index 796cbe5..971f895 100644
--- a/pycad/test/python/run_pycad_test.py
+++ b/pycad/test/python/run_pycad_test.py
@@ -15,6 +15,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/ripley/generators/lamebuilder.py b/ripley/generators/lamebuilder.py
index 626177c..358bc1a 100644
--- a/ripley/generators/lamebuilder.py
+++ b/ripley/generators/lamebuilder.py
@@ -1,4 +1,20 @@
-from __future__ import print_function
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+from __future__ import print_function, division
 
 import lamesource
 import sys
diff --git a/ripley/generators/lamesource.py b/ripley/generators/lamesource.py
index 4421eb6..72da381 100644
--- a/ripley/generators/lamesource.py
+++ b/ripley/generators/lamesource.py
@@ -1,3 +1,21 @@
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+from __future__ import print_function, division
+
 expanded3Dtemps = ["const double tmp0 = w18*(-A_{0}1{1}2[7] + A_{0}2{1}1[3]);",
 "const double tmp1 = w13*(A_{0}2{1}2[1] + A_{0}2{1}2[2] + A_{0}2{1}2[5] + A_{0}2{1}2[6]);",
 "const double tmp2 = w11*(-A_{0}0{1}2[2] - A_{0}0{1}2[5] + A_{0}2{1}0[1] + A_{0}2{1}0[6]);",
diff --git a/ripley/py_src/__init__.py b/ripley/py_src/__init__.py
index 17e169e..7732deb 100644
--- a/ripley/py_src/__init__.py
+++ b/ripley/py_src/__init__.py
@@ -17,6 +17,8 @@
 """A domain meshed with uniform rectangles or quadrilaterals
 """
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -24,8 +26,8 @@ __license__="""Licensed under the Open Software License version 3.0
 http://www.opensource.org/licenses/osl-3.0.php"""
 __url__="https://launchpad.net/escript-finley"
 
-import esys.escript		# This is just to ensure required libraries are loaded
+import esys.escript       # This is just to ensure required libraries are loaded
 from .ripleycpp import *
 from .MultiResolutionDomain import *
 
-__nodocorecursion=['ripleycpp']
\ No newline at end of file
+__nodocorecursion=['ripleycpp']
diff --git a/ripley/src/Brick.cpp b/ripley/src/Brick.cpp
index fba3904..8adfa54 100644
--- a/ripley/src/Brick.cpp
+++ b/ripley/src/Brick.cpp
@@ -231,7 +231,7 @@ string Brick::getDescription() const
     return "ripley::Brick";
 }
 
-bool Brick::operator==(const AbstractDomain& other) const
+bool Brick::operator==(const escript::AbstractDomain& other) const
 {
     const Brick* o=dynamic_cast<const Brick*>(&other);
     if (o) {
@@ -1452,9 +1452,9 @@ void Brick::Print_Mesh_Info(const bool full) const
 void Brick::assembleCoordinates(escript::Data& arg) const
 {
     int numDim = m_numDim;
-    if (&arg!=0 && !arg.isDataPointShapeEqual(1, &numDim))
+    if (!arg.isDataPointShapeEqual(1, &numDim))
         throw RipleyException("setToX: Invalid Data object shape");
-    if (&arg!=0 && !arg.numSamplesEqual(1, getNumNodes()))
+    if (!arg.numSamplesEqual(1, getNumNodes()))
         throw RipleyException("setToX: Illegal number of samples in Data object");
 
     const dim_t NN0 = m_NN[0];
@@ -3345,7 +3345,7 @@ namespace
  */
 escript::Data Brick::randomFill(const escript::DataTypes::ShapeType& shape,
                                 const escript::FunctionSpace& what,
-                                long seed, const bp::tuple& filter) const
+                                long seed, const boost::python::tuple& filter) const
 {
     int numvals=escript::DataTypes::noValues(shape);
     if (len(filter) > 0 && numvals != 1) {
@@ -3398,7 +3398,7 @@ that ripley has.
 */
 escript::Data Brick::randomFillWorker(
                         const escript::DataTypes::ShapeType& shape, long seed,
-                        const bp::tuple& filter) const
+                        const boost::python::tuple& filter) const
 {
     unsigned int radius=0;  // these are only used by gaussian
     double sigma=0.5;
diff --git a/ripley/src/MultiBrick.cpp b/ripley/src/MultiBrick.cpp
index c492176..2fd22d5 100644
--- a/ripley/src/MultiBrick.cpp
+++ b/ripley/src/MultiBrick.cpp
@@ -85,7 +85,7 @@ MultiBrick::MultiBrick(dim_t n0, dim_t n1, dim_t n2, double x0, double y0, doubl
     m_subdivisions(subdivisions)
 {
     if (m_mpiInfo->size != 1)
-        throw RipleyException("Multiresolution domains don't currently support multiple processes");
+        throw RipleyException("Multiresolution Brick domains don't currently support multiple processes");
 
     if (subdivisions == 0 || (subdivisions & (subdivisions - 1)) != 0)
         throw RipleyException("Element subdivisions must be a power of two");
@@ -139,7 +139,7 @@ void MultiBrick::validateInterpolationAcross(int fsType_source,
 
     const double *len = other->getLength();
     const int *subdivs = other->getNumSubdivisionsPerDim();
-    const int *elements = other->getNumElementsPerDim();
+    const dim_t *elements = other->getNumElementsPerDim();
     const unsigned int level = other->getNumSubdivisionsPerElement();
     const unsigned int factor = m_subdivisions > level ? m_subdivisions/level : level/m_subdivisions;
     if ((factor & (factor - 1)) != 0) //factor == 2**x
@@ -169,6 +169,7 @@ void MultiBrick::interpolateNodesToNodesFiner(const escript::Data& source,
     const int scaling = other.getNumSubdivisionsPerElement()/m_subdivisions;
     const dim_t NN0 = m_NN[0], NN1 = m_NN[1], NN2 = m_NN[2], *otherNN = other.getNumNodesPerDim();
     const dim_t numComp = source.getDataPointSize();
+    target.requireWrite();
 #pragma omp parallel for
     for (dim_t nz = 0; nz < NN2 - 1; nz++) { //source nodes
         for (dim_t ny = 0; ny < NN1 - 1; ny++) {
@@ -208,6 +209,7 @@ void MultiBrick::interpolateReducedToElementsFiner(const escript::Data& source,
 {
     const int scaling = other.getNumSubdivisionsPerElement()/m_subdivisions;
     const dim_t numComp = source.getDataPointSize();
+    target.requireWrite();
     //for each of ours
 #pragma omp parallel for
     for (dim_t ez = 0; ez < m_NE[2]; ez++) {
@@ -241,6 +243,7 @@ void MultiBrick::interpolateReducedToReducedFiner(const escript::Data& source,
 {
     const int scaling = other.getNumSubdivisionsPerElement()/m_subdivisions;
     const dim_t numComp = source.getDataPointSize();
+    target.requireWrite();
     //for each of ours
 #pragma omp parallel for
     for (dim_t ey = 0; ey < m_NE[1]; ey++) {
@@ -267,6 +270,7 @@ void MultiBrick::interpolateNodesToElementsFiner(const escript::Data& source,
     const int scaling = other.getNumSubdivisionsPerElement()/m_subdivisions;
     const dim_t NE0 = m_NE[0], NE1 = m_NE[1], NE2 = m_NE[2], *theirNE = other.getNumElementsPerDim();
     const dim_t numComp = source.getDataPointSize();
+    target.requireWrite();
 #pragma omp parallel for
     for (dim_t ez = 0; ez < NE2; ez++) { //source nodes
         for (dim_t ey = 0; ey < NE1; ey++) {
@@ -398,7 +402,7 @@ void MultiBrick::interpolateElementsToElementsCoarser(const escript::Data& sourc
         first_lagrange[i] = (points[i] - SECOND_QUAD) / (FIRST_QUAD - SECOND_QUAD);
         second_lagrange[i] = (points[i] - FIRST_QUAD) / (SECOND_QUAD - FIRST_QUAD);
     }
-    
+    target.requireWrite();
     //for each of theirs
 #pragma omp parallel for
     for (dim_t tz = 0; tz < theirNE[2]; tz++) {
@@ -455,6 +459,7 @@ void MultiBrick::interpolateElementsToElementsFiner(const escript::Data& source,
         lagranges[i] = (points[i] - SECOND_QUAD) / (FIRST_QUAD - SECOND_QUAD);
         lagranges[i + 2*scaling] = (points[i] - FIRST_QUAD) / (SECOND_QUAD - FIRST_QUAD);
     }
+    target.requireWrite();
     //for each of ours
 #pragma omp parallel for
     for (dim_t ez = 0; ez < m_NE[2]; ez++) {
diff --git a/ripley/src/MultiRectangle.cpp b/ripley/src/MultiRectangle.cpp
index 226e3fe..1f7782c 100644
--- a/ripley/src/MultiRectangle.cpp
+++ b/ripley/src/MultiRectangle.cpp
@@ -116,7 +116,7 @@ void MultiRectangle::validateInterpolationAcross(int fsType_source,
 
     const double *len = other->getLength();
     const int *subdivs = other->getNumSubdivisionsPerDim();
-    const int *elements = other->getNumElementsPerDim();
+    const dim_t *elements = other->getNumElementsPerDim();
     const unsigned int level = other->getNumSubdivisionsPerElement();
     const unsigned int factor = m_subdivisions > level ? m_subdivisions/level : level/m_subdivisions;
     if ((factor & (factor - 1)) != 0) //factor == 2**x
@@ -133,7 +133,8 @@ void MultiRectangle::validateInterpolationAcross(int fsType_source,
         }
         if (m_subdivisions > level) {
             if (m_NE[i]/elements[i] != factor) {
-            fprintf(stderr, "m_ownNE[i]/elements[i] = %d != %d\n", m_ownNE[i]/elements[i], factor);
+                std::cerr << "m_ownNE[i]/elements[i] = "
+                    << m_ownNE[i]/elements[i] << " != " << factor << std::endl;
                 throw RipleyException("Invalid interpolation: element factor mismatch");
             }
         } else {
@@ -150,6 +151,7 @@ void MultiRectangle::interpolateNodesToNodesFiner(const escript::Data& source,
     const int scaling = other.getNumSubdivisionsPerElement()/m_subdivisions;
     const dim_t NN0 = m_NN[0], NN1 = m_NN[1], otherNN0 = other.getNumNodesPerDim()[0];
     const dim_t numComp = source.getDataPointSize();
+    target.requireWrite();
 #pragma omp parallel for
     for (dim_t ny = 0; ny < NN1 - 1; ny++) { //source nodes
         for (dim_t nx = 0; nx < NN0 - 1; nx++) {
@@ -177,6 +179,7 @@ void MultiRectangle::interpolateReducedToElementsFiner(const escript::Data& sour
 {
     const int scaling = other.getNumSubdivisionsPerElement()/m_subdivisions;
     const dim_t numComp = source.getDataPointSize();
+    target.requireWrite();
     //for each of ours
 #pragma omp parallel for
     for (dim_t ey = 0; ey < m_NE[1]; ey++) {
@@ -206,6 +209,7 @@ void MultiRectangle::interpolateReducedToReducedFiner(const escript::Data& sourc
 {
     const int scaling = other.getNumSubdivisionsPerElement()/m_subdivisions;
     const dim_t numComp = source.getDataPointSize();
+    target.requireWrite();
     //for each of ours
 #pragma omp parallel for
     for (dim_t ey = 0; ey < m_NE[1]; ey++) {
@@ -232,6 +236,7 @@ void MultiRectangle::interpolateNodesToElementsFiner(const escript::Data& source
     const int scaling = other.getNumSubdivisionsPerElement()/m_subdivisions;
     const dim_t NE0 = m_NE[0], NE1 = m_NE[1];
     const dim_t numComp = source.getDataPointSize();
+    target.requireWrite();
 #pragma omp parallel for
     for (dim_t ey = 0; ey < NE1; ey++) { //source nodes
         for (dim_t ex = 0; ex < NE0; ex++) {
@@ -280,7 +285,7 @@ void MultiRectangle::interpolateElementsToElementsCoarser(const escript::Data& s
         first_lagrange[i] = (points[i] - SECOND_QUAD) / (FIRST_QUAD - SECOND_QUAD);
         second_lagrange[i] = (points[i] - FIRST_QUAD) / (SECOND_QUAD - FIRST_QUAD);
     }
-    
+    target.requireWrite();
     //for each of theirs
 #pragma omp parallel for
     for (dim_t ty = 0; ty < theirNE[1]; ty++) {
@@ -327,6 +332,7 @@ void MultiRectangle::interpolateElementsToElementsFiner(const escript::Data& sou
         lagranges[i] = (points[i] - SECOND_QUAD) / (FIRST_QUAD - SECOND_QUAD);
         lagranges[i + 2*scaling] = (points[i] - FIRST_QUAD) / (SECOND_QUAD - FIRST_QUAD);
     }
+    target.requireWrite();
     //for each of ours
 #pragma omp parallel for
     for (dim_t ey = 0; ey < m_NE[1]; ey++) {
diff --git a/ripley/src/Rectangle.cpp b/ripley/src/Rectangle.cpp
index 411f6ca..2a973a8 100644
--- a/ripley/src/Rectangle.cpp
+++ b/ripley/src/Rectangle.cpp
@@ -198,7 +198,7 @@ string Rectangle::getDescription() const
     return "ripley::Rectangle";
 }
 
-bool Rectangle::operator==(const AbstractDomain& other) const
+bool Rectangle::operator==(const escript::AbstractDomain& other) const
 {
     const Rectangle* o=dynamic_cast<const Rectangle*>(&other);
     if (o) {
@@ -1136,9 +1136,9 @@ void Rectangle::Print_Mesh_Info(const bool full) const
 void Rectangle::assembleCoordinates(escript::Data& arg) const
 {
     int numDim = m_numDim;
-    if (&arg!=0 && !arg.isDataPointShapeEqual(1, &numDim))
+    if (!arg.isDataPointShapeEqual(1, &numDim))
         throw RipleyException("setToX: Invalid Data object shape");
-    if (&arg!=0 && !arg.numSamplesEqual(1, getNumNodes()))
+    if (!arg.numSamplesEqual(1, getNumNodes()))
         throw RipleyException("setToX: Illegal number of samples in Data object");
 
     const dim_t NN0 = m_NN[0];
@@ -2235,7 +2235,7 @@ namespace
  */
 escript::Data Rectangle::randomFill(const escript::DataTypes::ShapeType& shape,
                                 const escript::FunctionSpace& what, long seed,
-                                const bp::tuple& filter) const
+                                const boost::python::tuple& filter) const
 {
     int numvals=escript::DataTypes::noValues(shape);
     if (len(filter) > 0 && numvals != 1)
@@ -2287,7 +2287,7 @@ escript::Data Rectangle::randomFill(const escript::DataTypes::ShapeType& shape,
  */
 escript::Data Rectangle::randomFillWorker(
                         const escript::DataTypes::ShapeType& shape, long seed,
-                        const bp::tuple& filter) const
+                        const boost::python::tuple& filter) const
 {
     unsigned int radius=0;  // these are only used by gaussian
     double sigma=0.5;
diff --git a/ripley/src/generate_assamblage.py b/ripley/src/generate_assamblage.py
index 599c2c0..cd4f67f 100644
--- a/ripley/src/generate_assamblage.py
+++ b/ripley/src/generate_assamblage.py
@@ -1,3 +1,22 @@
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+from __future__ import print_function, division
+
+
 """
 this script generates the assemblage routine for the ripley rectangular grid solver
 
diff --git a/ripley/src/generate_assemblage_cpp.py b/ripley/src/generate_assemblage_cpp.py
index e27a9e1..7077709 100644
--- a/ripley/src/generate_assemblage_cpp.py
+++ b/ripley/src/generate_assemblage_cpp.py
@@ -19,6 +19,8 @@ This script generates the assemblage routine for the ripley rectangular grid
 solver.
 """
 
+from __future__ import print_function, division
+
 from multiprocessing import Process
 from sympy import *
 
diff --git a/ripley/test/python/run_customAssemblersOnMultiRes.py b/ripley/test/python/run_customAssemblersOnMultiRes.py
index a1c2b4b..d012a62 100644
--- a/ripley/test/python/run_customAssemblersOnMultiRes.py
+++ b/ripley/test/python/run_customAssemblersOnMultiRes.py
@@ -14,7 +14,7 @@
 #
 ##############################################################################
 
-from __future__ import print_function
+from __future__ import print_function, division
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/ripley/test/python/run_escriptOnMultiResolution.py b/ripley/test/python/run_escriptOnMultiResolution.py
index b750ed8..616cd47 100644
--- a/ripley/test/python/run_escriptOnMultiResolution.py
+++ b/ripley/test/python/run_escriptOnMultiResolution.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/ripley/test/python/run_escriptOnRipley.py b/ripley/test/python/run_escriptOnRipley.py
index f8f6d75..1461cf5 100644
--- a/ripley/test/python/run_escriptOnRipley.py
+++ b/ripley/test/python/run_escriptOnRipley.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/ripley/test/python/run_linearPDEsOnMultiRes.py b/ripley/test/python/run_linearPDEsOnMultiRes.py
index 9f7d907..f97859a 100644
--- a/ripley/test/python/run_linearPDEsOnMultiRes.py
+++ b/ripley/test/python/run_linearPDEsOnMultiRes.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
diff --git a/ripley/test/python/run_linearPDEsOnRipley.py b/ripley/test/python/run_linearPDEsOnRipley.py
index 3ad136f..491af6a 100644
--- a/ripley/test/python/run_linearPDEsOnRipley.py
+++ b/ripley/test/python/run_linearPDEsOnRipley.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
diff --git a/ripley/test/python/run_nonlinearPDEOnMultiRes.py b/ripley/test/python/run_nonlinearPDEOnMultiRes.py
index 2f00ef1..afa2fec 100644
--- a/ripley/test/python/run_nonlinearPDEOnMultiRes.py
+++ b/ripley/test/python/run_nonlinearPDEOnMultiRes.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
diff --git a/ripley/test/python/run_nonlinearPDEOnRipley.py b/ripley/test/python/run_nonlinearPDEOnRipley.py
index 0b143a8..de2c7b1 100644
--- a/ripley/test/python/run_nonlinearPDEOnRipley.py
+++ b/ripley/test/python/run_nonlinearPDEOnRipley.py
@@ -11,6 +11,8 @@
 #
 ########################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 Earth Systems Science Computational Center (ESSCC)
 http://www.uq.edu.au
diff --git a/ripley/test/python/run_readWriteOnMultiRes.py b/ripley/test/python/run_readWriteOnMultiRes.py
index 5d70684..19cbbfc 100644
--- a/ripley/test/python/run_readWriteOnMultiRes.py
+++ b/ripley/test/python/run_readWriteOnMultiRes.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/ripley/test/python/run_readWriteOnRipley.py b/ripley/test/python/run_readWriteOnRipley.py
index 7fd5902..8cc6b78 100644
--- a/ripley/test/python/run_readWriteOnRipley.py
+++ b/ripley/test/python/run_readWriteOnRipley.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/ripley/test/python/run_simplesolveOnMultiRes.py b/ripley/test/python/run_simplesolveOnMultiRes.py
index 99df6ff..5153185 100644
--- a/ripley/test/python/run_simplesolveOnMultiRes.py
+++ b/ripley/test/python/run_simplesolveOnMultiRes.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/ripley/test/python/run_simplesolveOnRipley.py b/ripley/test/python/run_simplesolveOnRipley.py
index 8a31b3c..a4cba81 100644
--- a/ripley/test/python/run_simplesolveOnRipley.py
+++ b/ripley/test/python/run_simplesolveOnRipley.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/ripley/test/python/run_splitworldOnRipley.py b/ripley/test/python/run_splitworldOnRipley.py
new file mode 100644
index 0000000..34bdff9
--- /dev/null
+++ b/ripley/test/python/run_splitworldOnRipley.py
@@ -0,0 +1,98 @@
+
+##############################################################################
+#
+# Copyright (c) 2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
+from __future__ import print_function, division
+
+__copyright__="""Copyright (c) 2015 by The University of Queensland
+http://www.uq.edu.au
+Primary Business: Queensland, Australia"""
+__license__="""Licensed under the Open Software License version 3.0
+http://www.opensource.org/licenses/osl-3.0.php"""
+__url__="https://launchpad.net/escript-ripley"
+
+import esys.escriptcore.utestselect as unittest
+from esys.escriptcore.testing import *
+from esys.escript import *
+from esys.ripley import Rectangle, Brick, MultiRectangle,  MultiBrick
+from test_splitworld import Test_SplitWorld, sw_testing, sw_testmany
+
+
+mpisize=getMPISizeWorld()
+NE=4 # number elements, must be even
+
+class Test_SplitOnRipley(Test_SplitWorld):
+  def setUp(self):
+    self.domainpars=[Rectangle, NE, NE]
+    
+  def tearDown(self):
+    del self.domainpars
+    
+class Test_ripley_sw_2D(sw_testing):
+    def setUp(self):
+        from esys.ripley import Rectangle
+        self.domain_ctr=Rectangle
+        self.domain_vec=(6,6)
+        self.domain_dict={}
+
+    def tearDown(self):
+        del self.domain_ctr
+        del self.domain_vec
+
+
+class Test_ripley_sw_3D(sw_testing):
+    def setUp(self):
+        from esys.ripley import Brick
+        self.domain_ctr=Brick
+        self.domain_vec=(6,6,6)
+        self.domain_dict={}
+        
+    def tearDown(self):
+        del self.domain_ctr
+        del self.domain_vec
+    
+class Test_SplitOnMultiRipley(Test_SplitWorld):
+  def setUp(self):
+    self.domainpars=[MultiRectangle, NE, NE]
+    
+  def tearDown(self):
+    del self.domainpars
+    
+class Test_multiripley_sw_2D(sw_testing):
+    def setUp(self):
+        from esys.ripley import MultiRectangle
+        self.domain_ctr=MultiRectangle
+        self.domain_vec=(6,6)
+        self.domain_dict={}
+
+    def tearDown(self):
+        del self.domain_ctr
+        del self.domain_vec
+
+
+class Test_multiripley_sw_3D(sw_testmany):
+    def setUp(self):
+        from esys.ripley import MultiBrick
+        self.domain_ctr=MultiBrick
+        self.domain_vec=(6,6,6)
+        self.domain_dict={}
+        
+    def tearDown(self):
+        del self.domain_ctr
+        del self.domain_vec
+
+
+if __name__ == '__main__':
+    run_tests(__name__, exit_on_failure=True)
diff --git a/ripley/test/python/run_utilOnMultiRes.py b/ripley/test/python/run_utilOnMultiRes.py
index e8411e1..5ac73e7 100644
--- a/ripley/test/python/run_utilOnMultiRes.py
+++ b/ripley/test/python/run_utilOnMultiRes.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -55,15 +57,15 @@ for x in [(int(mpiSize**(1/3.)),int(mpiSize**(1/3.))),(2,3),(2,2),(1,2),(1,1)]:
         break
 
 def Rectangle(**kwargs):
-    kwargs['n0'] /= 2
-    kwargs['n1'] /= 2
+    kwargs['n0'] //= 2
+    kwargs['n1'] //= 2
     m = MultiResolutionDomain(2, **kwargs)
     return m.getLevel(1)
 
 def Brick(**kwargs):
-    kwargs['n0'] /= 2
-    kwargs['n1'] /= 2
-    kwargs['n2'] /= 2
+    kwargs['n0'] //= 2
+    kwargs['n1'] //= 2
+    kwargs['n2'] //= 2
     m = MultiResolutionDomain(3, **kwargs)
     return m.getLevel(1)
 
diff --git a/ripley/test/python/run_utilOnRipley.py b/ripley/test/python/run_utilOnRipley.py
index 5e00af8..57175e6 100644
--- a/ripley/test/python/run_utilOnRipley.py
+++ b/ripley/test/python/run_utilOnRipley.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/run-escript.in b/run-escript.in
index 7cbcb55..fd3428b 100644
--- a/run-escript.in
+++ b/run-escript.in
@@ -278,9 +278,9 @@ else
             NUM_HOSTS=$(cat "$HOSTFILE" | wc -l)
             if [ ! -z $ESCRIPT_NUM_NODES ]
             then
-                if [ $NUM_HOSTS -ne $ESCRIPT_NUM_NODES ]
+                if [ $NUM_HOSTS -lt $ESCRIPT_NUM_NODES ]
                 then
-                    die "Number of hosts selected in the host file $ESCRIPT_HOSTFILE needs to match the requested number of nodes $ESCRIPT_NUM_NODES!"
+		   die "Number of requested nodes must not exceed the number of entries selected in the host file $ESCRIPT_HOSTFILE.  You asked for $ESCRIPT_NUM_NODES from $NUM_HOSTS."
                 fi
              else
                 ESCRIPT_NUM_NODES=$NUM_HOSTS
diff --git a/scons/badger_options.py b/scons/badger_options.py
index bef42f9..55354e1 100644
--- a/scons/badger_options.py
+++ b/scons/badger_options.py
@@ -14,13 +14,13 @@
 #
 ##############################################################################
 
-from templates.wheezy_options import *
+from templates.jessie_options import *
 
 debug = True
 
 boost_libs = ['boost_python-py27']
 
 lapack = 'clapack'
-
+umfpack = True
 silo = True
 
diff --git a/scons/badger_options.py b/scons/badger_py3_options.py
similarity index 87%
copy from scons/badger_options.py
copy to scons/badger_py3_options.py
index bef42f9..9136941 100644
--- a/scons/badger_options.py
+++ b/scons/badger_py3_options.py
@@ -14,13 +14,9 @@
 #
 ##############################################################################
 
-from templates.wheezy_options import *
-
-debug = True
-
-boost_libs = ['boost_python-py27']
+from templates.jessie_py3_options import *
 
+debug = False
 lapack = 'clapack'
-
 silo = True
 
diff --git a/scons/guineapig_py3_options.py b/scons/guineapig_py3_options.py
index f94f0f3..301f7a3 100644
--- a/scons/guineapig_py3_options.py
+++ b/scons/guineapig_py3_options.py
@@ -23,3 +23,6 @@ pythoncmd='python3'
 pythonlibname='python3.4m'
 pythonincpath='/usr/include/python3.4'
 
+# This seemed as good a place as any to test this
+cxx_extra = '-Wextra -Wno-unused-parameter -DEXWRITECHK'
+
diff --git a/scons/sage_options.py b/scons/sage_options.py
index ad8d7bc..3e6595c 100644
--- a/scons/sage_options.py
+++ b/scons/sage_options.py
@@ -14,230 +14,15 @@
 #
 ##############################################################################
 
-# This is a template configuration file for escript/finley on Linux.
-# Copy this file to <hostname>_options.py, where <hostname> is your machine's
-# short hostname, then customize to your needs.
+from templates.jessie_options import *
 
-# PREFIXES:
-# There are two ways to specify where to find dependent headers and libraries
-# (via the <dependency>_prefix):
-# 1) If your installation follows the general scheme where headers are located
-#    in <prefix>/include[32,64], and libraries in <prefix>/lib[32,64] then
-#    it is sufficient to specify this prefix, e.g. boost_prefix='/usr'
-# 2) Otherwise provide a list with two elements, where the first one is the
-#    include path, and the second the library path, e.g.
-#    boost_prefix=['/usr/include/boost1_44', '/usr/lib']
-# All <dependency>_prefix settings default to '/usr'
-
-# The options file version. SCons will refuse to build if there have been
-# changes to the set of variables and your file has not been updated.
-# This setting is mandatory.
-escript_opts_version = 202
-
-# Installation prefix. Files will be installed in subdirectories underneath.
-# DEFAULT: '.' (current directory)
-#prefix = '/usr/local'
-
-# Top-level directory for intermediate build and test files.
-# DEFAULT: 'build'
-#build_dir = 'build'
-
-# C++ compiler command name or full path.
-# DEFAULT: auto-detected
-#cxx = 'g++'
-
-# Flags to use with the C++ compiler. Do not set unless you know
-# what you are doing - use cxx_extra to specify additional flags!
-# DEFAULT: compiler-dependent
-#cc_flags = ''
-
-# Additional compiler (optimization) flags for non-debug builds
-# DEFAULT: compiler-dependent
-#cc_optim = '-O3 -mmmx -msse'
-
-# Additional compiler flags for debug builds
-# DEFAULT: compiler-dependent
-#cc_debug = '-g'
-
-# Additional flags to add to the C++ compiler
-# DEFAULT: '' (empty)
-#cxx_extra = ''
-
-# Additional flags to add to the linker
-# DEFAULT: '' (empty)
-#ld_extra = ''
-
-# Whether to treat compiler warnings as errors
-# DEFAULT: True
-werror = False
-
-# Whether to build a debug version
-# DEFAULT: False
 debug = True
 
-# Set to True to print the full compiler/linker command line
-# DEFAULT: False
-#verbose = True
-
-# Set to True to add flags that enable OpenMP parallelization
-# DEFAULT: False
-openmp = True
-
-# Additional compiler flags for OpenMP builds
-# DEFAULT: compiler-dependent
-#omp_flags = '-fopenmp'
-
-# Additional linker flags for OpenMP builds
-# DEFAULT: compiler-dependent
-#omp_ldflags = '-fopenmp'
-
-# Flavour of MPI implementation
-# Recognized values: 'none', 'MPT', 'MPICH', 'MPICH2', 'OPENMPI', 'INTELMPI'
-# DEFAULT: 'none' (disable MPI)
-#mpi = 'OPENMPI'
-
-# Prefix or paths to MPI headers and libraries. See note above about prefixes.
-mpi_prefix = '/usr/lib/openmpi'
-
-# MPI libraries to link against
-mpi_libs = ['mpi_cxx', 'mpi', 'open-rte', 'open-pal']
-
-# Prefix or paths to boost-python headers and libraries. See note above.
-#boost_prefix = '/usr/local'
-
-# boost-python library/libraries to link against
-boost_libs = ['boost_python-py32']
 boost_libs = ['boost_python-py27']
 
-#pythoncmd='python3'
-#usepython3=True
-#pythonlibname='python3.2mu'
-
-# Prefix or paths to CppUnit headers and libraries. See note above.
-#cppunit_prefix = '/usr/local'
-
-# CppUnit library/libraries to link against
-#cppunit_libs = ['cppunit']
-
-# Whether to use the netCDF library for dump file support
-# DEFAULT: False
-netcdf = True
-
-# Prefix or paths to netCDF headers and libraries. See note above.
-#netcdf_prefix = '/usr/local'
-
-# netCDF library/libraries to link against
-#netcdf_libs = ['netcdf_c++', 'netcdf']
-
-# Whether to use the parMETIS library (only in conjunction with MPI)
-# DEFAULT: False
-#parmetis = True
-
-# Prefix or paths to parMETIS headers and libraries. See note above.
-#parmetis_prefix = '/usr/local'
-
-# parMETIS library/libraries to link against
-#parmetis_libs = ['parmetis', 'metis']
-
-# Whether to use the Intel PAPI (Performance API) library
-# DEFAULT: False
-#papi = True
-
-# Prefix or paths to PAPI headers and libraries. See note above.
-#papi_prefix = '/usr/local'
-
-# PAPI library/libraries to link against
-#papi_libs = ['papi']
-
-# Whether to use PAPI to instrument solver iterations
-# DEFAULT: False
-#papi_instrument_solver = True
-
-# Whether to use Intel MKL (Math Kernel Library)
-# DEFAULT: False
-#mkl = True
-
-# Prefix or paths to MKL headers and libraries. See note above.
-#mkl_prefix = '/usr'
-
-# MKL library/libraries to link against
-#mkl_libs = ['mkl_solver', 'mkl_em64t', 'mkl_core', 'guide', 'pthread']
-
-# Whether to use UMFPACK (requires AMD and BLAS)
-# DEFAULT: False
-#umfpack = True
-
-# Prefix or paths to UMFPACK headers and libraries. See note above.
-#umfpack_prefix = ['/usr/include/suitesparse', '/usr/lib']
-
-# UMFPACK library/libraries to link against
-#umfpack_libs = ['umfpack']
-
-# Whether to use BoomerAMG (requires MPI)
-# DEFAULT: False
-#boomeramg = True
-
-# Prefix or paths to BoomerAMG headers and libraries. See note above.
-#boomeramg_prefix = '/usr/local'
-
-# BoomerAMG library/libraries to link against
-#boomeramg_libs = ['HYPRE']
-
-# Flavour of LAPACK implementation
-# Recognized values: 'none', 'clapack', 'mkl'
-# DEFAULT: 'none' (do not use LAPACK)
 #lapack = 'clapack'
 
-# Prefix or paths to LAPACK headers and libraries. See note above.
-#lapack_prefix = '/usr/local'
-
-# LAPACK library/libraries to link against
-#lapack_libs = ['lapack_atlas']
-
-# Whether to use LLNL's SILO library for Silo output file support in weipa
-# DEFAULT: False
 #silo = True
 
-# Prefix or paths to SILO headers and libraries. See note above.
-#silo_prefix = '/usr/local'
-
-# SILO library/libraries to link against
-#silo_libs = ['siloh5', 'hdf5']
-
-# Whether to use LLNL's VisIt simulation interface (only version 2 supported)
-# DEFAULT: False
-#visit = True
-
-# Prefix or paths to VisIt's sim2 headers and libraries. See note above.
-#visit_prefix = '/opt/visit/2.1.0/linux-intel/libsim/V2'
-
-# Sim2 library/libraries to link against
-#visit_libs = ['simV2']
-
-# Build dynamic libraries only
-#DEFAULT: False
-#build_shared = True
-
-
-### ADVANCED OPTIONS ###
-# Do not change the following options unless you know what they do
-
-# Use intel's VSL library for random data
-# DEFAULT: False
-#vsl_random = True
-
-# Extra libraries to link with
-#sys_libs = []
-
-# Additional environmental variables to export to the tools
-#env_export = []
-
-#tools_names = ['default']
-
-#iknowwhatimdoing = False
-
-#forcelazy = 'leave_alone'
-#forcelazy= 'on'
-
-#forcecollres = 'leave_alone'
-
+#cxx_extra = '-Wextra -Wno-unused-parameter -DEXWRITECHK'
+cxx_extra = '-Wextra -Wno-unused-parameter'
\ No newline at end of file
diff --git a/scons/sage_py3_options.py b/scons/sage_py3_options.py
index e8672ff..3fb9caf 100644
--- a/scons/sage_py3_options.py
+++ b/scons/sage_py3_options.py
@@ -14,230 +14,9 @@
 #
 ##############################################################################
 
-# This is a template configuration file for escript/finley on Linux.
-# Copy this file to <hostname>_options.py, where <hostname> is your machine's
-# short hostname, then customize to your needs.
+from templates.jessie_py3_options import *
 
-# PREFIXES:
-# There are two ways to specify where to find dependent headers and libraries
-# (via the <dependency>_prefix):
-# 1) If your installation follows the general scheme where headers are located
-#    in <prefix>/include[32,64], and libraries in <prefix>/lib[32,64] then
-#    it is sufficient to specify this prefix, e.g. boost_prefix='/usr'
-# 2) Otherwise provide a list with two elements, where the first one is the
-#    include path, and the second the library path, e.g.
-#    boost_prefix=['/usr/include/boost1_44', '/usr/lib']
-# All <dependency>_prefix settings default to '/usr'
-
-# The options file version. SCons will refuse to build if there have been
-# changes to the set of variables and your file has not been updated.
-# This setting is mandatory.
-escript_opts_version = 201
-
-# Installation prefix. Files will be installed in subdirectories underneath.
-# DEFAULT: '.' (current directory)
-#prefix = '/usr/local'
-
-# Top-level directory for intermediate build and test files.
-# DEFAULT: 'build'
-#build_dir = 'build'
-
-# C++ compiler command name or full path.
-# DEFAULT: auto-detected
-#cxx = 'g++'
-
-# Flags to use with the C++ compiler. Do not set unless you know
-# what you are doing - use cxx_extra to specify additional flags!
-# DEFAULT: compiler-dependent
-#cc_flags = ''
-
-# Additional compiler (optimization) flags for non-debug builds
-# DEFAULT: compiler-dependent
-#cc_optim = '-O3 -mmmx -msse'
-
-# Additional compiler flags for debug builds
-# DEFAULT: compiler-dependent
-#cc_debug = '-g'
-
-# Additional flags to add to the C++ compiler
-# DEFAULT: '' (empty)
-#cxx_extra = ''
-
-# Additional flags to add to the linker
-# DEFAULT: '' (empty)
-#ld_extra = ''
-
-# Whether to treat compiler warnings as errors
-# DEFAULT: True
-werror = False
-
-# Whether to build a debug version
-# DEFAULT: False
-debug = True
-
-# Set to True to print the full compiler/linker command line
-# DEFAULT: False
-#verbose = True
-
-# Set to True to add flags that enable OpenMP parallelization
-# DEFAULT: False
-openmp = True
-
-# Additional compiler flags for OpenMP builds
-# DEFAULT: compiler-dependent
-#omp_flags = '-fopenmp'
-
-# Additional linker flags for OpenMP builds
-# DEFAULT: compiler-dependent
-#omp_ldflags = '-fopenmp'
-
-# Flavour of MPI implementation
-# Recognized values: 'none', 'MPT', 'MPICH', 'MPICH2', 'OPENMPI', 'INTELMPI'
-# DEFAULT: 'none' (disable MPI)
-#mpi = 'OPENMPI'
-
-# Prefix or paths to MPI headers and libraries. See note above about prefixes.
-mpi_prefix = '/usr/lib/openmpi'
-
-# MPI libraries to link against
-mpi_libs = ['mpi_cxx', 'mpi', 'open-rte', 'open-pal']
-
-# Prefix or paths to boost-python headers and libraries. See note above.
-#boost_prefix = '/usr/local'
-
-# boost-python library/libraries to link against
-boost_libs = ['boost_python-mt-py32']
-#boost_libs = ['boost_python-mt-py27']
-
-pythoncmd='python3'
-usepython3=True
-pythonlibname='python3.2mu'
-
-# Prefix or paths to CppUnit headers and libraries. See note above.
-#cppunit_prefix = '/usr/local'
-
-# CppUnit library/libraries to link against
-#cppunit_libs = ['cppunit']
-
-# Whether to use the netCDF library for dump file support
-# DEFAULT: False
-#netcdf = True
-
-# Prefix or paths to netCDF headers and libraries. See note above.
-#netcdf_prefix = '/usr/local'
-
-# netCDF library/libraries to link against
-#netcdf_libs = ['netcdf_c++', 'netcdf']
-
-# Whether to use the parMETIS library (only in conjunction with MPI)
-# DEFAULT: False
-#parmetis = True
-
-# Prefix or paths to parMETIS headers and libraries. See note above.
-#parmetis_prefix = '/usr/local'
-
-# parMETIS library/libraries to link against
-#parmetis_libs = ['parmetis', 'metis']
-
-# Whether to use the Intel PAPI (Performance API) library
-# DEFAULT: False
-#papi = True
-
-# Prefix or paths to PAPI headers and libraries. See note above.
-#papi_prefix = '/usr/local'
-
-# PAPI library/libraries to link against
-#papi_libs = ['papi']
-
-# Whether to use PAPI to instrument solver iterations
-# DEFAULT: False
-#papi_instrument_solver = True
-
-# Whether to use Intel MKL (Math Kernel Library)
-# DEFAULT: False
-#mkl = True
-
-# Prefix or paths to MKL headers and libraries. See note above.
-#mkl_prefix = '/usr'
-
-# MKL library/libraries to link against
-#mkl_libs = ['mkl_solver', 'mkl_em64t', 'mkl_core', 'guide', 'pthread']
-
-# Whether to use UMFPACK (requires AMD and BLAS)
-# DEFAULT: False
-#umfpack = True
-
-# Prefix or paths to UMFPACK headers and libraries. See note above.
-#umfpack_prefix = ['/usr/include/suitesparse', '/usr/lib']
-
-# UMFPACK library/libraries to link against
-#umfpack_libs = ['umfpack']
-
-# Whether to use BoomerAMG (requires MPI)
-# DEFAULT: False
-#boomeramg = True
-
-# Prefix or paths to BoomerAMG headers and libraries. See note above.
-#boomeramg_prefix = '/usr/local'
-
-# BoomerAMG library/libraries to link against
-#boomeramg_libs = ['HYPRE']
-
-# Flavour of LAPACK implementation
-# Recognized values: 'none', 'clapack', 'mkl'
-# DEFAULT: 'none' (do not use LAPACK)
-#lapack = 'clapack'
-
-# Prefix or paths to LAPACK headers and libraries. See note above.
-#lapack_prefix = '/usr/local'
-
-# LAPACK library/libraries to link against
-#lapack_libs = ['lapack_atlas']
-
-# Whether to use LLNL's SILO library for Silo output file support in weipa
-# DEFAULT: False
-#silo = True
-
-# Prefix or paths to SILO headers and libraries. See note above.
-#silo_prefix = '/usr/local'
-
-# SILO library/libraries to link against
-#silo_libs = ['siloh5', 'hdf5']
-
-# Whether to use LLNL's VisIt simulation interface (only version 2 supported)
-# DEFAULT: False
-#visit = True
-
-# Prefix or paths to VisIt's sim2 headers and libraries. See note above.
-#visit_prefix = '/opt/visit/2.1.0/linux-intel/libsim/V2'
-
-# Sim2 library/libraries to link against
-#visit_libs = ['simV2']
-
-# Build dynamic libraries only
-#DEFAULT: False
-#build_shared = True
-
-
-### ADVANCED OPTIONS ###
-# Do not change the following options unless you know what they do
-
-# Use intel's VSL library for random data
-# DEFAULT: False
-#vsl_random = True
-
-# Extra libraries to link with
-#sys_libs = []
-
-# Additional environmental variables to export to the tools
-#env_export = []
-
-#tools_names = ['default']
-
-#iknowwhatimdoing = False
-
-#forcelazy = 'leave_alone'
-#forcelazy= 'on'
-
-#forcecollres = 'leave_alone'
+debug = False
+lapack = 'none'
+silo = False
 
diff --git a/scons/squirrel_options.py b/scons/squirrel_options.py
index 5365d90..e2f96c0 100644
--- a/scons/squirrel_options.py
+++ b/scons/squirrel_options.py
@@ -15,7 +15,8 @@
 
 from templates.wheezy_options import *
 
-mpi = 'none'
+# mpi = 'none'
+mpi = 'OPENMPI'
 boost_libs = ['boost_python-py27']
 umfpack = True
 #lapack = 'clapack'
diff --git a/scons/templates/__init__.py b/scons/templates/__init__.py
index e69de29..a18a325 100644
--- a/scons/templates/__init__.py
+++ b/scons/templates/__init__.py
@@ -0,0 +1,16 @@
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
diff --git a/scons/templates/freebsd10_0_options.py b/scons/templates/freebsd10_0_options.py
index e3139db..6951710 100644
--- a/scons/templates/freebsd10_0_options.py
+++ b/scons/templates/freebsd10_0_options.py
@@ -137,13 +137,13 @@ cppunit_prefix = '/usr/local'
 # Whether to use the netCDF library for dump file support and netCDF-based
 # downunder data import
 # DEFAULT: False
-netcdf = True
+#netcdf = True
 
 # Prefix or paths to netCDF headers and libraries. See note above.
 netcdf_prefix = '/usr/local'
 
 # netCDF library/libraries to link against
-#netcdf_libs = ['netcdf_c++', 'netcdf']
+netcdf_libs = ['netcdf_c++4', 'netcdf']
 
 # Whether to use the parMETIS library (only in conjunction with MPI)
 # DEFAULT: False
@@ -204,7 +204,7 @@ silo = True
 silo_prefix = '/usr/local'
 
 # SILO library/libraries to link against
-silo_libs = ['silo']
+silo_libs = ['siloh5']
 
 # Whether to use LLNL's VisIt simulation interface (only version 2 supported)
 # DEFAULT: False
diff --git a/scons/templates/homebrew_10.10_options.py b/scons/templates/homebrew_10.10_options.py
index 8555c6b..cedb7e0 100644
--- a/scons/templates/homebrew_10.10_options.py
+++ b/scons/templates/homebrew_10.10_options.py
@@ -54,6 +54,7 @@ escript_opts_version = 202
 # what you are doing - use cc_extra to specify additional flags!
 # DEFAULT: compiler-dependent
 #cc_flags = ''
+cc_flags     = "-pedantic -Wall -fPIC -Wno-unknown-pragmas -Wno-sign-compare -Wno-system-headers -Wno-long-long -Wno-strict-aliasing"
 
 # Additional compiler (optimization) flags for non-debug builds
 # DEFAULT: compiler-dependent
diff --git a/scons/badger_options.py b/scons/templates/vivid_mpi_options.py
similarity index 83%
copy from scons/badger_options.py
copy to scons/templates/vivid_mpi_options.py
index bef42f9..71cf390 100644
--- a/scons/badger_options.py
+++ b/scons/templates/vivid_mpi_options.py
@@ -14,13 +14,6 @@
 #
 ##############################################################################
 
-from templates.wheezy_options import *
-
-debug = True
-
-boost_libs = ['boost_python-py27']
-
-lapack = 'clapack'
-
-silo = True
+from .vivid_options import *
 
+mpi='OPENMPI'
diff --git a/scons/templates/freebsd10_0_options.py b/scons/templates/vivid_options.py
similarity index 92%
copy from scons/templates/freebsd10_0_options.py
copy to scons/templates/vivid_options.py
index e3139db..5c32383 100644
--- a/scons/templates/freebsd10_0_options.py
+++ b/scons/templates/vivid_options.py
@@ -14,11 +14,11 @@
 #
 ##############################################################################
 
-# This is a template configuration file for escript on FreeBSD.
+# This is a template configuration file for escript on Ubuntu Linux.
 # Create a file named <sourcedir>/scons/<hostname>_options.py, where
 # <sourcedir> is the escript source directory and <hostname> is your machine's
 # short hostname, add the line
-# from templates.freebsd10_0_options import *
+# from templates.vivid_options import *
 # then customize to your needs.
 
 # PREFIXES:
@@ -68,7 +68,7 @@ escript_opts_version = 202
 
 # Additional flags to add to the C++ compiler
 # DEFAULT: '' (empty)
-cxx_extra = '-I/usr/local/lib/python2.7/site-packages/numpy/core/include'
+#cxx_extra = '-Wextra -Wno-unused-parameter'
 
 # Additional flags to add to the linker
 # DEFAULT: '' (empty)
@@ -103,14 +103,14 @@ openmp = True
 #omp_ldflags = '-fopenmp'
 
 # Prefix or paths to boost headers and libraries. See note above.
-boost_prefix = '/usr/local'
+#boost_prefix = '/usr/local'
 
 # boost-python library/libraries to link against
-boost_libs = ['boost_python']
+boost_libs = ['boost_python-py27']
 
 # Prefix or paths to CppUnit headers and libraries. See note above.
 # Only required for C++ unit tests.
-cppunit_prefix = '/usr/local'
+#cppunit_prefix = '/usr/local'
 
 # CppUnit library/libraries to link against
 #cppunit_libs = ['cppunit']
@@ -121,10 +121,10 @@ cppunit_prefix = '/usr/local'
 #mpi = 'OPENMPI'
 
 # Prefix or paths to MPI headers and libraries. See note above about prefixes.
-#mpi_prefix = '/usr/lib/openmpi'
+mpi_prefix = '/usr/lib/openmpi'
 
 # MPI libraries to link against
-#mpi_libs = ['mpi_cxx', 'mpi', 'open-rte', 'open-pal']
+mpi_libs = ['mpi_cxx', 'mpi', 'open-rte', 'open-pal']
 
 # Whether to add support for GPU-based ripley system matrix (requires nvcc
 # and thrust headers) [new in 202]
@@ -140,7 +140,7 @@ cppunit_prefix = '/usr/local'
 netcdf = True
 
 # Prefix or paths to netCDF headers and libraries. See note above.
-netcdf_prefix = '/usr/local'
+#netcdf_prefix = ['/usr/include/netcdf-3', '/usr/lib']
 
 # netCDF library/libraries to link against
 #netcdf_libs = ['netcdf_c++', 'netcdf']
@@ -170,10 +170,10 @@ netcdf_prefix = '/usr/local'
 #umfpack = True
 
 # Prefix or paths to UMFPACK headers and libraries. See note above.
-#umfpack_prefix = ['/usr/include/suitesparse', '/usr/lib']
+umfpack_prefix = ['/usr/include/suitesparse', '/usr/lib']
 
 # UMFPACK library/libraries to link against
-#umfpack_libs = ['umfpack', 'blas', 'amd']
+umfpack_libs = ['umfpack', 'blas', 'amd']
 
 # Whether to use BoomerAMG (requires MPI)
 # DEFAULT: False
@@ -191,20 +191,20 @@ netcdf_prefix = '/usr/local'
 #lapack = 'clapack'
 
 # Prefix or paths to LAPACK headers and libraries. See note above.
-#lapack_prefix = ['/usr/include/atlas', '/usr/lib/atlas-base']
+lapack_prefix = ['/usr/include/atlas', '/usr/lib/atlas-base']
 
 # LAPACK library/libraries to link against
-#lapack_libs = ['lapack_atlas']
+lapack_libs = ['lapack_atlas']
 
 # Whether to use LLNL's SILO library for Silo output file support in weipa
 # DEFAULT: False
-silo = True
+#silo = True
 
 # Prefix or paths to SILO headers and libraries. See note above.
-silo_prefix = '/usr/local'
+#silo_prefix = '/usr/local'
 
 # SILO library/libraries to link against
-silo_libs = ['silo']
+#silo_libs = ['siloh5', 'hdf5']
 
 # Whether to use LLNL's VisIt simulation interface (only version 2 supported)
 # DEFAULT: False
@@ -225,7 +225,7 @@ silo_libs = ['silo']
 # Do not change the following options unless you know what they do
 
 # Compiler flags for some optimisations in dudley
-#dudley_assemble_flags = '-funroll-loops'
+dudley_assemble_flags = '-funroll-loops'
 
 # launcher, prelaunch, postlaunch: for MPI builds/batch system runs
 # the following substitutions are applied to all three:
diff --git a/scons/guineapig_py3_options.py b/scons/templates/vivid_py3_mpi_options.py
similarity index 94%
copy from scons/guineapig_py3_options.py
copy to scons/templates/vivid_py3_mpi_options.py
index f94f0f3..b00d865 100644
--- a/scons/guineapig_py3_options.py
+++ b/scons/templates/vivid_py3_mpi_options.py
@@ -14,7 +14,7 @@
 #
 ##############################################################################
 
-from guineapig_options import *
+from .vivid_options import *
 
 # boost-python library/libraries to link against
 boost_libs = ['boost_python-py34']
@@ -23,3 +23,4 @@ pythoncmd='python3'
 pythonlibname='python3.4m'
 pythonincpath='/usr/include/python3.4'
 
+mpi='OPENMPI'
diff --git a/scons/guineapig_py3_options.py b/scons/templates/vivid_py3_options.py
similarity index 96%
copy from scons/guineapig_py3_options.py
copy to scons/templates/vivid_py3_options.py
index f94f0f3..0ca1ade 100644
--- a/scons/guineapig_py3_options.py
+++ b/scons/templates/vivid_py3_options.py
@@ -14,7 +14,7 @@
 #
 ##############################################################################
 
-from guineapig_options import *
+from .vivid_options import *
 
 # boost-python library/libraries to link against
 boost_libs = ['boost_python-py34']
diff --git a/scripts/makesrc.sh b/scripts/makesrc.sh
new file mode 100755
index 0000000..ddc8561
--- /dev/null
+++ b/scripts/makesrc.sh
@@ -0,0 +1,17 @@
+
+#Make the source tarball for debian release
+#Run this from a clean checkout
+
+SRCVERSION=`head -1 debian/changelog | cut -f2 -d- | cut -d\( -f2`
+
+svnversion | grep -q :
+if [ $? == 0 ]
+then
+    echo "This does not appear to be a clean checkout."
+    echo "Exiting"
+    exit 1
+fi
+svnversion > svn_version
+
+tar -czf ../python-escript_$SRCVERSION.orig.tar.gz --exclude-vcs --exclude=debian --exclude=scons/*options.py *
+
diff --git a/scripts/py27+3_64.valgrind b/scripts/py27+3_64.valgrind
new file mode 100644
index 0000000..cefefe2
--- /dev/null
+++ b/scripts/py27+3_64.valgrind
@@ -0,0 +1,222 @@
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Addr4
+   fun:PyObject_GC_Del
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Addr4
+   obj:/usr/bin/python3.4
+   ...
+   fun:PyEval_EvalFrameEx
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Addr4
+   obj:/usr/bin/python3.4
+   fun:_PyBytes_Resize
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Cond
+   obj:/usr/bin/python3.4
+   fun:_PyBytes_Resize
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Value8
+   obj:/usr/bin/python3.4
+   fun:_PyBytes_Resize
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Cond
+   obj:/usr/bin/python3.4
+   fun:_PyObject_GC_Resize
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Value8
+   obj:/usr/bin/python3.4
+   fun:_PyObject_GC_Resize
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Cond
+   fun:PyObject_GC_Del
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Value8
+   fun:PyObject_GC_Del
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Cond
+   obj:/usr/bin/python3.4
+   fun:PyParser_AddToken
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Value8
+   obj:/usr/bin/python3.4
+   fun:PyParser_AddToken
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Cond
+   fun:PyObject_Free
+   obj:/usr/bin/python3.4
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Value8
+   fun:PyObject_Free
+   obj:/usr/bin/python3.4
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Cond
+   obj:/usr/bin/python3.4
+   fun:PyNode_AddChild
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Value8
+   obj:/usr/bin/python3.4
+   fun:PyNode_AddChild
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Cond
+   ...
+   fun:PyAST_CompileObject
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Value8
+   ...
+   fun:PyAST_CompileObject
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Cond
+   ...
+   obj:/usr/bin/python3.4
+   fun:PyEval_EvalFrameEx
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Value8
+   ...
+   obj:/usr/bin/python3.4
+   fun:PyEval_EvalFrameEx
+
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Cond
+   ...
+   obj:/usr/bin/python3.4
+   fun:PyAST_FromNodeObject
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Addr4
+   ...
+   obj:/usr/bin/python3.4
+   fun:PyAST_CompileObject
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Value8
+   ...
+   obj:/usr/bin/python3.4
+   fun:PyAST_FromNodeObject
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Addr4
+   obj:/usr/bin/python3.4
+   fun:_PyObject_GC_Resize
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Addr4
+   obj:/usr/bin/python3.4
+   ...
+   fun:_PyGC_CollectNoFail
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Addr4
+   obj:/usr/bin/python3.4
+   fun:_ZN5boost6python7objects8functionD1Ev
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Addr4
+   obj:/usr/bin/python3.4
+   ...
+   fun:_Py_DisplaySourceLine
+ 
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Addr4
+   obj:/usr/bin/python3.4
+   ...
+   fun:PyDict_SetItem
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Leak
+   match-leak-kinds: possible
+   fun:malloc
+   obj:/usr/bin/python3.4
+   fun:_PyObject_GC_Resize
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Cond
+   obj:/usr/bin/python3.4
+   ...
+   fun:_PyGC_CollectNoFail
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Value8
+   obj:/usr/bin/python3.4
+   ...
+   fun:_PyGC_CollectNoFail
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Cond
+   obj:/usr/bin/python3.4
+   ...
+   fun:PyDict_SetItem
+   fun:_PyModule_ClearDict
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Value8
+   obj:/usr/bin/python3.4
+   ...
+   fun:PyDict_SetItem
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Addr4
+   fun:PyObject_Free
+   fun:PyGrammar_AddAccelerators
+}
+{
+   <insert_a_suppression_name_here>
+   Memcheck:Addr4
+   obj:/usr/bin/python3.4
+   fun:_ZN5boost6python3apipLIPKcEERNS1_6objectES6_RKT_
+   fun:_ZN5boost6python7objects8function16add_to_namespaceERKNS0_3api6objectEPKcS6_S8_
+}
+
+
diff --git a/scripts/py_comp.py b/scripts/py_comp.py
index e201cc5..38dccf3 100644
--- a/scripts/py_comp.py
+++ b/scripts/py_comp.py
@@ -1,4 +1,19 @@
-from __future__ import print_function
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+from __future__ import print_function, division
 import py_compile
 from sys import * 
 
diff --git a/site_scons/dependencies.py b/site_scons/dependencies.py
index fb95561..3a3401d 100644
--- a/site_scons/dependencies.py
+++ b/site_scons/dependencies.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -86,14 +88,14 @@ def checkPython(env):
         initstring='from __future__ import print_function;from distutils import sysconfig;'
         if env['pythonlibname']!='':
             python_libs=env['pythonlibname']
-        else:	# work it out by calling python
+        else:   # work it out by calling python
             if ['IS_WINDOWS']:
                 cmd='print("python%s%s"%(sys.version_info[0], sys.version_info[1]))'
             else:
                 cmd='print("python"+sysconfig.get_python_version())'
             p=Popen([env['pythoncmd'], '-c', initstring+cmd], stdout=PIPE)
             python_libs=p.stdout.readline()
-            if env['usepython3']:		# This is to convert unicode str into py2 string
+            if env['usepython3']:       # This is to convert unicode str into py2 string
                 python_libs=python_libs.encode() # If scons runs on py3 then this must be rethought
             p.wait()
             python_libs=python_libs.strip()
@@ -196,8 +198,10 @@ def checkBoost(env):
             maj = boostversion/100000
             minor = (boostversion/100)%1000
             sub = boostversion % 100
+            env['boost_version'] = "%d.%d.%d"%(maj,minor,sub)
             if maj <= REQUIRED_BOOST[0] and minor < REQUIRED_BOOST[1]:
-                raise RuntimeError("The boost version referenced must be at least version %d.%d "%REQUIRED_BOOST + "(have %d.%d.%d)"%(maj,minor,sub))
+                print("The boost version referenced must be at least version %d.%d "%REQUIRED_BOOST + "(have %d.%d.%d)"%(maj,minor,sub))
+                env.Exit(1)
     boosthpp.close()
     env['buildvars']['boost_inc_path']=boost_inc_path
     env['buildvars']['boost_lib_path']=boost_lib_path
@@ -340,6 +344,9 @@ def checkOptionalLibraries(env):
     lapack_inc_path=''
     lapack_lib_path=''
     if env['uselapack']:
+        if env['longindices']:
+            print("Sorry, cannot use LAPACK with 64-bit index types. Set longindices to False or disable LAPACK.")
+            env.Exit(1)
         header='clapack.h'
         if env['lapack']=='mkl':
             env.AppendUnique(CPPDEFINES = ['MKL_LAPACK'])
@@ -426,6 +433,40 @@ def checkOptionalLibraries(env):
         env.AppendUnique(LIBPATH = [parmetis_lib_path])
         env.AppendUnique(LIBS = env['parmetis_libs'])
         env.PrependENVPath(env['LD_LIBRARY_PATH_KEY'], parmetis_lib_path)
+
+        # Try to extract the parmetis version from parmetis.h
+        header=open(os.path.join(parmetis_inc_path, 'parmetis.h')).readlines()
+        major,minor,sub = None,None,None
+        for line in header:
+            ver=re.match(r'#define PARMETIS_MAJOR_VERSION\s*(\d+)',line)
+            if ver:
+                major = int(ver.group(1))
+                continue
+            ver=re.match(r'#define PARMETIS_MINOR_VERSION\s*(\d+)',line)
+            if ver:
+                minor = int(ver.group(1))
+                continue
+            ver=re.match(r'#define PARMETIS_SUBMINOR_VERSION\s*(\d+)',line)
+            if ver:
+                sub = int(ver.group(1))
+                continue
+        if major is not None:
+            env['parmetis_version'] = "%d.%d.%d"%(major,minor,0 if sub is None else sub)
+            if env['longindices']:
+                # ParMETIS version 3.x does not support 64-bit indices
+                if major < 4:
+                    print("Sorry, cannot use ParMETIS version < 4.0 with 64-bit index types. Set longindices to False or disable ParMETIS.")
+                    env.Exit(1)
+                else:
+                    # check if ParMETIS was built with 64-bit indices
+                    conf = Configure(env.Clone())
+                    idxsize=conf.CheckTypeSize('idx_t', '#include <parmetis.h>', 'C++')
+                    if idxsize != 8:
+                        print("Sorry, ParMETIS was not compiled with 64-bit indices. Set longindices to False or disable/rebuild ParMETIS.")
+                        env.Exit(1)
+        else:
+            env['parmetis_version'] = "unknown"
+
         env.Append(CPPDEFINES = ['USE_PARMETIS'])
         env['buildvars']['parmetis_inc_path']=parmetis_inc_path
         env['buildvars']['parmetis_lib_path']=parmetis_lib_path
diff --git a/site_scons/extractdebbuild.py b/site_scons/extractdebbuild.py
index 45f5141..a22c609 100644
--- a/site_scons/extractdebbuild.py
+++ b/site_scons/extractdebbuild.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -28,7 +30,6 @@ import subprocess
 import sys
 
 def getdebbuildflags():
-  print("Starting getflags")
   usedflags={'CFLAGS':None, 'CPPFLAGS':'cpp_flags', 'CXXFLAGS':'cxx_extra', 'LDFLAGS':'ld_extra'}
   ignoreflags=['FFLAGS','FCFLAGS', 'GCJFLAGS','OBJCFLAGS','OBJCXXFLAGS']
   mycflags=None
@@ -58,5 +59,4 @@ def getdebbuildflags():
     if usedflags[key] is None:
         continue
     res.append([usedflags[key],val])
-  print("Ending getflags")  
   return res    
diff --git a/site_scons/grouptest.py b/site_scons/grouptest.py
index 5530f87..0a6f81c 100644
--- a/site_scons/grouptest.py
+++ b/site_scons/grouptest.py
@@ -13,6 +13,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/site_scons/site_init.py b/site_scons/site_init.py
index c6457be..ec563ca 100644
--- a/site_scons/site_init.py
+++ b/site_scons/site_init.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -149,7 +151,7 @@ def build_py(target, source, env):
        py_compile.compile(str(source[0]), str(target[0]), doraise=True)
        return 0
     except py_compile.PyCompileError, e:
-       print e
+       print(e)
        return 1
 
 
@@ -167,12 +169,12 @@ def runUnitTest(target, source, env):
             %(pn,env['ENV']['ESCRIPT_NUM_NODES'], sn)
       else:
            app = "cd "+ pn +" & "+sn
-  print "Executing test: " + app
+  print("Executing test: " + app)
   if not env.Execute(app):
     open(str(target[0]),'w').write("PASSED\n")
   else:
     return 1
-  print "Test execution time: ", round(time.time() - time_start, 1), " seconds wall time for " + str(source[0].abspath)
+  print("Test execution time: ", round(time.time() - time_start, 1), " seconds wall time for " + str(source[0].abspath))
   return None
 
 def binpath(env, name=None):
@@ -200,12 +202,12 @@ def runPyUnitTest(target, source, env):
         pass
      app = "cd "+pn+"; "+binpath(env, "run-escript")+" -ov "+binpath(env,
             "../tools/testrunner.py")+" -skipfile="+skipfile+" "+"-failfile="+failfile+" "+"-exit "+sn
-   print "Executing test: ",app
+   print("Executing test: ",app)
    if env.Execute(app) == 0:
       open(str(target[0]),'w').write("PASSED\n")
    else:
      return 1
-   print "Test execution time: ", round(time.time() - time_start, 1), " seconds wall time for " + str(source[0].abspath)
+   print("Test execution time: ", round(time.time() - time_start, 1), " seconds wall time for " + str(source[0].abspath))
    return None
 
 def runPyExample(target, source, env): 
@@ -222,12 +224,12 @@ def runPyExample(target, source, env):
    else:
     
      app = "cd "+pn+"; pwd; "+binpath(env, "run-escript")+" -ov "+sn
-   print "Executing test: ",app
+   print("Executing test: ",app)
    if env.Execute(app) == 0:
       open(str(target[0]),'w').write("PASSED\n")
    else:
      return 1
-   print "Test execution time: ", round(time.time() - time_start, 1), " seconds wall time for " + str(source[0].abspath)
+   print("Test execution time: ", round(time.time() - time_start, 1), " seconds wall time for " + str(source[0].abspath))
    return None
 
 def eps2pdf(target, source, env):
diff --git a/site_scons/site_tools/nvcc.py b/site_scons/site_tools/nvcc.py
index 68ea1e1..fab77d6 100644
--- a/site_scons/site_tools/nvcc.py
+++ b/site_scons/site_tools/nvcc.py
@@ -1,3 +1,19 @@
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+
 """SCons.Tool.nvcc
 
 Tool-specific initialization for NVIDIA CUDA Compiler.
@@ -9,6 +25,8 @@ selection method.
 This file copied with modifications from: http://www.scons.org/wiki/CudaTool
 """
 
+from __future__ import print_function, division
+
 import SCons.Tool
 import SCons.Scanner.C
 import SCons.Defaults
diff --git a/speckley/py_src/__init__.py b/speckley/py_src/__init__.py
index a45c368..fa5d2a5 100644
--- a/speckley/py_src/__init__.py
+++ b/speckley/py_src/__init__.py
@@ -17,6 +17,8 @@
 """A domain meshed with uniform rectangles or quadrilaterals
 """
 
+from __future__ import division, print_function
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -24,7 +26,7 @@ __license__="""Licensed under the Open Software License version 3.0
 http://www.opensource.org/licenses/osl-3.0.php"""
 __url__="https://launchpad.net/escript-finley"
 
-import esys.escript		# This is just to ensure required libraries are loaded
+import esys.escript      # This is just to ensure required libraries are loaded
 from .speckleycpp import *
 
 
diff --git a/speckley/src/Brick.cpp b/speckley/src/Brick.cpp
index a3ccaf0..e60c468 100644
--- a/speckley/src/Brick.cpp
+++ b/speckley/src/Brick.cpp
@@ -220,7 +220,7 @@ std::string Brick::getDescription() const
     return "speckley::Brick";
 }
 
-bool Brick::operator==(const AbstractDomain& other) const
+bool Brick::operator==(const escript::AbstractDomain& other) const
 {
     const Brick* o=dynamic_cast<const Brick*>(&other);
     if (o) {
@@ -1132,9 +1132,9 @@ void Brick::Print_Mesh_Info(const bool full) const
 void Brick::assembleCoordinates(escript::Data& arg) const
 {
     int numDim = m_numDim;
-    if (&arg!=0 && !arg.isDataPointShapeEqual(1, &numDim))
+    if (!arg.isDataPointShapeEqual(1, &numDim))
         throw SpeckleyException("setToX: Invalid Data object shape");
-    if (&arg!=0 && !arg.numSamplesEqual(1, getNumNodes()))
+    if (!arg.numSamplesEqual(1, getNumNodes()))
         throw SpeckleyException("setToX: Illegal number of samples in Data object");
 
     const dim_t NN0 = m_NN[0];
diff --git a/speckley/src/Rectangle.cpp b/speckley/src/Rectangle.cpp
index eef9549..172eed2 100644
--- a/speckley/src/Rectangle.cpp
+++ b/speckley/src/Rectangle.cpp
@@ -188,7 +188,7 @@ std::string Rectangle::getDescription() const
     return "speckley::Rectangle";
 }
 
-bool Rectangle::operator==(const AbstractDomain& other) const
+bool Rectangle::operator==(const escript::AbstractDomain& other) const
 {
     const Rectangle* o=dynamic_cast<const Rectangle*>(&other);
     if (o) {
@@ -958,9 +958,9 @@ void Rectangle::Print_Mesh_Info(const bool full) const
 void Rectangle::assembleCoordinates(escript::Data& arg) const
 {
     int numDim = m_numDim;
-    if (&arg!=0 && !arg.isDataPointShapeEqual(1, &numDim))
+    if (!arg.isDataPointShapeEqual(1, &numDim))
         throw SpeckleyException("setToX: Invalid Data object shape");
-    if (&arg!=0 && !arg.numSamplesEqual(1, getNumNodes()))
+    if (!arg.numSamplesEqual(1, getNumNodes()))
         throw SpeckleyException("setToX: Illegal number of samples in Data object");
 
     const dim_t NN0 = m_NN[0];
diff --git a/speckley/src/SpeckleyDomain.cpp b/speckley/src/SpeckleyDomain.cpp
index 43a0026..cd5a12f 100644
--- a/speckley/src/SpeckleyDomain.cpp
+++ b/speckley/src/SpeckleyDomain.cpp
@@ -320,6 +320,7 @@ void SpeckleyDomain::interpolateOnDomain(escript::Data& target,
     const int inFS = in.getFunctionSpace().getTypeCode();
     const int outFS = target.getFunctionSpace().getTypeCode();
 
+    target.requireWrite();
     // simplest case: 1:1 copy
     if (inFS==outFS) {
         copyData(target, in);
diff --git a/speckley/test/python/run_SpeckleyRipleyCoupler.py b/speckley/test/python/run_SpeckleyRipleyCoupler.py
index 1db24a4..d6f63a0 100644
--- a/speckley/test/python/run_SpeckleyRipleyCoupler.py
+++ b/speckley/test/python/run_SpeckleyRipleyCoupler.py
@@ -1,6 +1,3 @@
-from __future__ import print_function
-from __future__ import division
-
 ##############################################################################
 #
 # Copyright (c) 2003-2015 by The University of Queensland
@@ -15,6 +12,7 @@ from __future__ import division
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/speckley/test/python/run_readWriteOnSpeckley.py b/speckley/test/python/run_readWriteOnSpeckley.py
index a053d42..cb536ae 100644
--- a/speckley/test/python/run_readWriteOnSpeckley.py
+++ b/speckley/test/python/run_readWriteOnSpeckley.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import division, print_function
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
diff --git a/speckley/test/python/run_specialOnSpeckley.py b/speckley/test/python/run_specialOnSpeckley.py
index 0f95042..2eedc5a 100644
--- a/speckley/test/python/run_specialOnSpeckley.py
+++ b/speckley/test/python/run_specialOnSpeckley.py
@@ -13,9 +13,8 @@
 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
 #
 ##############################################################################
-from __future__ import print_function
-from __future__ import division
 
+from __future__ import division, print_function
 
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
diff --git a/svn_version b/svn_version
new file mode 100644
index 0000000..d93404e
--- /dev/null
+++ b/svn_version
@@ -0,0 +1 @@
+5777
diff --git a/tools/testrunner.py b/tools/testrunner.py
index bf17f49..df7f153 100644
--- a/tools/testrunner.py
+++ b/tools/testrunner.py
@@ -1,4 +1,19 @@
-from __future__ import print_function
+
+##############################################################################
+#
+# Copyright (c) 2003-2015 by The University of Queensland
+# http://www.uq.edu.au
+#
+# Primary Business: Queensland, Australia
+# Licensed under the Open Software License version 3.0
+# http://www.opensource.org/licenses/osl-3.0.php
+#
+# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
+# Development 2012-2013 by School of Earth Sciences
+# Development from 2014 by Centre for Geoscience Computing (GeoComp)
+#
+##############################################################################
+from __future__ import print_function, division
 import sys
 
 fail_format = """======================================================================
@@ -27,7 +42,7 @@ def run_tests(modules, exit_on_failure=False):
         m = __import__(module)
         res = m.run_tests(module, exit_on_failure=exit_on_failure)
         skiplist.extend(["%s : %s\n"%(rearrange(str(i[0])),i[1]) for i in res.skipped])
-        faillist.extend([fail_format.format(str(i[0]).split()[0],str(i[0]).split()[1], i[1]) for i in res.failures])
+        faillist.extend([fail_format.format(str(i[0]).split()[0],str(i[0]).split()[1], i[1]) for i in res.failures+res.errors])
     return skiplist, faillist
 
 if __name__ == "__main__":
diff --git a/weipa/py_src/__init__.py b/weipa/py_src/__init__.py
index d50cf1a..49554b2 100644
--- a/weipa/py_src/__init__.py
+++ b/weipa/py_src/__init__.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -36,7 +38,7 @@ def interpolateEscriptData(domain, data):
     from esys.escript.util import interpolate
     
     new_data={}
-    for n,d in list(data.items()):
+    for n,d in sorted(list(data.items()), key=lambda x: x[0]):
         if not d.isEmpty():
             fs=d.getFunctionSpace()
             if domain is None:
@@ -174,7 +176,7 @@ def saveVTK(filename, domain=None, metadata='', metadata_schema=None,
         ms=metadata
     if not metadata_schema is None:
         if hasattr(metadata_schema, 'items'):
-            for i,p in list(metadata_schema.items()):
+            for i,p in sorted(metadata_schema.items(), key=lambda x: x[0]):
                 ss="%s xmlns:%s=\"%s\""%(ss, i, p)
         else:
             ss=metadata_schema
@@ -203,7 +205,7 @@ def saveVoxet(filename, **data):
 
     new_data={}
     domain=None
-    for n,d in list(data.items()):
+    for n,d in sorted(data.items(), key=lambda x: x[0]):
         if d.isEmpty():
             continue
         fs=d.getFunctionSpace()
@@ -264,7 +266,7 @@ END_ORIGINAL_COORDINATE_SYSTEM\n""")
     f.write("\n")
 
     num=0
-    for n,d in list(new_data.items()):
+    for n,d in sorted(new_data.items(), key=lambda x: x[0]):
         num=num+1
         propfile=fileprefix+n
         domain.writeBinaryGrid(d, propfile, BYTEORDER_BIG_ENDIAN, DATATYPE_FLOAT32)
diff --git a/weipa/src/FinleyElements.cpp b/weipa/src/FinleyElements.cpp
index 214afeb..31f484e 100644
--- a/weipa/src/FinleyElements.cpp
+++ b/weipa/src/FinleyElements.cpp
@@ -221,22 +221,20 @@ bool FinleyElements::initFromFinley(const finley::ElementFile* finleyFile)
     if (numElements > 0) {
         nodesPerElement = finleyFile->numNodes;
 
-        int* iPtr;
-   
-        iPtr = finleyFile->Nodes;
+        index_t* idxPtr = finleyFile->Nodes;
         nodes.clear();
         nodes.insert(nodes.end(), numElements*nodesPerElement, 0);
-        copy(iPtr, iPtr+numElements*nodesPerElement, nodes.begin());
+        copy(idxPtr, idxPtr+numElements*nodesPerElement, nodes.begin());
 
-        iPtr = finleyFile->Color;
+        int* iPtr = finleyFile->Color;
         color.clear();
         color.insert(color.end(), numElements, 0);
         copy(iPtr, iPtr+numElements, color.begin());
 
-        iPtr = finleyFile->Id;
+        idxPtr = finleyFile->Id;
         ID.clear();
         ID.insert(ID.end(), numElements, 0);
-        copy(iPtr, iPtr+numElements, ID.begin());
+        copy(idxPtr, idxPtr+numElements, ID.begin());
 
         iPtr = finleyFile->Owner;
         owner.clear();
@@ -484,7 +482,8 @@ void FinleyElements::reorderArray(IntVec& v, const IntVec& idx,
     } else {
         for (idxIt=idx.begin(); idxIt!=idx.end(); idxIt++) {
             int i = *idxIt;
-            copy(&v[i*elementsPerIndex], &v[(i+1)*elementsPerIndex], arrIt);
+	    int* start = &v[i*elementsPerIndex];
+	    copy(start, start+elementsPerIndex, arrIt);
             arrIt += elementsPerIndex;
         }
     }
diff --git a/weipa/src/FinleyNodes.cpp b/weipa/src/FinleyNodes.cpp
index e309b49..6271a65 100644
--- a/weipa/src/FinleyNodes.cpp
+++ b/weipa/src/FinleyNodes.cpp
@@ -206,10 +206,10 @@ bool FinleyNodes::initFromFinley(const finley::NodeFile* finleyFile)
     numNodes = finleyFile->numNodes;
 
     int mpisize = finleyFile->MPIInfo->size;
-    int* iPtr = finleyFile->nodesDistribution->first_component;
+    index_t* idxPtr = finleyFile->nodesDistribution->first_component;
     nodeDist.clear();
     nodeDist.insert(nodeDist.end(), mpisize+1, 0);
-    copy(iPtr, iPtr+mpisize+1, nodeDist.begin());
+    copy(idxPtr, idxPtr+mpisize+1, nodeDist.begin());
 
     CoordArray::iterator it;
     for (it = coords.begin(); it != coords.end(); it++)
@@ -227,34 +227,34 @@ bool FinleyNodes::initFromFinley(const finley::NodeFile* finleyFile)
             double* srcPtr = finleyFile->Coordinates + i;
             float* c = new float[numNodes];
             coords.push_back(c);
-            for (int j=0; j<numNodes; j++, srcPtr+=numDims) {
+            for (index_t j=0; j<numNodes; j++, srcPtr+=numDims) {
                 *c++ = (float) *srcPtr;
             }
         }
 
-        iPtr = finleyFile->Id;
+        idxPtr = finleyFile->Id;
         nodeID.insert(nodeID.end(), numNodes, 0);
-        copy(iPtr, iPtr+numNodes, nodeID.begin());
+        copy(idxPtr, idxPtr+numNodes, nodeID.begin());
 
-        iPtr = finleyFile->Tag;
+        int* iPtr = finleyFile->Tag;
         nodeTag.insert(nodeTag.end(), numNodes, 0);
         copy(iPtr, iPtr+numNodes, nodeTag.begin());
 
-        iPtr = finleyFile->globalDegreesOfFreedom;
+        idxPtr = finleyFile->globalDegreesOfFreedom;
         nodeGDOF.insert(nodeGDOF.end(), numNodes, 0);
-        copy(iPtr, iPtr+numNodes, nodeGDOF.begin());
+        copy(idxPtr, idxPtr+numNodes, nodeGDOF.begin());
 
-        iPtr = finleyFile->globalNodesIndex;
+        idxPtr = finleyFile->globalNodesIndex;
         nodeGNI.insert(nodeGNI.end(), numNodes, 0);
-        copy(iPtr, iPtr+numNodes, nodeGNI.begin());
+        copy(idxPtr, idxPtr+numNodes, nodeGNI.begin());
 
-        iPtr = finleyFile->globalReducedDOFIndex;
+        idxPtr = finleyFile->globalReducedDOFIndex;
         nodeGRDFI.insert(nodeGRDFI.end(), numNodes, 0);
-        copy(iPtr, iPtr+numNodes, nodeGRDFI.begin());
+        copy(idxPtr, idxPtr+numNodes, nodeGRDFI.begin());
 
-        iPtr = finleyFile->globalReducedNodesIndex;
+        idxPtr = finleyFile->globalReducedNodesIndex;
         nodeGRNI.insert(nodeGRNI.end(), numNodes, 0);
-        copy(iPtr, iPtr+numNodes, nodeGRNI.begin());
+        copy(idxPtr, idxPtr+numNodes, nodeGRNI.begin());
 
     }
     return true;
diff --git a/weipa/src/RipleyNodes.cpp b/weipa/src/RipleyNodes.cpp
index 65abcea..6836112 100644
--- a/weipa/src/RipleyNodes.cpp
+++ b/weipa/src/RipleyNodes.cpp
@@ -129,7 +129,8 @@ bool RipleyNodes::initFromRipley(const ripley::RipleyDomain* dom)
     globalNumNodes = dom->getNumDataPointsGlobal();
     pair<int,dim_t> shape = dom->getDataShape(ripley::Nodes);
     numNodes = shape.second;
-    nodeDist = dom->getNodeDistribution();
+    ripley::IndexVector dist = dom->getNodeDistribution();
+    nodeDist.assign(dist.begin(), dist.end());
 
     if (numNodes > 0) {
         for (int d=0; d<numDims; d++) {
diff --git a/weipa/src/SpeckleyNodes.cpp b/weipa/src/SpeckleyNodes.cpp
index 355af08..fe093c2 100644
--- a/weipa/src/SpeckleyNodes.cpp
+++ b/weipa/src/SpeckleyNodes.cpp
@@ -129,7 +129,8 @@ bool SpeckleyNodes::initFromSpeckley(const speckley::SpeckleyDomain* dom)
     globalNumNodes = dom->getNumDataPointsGlobal();
     pair<int,dim_t> shape = dom->getDataShape(speckley::Nodes);
     numNodes = shape.second;
-    nodeDist = dom->getNodeDistribution();
+    speckley::IndexVector dist = dom->getNodeDistribution();
+    nodeDist.assign(dist.begin(), dist.end());
 
     if (numNodes > 0) {
         for (int d=0; d<numDims; d++) {
diff --git a/weipa/test/python/run_savesilo_tests.py b/weipa/test/python/run_savesilo_tests.py
index dadb8c9..50fa2ea 100644
--- a/weipa/test/python/run_savesilo_tests.py
+++ b/weipa/test/python/run_savesilo_tests.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""
@@ -248,6 +250,15 @@ class SiloSaver(unittest.TestCase): #requires subclassing
         out=os.path.join(WEIPA_WORKDIR, outFileBase+".silo")
         self.compareSiloFiles(out, ref)
 
+class Test_Silo_import(unittest.TestCase):
+    def test_import(self):
+        if not HAVE_SILO:
+            try:
+                import Silo
+            except ImportError as e:
+                if "No module named Silo" not in str(e):
+                    raise unittest.SkipTest("Silo module broken")
+
 @unittest.skipIf(getMPISizeWorld()>1, "MPI size > 1")
 @unittest.skipIf(not HAVE_FINLEY, "finley module not available")
 @unittest.skipIf(not HAVE_SILO, "Silo module not available")
diff --git a/weipa/test/python/run_savevtk_tests.py b/weipa/test/python/run_savevtk_tests.py
index 3bcd9c8..e36b688 100644
--- a/weipa/test/python/run_savevtk_tests.py
+++ b/weipa/test/python/run_savevtk_tests.py
@@ -14,6 +14,8 @@
 #
 ##############################################################################
 
+from __future__ import print_function, division
+
 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
 http://www.uq.edu.au
 Primary Business: Queensland, Australia"""

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/python-escript.git



More information about the debian-science-commits mailing list