[ffc] 03/12: New upstream version 2016.2.0

Johannes Ring johannr-guest at moszumanska.debian.org
Thu Dec 1 20:27:35 UTC 2016


This is an automated email from the git hooks/post-receive script.

johannr-guest pushed a commit to branch master
in repository ffc.

commit f897d17ae10d0af74364646227bd65e26502ee47
Author: Johannes Ring <johannr at simula.no>
Date:   Thu Dec 1 20:24:56 2016 +0100

    New upstream version 2016.2.0
---
 .bzrignore                                         |    8 -
 .gitignore                                         |   55 -
 AUTHORS                                            |   19 +
 ChangeLog                                          |   11 +
 .../ChangeLog => ChangeLog.uflacs                  |    0
 INSTALL                                            |    4 +-
 LICENSE                                            |    7 +
 README.rst                                         |   81 +-
 bench/bench.py                                     |   20 +-
 bench/plot.py                                      |    7 +-
 bench/utils.py                                     |    1 +
 cmake/templates/UFCConfig.cmake.in                 |   38 -
 cmake/templates/UFCConfigVersion.cmake.in          |   21 -
 cmake/templates/UseUFC.cmake.in                    |   19 -
 cmake/templates/ufc-1.pc.in                        |    4 -
 demo/BiharmonicHHJ.ufl                             |   39 +
 demo/BiharmonicRegge.ufl                           |   41 +
 demo/QuadratureElement.ufl                         |    2 +-
 demo/TraceElement.ufl                              |    2 +-
 doc/man/man1/ffc.1.gz                              |  Bin 2947 -> 2956 bytes
 doc/sphinx/README                                  |    9 +-
 doc/sphinx/generate-apidoc                         |   31 -
 doc/sphinx/requirements.txt                        |    2 +-
 doc/sphinx/source/api-doc/ffc.backends.dolfin.rst  |   62 -
 doc/sphinx/source/api-doc/ffc.backends.rst         |   18 -
 doc/sphinx/source/api-doc/ffc.backends.ufc.rst     |   78 -
 doc/sphinx/source/api-doc/ffc.errorcontrol.rst     |   30 -
 doc/sphinx/source/api-doc/ffc.quadrature.rst       |  150 -
 doc/sphinx/source/api-doc/ffc.rst                  |  257 --
 doc/sphinx/source/api-doc/ffc.tensor.rst           |   94 -
 doc/sphinx/source/api-doc/ffc.uflacsrepr.rst       |   38 -
 doc/sphinx/source/api-doc/modules.rst              |    8 -
 doc/sphinx/source/api-doc/uflacs.analysis.rst      |  102 -
 doc/sphinx/source/api-doc/uflacs.backends.ffc.rst  |   62 -
 doc/sphinx/source/api-doc/uflacs.backends.rst      |   18 -
 doc/sphinx/source/api-doc/uflacs.backends.ufc.rst  |   94 -
 .../source/api-doc/uflacs.datastructures.rst       |   38 -
 doc/sphinx/source/api-doc/uflacs.elementtables.rst |   30 -
 doc/sphinx/source/api-doc/uflacs.generation.rst    |   22 -
 doc/sphinx/source/api-doc/uflacs.language.rst      |   62 -
 .../source/api-doc/uflacs.representation.rst       |   22 -
 doc/sphinx/source/api-doc/uflacs.rst               |   35 -
 doc/sphinx/source/conf.py                          |   27 +
 doc/sphinx/source/index.rst                        |   60 +-
 doc/sphinx/source/installation.rst                 |   52 +
 doc/sphinx/source/manual.rst                       |    8 +
 doc/sphinx/source/releases.rst                     |    2 +
 doc/sphinx/source/releases/next.rst                |   23 +-
 doc/sphinx/source/releases/v1.6.0.rst              |    7 +-
 doc/sphinx/source/releases/v2016.1.0.rst           |   13 +
 doc/sphinx/source/releases/v2016.2.0.rst           |   34 +
 ffc/__init__.py                                    |   17 +-
 ffc/{main.py => __main__.py}                       |  143 +-
 ffc/analysis.py                                    |  107 +-
 ffc/backends/dolfin/capsules.py                    |   27 +-
 ffc/backends/dolfin/form.py                        |   43 +-
 ffc/backends/dolfin/functionspace.py               |   27 +-
 ffc/backends/dolfin/goalfunctional.py              |   57 +-
 ffc/backends/dolfin/includes.py                    |   48 +-
 ffc/backends/dolfin/wrappers.py                    |   23 +-
 ffc/backends/ufc/__init__.py                       |  234 +-
 ffc/backends/ufc/build.py                          |  102 -
 ffc/backends/ufc/coordinate_mapping.py             |    3 +-
 ffc/backends/ufc/dofmap.py                         |   43 +-
 ffc/backends/ufc/factory.py                        |   30 -
 ffc/backends/ufc/finite_element.py                 |   16 +-
 ffc/backends/ufc/form.py                           |   14 +-
 ffc/backends/ufc/function.py                       |    3 +-
 ffc/backends/ufc/integrals.py                      |   19 +-
 {ufc => ffc/backends/ufc}/ufc.h                    |   16 +-
 {ufc => ffc/backends/ufc}/ufc_geometry.h           |    0
 ffc/codegeneration.py                              |  255 +-
 ffc/codesnippets.py                                |   29 +-
 ffc/compiler.py                                    |  139 +-
 ffc/cpp.py                                         |  507 +--
 ffc/enrichedelement.py                             |    5 +-
 ffc/errorcontrol/__init__.py                       |    1 +
 ffc/errorcontrol/errorcontrol.py                   |   27 +-
 ffc/errorcontrol/errorcontrolgenerators.py         |   30 +-
 ffc/evaluatebasis.py                               |  294 +-
 ffc/evaluatebasisderivatives.py                    |  313 +-
 ffc/evaluatedof.py                                 |  169 +-
 ffc/extras.py                                      |   13 +-
 ffc/fiatinterface.py                               |  108 +-
 ffc/formatting.py                                  |   79 +-
 ffc/interpolatevertexvalues.py                     |   44 +-
 ffc/jitcompiler.py                                 |  336 +-
 ffc/jitobject.py                                   |   96 -
 ffc/log.py                                         |   19 +-
 ffc/mixedelement.py                                |   14 +-
 ffc/optimization.py                                |   25 +-
 ffc/parameters.py                                  |   84 +-
 ffc/plot.py                                        |  300 +-
 ffc/quadrature/__init__.py                         |    1 +
 ffc/quadrature/expr.py                             |   46 +-
 ffc/quadrature/floatvalue.py                       |   81 +-
 ffc/quadrature/fraction.py                         |  179 +-
 ffc/quadrature/optimisedquadraturetransformer.py   |  517 +--
 ffc/quadrature/parameters.py                       |   33 +-
 ffc/quadrature/product.py                          |  200 +-
 ffc/quadrature/quadraturegenerator.py              |  518 +--
 ffc/quadrature/quadratureoptimization.py           |   26 +-
 ffc/quadrature/quadraturerepresentation.py         |  112 +-
 ffc/quadrature/quadraturetransformer.py            |  294 +-
 ffc/quadrature/quadraturetransformerbase.py        |  338 +-
 ffc/quadrature/quadratureutils.py                  |  162 +-
 ffc/quadrature/reduce_operations.py                |  317 +-
 ffc/quadrature/sumobj.py                           |  285 +-
 ffc/quadrature/symbol.py                           |   36 +-
 ffc/quadrature/symbolics.py                        |  184 +-
 ffc/quadrature/tabulate_basis.py                   |  236 +-
 ffc/quadrature_schemes.py                          |  313 --
 ffc/quadratureelement.py                           |   22 +-
 ffc/representation.py                              |  485 ++-
 ffc/representationutils.py                         |  123 +-
 ffc/restrictedelement.py                           |    4 +
 ffc/tensor/__init__.py                             |    1 +
 ffc/tensor/costestimation.py                       |    3 +-
 ffc/tensor/geometrytensor.py                       |    1 +
 ffc/tensor/monomialextraction.py                   |    9 +-
 ffc/tensor/monomialintegration.py                  |   41 +-
 ffc/tensor/monomialtransformation.py               |   76 +-
 ffc/tensor/multiindex.py                           |    9 +-
 ffc/tensor/referencetensor.py                      |   11 +-
 ffc/tensor/tensorgenerator.py                      |   44 +-
 ffc/tensor/tensorreordering.py                     |   15 +-
 ffc/tensor/tensorrepresentation.py                 |   16 +-
 ffc/ufc_include.py.in                              |   24 -
 ffc/ufc_signature.py.in                            |   24 -
 {uflacs => ffc/uflacs}/__init__.py                 |    9 +-
 {uflacs => ffc/uflacs}/analysis/__init__.py        |    2 +-
 ffc/uflacs/analysis/balancing.py                   |   94 +
 .../crs.py => ffc/uflacs/analysis/crsarray.py      |   55 +-
 {uflacs => ffc/uflacs}/analysis/expr_shapes.py     |    8 +-
 {uflacs => ffc/uflacs}/analysis/factorization.py   |  195 +-
 {uflacs => ffc/uflacs}/analysis/graph.py           |    6 +-
 .../uflacs}/analysis/graph_dependencies.py         |   23 +-
 {uflacs => ffc/uflacs}/analysis/graph_rebuild.py   |  121 +-
 {uflacs => ffc/uflacs}/analysis/graph_ssa.py       |   31 +-
 {uflacs => ffc/uflacs}/analysis/graph_symbols.py   |   35 +-
 {uflacs => ffc/uflacs}/analysis/graph_vertices.py  |   11 +-
 ffc/uflacs/analysis/indexing.py                    |  162 +
 .../uflacs}/analysis/modified_terminals.py         |  201 +-
 {uflacs => ffc/uflacs}/analysis/valuenumbering.py  |   91 +-
 {uflacs => ffc/uflacs}/backends/__init__.py        |    2 +-
 {uflacs => ffc/uflacs}/backends/ffc/__init__.py    |    2 +-
 {uflacs => ffc/uflacs}/backends/ffc/access.py      |  300 +-
 ffc/uflacs/backends/ffc/backend.py                 |   40 +
 ffc/uflacs/backends/ffc/common.py                  |   65 +
 {uflacs => ffc/uflacs}/backends/ffc/definitions.py |  280 +-
 ffc/uflacs/backends/ffc/symbols.py                 |  171 +
 {uflacs => ffc/uflacs}/backends/ufc/__init__.py    |    2 +-
 .../uflacs}/backends/ufc/coordinate_mapping.py     |    4 +-
 {uflacs => ffc/uflacs}/backends/ufc/dofmap.py      |   51 +-
 .../uflacs}/backends/ufc/evaluatebasis.py          |   37 +-
 .../uflacs}/backends/ufc/finite_element.py         |   19 +-
 {uflacs => ffc/uflacs}/backends/ufc/form.py        |   34 +-
 {uflacs => ffc/uflacs}/backends/ufc/generator.py   |   12 +-
 {uflacs => ffc/uflacs}/backends/ufc/generators.py  |   12 +-
 {uflacs => ffc/uflacs}/backends/ufc/integrals.py   |    7 +-
 {uflacs => ffc/uflacs}/backends/ufc/templates.py   |    1 +
 ffc/uflacs/backends/ufc/utils.py                   |   32 +
 {uflacs => ffc/uflacs}/elementtables/__init__.py   |    2 +-
 ffc/uflacs/elementtables/table_utils.py            |  230 ++
 ffc/uflacs/elementtables/terminaltables.py         |  416 +++
 {uflacs => ffc/uflacs}/generation/__init__.py      |    2 +-
 ffc/uflacs/generation/integralgenerator.py         |  510 +++
 {uflacs => ffc/uflacs}/language/__init__.py        |    2 +-
 {uflacs => ffc/uflacs}/language/cnodes.py          |  182 +-
 {uflacs => ffc/uflacs}/language/format_lines.py    |    6 +-
 {uflacs => ffc/uflacs}/language/format_value.py    |    3 +-
 {uflacs => ffc/uflacs}/language/precedence.py      |    2 +-
 {uflacs => ffc/uflacs}/language/ufl_to_cnodes.py   |  140 +-
 .../analysis/__init__.py => ffc/uflacs/params.py   |    7 +-
 {uflacs => ffc/uflacs}/representation/__init__.py  |    2 +-
 ffc/uflacs/representation/build_uflacs_ir.py       |  437 +++
 ffc/uflacs/tools.py                                |  110 +
 ffc/{uflacsrepr => uflacs}/uflacsgenerator.py      |   40 +-
 ffc/{uflacsrepr => uflacs}/uflacsoptimization.py   |    3 +-
 ffc/uflacs/uflacsrepresentation.py                 |  132 +
 ffc/uflacsrepr/__init__.py                         |    3 -
 ffc/uflacsrepr/uflacsrepresentation.py             |   74 -
 ffc/utils.py                                       |   48 +-
 ffc/wrappers.py                                    |   14 +-
 release.conf                                       |   35 -
 requirements.txt                                   |    2 +-
 scripts/ffc                                        |   33 -
 scripts/makedist                                   |  105 -
 scripts/makedoc                                    |   56 -
 setup.cfg                                          |   15 +
 setup.py                                           |  342 +-
 test/evaluate_basis/cppcode.py                     |   76 -
 test/evaluate_basis/test.py                        |  260 --
 test/evaluate_basis_derivatives/cppcode.py         |   76 -
 test/evaluate_basis_derivatives/test.py            |  269 --
 test/regression/elements.py                        |    8 +-
 test/regression/ffc-reference-data-id              |    2 +-
 test/regression/recdiff.py                         |   90 +-
 test/regression/test.py                            |  219 +-
 test/regression/ufctest.py                         |    4 +-
 test/test.py                                       |    5 +-
 test/uflacs/README                                 |   21 -
 test/uflacs/README.md                              |   24 +
 test/uflacs/crosslanguage/Makefile                 |   27 +-
 test/uflacs/crosslanguage/conftest.py              |  131 +-
 test/uflacs/crosslanguage/cppsupport/mock_cells.h  |   28 +-
 .../crosslanguage/test_element_combinations.py     |    1 +
 test/uflacs/crosslanguage/test_gtest_framework.py  |    1 +
 test/uflacs/crosslanguage/test_mock_cells.py       |    1 +
 .../crosslanguage/test_ufc_integral_types.py       |    3 +-
 .../crosslanguage/xtest_tabulate_tensor_body.py    |    5 +-
 .../xtest_ufl_expression_compilation.py            |    3 +-
 .../system/xtest_dolfin_expression_compilation.py  |   13 +-
 test/uflacs/unit/test_cnodes.py                    |  116 +-
 test/uflacs/unit/test_cpp_compiler.py              |   72 +-
 test/uflacs/unit/test_crs.py                       |   19 +-
 test/uflacs/unit/test_factorization.py             |  115 +-
 test/uflacs/unit/test_format_code_structure.py     |   30 +-
 test/uflacs/unit/test_graph_algorithm.py           |  157 +-
 test/uflacs/unit/test_snippets.py                  |   10 +-
 test/uflacs/unit/test_ssa_manipulations.py         |   23 +-
 test/uflacs/unit/test_table_utils.py               |   62 +-
 test/uflacs/unit/test_ufc_backend.py               |  125 +-
 test/uflacs/unit/test_ufl_to_cnodes.py             |   68 +-
 test/uflacs/unit/test_valuenumbering.py            |   79 +
 test/uflacs/unit/xtest_latex_formatting.py         |   46 +-
 test/uflacs/unit/xtest_ufl_shapes_and_indexing.py  |   45 +-
 test/uflacs/unit/xtest_ufl_to_cpp_formatting.py    |   35 +-
 test/unit/elements/test.py                         |   43 -
 test/unit/evaluate_basis/__init__.py               |    0
 test/unit/evaluate_basis/cppcode.py                |   65 +-
 test/unit/evaluate_basis/elements.py               |   92 -
 test/unit/evaluate_basis/test.py                   |   38 -
 test/unit/evaluate_basis/test_against_fiat.py      |  262 --
 .../unit/evaluate_basis/test_against_ref_values.py |  237 --
 .../unit/evaluate_basis/test_basis_against_fiat.py |  409 +++
 test/unit/evaluate_basis/test_common.py            |  194 --
 test/unit/misc/__init__.py                         |    0
 test/unit/misc/test.py                             |  364 ---
 test/unit/misc/test_elements.py                    |  223 ++
 test/unit/pytest.ini                               |    9 +
 test/unit/symbolics/__init__.py                    |    0
 test/unit/symbolics/test.py                        |  137 -
 test/unit/symbolics/test_dg_elastodyn.py           |   57 +
 test/unit/symbolics/test_elas_weighted.py          |   85 +
 test/unit/symbolics/test_elas_weighted2.py         |  102 +
 test/unit/symbolics/test_elasticity_2d.py          |  133 +
 test/unit/symbolics/test_elasticity_term.py        |   59 +
 test/unit/symbolics/test_expand_operations.py      |  217 ++
 test/unit/symbolics/test_float.py                  |   63 +
 test/unit/symbolics/test_float_operators.py        |   97 +
 test/unit/symbolics/test_fraction.py               |   85 +
 test/unit/symbolics/test_fraction_operators.py     |   95 +
 test/unit/symbolics/test_mixed_symbols.py          |  245 ++
 test/unit/symbolics/test_not_finished.py           |   91 +
 test/unit/symbolics/test_poisson.py                |   80 +
 test/unit/symbolics/test_product.py                |  115 +
 test/unit/symbolics/test_product_operators.py      |  106 +
 test/unit/symbolics/test_real_examples.py          |   52 +
 test/unit/symbolics/test_reduce_gip.py             |  213 ++
 test/unit/symbolics/test_reduce_operations.py      |  247 ++
 test/unit/symbolics/test_reduce_vartype.py         |  147 +
 test/unit/symbolics/test_sum.py                    |   93 +
 test/unit/symbolics/test_sum_operators.py          |   89 +
 test/unit/symbolics/test_symbol.py                 |   66 +
 test/unit/symbolics/test_symbol_operators.py       |  100 +
 test/unit/symbolics/testdgelastodyn.py             |   83 -
 test/unit/symbolics/testelasticity2d.py            |  198 --
 test/unit/symbolics/testelasticityterm.py          |   87 -
 test/unit/symbolics/testelasweighted.py            |  113 -
 test/unit/symbolics/testelasweighted2.py           |  130 -
 test/unit/symbolics/testexpandoperations.py        |  287 --
 test/unit/symbolics/testfloat.py                   |   81 -
 test/unit/symbolics/testfloatoperators.py          |  107 -
 test/unit/symbolics/testfraction.py                |  105 -
 test/unit/symbolics/testfractionoperators.py       |  109 -
 test/unit/symbolics/testmixedsymbols.py            |  303 --
 test/unit/symbolics/testnotfinished.py             |  106 -
 test/unit/symbolics/testpoisson.py                 |  121 -
 test/unit/symbolics/testproduct.py                 |  145 -
 test/unit/symbolics/testproductoperators.py        |  120 -
 test/unit/symbolics/testrealexamples.py            |  158 -
 test/unit/symbolics/testreducegip.py               |  240 --
 test/unit/symbolics/testreduceoperations.py        |  397 ---
 test/unit/symbolics/testreducevartype.py           |  189 --
 test/unit/symbolics/testsum.py                     |  120 -
 test/unit/symbolics/testsumoperators.py            |  102 -
 test/unit/symbolics/testsymbol.py                  |   86 -
 test/unit/symbolics/testsymboloperators.py         |  114 -
 test/unit/test.py                                  |    7 +-
 ufc-merge-into-ffc/COPYING.GPL-2                   |  339 --
 ufc-merge-into-ffc/COPYING.LGPL                    |  165 -
 ufc-merge-into-ffc/LICENSE                         |   48 -
 ufc-merge-into-ffc/README.merge                    |   20 -
 ufc-merge-into-ffc/README.rst                      |   84 -
 ufc-merge-into-ffc/doc/manual/Makefile             |   30 -
 ufc-merge-into-ffc/doc/manual/algorithm.sty        |   96 -
 ufc-merge-into-ffc/doc/manual/bibliography.bib     |  180 --
 .../doc/manual/chapters/assembly.tex               |  397 ---
 .../doc/manual/chapters/assembly_cpp.tex           |  111 -
 .../doc/manual/chapters/examples.tex               |   76 -
 .../doc/manual/chapters/installation.tex           |   47 -
 .../doc/manual/chapters/interface.tex              | 1210 -------
 .../doc/manual/chapters/interface_cpp.tex          |    8 -
 .../doc/manual/chapters/introduction.tex           |   80 -
 ufc-merge-into-ffc/doc/manual/chapters/license.tex |    5 -
 .../doc/manual/chapters/numbering.tex              |    4 -
 .../doc/manual/chapters/numbering_common.tex       |  412 ---
 .../doc/manual/chapters/pythonutils.tex            |   43 -
 .../doc/manual/chapters/referencecells.tex         |    4 -
 .../doc/manual/chapters/referencecells_common.tex  |  250 --
 .../doc/manual/chapters/versions.tex               |   92 -
 ufc-merge-into-ffc/doc/manual/code/Poisson.ufl     |   16 -
 ufc-merge-into-ffc/doc/manual/code/poisson_ffc.h   | 1418 --------
 ufc-merge-into-ffc/doc/manual/code/poisson_syfi.h  |    1 -
 .../code/stiffness_syfi/dof_map_Lagrange_1_2D.cpp  |  189 --
 .../code/stiffness_syfi/dof_map_Lagrange_1_2D.h    |   92 -
 .../code/stiffness_syfi/fe_Lagrange_1_2D.cpp       |  160 -
 .../manual/code/stiffness_syfi/fe_Lagrange_1_2D.h  |   87 -
 .../form__stiffness_form__Lagrange_1_2D.cpp        |  203 --
 .../form__stiffness_form__Lagrange_1_2D.h          |   92 -
 ufc-merge-into-ffc/doc/manual/eps/hexahedron.eps   |  215 --
 ufc-merge-into-ffc/doc/manual/eps/insertion.eps    |  472 ---
 ufc-merge-into-ffc/doc/manual/eps/interval.eps     |   76 -
 .../eps/numbering_example_quadrilaterals.eps       |  339 --
 .../doc/manual/eps/numbering_example_triangles.eps |  245 --
 .../manual/eps/ordering_example_tetrahedron.eps    |  110 -
 .../doc/manual/eps/ordering_example_triangle.eps   |   71 -
 .../manual/eps/orientation_example_triangles.eps   |  375 ---
 .../doc/manual/eps/quadrilateral.eps               |  125 -
 ufc-merge-into-ffc/doc/manual/eps/tetrahedron.eps  |  184 --
 ufc-merge-into-ffc/doc/manual/eps/triangle.eps     |  112 -
 ufc-merge-into-ffc/doc/manual/eps/ufcfig.eps       |  643 ----
 ufc-merge-into-ffc/doc/manual/fenicsmanual.cls     |  110 -
 ufc-merge-into-ffc/doc/manual/svg/hexahedron.svg   |  249 --
 ufc-merge-into-ffc/doc/manual/svg/insertion.svg    |  429 ---
 ufc-merge-into-ffc/doc/manual/svg/interval.svg     |  172 -
 .../svg/numbering_example_quadrilaterals.svg       |  412 ---
 .../doc/manual/svg/numbering_example_triangles.svg |  348 --
 .../manual/svg/ordering_example_tetrahedron.svg    |  217 --
 .../doc/manual/svg/ordering_example_triangle.svg   |  176 -
 .../manual/svg/orientation_example_triangles.svg   |  374 ---
 .../doc/manual/svg/quadrilateral.svg               |  193 --
 ufc-merge-into-ffc/doc/manual/svg/tetrahedron.svg  |  222 --
 ufc-merge-into-ffc/doc/manual/svg/triangle.svg     |  174 -
 ufc-merge-into-ffc/doc/manual/ufc-user-manual.tex  |   35 -
 ufc-merge-into-ffc/doc/sphinx/README               |    3 -
 ufc-merge-into-ffc/doc/sphinx/index.rst            |  104 -
 ufc/__init__.py                                    |    6 -
 ufc_benchmark/Makefile                             |   42 -
 ufc_benchmark/setup.py                             |   23 -
 ufc_benchmark/ufc_benchmark.cpp                    |  372 ---
 ufc_benchmark/ufc_benchmark.h                      |   45 -
 ufc_benchmark/ufc_benchmark.i                      |   62 -
 ufc_benchmark/ufc_data.h                           |  184 --
 ufc_benchmark/ufc_reference_cell.h                 |  294 --
 uflacs-merge-into-ffc/COPYING                      |  674 ----
 uflacs-merge-into-ffc/COPYING.LESSER               |  165 -
 uflacs-merge-into-ffc/README.rst                   |   60 -
 .../doc/roadmap/css/print/paper.css                |  176 -
 .../doc/roadmap/css/print/pdf.css                  |  190 --
 uflacs-merge-into-ffc/doc/roadmap/css/reveal.css   | 1880 -----------
 .../doc/roadmap/css/reveal.min.css                 |    7 -
 .../doc/roadmap/css/theme/README.md                |   25 -
 .../doc/roadmap/css/theme/beige.css                |  148 -
 .../doc/roadmap/css/theme/blood.css                |  175 -
 .../doc/roadmap/css/theme/default.css              |  148 -
 .../doc/roadmap/css/theme/moon.css                 |  148 -
 .../doc/roadmap/css/theme/night.css                |  136 -
 .../doc/roadmap/css/theme/serif.css                |  138 -
 .../doc/roadmap/css/theme/simple.css               |  138 -
 .../doc/roadmap/css/theme/sky.css                  |  145 -
 .../doc/roadmap/css/theme/solarized.css            |  148 -
 .../doc/roadmap/css/theme/source/beige.scss        |   50 -
 .../doc/roadmap/css/theme/source/blood.scss        |   91 -
 .../doc/roadmap/css/theme/source/default.scss      |   42 -
 .../doc/roadmap/css/theme/source/moon.scss         |   68 -
 .../doc/roadmap/css/theme/source/night.scss        |   35 -
 .../doc/roadmap/css/theme/source/serif.scss        |   35 -
 .../doc/roadmap/css/theme/source/simple.scss       |   38 -
 .../doc/roadmap/css/theme/source/sky.scss          |   46 -
 .../doc/roadmap/css/theme/source/solarized.scss    |   74 -
 .../doc/roadmap/css/theme/template/mixins.scss     |   29 -
 .../doc/roadmap/css/theme/template/settings.scss   |   34 -
 .../doc/roadmap/css/theme/template/theme.scss      |  170 -
 uflacs-merge-into-ffc/doc/roadmap/images/image.jpg |  Bin 98762 -> 0 bytes
 uflacs-merge-into-ffc/doc/roadmap/index.html       |  407 ---
 uflacs-merge-into-ffc/doc/roadmap/js/reveal.js     | 3382 --------------------
 uflacs-merge-into-ffc/doc/roadmap/js/reveal.min.js |    9 -
 .../doc/roadmap/lib/css/zenburn.css                |  114 -
 .../doc/roadmap/lib/font/league_gothic-webfont.eot |  Bin 18485 -> 0 bytes
 .../doc/roadmap/lib/font/league_gothic-webfont.svg |  230 --
 .../doc/roadmap/lib/font/league_gothic-webfont.ttf |  Bin 42324 -> 0 bytes
 .../roadmap/lib/font/league_gothic-webfont.woff    |  Bin 21288 -> 0 bytes
 .../doc/roadmap/lib/font/league_gothic_license     |    2 -
 .../doc/roadmap/lib/js/classList.js                |    2 -
 .../doc/roadmap/lib/js/head.min.js                 |    8 -
 .../doc/roadmap/lib/js/html5shiv.js                |    7 -
 .../doc/roadmap/plugin/highlight/highlight.js      |   32 -
 .../doc/roadmap/plugin/leap/leap.js                |  157 -
 .../doc/roadmap/plugin/markdown/example.html       |  129 -
 .../doc/roadmap/plugin/markdown/example.md         |   31 -
 .../doc/roadmap/plugin/markdown/markdown.js        |  392 ---
 .../doc/roadmap/plugin/markdown/marked.js          |   37 -
 .../doc/roadmap/plugin/math/math.js                |   64 -
 .../doc/roadmap/plugin/multiplex/client.js         |   13 -
 .../doc/roadmap/plugin/multiplex/index.js          |   56 -
 .../doc/roadmap/plugin/multiplex/master.js         |   51 -
 .../doc/roadmap/plugin/notes-server/client.js      |   57 -
 .../doc/roadmap/plugin/notes-server/index.js       |   59 -
 .../doc/roadmap/plugin/notes-server/notes.html     |  142 -
 .../doc/roadmap/plugin/notes/notes.html            |  267 --
 .../doc/roadmap/plugin/notes/notes.js              |   78 -
 .../doc/roadmap/plugin/postmessage/example.html    |   39 -
 .../doc/roadmap/plugin/postmessage/postmessage.js  |   42 -
 .../doc/roadmap/plugin/print-pdf/print-pdf.js      |   44 -
 .../doc/roadmap/plugin/remotes/remotes.js          |   39 -
 .../doc/roadmap/plugin/search/search.js            |  196 --
 .../doc/roadmap/plugin/tagcloud/tagcloud.js        |   21 -
 .../doc/roadmap/plugin/zoom-js/zoom.js             |  258 --
 uflacs-merge-into-ffc/doc/sphinx/Makefile          |  177 -
 uflacs-merge-into-ffc/doc/sphinx/README            |   27 -
 uflacs-merge-into-ffc/doc/sphinx/generate-apidoc   |   29 -
 uflacs-merge-into-ffc/doc/sphinx/requirements.txt  |    5 -
 .../doc/sphinx/source/api-doc/modules.rst          |    7 -
 .../doc/sphinx/source/api-doc/uflacs.analysis.rst  |  102 -
 .../sphinx/source/api-doc/uflacs.backends.ffc.rst  |   62 -
 .../doc/sphinx/source/api-doc/uflacs.backends.rst  |   18 -
 .../sphinx/source/api-doc/uflacs.backends.ufc.rst  |   94 -
 .../source/api-doc/uflacs.datastructures.rst       |   38 -
 .../sphinx/source/api-doc/uflacs.elementtables.rst |   30 -
 .../sphinx/source/api-doc/uflacs.generation.rst    |   22 -
 .../doc/sphinx/source/api-doc/uflacs.language.rst  |   62 -
 .../source/api-doc/uflacs.representation.rst       |   22 -
 .../doc/sphinx/source/api-doc/uflacs.rst           |   35 -
 uflacs-merge-into-ffc/doc/sphinx/source/conf.py    |  272 --
 uflacs-merge-into-ffc/doc/sphinx/source/index.rst  |   56 -
 uflacs-merge-into-ffc/release.conf                 |    6 -
 uflacs-merge-into-ffc/setup.py                     |   57 -
 uflacs/analysis/indexing.py                        |  292 --
 uflacs/backends/ffc/common.py                      |  209 --
 uflacs/backends/ffc/ffc_compiler.py                |   62 -
 uflacs/backends/ffc/generation.py                  |   63 -
 uflacs/backends/ffc/representation.py              |  134 -
 uflacs/backends/ufc/utils.py                       |   15 -
 uflacs/datastructures/__init__.py                  |   19 -
 uflacs/datastructures/arrays.py                    |   31 -
 uflacs/datastructures/types.py                     |   43 -
 uflacs/elementtables/table_utils.py                |  234 --
 uflacs/elementtables/terminaltables.py             |  226 --
 uflacs/generation/integralgenerator.py             |  520 ---
 uflacs/language/typenodes.py                       |  132 -
 uflacs/params.py                                   |   27 -
 uflacs/representation/compute_expr_ir.py           |  201 --
 454 files changed, 12925 insertions(+), 41221 deletions(-)

diff --git a/.bzrignore b/.bzrignore
deleted file mode 100644
index 1b59c58..0000000
--- a/.bzrignore
+++ /dev/null
@@ -1,8 +0,0 @@
-syntax: glob
-(^|/)CVS($|/)
-(^|/)\.hg($|/)
-./*~
-./*.pyc
-./build/*
-./demo/*.py
-./demo/*.h
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 68e8a9e..0000000
--- a/.gitignore
+++ /dev/null
@@ -1,55 +0,0 @@
-# Compiled source
-*.o
-*.Plo
-*.Po
-*.lo
-*.la
-*.a
-*.os
-*.pyc
-*.so
-*.pc
-*.pyc
-*.pyd
-*.def
-*.dll
-*.exe
-*.dylib
-
-# Files generated by setup
-/ffc/git_commit_hash.py
-/ffc/ufc_signature.py
-/ffc/ufc_include.py
-
-# CMake and Make files
-CMakeCache.txt
-CMakeFiles
-cmake_install.cmake
-cmake_uninstall.cmake
-Makefile
-install_manifest.txt
-/cmake/templates/UFCConfig.cmake
-/cmake/templates/UFCConfigVersion.cmake
-/cmake/templates/UseUFC.cmake
-
-# Temporaries
-*~
-
-# OS X files
-.DS_Store
-.DS_Store?
-
-# Local build files
-/build
-/bench/bench.log
-
-# Tests
-**/.cache/
-/test/regression/error.log
-/test/regression/ffc-reference-data/
-/test/regression/output/
-/test/unit/tmp/
-__pycache__
-/test/uflacs/crosslanguage/generated
-/test/uflacs/crosslanguage/gtest.log
-/test/uflacs/crosslanguage/run_gtest
diff --git a/AUTHORS b/AUTHORS
index 93b3449..189de06 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -57,6 +57,7 @@ Contributors:
     email: aterrel at uchicago.edu
     www:   http://people.cs.uchicago.edu/~aterrel/
 
+
 Credits for UFC
 ===============
 
@@ -79,3 +80,21 @@ Main contributors:
     Garth N. Wells         <gnw20 at cam.ac.uk>
     Marie E. Rognes        <meg at simula.no>
     Johannes Ring          <johannr at simula.no>
+
+
+Credits for UFLACS
+==================
+
+UFLACS was merged into FFC 2016-02-16.
+
+Author:
+
+    Martin Sandve Alnæs    <martinal at simula.no>
+
+Contributors:
+
+    Anders Logg            <logg at chalmers.se>
+    Garth N. Wells         <gnw20 at cam.ac.uk>
+    Johannes Ring          <johannr at simula.no>
+    Matthias Liertzer      <matthias at liertzer.at>
+    Steffen Müthing        <steffen.muething at ipvs.uni-stuttgart.de>
diff --git a/ChangeLog b/ChangeLog
index be7faf5..ec9c6c1 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,14 @@
+2016.2.0 [2016-11-30]
+ - Jit compiler now compiles elements separately from forms to avoid duplicate work
+ - Add parameter max_signature_length to optionally shorten signatures in the jit cache
+ - Move uflacs module into ffc.uflacs
+ - Remove installation of pkg-config and CMake files (UFC path and
+   compiler flags are available from ffc module)
+ - Add dependency on dijitso and remove dependency on instant
+ - Add experimental Bitbucket pipelines
+ - Tidy the repo after UFC and UFLACS merge, and general spring cleanup. This
+   includes removal of instructions how to merge two repos, commit hash
+   c8389032268041fe94682790cb773663bdf27286.
 2016.1.0 [2016-06-23]
  - Add function get_ufc_include to get path to ufc.h
  - Merge UFLACS into FFC
diff --git a/uflacs-merge-into-ffc/ChangeLog b/ChangeLog.uflacs
similarity index 100%
rename from uflacs-merge-into-ffc/ChangeLog
rename to ChangeLog.uflacs
diff --git a/INSTALL b/INSTALL
index 0cb5648..417de2d 100644
--- a/INSTALL
+++ b/INSTALL
@@ -1,9 +1,9 @@
 To install FFC, type
 
-    sudo python setup.py install
+    pip install --prefix=/path/to/install/ .
 
 This will install FFC in the default Python path of your system,
-something like /usr/lib/python2.6/site-packages/.
+something like /path/to/install/lib/python2.7/site-packages/.
 
 To specify C++ compiler and/or compiler flags used for compiling UFC
 and JITing, set environment variables CXX, CXXFLAGS respectively
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..1450425
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,7 @@
+The header files ufc.h and ufc_geometry.h are released into the public domain.
+
+------------------------------------------------------------------------------
+
+Other files, unless stated otherwise in their head, are licensed by GNU Lesser
+General Public License, version 3, or later. See COPYING and COPYING.LESSER for
+the license text.
diff --git a/README.rst b/README.rst
index 7f6397a..e61d904 100644
--- a/README.rst
+++ b/README.rst
@@ -1,61 +1,68 @@
------------------------------
+=============================
 FFC: The FEniCS Form Compiler
------------------------------
+=============================
 
 FFC is a compiler for finite element variational forms. From a
 high-level description of the form, it generates efficient low-level
 C++ code that can be used to assemble the corresponding discrete
 operator (tensor). In particular, a bilinear form may be assembled
-into a matrix and a linear form may be assembled into a vector.
-
-FFC may be used either from the command line (by invoking the ``ffc``
+into a matrix and a linear form may be assembled into a vector.  FFC
+may be used either from the command line (by invoking the ``ffc``
 command) or as a Python module (``import ffc``).
 
-FFC is part of the FEniCS project (http://www.fenicsproject.org) and
-functions as a just-in-time (JIT) compiler for DOLFIN.
+FFC is part of the FEniCS Project.
 
-For further introduction to FFC, open the FFC user manual available in
-the subdirectory ``doc/manual/`` of this source tree, or try out the
-demos available in the subdirectory ``src/demo/`` of this source tree.
+For more information, visit http://www.fenicsproject.org
 
 
-License
--------
+Documentation
+=============
+
+Documentation can be viewed at http://fenics-ffc.readthedocs.org/.
 
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Lesser General Public License as
-published by the Free Software Foundation, either version 3 of the
-License, or (at your option) any later version.
+.. image:: https://readthedocs.org/projects/fenics-ffc/badge/?version=latest
+   :target: http://fenics.readthedocs.io/projects/ffc/en/latest/?badge=latest
+   :alt: Documentation Status
 
-This program is distributed in the hope that it will be useful, but
-WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-Lesser General Public License for more details.
 
-You should have received a copy of the GNU Lesser General Public
-License along with this program. If not, see
-<http://www.gnu.org/licenses/>.
+Automated Testing
+=================
 
+We use Bitbucket Pipelines and Atlassian Bamboo to perform automated
+testing.
 
-Dependencies
-------------
+.. image:: https://bitbucket-badges.useast.atlassian.io/badge/fenics-project/ffc.svg
+   :target: https://bitbucket.org/fenics-project/ffc/addon/pipelines/home
+   :alt: Pipelines Build Status
 
-#. Python, version 2.7 or later
+.. image:: http://fenics-bamboo.simula.no:8085/plugins/servlet/wittified/build-status/FFC-FD
+   :target: http://fenics-bamboo.simula.no:8085/browse/FFC-FD/latest
+   :alt: Bamboo Build Status
 
-#. The latest version of FIAT, Instant and UFL
 
-   You need to have FIAT, Instant and UFL installed. They are
-   available from the web page: https://bitbucket.org/fenics-project/.
+Code Coverage
+=============
 
-#. The Python NumPy module
+Code coverage reports can be viewed at
+https://coveralls.io/repos/bitbucket/fenics-project/ffc.
 
-#. The Python Six module
+.. image:: https://coveralls.io/repos/bitbucket/fenics-project/ffc/badge.svg?branch=master
+   :target: https://coveralls.io/bitbucket/fenics-project/ffc?branch=master
+   :alt: Coverage Status
+
+
+License
+=======
 
+  This program is free software: you can redistribute it and/or modify
+  it under the terms of the GNU Lesser General Public License as published by
+  the Free Software Foundation, either version 3 of the License, or
+  (at your option) any later version.
 
-Notes
------
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+  GNU Lesser General Public License for more details.
 
-From February 2014, the code generation interface UFC is distributed
-as part of FFC, and the UFC repository has been merged into the FFC
-repository. From this point onwards, UFC version numbers are reset to
-the same version numbers as for FFC.
+  You should have received a copy of the GNU Lesser General Public License
+  along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/bench/bench.py b/bench/bench.py
index 1cc285b..d63d566 100644
--- a/bench/bench.py
+++ b/bench/bench.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 """This script runs a benchmark study on the form files found in the
 current directory. It relies on the regression test script for
 timings."""
@@ -18,15 +19,20 @@ timings."""
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-05-11
-# Last changed: 2010-05-11
 
-import os, glob
+from __future__ import print_function
+
+import os
+import glob
+import sys
 from utils import print_table
 
 # Test options
-test_options = ["-r tensor", "-r tensor -O", "-r quadrature", "-r quadrature -O"]
+test_options = ["-r tensor",
+                "-r tensor -O",
+                "-r quadrature",
+                "-r quadrature -O",
+                "-r uflacs"]
 
 # Get list of test cases
 test_cases = sorted([f.split(".")[0] for f in glob.glob("*.ufl")])
@@ -40,8 +46,8 @@ table = {}
 for (j, test_option) in enumerate(test_options):
 
     # Run benchmark
-    print "\nUsing options %s\n" % test_option
-    os.system("python test.py --bench %s" % test_option)
+    print("\nUsing options %s\n" % test_option)
+    os.system(sys.executable + " test.py --bench %s" % test_option)
 
     # Collect results
     for (i, test_case) in enumerate(test_cases):
diff --git a/bench/plot.py b/bench/plot.py
index 52465b0..09dfecc 100644
--- a/bench/plot.py
+++ b/bench/plot.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "This script plots the results found in bench.log."
 
 # Copyright (C) 2010 Anders Logg
@@ -29,16 +30,16 @@ try:
 except:
     output = open("results/bench.log").read()
 for line in output.split("\n"):
-    if not "," in line: continue
+    if "," not in line: continue
     test_case, test_option, timing = [w.strip() for w in line.split(",")]
     try:
         form, degree = test_case.split("_")
     except:
         form, dim, degree = test_case.split("_")
         form = form + "_" + dim
-    if not form in results:
+    if form not in results:
         results[form] = {}
-    if not test_option in results[form]:
+    if test_option not in results[form]:
         results[form][test_option] = ([], [])
     results[form][test_option][0].append(int(degree))
     results[form][test_option][1].append(float(timing))
diff --git a/bench/utils.py b/bench/utils.py
index 06b7b0f..4dcd1df 100644
--- a/bench/utils.py
+++ b/bench/utils.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright (C) 2010 Anders Logg
 #
 # This file is part of FFC.
diff --git a/cmake/templates/UFCConfig.cmake.in b/cmake/templates/UFCConfig.cmake.in
deleted file mode 100644
index 6ab6349..0000000
--- a/cmake/templates/UFCConfig.cmake.in
+++ /dev/null
@@ -1,38 +0,0 @@
-# CMake configuration for UFC (http://fenicsproject.org/)
-#
-# This file has been generated automatically by the FFC/UFC installation.
-
-# FIXME: Check that naming conforms to CMake standards
-
-# Package found
-set(UFC_FOUND TRUE)
-
-# Include directories
-set(UFC_INCLUDE_DIRS "@INSTALL_PREFIX/include")
-
-# Compiler flags
-set(UFC_CXX_FLAGS "@CXX_FLAGS")
-
-# 3rd party include directories
-set(UFC_3RD_PARTY_INCLUDE_DIRS "@BOOST_INCLUDE_DIR")
-
-# 3rd party libraries
-set(UFC_3RD_PARTY_LIBRARIES "@BOOST_MATH_LIBRARY")
-
-# Python include directories
-set(UFC_PYTHON_INCLUDE_DIRS "@PYTHON_INCLUDE_DIR")
-
-# Python libraries
-set(UFC_PYTHON_LIBRARIES "@PYTHON_LIBRARY")
-
-# Python executable
-set(UFC_PYTHON_EXECUTABLE "@PYTHON_EXECUTABLE")
-
-# Version
-set(UFC_VERSION_STRING "@FULLVERSION")
-
-# Signature (SHA-1 hash of ufc.h)
-set(UFC_SIGNATURE "@UFC_SIGNATURE")
-
-# The location of the UseUFC.cmake file
-set(UFC_USE_FILE "@INSTALL_PREFIX/share/ufc/UseUFC.cmake")
diff --git a/cmake/templates/UFCConfigVersion.cmake.in b/cmake/templates/UFCConfigVersion.cmake.in
deleted file mode 100644
index f9ac44a..0000000
--- a/cmake/templates/UFCConfigVersion.cmake.in
+++ /dev/null
@@ -1,21 +0,0 @@
-# CMake configuration for UFC (http://fenicsproject.org/)
-#
-# This file has been generated automatically by the FFC/UFC installation.
-
-# FIXME: When should versions be defined as compatible?
-
-set(PACKAGE_VERSION "@FULLVERSION")
-set(PACKAGE_VERSION_MAJOR "@MAJOR")
-set(PACKAGE_VERSION_MINOR "@MINOR")
-set(PACKAGE_VERSION_PATCH "@MICRO")
-
-# This version is compatible only with matching major.minor versions.
-if ("${PACKAGE_VERSION_MAJOR}.${PACKAGE_VERSION_MINOR}" VERSION_EQUAL "${PACKAGE_FIND_VERSION_MAJOR}.${PACKAGE_FIND_VERSION_MINOR}")
-  # This version is compatible with equal or lesser patch versions.
-  if (NOT "${PACKAGE_VERSION_PATCH}" VERSION_LESS "${PACKAGE_FIND_VERSION_PATCH}")
-    set(PACKAGE_VERSION_COMPATIBLE 1)
-    if ("${PACKAGE_VERSION_PATCH}" VERSION_EQUAL "${PACKAGE_FIND_VERSION_PATCH}")
-      set(PACKAGE_VERSION_EXACT 1)
-    endif()
-  endif()
-endif()
diff --git a/cmake/templates/UseUFC.cmake.in b/cmake/templates/UseUFC.cmake.in
deleted file mode 100644
index fbc4040..0000000
--- a/cmake/templates/UseUFC.cmake.in
+++ /dev/null
@@ -1,19 +0,0 @@
-# CMake configuration for UFC (http://fenicsproject.org/)
-#
-# This file has been generated automatically by the FFC/UFC installation.
-#
-# This file sets up include directories, link directories, and
-# compiler settings for a project to use UFC. It should not be
-# included directly, but rather through the UFC_USE_FILE setting
-# obtained from UFCConfig.cmake.
-
-if (NOT UFC_USE_FILE_INCLUDED)
-  set(UFC_USE_FILE_INCLUDED 1)
-
-  # Add include directories needed to use UFC
-  include_directories(${UFC_INCLUDE_DIRS})
-
-  # Add compiler flags needed to use UFC
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${UFC_CXX_FLAGS}")
-
-endif()
diff --git a/cmake/templates/ufc-1.pc.in b/cmake/templates/ufc-1.pc.in
deleted file mode 100644
index 025317d..0000000
--- a/cmake/templates/ufc-1.pc.in
+++ /dev/null
@@ -1,4 +0,0 @@
-Name: UFC
-Description: Unified Form-assembly Code
-Version: @FULLVERSION
-Cflags: -I at INSTALL_PREFIX/include @CXX_FLAGS
diff --git a/demo/BiharmonicHHJ.ufl b/demo/BiharmonicHHJ.ufl
new file mode 100644
index 0000000..69fe854
--- /dev/null
+++ b/demo/BiharmonicHHJ.ufl
@@ -0,0 +1,39 @@
+# Copyright (C) 2016 Lizao Li
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+#
+# The bilinear form a(u, v) and linear form L(v) for
+# Biharmonic equation in Hellan-Herrmann-Johnson (HHJ)
+# formulation.
+#
+# Compile this form with FFC: ffc -l dolfin BiharmonicHHJ.ufl
+
+HHJ = FiniteElement('HHJ', triangle, 2)
+CG =  FiniteElement('CG', triangle, 3)
+mixed_element = HHJ * CG
+
+(sigma, u) = TrialFunctions(mixed_element)
+(tau,   v) = TestFunctions(mixed_element)
+f = Coefficient(CG)
+
+def b(sigma, v):
+    n = FacetNormal(triangle)
+    return inner(sigma, grad(grad(v))) * dx \
+      - dot(dot(sigma('+'), n('+')), n('+')) * jump(grad(v), n) * dS \
+      - dot(dot(sigma, n), n) * dot(grad(v), n) * ds
+
+a = inner(sigma, tau) * dx - b(tau, u) + b(sigma, v)
+L = f * v * dx
diff --git a/demo/BiharmonicRegge.ufl b/demo/BiharmonicRegge.ufl
new file mode 100644
index 0000000..6f6e507
--- /dev/null
+++ b/demo/BiharmonicRegge.ufl
@@ -0,0 +1,41 @@
+# Copyright (C) 2016 Lizao Li
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+#
+# The bilinear form a(u, v) and linear form L(v) for
+# Biharmonic equation in Regge formulation.
+#
+# Compile this form with FFC: ffc -l dolfin BiharmonicRegge.ufl
+
+REG = FiniteElement('Regge', tetrahedron, 1)
+CG =  FiniteElement('Lagrange', tetrahedron, 2)
+mixed_element = REG * CG
+
+(sigma, u) = TrialFunctions(mixed_element)
+(tau,   v) = TestFunctions(mixed_element)
+f = Coefficient(CG)
+
+def S(mu):
+    return mu - Identity(3) * tr(mu)
+
+def b(mu, v):
+    n = FacetNormal(tetrahedron)
+    return inner(S(mu), grad(grad(v))) * dx \
+      - dot(dot(S(mu('+')), n('+')), n('+')) * jump(grad(v), n) * dS \
+      - dot(dot(S(mu), n), n) * dot(grad(v), n) * ds
+
+a = inner(S(sigma), S(tau)) * dx - b(tau, u) + b(sigma, v)
+L = f * v * dx
diff --git a/demo/QuadratureElement.ufl b/demo/QuadratureElement.ufl
index fbfc7c4..e85fe7e 100644
--- a/demo/QuadratureElement.ufl
+++ b/demo/QuadratureElement.ufl
@@ -24,7 +24,7 @@
 scheme = "default"
 degree = 3
 dx = Measure("dx")
-dx = dx(degree=degree, rule=scheme)
+dx = dx(degree=degree, scheme=scheme)
 
 # Configure quadrature elements with compatible rule
 element = FiniteElement("Lagrange", triangle, 2)
diff --git a/demo/TraceElement.ufl b/demo/TraceElement.ufl
index d1cce15..61173ca 100644
--- a/demo/TraceElement.ufl
+++ b/demo/TraceElement.ufl
@@ -15,6 +15,6 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 
-element = FiniteElement("Discontinuous Lagrange Trace", "triangle", 0)
+element = FiniteElement("HDiv Trace", "triangle", 0)
 v = TestFunction(element)
 L = v*ds + avg(v)*dS
diff --git a/doc/man/man1/ffc.1.gz b/doc/man/man1/ffc.1.gz
index 10ffb71..3a0920b 100644
Binary files a/doc/man/man1/ffc.1.gz and b/doc/man/man1/ffc.1.gz differ
diff --git a/doc/sphinx/README b/doc/sphinx/README
index cabf4d8..1246a8d 100644
--- a/doc/sphinx/README
+++ b/doc/sphinx/README
@@ -2,10 +2,11 @@
 Sphinx documentation
 ====================
 
-FFC is documented using Sphinx and reStructured text. The
-documnentation is hosted at http://fenics-ffc.readthedocs.org/. The
-online documentation is automatically updated upon pushes to the FFC
-master branch.
+FFC is documented using Sphinx and reStructured text.
+The documentation is hosted at http://fenics-ffc.readthedocs.org/.
+
+The online documentation is automatically updated upon pushes to the
+FFC master branch.
 
 
 Updating the API documentation
diff --git a/doc/sphinx/generate-apidoc b/doc/sphinx/generate-apidoc
deleted file mode 100755
index ceefb64..0000000
--- a/doc/sphinx/generate-apidoc
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright (C) 2015 Garth N. Wells
-#
-# This file is part of UFL.
-#
-# UFL is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFL is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFL. If not, see <http://www.gnu.org/licenses/>.
-
-# This script calls sphinx-apidoc to generate files ready for autodoc
-
-echo ""
-echo "--- Generating FFC autodoc RST files"
-echo ""
-
-# Get location of Sphinx files
-SPHINX_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-SPHINX_SOURCE_DIR=$SPHINX_DIR/source
-
-# Generate .rst files ready for autodoc (exclude setup.py and UFC)
-#sphinx-apidoc -f -o $SPHINX_SOURCE_DIR/api-doc $SPHINX_DIR/../../ffc
-sphinx-apidoc -f -o $SPHINX_SOURCE_DIR/api-doc $SPHINX_DIR/../.. $SPHINX_DIR/../../ufc $SPHINX_DIR/../../setup.py
-#sphinx-apidoc -f -o $SPHINX_SOURCE_DIR/api-doc $SPHINX_DIR/../../uflacs
diff --git a/doc/sphinx/requirements.txt b/doc/sphinx/requirements.txt
index 8fc779f..e854bf6 100644
--- a/doc/sphinx/requirements.txt
+++ b/doc/sphinx/requirements.txt
@@ -1,3 +1,3 @@
+-e git+https://bitbucket.org/fenics-project/dijitso.git#egg=dijitso
 -e git+https://bitbucket.org/fenics-project/fiat.git#egg=fiat
--e git+https://bitbucket.org/fenics-project/instant.git#egg=instant
 -e git+https://bitbucket.org/fenics-project/ufl.git#egg=ufl
diff --git a/doc/sphinx/source/api-doc/ffc.backends.dolfin.rst b/doc/sphinx/source/api-doc/ffc.backends.dolfin.rst
deleted file mode 100644
index 680b5a7..0000000
--- a/doc/sphinx/source/api-doc/ffc.backends.dolfin.rst
+++ /dev/null
@@ -1,62 +0,0 @@
-ffc.backends.dolfin package
-===========================
-
-Submodules
-----------
-
-ffc.backends.dolfin.capsules module
------------------------------------
-
-.. automodule:: ffc.backends.dolfin.capsules
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.backends.dolfin.form module
--------------------------------
-
-.. automodule:: ffc.backends.dolfin.form
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.backends.dolfin.functionspace module
-----------------------------------------
-
-.. automodule:: ffc.backends.dolfin.functionspace
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.backends.dolfin.goalfunctional module
------------------------------------------
-
-.. automodule:: ffc.backends.dolfin.goalfunctional
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.backends.dolfin.includes module
------------------------------------
-
-.. automodule:: ffc.backends.dolfin.includes
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.backends.dolfin.wrappers module
------------------------------------
-
-.. automodule:: ffc.backends.dolfin.wrappers
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: ffc.backends.dolfin
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/doc/sphinx/source/api-doc/ffc.backends.rst b/doc/sphinx/source/api-doc/ffc.backends.rst
deleted file mode 100644
index e8963a0..0000000
--- a/doc/sphinx/source/api-doc/ffc.backends.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-ffc.backends package
-====================
-
-Subpackages
------------
-
-.. toctree::
-
-    ffc.backends.dolfin
-    ffc.backends.ufc
-
-Module contents
----------------
-
-.. automodule:: ffc.backends
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/doc/sphinx/source/api-doc/ffc.backends.ufc.rst b/doc/sphinx/source/api-doc/ffc.backends.ufc.rst
deleted file mode 100644
index c3e726c..0000000
--- a/doc/sphinx/source/api-doc/ffc.backends.ufc.rst
+++ /dev/null
@@ -1,78 +0,0 @@
-ffc.backends.ufc package
-========================
-
-Submodules
-----------
-
-ffc.backends.ufc.build module
------------------------------
-
-.. automodule:: ffc.backends.ufc.build
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.backends.ufc.coordinate_mapping module
-------------------------------------------
-
-.. automodule:: ffc.backends.ufc.coordinate_mapping
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.backends.ufc.dofmap module
-------------------------------
-
-.. automodule:: ffc.backends.ufc.dofmap
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.backends.ufc.factory module
--------------------------------
-
-.. automodule:: ffc.backends.ufc.factory
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.backends.ufc.finite_element module
---------------------------------------
-
-.. automodule:: ffc.backends.ufc.finite_element
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.backends.ufc.form module
-----------------------------
-
-.. automodule:: ffc.backends.ufc.form
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.backends.ufc.function module
---------------------------------
-
-.. automodule:: ffc.backends.ufc.function
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.backends.ufc.integrals module
----------------------------------
-
-.. automodule:: ffc.backends.ufc.integrals
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: ffc.backends.ufc
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/doc/sphinx/source/api-doc/ffc.errorcontrol.rst b/doc/sphinx/source/api-doc/ffc.errorcontrol.rst
deleted file mode 100644
index f871b88..0000000
--- a/doc/sphinx/source/api-doc/ffc.errorcontrol.rst
+++ /dev/null
@@ -1,30 +0,0 @@
-ffc.errorcontrol package
-========================
-
-Submodules
-----------
-
-ffc.errorcontrol.errorcontrol module
-------------------------------------
-
-.. automodule:: ffc.errorcontrol.errorcontrol
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.errorcontrol.errorcontrolgenerators module
-----------------------------------------------
-
-.. automodule:: ffc.errorcontrol.errorcontrolgenerators
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: ffc.errorcontrol
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/doc/sphinx/source/api-doc/ffc.quadrature.rst b/doc/sphinx/source/api-doc/ffc.quadrature.rst
deleted file mode 100644
index 0f000ce..0000000
--- a/doc/sphinx/source/api-doc/ffc.quadrature.rst
+++ /dev/null
@@ -1,150 +0,0 @@
-ffc.quadrature package
-======================
-
-Submodules
-----------
-
-ffc.quadrature.expr module
---------------------------
-
-.. automodule:: ffc.quadrature.expr
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.quadrature.floatvalue module
---------------------------------
-
-.. automodule:: ffc.quadrature.floatvalue
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.quadrature.fraction module
-------------------------------
-
-.. automodule:: ffc.quadrature.fraction
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.quadrature.optimisedquadraturetransformer module
-----------------------------------------------------
-
-.. automodule:: ffc.quadrature.optimisedquadraturetransformer
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.quadrature.parameters module
---------------------------------
-
-.. automodule:: ffc.quadrature.parameters
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.quadrature.product module
------------------------------
-
-.. automodule:: ffc.quadrature.product
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.quadrature.quadraturegenerator module
------------------------------------------
-
-.. automodule:: ffc.quadrature.quadraturegenerator
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.quadrature.quadratureoptimization module
---------------------------------------------
-
-.. automodule:: ffc.quadrature.quadratureoptimization
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.quadrature.quadraturerepresentation module
-----------------------------------------------
-
-.. automodule:: ffc.quadrature.quadraturerepresentation
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.quadrature.quadraturetransformer module
--------------------------------------------
-
-.. automodule:: ffc.quadrature.quadraturetransformer
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.quadrature.quadraturetransformerbase module
------------------------------------------------
-
-.. automodule:: ffc.quadrature.quadraturetransformerbase
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.quadrature.quadratureutils module
--------------------------------------
-
-.. automodule:: ffc.quadrature.quadratureutils
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.quadrature.reduce_operations module
----------------------------------------
-
-.. automodule:: ffc.quadrature.reduce_operations
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.quadrature.sumobj module
-----------------------------
-
-.. automodule:: ffc.quadrature.sumobj
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.quadrature.symbol module
-----------------------------
-
-.. automodule:: ffc.quadrature.symbol
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.quadrature.symbolics module
--------------------------------
-
-.. automodule:: ffc.quadrature.symbolics
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.quadrature.tabulate_basis module
-------------------------------------
-
-.. automodule:: ffc.quadrature.tabulate_basis
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: ffc.quadrature
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/doc/sphinx/source/api-doc/ffc.rst b/doc/sphinx/source/api-doc/ffc.rst
deleted file mode 100644
index 4ed6cef..0000000
--- a/doc/sphinx/source/api-doc/ffc.rst
+++ /dev/null
@@ -1,257 +0,0 @@
-ffc package
-===========
-
-Subpackages
------------
-
-.. toctree::
-
-    ffc.backends
-    ffc.errorcontrol
-    ffc.quadrature
-    ffc.tensor
-    ffc.uflacsrepr
-
-Submodules
-----------
-
-ffc.analysis module
--------------------
-
-.. automodule:: ffc.analysis
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.codegeneration module
--------------------------
-
-.. automodule:: ffc.codegeneration
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.codesnippets module
------------------------
-
-.. automodule:: ffc.codesnippets
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.compiler module
--------------------
-
-.. automodule:: ffc.compiler
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.cpp module
---------------
-
-.. automodule:: ffc.cpp
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.enrichedelement module
---------------------------
-
-.. automodule:: ffc.enrichedelement
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.evaluatebasis module
-------------------------
-
-.. automodule:: ffc.evaluatebasis
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.evaluatebasisderivatives module
------------------------------------
-
-.. automodule:: ffc.evaluatebasisderivatives
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.evaluatedof module
-----------------------
-
-.. automodule:: ffc.evaluatedof
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.extras module
------------------
-
-.. automodule:: ffc.extras
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.fiatinterface module
-------------------------
-
-.. automodule:: ffc.fiatinterface
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.formatting module
----------------------
-
-.. automodule:: ffc.formatting
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.git_commit_hash module
---------------------------
-
-.. automodule:: ffc.git_commit_hash
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.interpolatevertexvalues module
-----------------------------------
-
-.. automodule:: ffc.interpolatevertexvalues
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.jitcompiler module
-----------------------
-
-.. automodule:: ffc.jitcompiler
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.jitobject module
---------------------
-
-.. automodule:: ffc.jitobject
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.log module
---------------
-
-.. automodule:: ffc.log
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.mixedelement module
------------------------
-
-.. automodule:: ffc.mixedelement
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.optimization module
------------------------
-
-.. automodule:: ffc.optimization
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.parameters module
----------------------
-
-.. automodule:: ffc.parameters
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.plot module
----------------
-
-.. automodule:: ffc.plot
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.quadrature_schemes module
------------------------------
-
-.. automodule:: ffc.quadrature_schemes
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.quadratureelement module
-----------------------------
-
-.. automodule:: ffc.quadratureelement
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.representation module
--------------------------
-
-.. automodule:: ffc.representation
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.representationutils module
-------------------------------
-
-.. automodule:: ffc.representationutils
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.restrictedelement module
-----------------------------
-
-.. automodule:: ffc.restrictedelement
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.ufc_signature module
-------------------------
-
-.. automodule:: ffc.ufc_signature
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.utils module
-----------------
-
-.. automodule:: ffc.utils
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.wrappers module
--------------------
-
-.. automodule:: ffc.wrappers
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: ffc
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/doc/sphinx/source/api-doc/ffc.tensor.rst b/doc/sphinx/source/api-doc/ffc.tensor.rst
deleted file mode 100644
index 0cfe998..0000000
--- a/doc/sphinx/source/api-doc/ffc.tensor.rst
+++ /dev/null
@@ -1,94 +0,0 @@
-ffc.tensor package
-==================
-
-Submodules
-----------
-
-ffc.tensor.costestimation module
---------------------------------
-
-.. automodule:: ffc.tensor.costestimation
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.tensor.geometrytensor module
---------------------------------
-
-.. automodule:: ffc.tensor.geometrytensor
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.tensor.monomialextraction module
-------------------------------------
-
-.. automodule:: ffc.tensor.monomialextraction
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.tensor.monomialintegration module
--------------------------------------
-
-.. automodule:: ffc.tensor.monomialintegration
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.tensor.monomialtransformation module
-----------------------------------------
-
-.. automodule:: ffc.tensor.monomialtransformation
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.tensor.multiindex module
-----------------------------
-
-.. automodule:: ffc.tensor.multiindex
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.tensor.referencetensor module
----------------------------------
-
-.. automodule:: ffc.tensor.referencetensor
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.tensor.tensorgenerator module
----------------------------------
-
-.. automodule:: ffc.tensor.tensorgenerator
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.tensor.tensorreordering module
-----------------------------------
-
-.. automodule:: ffc.tensor.tensorreordering
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.tensor.tensorrepresentation module
---------------------------------------
-
-.. automodule:: ffc.tensor.tensorrepresentation
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: ffc.tensor
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/doc/sphinx/source/api-doc/ffc.uflacsrepr.rst b/doc/sphinx/source/api-doc/ffc.uflacsrepr.rst
deleted file mode 100644
index 9e80b82..0000000
--- a/doc/sphinx/source/api-doc/ffc.uflacsrepr.rst
+++ /dev/null
@@ -1,38 +0,0 @@
-ffc.uflacsrepr package
-======================
-
-Submodules
-----------
-
-ffc.uflacsrepr.uflacsgenerator module
--------------------------------------
-
-.. automodule:: ffc.uflacsrepr.uflacsgenerator
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.uflacsrepr.uflacsoptimization module
-----------------------------------------
-
-.. automodule:: ffc.uflacsrepr.uflacsoptimization
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-ffc.uflacsrepr.uflacsrepresentation module
-------------------------------------------
-
-.. automodule:: ffc.uflacsrepr.uflacsrepresentation
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: ffc.uflacsrepr
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/doc/sphinx/source/api-doc/modules.rst b/doc/sphinx/source/api-doc/modules.rst
deleted file mode 100644
index 013de91..0000000
--- a/doc/sphinx/source/api-doc/modules.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-ffc
-===
-
-.. toctree::
-   :maxdepth: 4
-
-   ffc
-   uflacs
diff --git a/doc/sphinx/source/api-doc/uflacs.analysis.rst b/doc/sphinx/source/api-doc/uflacs.analysis.rst
deleted file mode 100644
index 3ff2854..0000000
--- a/doc/sphinx/source/api-doc/uflacs.analysis.rst
+++ /dev/null
@@ -1,102 +0,0 @@
-uflacs.analysis package
-=======================
-
-Submodules
-----------
-
-uflacs.analysis.expr_shapes module
-----------------------------------
-
-.. automodule:: uflacs.analysis.expr_shapes
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.factorization module
-------------------------------------
-
-.. automodule:: uflacs.analysis.factorization
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.graph module
-----------------------------
-
-.. automodule:: uflacs.analysis.graph
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.graph_dependencies module
------------------------------------------
-
-.. automodule:: uflacs.analysis.graph_dependencies
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.graph_rebuild module
-------------------------------------
-
-.. automodule:: uflacs.analysis.graph_rebuild
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.graph_ssa module
---------------------------------
-
-.. automodule:: uflacs.analysis.graph_ssa
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.graph_symbols module
-------------------------------------
-
-.. automodule:: uflacs.analysis.graph_symbols
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.graph_vertices module
--------------------------------------
-
-.. automodule:: uflacs.analysis.graph_vertices
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.indexing module
--------------------------------
-
-.. automodule:: uflacs.analysis.indexing
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.modified_terminals module
------------------------------------------
-
-.. automodule:: uflacs.analysis.modified_terminals
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.valuenumbering module
--------------------------------------
-
-.. automodule:: uflacs.analysis.valuenumbering
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: uflacs.analysis
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/doc/sphinx/source/api-doc/uflacs.backends.ffc.rst b/doc/sphinx/source/api-doc/uflacs.backends.ffc.rst
deleted file mode 100644
index e92cefb..0000000
--- a/doc/sphinx/source/api-doc/uflacs.backends.ffc.rst
+++ /dev/null
@@ -1,62 +0,0 @@
-uflacs.backends.ffc package
-===========================
-
-Submodules
-----------
-
-uflacs.backends.ffc.access module
----------------------------------
-
-.. automodule:: uflacs.backends.ffc.access
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ffc.common module
----------------------------------
-
-.. automodule:: uflacs.backends.ffc.common
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ffc.definitions module
---------------------------------------
-
-.. automodule:: uflacs.backends.ffc.definitions
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ffc.ffc_compiler module
----------------------------------------
-
-.. automodule:: uflacs.backends.ffc.ffc_compiler
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ffc.generation module
--------------------------------------
-
-.. automodule:: uflacs.backends.ffc.generation
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ffc.representation module
------------------------------------------
-
-.. automodule:: uflacs.backends.ffc.representation
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: uflacs.backends.ffc
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/doc/sphinx/source/api-doc/uflacs.backends.rst b/doc/sphinx/source/api-doc/uflacs.backends.rst
deleted file mode 100644
index 8553451..0000000
--- a/doc/sphinx/source/api-doc/uflacs.backends.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-uflacs.backends package
-=======================
-
-Subpackages
------------
-
-.. toctree::
-
-    uflacs.backends.ffc
-    uflacs.backends.ufc
-
-Module contents
----------------
-
-.. automodule:: uflacs.backends
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/doc/sphinx/source/api-doc/uflacs.backends.ufc.rst b/doc/sphinx/source/api-doc/uflacs.backends.ufc.rst
deleted file mode 100644
index 865e061..0000000
--- a/doc/sphinx/source/api-doc/uflacs.backends.ufc.rst
+++ /dev/null
@@ -1,94 +0,0 @@
-uflacs.backends.ufc package
-===========================
-
-Submodules
-----------
-
-uflacs.backends.ufc.coordinate_mapping module
----------------------------------------------
-
-.. automodule:: uflacs.backends.ufc.coordinate_mapping
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ufc.dofmap module
----------------------------------
-
-.. automodule:: uflacs.backends.ufc.dofmap
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ufc.evaluatebasis module
-----------------------------------------
-
-.. automodule:: uflacs.backends.ufc.evaluatebasis
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ufc.finite_element module
------------------------------------------
-
-.. automodule:: uflacs.backends.ufc.finite_element
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ufc.form module
--------------------------------
-
-.. automodule:: uflacs.backends.ufc.form
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ufc.generator module
-------------------------------------
-
-.. automodule:: uflacs.backends.ufc.generator
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ufc.generators module
--------------------------------------
-
-.. automodule:: uflacs.backends.ufc.generators
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ufc.integrals module
-------------------------------------
-
-.. automodule:: uflacs.backends.ufc.integrals
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ufc.templates module
-------------------------------------
-
-.. automodule:: uflacs.backends.ufc.templates
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ufc.utils module
---------------------------------
-
-.. automodule:: uflacs.backends.ufc.utils
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: uflacs.backends.ufc
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/doc/sphinx/source/api-doc/uflacs.datastructures.rst b/doc/sphinx/source/api-doc/uflacs.datastructures.rst
deleted file mode 100644
index 55554ed..0000000
--- a/doc/sphinx/source/api-doc/uflacs.datastructures.rst
+++ /dev/null
@@ -1,38 +0,0 @@
-uflacs.datastructures package
-=============================
-
-Submodules
-----------
-
-uflacs.datastructures.arrays module
------------------------------------
-
-.. automodule:: uflacs.datastructures.arrays
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.datastructures.crs module
---------------------------------
-
-.. automodule:: uflacs.datastructures.crs
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.datastructures.types module
-----------------------------------
-
-.. automodule:: uflacs.datastructures.types
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: uflacs.datastructures
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/doc/sphinx/source/api-doc/uflacs.elementtables.rst b/doc/sphinx/source/api-doc/uflacs.elementtables.rst
deleted file mode 100644
index 71f3b11..0000000
--- a/doc/sphinx/source/api-doc/uflacs.elementtables.rst
+++ /dev/null
@@ -1,30 +0,0 @@
-uflacs.elementtables package
-============================
-
-Submodules
-----------
-
-uflacs.elementtables.table_utils module
----------------------------------------
-
-.. automodule:: uflacs.elementtables.table_utils
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.elementtables.terminaltables module
-------------------------------------------
-
-.. automodule:: uflacs.elementtables.terminaltables
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: uflacs.elementtables
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/doc/sphinx/source/api-doc/uflacs.generation.rst b/doc/sphinx/source/api-doc/uflacs.generation.rst
deleted file mode 100644
index 4582d0c..0000000
--- a/doc/sphinx/source/api-doc/uflacs.generation.rst
+++ /dev/null
@@ -1,22 +0,0 @@
-uflacs.generation package
-=========================
-
-Submodules
-----------
-
-uflacs.generation.integralgenerator module
-------------------------------------------
-
-.. automodule:: uflacs.generation.integralgenerator
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: uflacs.generation
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/doc/sphinx/source/api-doc/uflacs.language.rst b/doc/sphinx/source/api-doc/uflacs.language.rst
deleted file mode 100644
index 39dff6a..0000000
--- a/doc/sphinx/source/api-doc/uflacs.language.rst
+++ /dev/null
@@ -1,62 +0,0 @@
-uflacs.language package
-=======================
-
-Submodules
-----------
-
-uflacs.language.cnodes module
------------------------------
-
-.. automodule:: uflacs.language.cnodes
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.language.format_lines module
------------------------------------
-
-.. automodule:: uflacs.language.format_lines
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.language.format_value module
------------------------------------
-
-.. automodule:: uflacs.language.format_value
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.language.precedence module
----------------------------------
-
-.. automodule:: uflacs.language.precedence
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.language.typenodes module
---------------------------------
-
-.. automodule:: uflacs.language.typenodes
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.language.ufl_to_cnodes module
-------------------------------------
-
-.. automodule:: uflacs.language.ufl_to_cnodes
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: uflacs.language
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/doc/sphinx/source/api-doc/uflacs.representation.rst b/doc/sphinx/source/api-doc/uflacs.representation.rst
deleted file mode 100644
index 2708bb6..0000000
--- a/doc/sphinx/source/api-doc/uflacs.representation.rst
+++ /dev/null
@@ -1,22 +0,0 @@
-uflacs.representation package
-=============================
-
-Submodules
-----------
-
-uflacs.representation.compute_expr_ir module
---------------------------------------------
-
-.. automodule:: uflacs.representation.compute_expr_ir
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: uflacs.representation
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/doc/sphinx/source/api-doc/uflacs.rst b/doc/sphinx/source/api-doc/uflacs.rst
deleted file mode 100644
index 4da0a02..0000000
--- a/doc/sphinx/source/api-doc/uflacs.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-uflacs package
-==============
-
-Subpackages
------------
-
-.. toctree::
-
-    uflacs.analysis
-    uflacs.backends
-    uflacs.datastructures
-    uflacs.elementtables
-    uflacs.generation
-    uflacs.language
-    uflacs.representation
-
-Submodules
-----------
-
-uflacs.params module
---------------------
-
-.. automodule:: uflacs.params
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: uflacs
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/doc/sphinx/source/conf.py b/doc/sphinx/source/conf.py
index 0ff558d..7f07fe2 100644
--- a/doc/sphinx/source/conf.py
+++ b/doc/sphinx/source/conf.py
@@ -296,3 +296,30 @@ texinfo_documents = [
 
 # Example configuration for intersphinx: refer to the Python standard library.
 intersphinx_mapping = {'https://docs.python.org/': None}
+
+
+
+def run_apidoc(_):
+    from sphinx.apidoc import main
+
+    # Get location of Sphinx files
+    sphinx_source_dir = os.path.abspath(os.path.dirname(__file__))
+    repo_dir = os.path.abspath(os.path.join(sphinx_source_dir, os.path.pardir,
+                                            os.path.pardir, os.path.pardir))
+    apidoc_dir = os.path.join(sphinx_source_dir, "api-doc")
+
+    # Include these modules
+    modules = ['ffc']
+
+    for module in modules:
+        # Generate .rst files ready for autodoc
+        module_dir = os.path.join(repo_dir, module)
+        main(["-f",             # Overwrite existing files
+              "-d", "1",        # Maximum depth of submodules to show in the TOC
+              "-o", apidoc_dir, # Directory to place all output
+              module_dir        # Module directory
+             ]
+        )
+
+def setup(app):
+    app.connect('builder-inited', run_apidoc)
diff --git a/doc/sphinx/source/index.rst b/doc/sphinx/source/index.rst
index 2829d6e..a60a8cb 100644
--- a/doc/sphinx/source/index.rst
+++ b/doc/sphinx/source/index.rst
@@ -1,57 +1,37 @@
-.. title:: FEniCS Form Compiler
+.. title:: FFC
 
 
-FEniCS Form Compiler (FFC)
-==========================
+=============================
+FFC: The FEniCS Form Compiler
+=============================
 
-This is the documentation for the FEniCS Form Compiler (FFC) from the
-FEniCS Project (http://fenicsproject.org).  FFC is a compiler for
-finite element variational forms. From a high-level description of the
-form, it generates efficient low-level C++ code that can be used to
-assemble the corresponding discrete operator (tensor). In particular,
-a bilinear form may be assembled into a matrix and a linear form may
-be assembled into a vector.
+FFC is a compiler for finite element variational forms. From a
+high-level description of the form, it generates efficient low-level
+C++ code that can be used to assemble the corresponding discrete
+operator (tensor). In particular, a bilinear form may be assembled
+into a matrix and a linear form may be assembled into a vector.  FFC
+may be used either from the command line (by invoking the ``ffc``
+command) or as a Python module (``import ffc``).
 
+FFC is part of the FEniCS Project.
 
-Installation
-------------
-
-To install FFC::
-
-  TODO
-
-Help and support
-----------------
-
-Send help requests and questions to fenics-support at googlegroups.com,
-and send feature requests and questions to
-fenics-dev at googlegroups.com.
-
-
-Development and reporting bugs
-------------------------------
-
-The git source repository for FFC is located at
-https://bitbucket.org/fenics-project/ffc.
-
-Bugs can be registered at
-https://bitbucket.org/fenics-project/ffc/issues.  For general FFC
-development questions and to make feature requests, use
-fenics-dev at googlegroups.com.
-
+For more information, visit http://www.fenicsproject.org
 
 
 Documentation
--------------
+=============
 
 .. toctree::
    :titlesonly:
+   :maxdepth: 1
 
-   api-doc/ffc
-   api-doc/uflacs
+   installation
+   manual
+   API reference (FFC) <api-doc/ffc>
+   API reference (UFLACS) <api-doc/uflacs>
    releases
 
+[FIXME: These links don't belong here, should go under API reference somehow.]
 
 * :ref:`genindex`
 * :ref:`modindex`
-* :ref:`search`
diff --git a/doc/sphinx/source/installation.rst b/doc/sphinx/source/installation.rst
new file mode 100644
index 0000000..7eb13ec
--- /dev/null
+++ b/doc/sphinx/source/installation.rst
@@ -0,0 +1,52 @@
+.. title:: Installation
+
+
+============
+Installation
+============
+
+FFC is normally installed as part of an installation of FEniCS.
+If you are using FFC as part of the FEniCS software suite, it
+is recommended that you follow the
+`installation instructions for FEniCS
+<https://fenics.readthedocs.io/en/latest/>`__.
+
+To install FFC itself, read on below for a list of requirements
+and installation instructions.
+
+Requirements and dependencies
+=============================
+
+FFC requires Python version 2.7 or later and depends on the
+following Python packages:
+
+* NumPy
+* six
+
+FFC also depends on the following FEniCS Python packages:
+
+* FIAT
+* UFL
+* dijitso
+
+These packages will be automatically installed as part of the
+installation of FFC, if not already present on your system.
+
+Installation instructions
+=========================
+
+To install FFC, download the source code from the
+`FFC Bitbucket repository
+<https://bitbucket.org/fenics-project/ffc>`__,
+and run the following command:
+
+.. code-block:: console
+
+    pip install .
+
+To install to a specific location, add the ``--prefix`` flag
+to the installation command:
+
+.. code-block:: console
+
+    pip install --prefix=<some directory> .
diff --git a/doc/sphinx/source/manual.rst b/doc/sphinx/source/manual.rst
new file mode 100644
index 0000000..95f7078
--- /dev/null
+++ b/doc/sphinx/source/manual.rst
@@ -0,0 +1,8 @@
+.. title:: User manual
+
+
+===========
+User manual
+===========
+
+.. note:: This page is work in progress.
diff --git a/doc/sphinx/source/releases.rst b/doc/sphinx/source/releases.rst
index 57717e7..e28cf80 100644
--- a/doc/sphinx/source/releases.rst
+++ b/doc/sphinx/source/releases.rst
@@ -5,4 +5,6 @@ Release notes
    :maxdepth: 2
 
    releases/next
+   releases/v2016.2.0
+   releases/v2016.1.0
    releases/v1.6.0
diff --git a/doc/sphinx/source/releases/next.rst b/doc/sphinx/source/releases/next.rst
index 18b51ad..1bfdf0e 100644
--- a/doc/sphinx/source/releases/next.rst
+++ b/doc/sphinx/source/releases/next.rst
@@ -1,6 +1,19 @@
-Changes in the next release of FFC
-==================================
+===========================
+Changes in the next release
+===========================
 
-- Generalize ufc interface to non-affine parameterized coordinates
-- Add ``ufc::coordinate_mapping`` class
-- Make ufc interface depend on C++11 features requiring gcc version >= 4.8
+
+Summary of changes
+==================
+
+.. note:: Developers should use this page to track and list changes
+          during development. At the time of release, this page should
+          be published (and renamed) to list the most important
+          changes in the new release.
+
+
+Detailed changes
+================
+
+.. note:: At the time of release, make a verbatim copy of the
+          ChangeLog here (and remove this note).
diff --git a/doc/sphinx/source/releases/v1.6.0.rst b/doc/sphinx/source/releases/v1.6.0.rst
index 04c714c..9525351 100644
--- a/doc/sphinx/source/releases/v1.6.0.rst
+++ b/doc/sphinx/source/releases/v1.6.0.rst
@@ -1,7 +1,8 @@
-Changes in FFC 1.6.0
-====================
+========================
+Changes in version 1.6.0
+========================
 
-FFC 1.6.0 was released on 2015-07-28
+FFC 1.6.0 was released on 2015-07-28.
 
 - Rename and modify a number of UFC interface functions. See
   docstrings in ufc.h for details.
diff --git a/doc/sphinx/source/releases/v2016.1.0.rst b/doc/sphinx/source/releases/v2016.1.0.rst
new file mode 100644
index 0000000..fa735a9
--- /dev/null
+++ b/doc/sphinx/source/releases/v2016.1.0.rst
@@ -0,0 +1,13 @@
+===========================
+Changes in version 2016.1.0
+===========================
+
+FFC 2016.1.0 was released on 2016-06-23.
+
+- Add function get_ufc_include to get path to ufc.h
+- Merge UFLACS into FFC
+- Generalize ufc interface to non-affine parameterized coordinates
+- Add ufc::coordinate_mapping class
+- Make ufc interface depend on C++11 features requiring gcc version >= 4.8
+- Add function ufc_signature() to the form compiler interface
+- Add function git_commit_hash()
diff --git a/doc/sphinx/source/releases/v2016.2.0.rst b/doc/sphinx/source/releases/v2016.2.0.rst
new file mode 100644
index 0000000..3de8a78
--- /dev/null
+++ b/doc/sphinx/source/releases/v2016.2.0.rst
@@ -0,0 +1,34 @@
+===========================
+Changes in version 2016.2.0
+===========================
+
+FFC 2016.2.0 was released on 2016-11-30.
+
+Summary of changes
+==================
+
+- Generalize ufc interface to non-affine parameterized coordinates
+- Add ``ufc::coordinate_mapping`` class
+- Make ufc interface depend on C++11 features requiring gcc version >= 4.8
+- Change the mapping ``pullback as metric`` to ``double covariant piola`` (this
+  preserves tangential-tangential trace).
+- Added Hellan-Herrmann-Johnson element as supported element
+- Add mapping ``double contravariant piola`` (this preserves normal-normal
+  trace).
+- Include comment with effective representation and integral metadata
+  to generated ``tabulate_tensor`` code
+
+
+Detailed changes
+================
+
+- Jit compiler now compiles elements separately from forms to avoid duplicate work
+- Add parameter max_signature_length to optionally shorten signatures in the jit cache
+- Move uflacs module into ffc.uflacs
+- Remove installation of pkg-config and CMake files (UFC path and
+  compiler flags are available from ffc module)
+- Add dependency on dijitso and remove dependency on instant
+- Add experimental Bitbucket pipelines
+- Tidy the repo after UFC and UFLACS merge, and general spring cleanup. This
+  includes removal of instructions how to merge two repos, commit hash
+  c8389032268041fe94682790cb773663bdf27286.
diff --git a/ffc/__init__.py b/ffc/__init__.py
index 4489c06..667092a 100644
--- a/ffc/__init__.py
+++ b/ffc/__init__.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 """
 FEniCS Form Compiler (FFC)
 --------------------------
@@ -13,7 +14,7 @@ The interface consists of the following functions:
   ufc_signature      - Signature of UFC interface (SHA-1 hash of ufc.h)
 """
 
-__version__ = "2016.1.0"
+__version__ = "2016.2.0"
 from ffc.git_commit_hash import git_commit_hash
 
 # Import compiler functions
@@ -22,11 +23,8 @@ from ffc.compiler import compile_form, compile_element
 # Import JIT compiler
 from ffc.jitcompiler import jit
 
-# Import UFC signature
-from ffc.ufc_signature import ufc_signature
-
-# Import UFC include dir function
-from ffc.ufc_include import get_ufc_include
+# Import UFC config functions
+from ffc.backends.ufc import get_include_path, get_ufc_cxx_flags, get_ufc_signature, ufc_signature
 
 # Import default parameters
 from .parameters import default_parameters, default_jit_parameters
@@ -42,8 +40,7 @@ try:
 
     # Import list of supported elements from FIAT
     from FIAT import supported_elements
-    supported_elements = list(supported_elements.keys())
-    supported_elements.sort()
+    supported_elements = sorted(supported_elements.keys())
 
     # Append elements that we can plot
     from .plot import element_colors
@@ -59,3 +56,7 @@ except:
 
     supported_elements = []
     supported_elements_for_plotting = []
+
+# Import main function, entry point to script
+from ffc.__main__ import main
+
diff --git a/ffc/main.py b/ffc/__main__.py
similarity index 61%
rename from ffc/main.py
rename to ffc/__main__.py
index 231fcbc..1533e5d 100644
--- a/ffc/main.py
+++ b/ffc/__main__.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+# -*- coding: utf-8 -*-
 
 # This script is the command-line interface to FFC. It parses
 # command-line arguments and wraps the given form file code in a
@@ -45,11 +46,12 @@ from ufl.algorithms import load_ufl_file
 import ufl
 
 # FFC modules.
-from ffc.log import set_level
-from ffc.log import DEBUG, INFO, ERROR
+from ffc.log import push_level, pop_level
+from ffc.log import DEBUG, INFO, WARNING, ERROR
 from ffc.parameters import default_parameters
-from ffc import __version__ as FFC_VERSION, ufc_signature
+from ffc import __version__ as FFC_VERSION, get_ufc_signature
 from ffc.backends.ufc import __version__ as UFC_VERSION
+from ffc.backends.ufc import get_include_path
 from ffc.compiler import compile_form, compile_element
 from ffc.formatting import write_code
 from ffc.errorcontrol import compile_with_error_control
@@ -66,7 +68,7 @@ def info_version():
 This is FFC, the FEniCS Form Compiler, version {0}.
 UFC backend version {1}, signature {2}.
 For further information, visit https://bitbucket.org/fenics-project/ffc/.
-""".format(FFC_VERSION, UFC_VERSION, ufc_signature()))
+""".format(FFC_VERSION, UFC_VERSION, get_ufc_signature()))
 
 
 def info_usage():
@@ -79,21 +81,35 @@ the FFC man page which may invoked by 'man ffc' (if installed).
 """)
 
 
-def main(argv):
-    "Main function."
-
-    # Append current directory to path, such that the *_debug module created by
-    # ufl_load_file can be found when FFC  compiles a form which is not in the
-    # PYHTONPATH
-    sys.path.append(getcwd())
+def compile_ufl_data(ufd, prefix, parameters):
+    if parameters["error_control"]:
+        code_h, code_c = compile_with_error_control(ufd.forms,
+                                                    ufd.object_names,
+                                                    ufd.reserved_objects,
+                                                    prefix,
+                                                    parameters)
+    elif len(ufd.forms) > 0:
+        code_h, code_c = compile_form(ufd.forms, ufd.object_names,
+                                      prefix=prefix,
+                                      parameters=parameters)
+    else:
+        code_h, code_c = compile_element(ufd.elements, prefix=prefix,
+                                         parameters=parameters)
+    return code_h, code_c
+
+
+def main(args=None):
+    """This is the commandline tool for the python module ffc."""
+    if args is None:
+        args = sys.argv[1:]
 
     # Get command-line arguments
     try:
-        opts, args = getopt.getopt(argv, "hVSvsl:r:f:Oo:q:ep",
-            ["help", "version", "signature", "verbose", "silent",
-             "language=", "representation=", "optimize",
-             "output-directory=", "quadrature-rule=", "error-control",
-             "profile"])
+        opts, args = getopt.getopt(args, "hIVSdvsl:r:f:Oo:q:ep",
+                                   ["help", "includes", "version", "signature", "debug", "verbose", "silent",
+                                    "language=", "representation=", "optimize",
+                                    "output-directory=", "quadrature-rule=", "error-control",
+                                    "profile"])
     except getopt.GetoptError:
         info_usage()
         print_error("Illegal command-line arguments.")
@@ -104,6 +120,11 @@ def main(argv):
         info_usage()
         return 0
 
+    # Check for --includes
+    if ("-I", "") in opts or ("--includes", "") in opts:
+        print(get_include_path())
+        return 0
+
     # Check for --version
     if ("-V", "") in opts or ("--version", "") in opts:
         info_version()
@@ -111,7 +132,7 @@ def main(argv):
 
     # Check for --signature
     if ("-S", "") in opts or ("--signature", "") in opts:
-        print(ufc_signature())
+        print(get_ufc_signature())
         return 0
 
     # Check that we get at least one file
@@ -119,9 +140,11 @@ def main(argv):
         print_error("Missing file.")
         return 1
 
-    # Get parameters and choose INFO as default for script
+    # Get parameters
     parameters = default_parameters()
-    parameters["log_level"] = INFO
+
+    # Choose WARNING as default for script
+    parameters["log_level"] = WARNING
 
     # Set default value (not part of in parameters[])
     enable_profile = False
@@ -129,6 +152,8 @@ def main(argv):
     # Parse command-line parameters
     for opt, arg in opts:
         if opt in ("-v", "--verbose"):
+            parameters["log_level"] = INFO
+        elif opt in ("-d", "--debug"):
             parameters["log_level"] = DEBUG
         elif opt in ("-s", "--silent"):
             parameters["log_level"] = ERROR
@@ -141,6 +166,14 @@ def main(argv):
         elif opt == "-f":
             if len(arg.split("=")) == 2:
                 (key, value) = arg.split("=")
+                if key not in parameters:
+                    info_usage()
+                    return 1
+                default = parameters[key]
+                if isinstance(default, int):
+                    value = int(value)
+                elif isinstance(default, float):
+                    value = float(value)
                 parameters[key] = value
             elif len(arg.split("==")) == 1:
                 key = arg.split("=")[0]
@@ -158,64 +191,60 @@ def main(argv):
             enable_profile = True
 
     # Set log_level
-    set_level(parameters["log_level"])
+    push_level(parameters["log_level"])
 
     # Set UFL precision
     ufl.constantvalue.precision = int(parameters["precision"])
 
-    # Print a nice message
-    info_version()
+    # Print a versioning message if verbose output was requested
+    if parameters["log_level"] <= INFO:
+        info_version()
 
     # Call parser and compiler for each file
+    resultcode = 0
     for filename in args:
 
         # Get filename prefix and suffix
         prefix, suffix = os.path.splitext(os.path.basename(filename))
         suffix = suffix.replace(os.path.extsep, "")
 
-        # Remove weird characters (file system allows more than the C preprocessor)
-        prefix = re.subn("[^{}]".format(string.ascii_letters + string.digits + "_"), "!", prefix)[0]
-        prefix = re.subn("!+", "_", prefix)[0]
-
         # Check file suffix
         if suffix != "ufl":
             print_error("Expecting a UFL form file (.ufl).")
-            return 1
+            resultcode = 1
+            break
+
+        # Remove weird characters (file system allows more than the C
+        # preprocessor)
+        prefix = re.subn("[^{}]".format(string.ascii_letters + string.digits + "_"), "!", prefix)[0]
+        prefix = re.subn("!+", "_", prefix)[0]
 
         # Turn on profiling
-        if enable_profile:  #parameters.get("profile"):
+        if enable_profile:
             pr = cProfile.Profile()
             pr.enable()
 
         # Load UFL file
         ufd = load_ufl_file(filename)
 
-        # Compile
-        try:
-            if parameters["error_control"]:
-                code_h, code_c = \
-                    compile_with_error_control(ufd.forms, ufd.object_names,
-                                         ufd.reserved_objects, prefix,
-                                         parameters)
-            elif len(ufd.forms) > 0:
-                code_h, code_c = \
-                    compile_form(ufd.forms, ufd.object_names, prefix=prefix, parameters=parameters)
-            else:
-                code_h, code_c = \
-                    compile_element(ufd.elements, prefix=prefix, parameters=parameters)
+        # Previously wrapped in try-except, disabled to actually get information we need
+        #try:
 
-            # Write to file
-            write_code(code_h, code_c, prefix, parameters)
+        # Generate code
+        code_h, code_c = compile_ufl_data(ufd, prefix, parameters)
 
-        except Exception as exception:
-            # Catch exceptions only when not in debug mode
-            if parameters["log_level"] <= DEBUG:
-                raise
-            else:
-                print("")
-                print_error(str(exception))
-                print_error("To get more information about this error, rerun FFC with --verbose.")
-                return 1
+        # Write to file
+        write_code(code_h, code_c, prefix, parameters)
+
+        #except Exception as exception:
+        #    # Catch exceptions only when not in debug mode
+        #    if parameters["log_level"] <= DEBUG:
+        #        raise
+        #    else:
+        #        print("")
+        #        print_error(str(exception))
+        #        print_error("To get more information about this error, rerun FFC with --debug.")
+        #        return 1
 
         # Turn off profiling and write status to file
         if enable_profile:
@@ -223,6 +252,12 @@ def main(argv):
             pfn = "ffc_{0}.profile".format(prefix)
             pr.dump_stats(pfn)
             print("Wrote profiling info to file {0}".format(pfn))
-            #pr.print_stats()
 
-    return 0
+    # Reset logging level
+    pop_level()
+
+    return resultcode
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/ffc/analysis.py b/ffc/analysis.py
index 30244ca..725897f 100644
--- a/ffc/analysis.py
+++ b/ffc/analysis.py
@@ -1,13 +1,6 @@
-"""
-Compiler stage 1: Analysis
---------------------------
-
-This module implements the analysis/preprocessing of variational
-forms, including automatic selection of elements, degrees and
-form representation type.
-"""
+# -*- coding: utf-8 -*-
 
-# Copyright (C) 2007-201r Anders Logg and Kristian B. Oelgaard
+# Copyright (C) 2007-2016 Anders Logg and Kristian B. Oelgaard
 #
 # This file is part of FFC.
 #
@@ -25,7 +18,16 @@ form representation type.
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 #
 # Modified by Marie E. Rognes, 2010
-# Modified by Martin Alnaes, 2013-2014
+# Modified by Martin Sandve Alnæs, 2013-2014
+
+"""
+Compiler stage 1: Analysis
+--------------------------
+
+This module implements the analysis/preprocessing of variational
+forms, including automatic selection of elements, degrees and
+form representation type.
+"""
 
 import os
 import copy
@@ -39,11 +41,12 @@ from ufl.algorithms import compute_form_data
 from ufl.algorithms.analysis import extract_sub_elements
 
 # FFC modules
-from ffc.log import log, info, begin, end, warning, debug, error, ffc_assert, warning_blue
+from ffc.log import log, info, begin, end, warning, debug, error, warning_blue
 from ffc.quadratureelement import default_quadrature_degree
 from ffc.utils import all_equal
 from ffc.tensor import estimate_cost
 
+
 def analyze_forms(forms, parameters):
     """
     Analyze form(s), returning
@@ -52,56 +55,77 @@ def analyze_forms(forms, parameters):
        unique_elements - a tuple of unique elements across all forms
        element_numbers - a mapping to unique numbers for all elements
     """
+    return analyze_ufl_objects(forms, "form", parameters)
 
-    begin("Compiler stage 1: Analyzing form(s)")
 
-    # Analyze forms
-    form_datas = tuple(_analyze_form(form,
-                                     parameters) for form in forms)
+def analyze_elements(elements, parameters):
+    return analyze_ufl_objects(elements, "element", parameters)
+
+
+def analyze_coordinate_mappings(coordinate_elements, parameters):
+    return analyze_ufl_objects(coordinate_elements, "coordinate_mapping", parameters)
+
+
+def analyze_ufl_objects(ufl_objects, kind, parameters):
+    """
+    Analyze ufl object(s), either forms, elements, or coordinate mappings, returning:
 
-    # Extract unique elements accross all forms
+       form_datas      - a tuple of form_data objects
+       unique_elements - a tuple of unique elements across all forms
+       element_numbers - a mapping to unique numbers for all elements
+
+    """
+    begin("Compiler stage 1: Analyzing %s(s)" % (kind,))
+
+    form_datas = ()
     unique_elements = set()
-    for form_data in form_datas:
-        unique_elements.update(form_data.unique_sub_elements)
+    unique_coordinate_elements = set()
 
-    # Sort elements
-    unique_elements = sort_elements(unique_elements)
+    if kind == "form":
+        forms = ufl_objects
 
-    # Compute element numbers
-    element_numbers = _compute_element_numbers(unique_elements)
+        # Analyze forms
+        form_datas = tuple(_analyze_form(form, parameters)
+                           for form in forms)
 
-    # Extract coordinate elements
-    unique_coordinate_elements = sorted(set(chain(*[form_data.coordinate_elements for form_data in form_datas])))
+        # Extract unique elements accross all forms
+        for form_data in form_datas:
+            unique_elements.update(form_data.unique_sub_elements)
 
-    end()
+        # Extract coordinate elements across all forms
+        for form_data in form_datas:
+            unique_coordinate_elements.update(form_data.coordinate_elements)
 
-    return form_datas, unique_elements, element_numbers, unique_coordinate_elements
+    elif kind == "element":
+        elements = ufl_objects
 
+        # Extract unique (sub)elements
+        unique_elements.update(extract_sub_elements(elements))
 
-def analyze_elements(elements, parameters):
+    elif kind == "coordinate_mapping":
+        meshes = ufl_objects
 
-    begin("Compiler stage 1: Analyzing elements(s)")
+        # Extract unique (sub)elements
+        unique_coordinate_elements = [mesh.ufl_coordinate_element() for mesh in meshes]
 
-    # Extract unique (sub)elements
-    unique_elements = set(extract_sub_elements(elements))
+    # Make sure coordinate elements and their subelements are included
+    unique_elements.update(extract_sub_elements(unique_coordinate_elements))
 
     # Sort elements
     unique_elements = sort_elements(unique_elements)
 
-    # Build element map
-    element_numbers = _compute_element_numbers(unique_elements)
-
-    # Update scheme for QuadratureElements
+    # Check for schemes for QuadratureElements
     for element in unique_elements:
         if element.family() == "Quadrature":
             qs = element.quadrature_scheme()
             if qs is None:
                 error("Missing quad_scheme in quadrature element.")
 
+    # Compute element numbers
+    element_numbers = _compute_element_numbers(unique_elements)
+
     end()
 
-    form_datas = ()
-    unique_coordinate_elements = ()
     return form_datas, unique_elements, element_numbers, unique_coordinate_elements
 
 
@@ -117,13 +141,13 @@ def _analyze_form(form, parameters):
     "Analyze form, returning form data."
 
     # Check that form is not empty
-    ffc_assert(not form.empty(),
-               "Form (%s) seems to be zero: cannot compile it." % str(form))
+    if form.empty():
+        error("Form (%s) seems to be zero: cannot compile it." % str(form))
 
     # Hack to override representation with environment variable
     forced_r = os.environ.get("FFC_FORCE_REPRESENTATION")
     if forced_r:
-        warning("representation:    forced by $FFC_FORCE_REPRESENTATION to '%s'" % r)
+        warning("representation:    forced by $FFC_FORCE_REPRESENTATION to '%s'" % forced_r)
 
     # Compute form metadata
     if parameters["representation"] == "uflacs" or forced_r == "uflacs":
@@ -184,7 +208,7 @@ def _autoselect_quadrature_degree(integral_metadata, integral, form_data):
         else:
             error("Illegal negative quadrature degree %s " % (qd,))
     else:
-        error("Invalid quadrature_degree {}." % (qd,))
+        error("Invalid quadrature_degree %s." % (qd,))
 
     tdim = integral.ufl_domain().topological_dimension()
     _check_quadrature_degree(qd, tdim)
@@ -200,7 +224,6 @@ def _check_quadrature_degree(degree, top_dim):
         warning_blue("         Consider using the option 'quadrature_degree' to reduce the number of points")
 
 
-
 def _extract_common_quadrature_rule(integral_metadatas):
     # Check that quadrature rule is the same
     # (To support mixed rules would be some work since num_points is
@@ -347,7 +370,7 @@ def _attach_integral_metadata(form_data, parameters):
     _validate_quadrature_schemes_of_elements(quad_schemes, form_data.unique_sub_elements)
 
 
-def _validate_quadrature_schemes_of_elements(quad_schemes, elements): #form_data):
+def _validate_quadrature_schemes_of_elements(quad_schemes, elements):  # form_data):
     # Update scheme for QuadratureElements
     if quad_schemes and all_equal(quad_schemes):
         scheme = quad_schemes[0]
diff --git a/ffc/backends/dolfin/capsules.py b/ffc/backends/dolfin/capsules.py
index 48b1649..f85e00a 100644
--- a/ffc/backends/dolfin/capsules.py
+++ b/ffc/backends/dolfin/capsules.py
@@ -1,4 +1,5 @@
-# Copyright (C) 2008-2015 Martin Sandve Alnes
+# -*- coding: utf-8 -*-
+# Copyright (C) 2008-2016 Martin Sandve Alnes
 #
 # This file is part of DOLFIN.
 #
@@ -17,8 +18,11 @@
 #
 # Modified by Marie E. Rognes
 
+
 class UFCFormNames:
+
     "Encapsulation of the names related to a generated UFC form."
+
     def __init__(self, name, coefficient_names, ufc_form_classname,
                  ufc_finite_element_classnames, ufc_dofmap_classnames,
                  superclassname='Form'):
@@ -42,14 +46,14 @@ class UFCFormNames:
         assert len(coefficient_names) <= len(ufc_dofmap_classnames)
         assert len(ufc_finite_element_classnames) == len(ufc_dofmap_classnames)
 
-        self.num_coefficients              = len(coefficient_names)
-        self.rank                          = len(ufc_finite_element_classnames) - self.num_coefficients
-        self.name                          = name
-        self.coefficient_names             = coefficient_names
-        self.ufc_form_classname            = ufc_form_classname
+        self.num_coefficients = len(coefficient_names)
+        self.rank = len(ufc_finite_element_classnames) - self.num_coefficients
+        self.name = name
+        self.coefficient_names = coefficient_names
+        self.ufc_form_classname = ufc_form_classname
         self.ufc_finite_element_classnames = ufc_finite_element_classnames
-        self.ufc_dofmap_classnames        = ufc_dofmap_classnames
-        self.superclassname                = superclassname
+        self.ufc_dofmap_classnames = ufc_dofmap_classnames
+        self.superclassname = superclassname
 
     def __str__(self):
         s = "UFCFormNames instance:\n"
@@ -62,8 +66,11 @@ class UFCFormNames:
         s += "ufc_dofmap_classnames:    %s\n" % str(self.ufc_dofmap_classnames)
         return s
 
+
 class UFCElementNames:
+
     "Encapsulation of the names related to a generated UFC element."
+
     def __init__(self, name,
                  ufc_finite_element_classnames,
                  ufc_dofmap_classnames):
@@ -72,9 +79,9 @@ class UFCElementNames:
         """
         assert len(ufc_finite_element_classnames) == len(ufc_dofmap_classnames)
 
-        self.name                          = name
+        self.name = name
         self.ufc_finite_element_classnames = ufc_finite_element_classnames
-        self.ufc_dofmap_classnames        = ufc_dofmap_classnames
+        self.ufc_dofmap_classnames = ufc_dofmap_classnames
 
     def __str__(self):
         s = "UFCFiniteElementNames instance:\n"
diff --git a/ffc/backends/dolfin/form.py b/ffc/backends/dolfin/form.py
index 45ccfa7..346b7c3 100644
--- a/ffc/backends/dolfin/form.py
+++ b/ffc/backends/dolfin/form.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright (C) 2011 Marie E. Rognes
 #
 # This file is part of DOLFIN.
@@ -48,7 +49,7 @@ def generate_form(form, classname, error_control):
 
     # Generate code for Form_x_MultiMeshFunctionSpace_y subclasses
     wrap = apply_multimesh_function_space_template
-    if not error_control: # FIXME: Issue #91
+    if not error_control:  # FIXME: Issue #91
         blocks += [wrap("%s_MultiMeshFunctionSpace_%d" % (classname, i),
                         "%s_FunctionSpace_%d" % (classname, i),
                         form.ufc_finite_element_classnames[i],
@@ -111,7 +112,7 @@ def generate_coefficient_map_data(form):
 dolfin::dolfin_error("generated code for class %s",
                          "access coefficient data",
                          "There are no coefficients");''' % form.superclassname
-        num  = "\n    %s\n    return 0;" % message
+        num = "\n    %s\n    return 0;" % message
         name = '\n    %s\n    return "unnamed";' % message
         return (num, name)
 
@@ -129,7 +130,7 @@ dolfin::dolfin_error("generated code for class %s",
 dolfin::dolfin_error("generated code for class %s",
                          "access coefficient data",
                          "Invalid coefficient");''' % form.superclassname
-    num +=  "\n    %s\n    return 0;" % message
+    num += "\n    %s\n    return 0;" % message
     name += '    }\n\n    %s\n    return "unnamed";' % message
 
     return (num, name)
@@ -171,7 +172,7 @@ def generate_multimesh_form_constructors(form, classname):
 
     # Treat functionals a little special
     if form.rank == 0:
-        spaces = ("shared_ptr_mesh",)
+        spaces = ("shared_ptr_multimesh",)
 
     # Generate permutations of constructors
     constructors = []
@@ -205,15 +206,15 @@ def generate_constructor(form, classname, space_tag, coefficient_tag=None):
     if coefficient_tag is not None:
         (argument, assign) = snippets[coefficient_tag]
         arguments += [argument % name for name in form.coefficient_names]
-        if form.rank > 0: # FIXME: To match old generated code only
+        if form.rank > 0:  # FIXME: To match old generated code only
             assignments += [""]
-        assignments += [assign %(name, name) for name in form.coefficient_names]
+        assignments += [assign % (name, name) for name in form.coefficient_names]
 
     # Add assignment of _ufc_form variable
     line = "\n    _ufc_form = std::make_shared<const %s>();"
     # FIXME: To match old generated code only
     if form.rank == 0 and coefficient_tag is None:
-        line =  "    _ufc_form = std::make_shared<const %s>();"
+        line = "    _ufc_form = std::make_shared<const %s>();"
     assignments += [line % form.ufc_form_classname]
 
     # Construct list for initialization of Coefficient references
@@ -252,16 +253,16 @@ def generate_multimesh_constructor(form, classname, space_tag,
         spaces = [name % i for i in reversed(range(form.rank))]
     else:
         arguments = [argument]
-        spaces = ""
+        spaces = "mesh"
 
     # Add coefficients to argument/assignment lists if specified
     assignments = []
     if coefficient_tag is not None:
         (argument, assign) = snippets[coefficient_tag]
         arguments += [argument % name for name in form.coefficient_names]
-        if form.rank > 0: # FIXME: To match old generated code only
+        if form.rank > 0:  # FIXME: To match old generated code only
             assignments += [""]
-        assignments += [assign %(name, name) for name in form.coefficient_names]
+        assignments += [assign % (name, name) for name in form.coefficient_names]
 
     # Construct list for initialization of Coefficient references
     initializers = ["%s(*this, %d)" % (name, number)
@@ -270,7 +271,10 @@ def generate_multimesh_constructor(form, classname, space_tag,
     # Join lists together
     arguments = ", ".join(arguments)
     initializers = ", " + ", ".join(initializers) if initializers else ""
-    spaces = ", ".join(spaces)
+
+    # Ignore if functional
+    if form.rank != 0:
+        spaces = ", ".join(spaces)
 
     # Set access method
     if space_tag == "multimesh_shared_ptr_space":
@@ -284,15 +288,24 @@ def generate_multimesh_constructor(form, classname, space_tag,
     body += "    std::size_t num_parts = V0%snum_parts(); // assume all equal and pick first\n" % access
     body += "    for (std::size_t part = 0; part < num_parts; part++)\n"
     body += "    {\n"
-    body += "      std::shared_ptr<const dolfin::Form> a(new %s(%s));\n" % (classname, ", ".join("V%d%spart(part)" % (i, access) for i in reversed(range(form.rank))))
-    body += "      add(a);\n"
-    body += "    }\n\n"
+    body += "      std::shared_ptr<dolfin::Form> a(new %s(%s));\n" % (classname, ", ".join("V%d%spart(part)" % (i, access) for i in reversed(range(form.rank))))
+    body += "    add(a);\n\n"
+    body += "    }\n"
     body += "    // Build multimesh form\n"
     body += "    build();\n"
 
     # FIXME: Issue #91
     if form.rank == 0:
         body = ""
+        body += "    // Creating a form for each part of the mesh\n"
+        body += "    for (std::size_t i=0; i< mesh->num_parts(); i++)\n"
+        body += "    {\n"
+        body += "      std::shared_ptr<dolfin::Form> a(new %s(mesh->part(i))); " % (classname)
+        body += "      add(a);"
+        body += "    }\n"
+        body += "    // Build multimesh form\n"
+        body += "    build();\n"
+
 
     # Create body for assigning coefficients
     body += "\n    /// Assign coefficients"
@@ -399,7 +412,7 @@ def apply_form_template(classname, constructors, number, name, members,
 def apply_multimesh_form_template(classname, constructors, number, name,
                                   members, superclass):
     members = members.replace("CoefficientAssigner",
-                              "MultiMeshCoefficientAssigner") # hack
+                              "MultiMeshCoefficientAssigner")  # hack
     args = {"classname": classname,
             "superclass": superclass,
             "constructors": constructors,
diff --git a/ffc/backends/dolfin/functionspace.py b/ffc/backends/dolfin/functionspace.py
index 0e31c2d..d390774 100644
--- a/ffc/backends/dolfin/functionspace.py
+++ b/ffc/backends/dolfin/functionspace.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright (C) 2011 Marie E. Rognes
 #
 # This file is part of DOLFIN.
@@ -28,7 +29,7 @@ __all__ = ["apply_function_space_template",
            "extract_coefficient_spaces",
            "generate_typedefs"]
 
-#-------------------------------------------------------------------------------
+
 def extract_coefficient_spaces(forms):
     """Extract a list of tuples
 
@@ -52,10 +53,10 @@ def extract_coefficient_spaces(forms):
 
     # Return coefficient spaces sorted alphabetically by coefficient
     # name
-    names = list(spaces.keys())
-    names.sort()
+    names = sorted(spaces.keys())
     return [spaces[name] for name in names]
-#-------------------------------------------------------------------------------
+
+
 def generate_typedefs(form, classname, error_control):
     """Generate typedefs for test, trial and coefficient spaces
     relative to a function space."""
@@ -65,7 +66,7 @@ def generate_typedefs(form, classname, error_control):
     # Generate typedef data for test/trial spaces
     pairs += [("%s_FunctionSpace_%d" % (classname, i),
               snippets["functionspace"][i]) for i in range(form.rank)]
-    if not error_control: # FIXME: Issue #91
+    if not error_control:  # FIXME: Issue #91
         pairs += [("%s_MultiMeshFunctionSpace_%d" % (classname, i),
                    snippets["multimeshfunctionspace"][i]) for i in range(form.rank)]
 
@@ -77,7 +78,8 @@ def generate_typedefs(form, classname, error_control):
     # Combine data to typedef code
     code = "\n".join("  typedef %s %s;" % (to, fro) for (to, fro) in pairs)
     return code
-#-------------------------------------------------------------------------------
+
+
 function_space_template = """\
 class %(classname)s: public dolfin::FunctionSpace
 {
@@ -103,7 +105,8 @@ public:
 
 };
 """
-#-------------------------------------------------------------------------------
+
+
 multimesh_function_space_template = """\
 class %(classname)s: public dolfin::MultiMeshFunctionSpace
 {
@@ -125,17 +128,19 @@ public:
 
 };
 """
-#-------------------------------------------------------------------------------
+
 
 def apply_function_space_template(name, element_name, dofmap_name):
     args = {"classname": name,
             "ufc_finite_element_classname": element_name,
-            "ufc_dofmap_classname": dofmap_name }
+            "ufc_dofmap_classname": dofmap_name}
     return function_space_template % args
 
-def apply_multimesh_function_space_template(name, single_name, element_name, dofmap_name):
+
+def apply_multimesh_function_space_template(name, single_name, element_name,
+                                            dofmap_name):
     args = {"classname": name,
             "single_name": single_name,
             "ufc_finite_element_classname": element_name,
-            "ufc_dofmap_classname": dofmap_name }
+            "ufc_dofmap_classname": dofmap_name}
     return multimesh_function_space_template % args
diff --git a/ffc/backends/dolfin/goalfunctional.py b/ffc/backends/dolfin/goalfunctional.py
index 221f16c..95ef131 100644
--- a/ffc/backends/dolfin/goalfunctional.py
+++ b/ffc/backends/dolfin/goalfunctional.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright (C) 2010 Marie E. Rognes
 #
 # This file is part of DOLFIN.
@@ -19,7 +20,7 @@
 
 __all__ = ["generate_update_ec"]
 
-#-------------------------------------------------------------------------------
+
 attach_coefficient_template = """
     // Attach coefficients from %(from)s to %(to)s
     for (std::size_t i = 0; i < %(from)s.num_coefficients(); i++)
@@ -40,14 +41,16 @@ attach_coefficient_template = """
       %(to)s->set_coefficient(name, %(from)s.coefficient(i));
     }
     """
-#-------------------------------------------------------------------------------
+
+
 attach_domains_template = """
     // Attach subdomains from %(from)s to %(to)s
     %(to)s->dx = %(from)s.cell_domains();
     %(to)s->ds = %(from)s.exterior_facet_domains();
     %(to)s->dS = %(from)s.interior_facet_domains();
 """
-#-------------------------------------------------------------------------------
+
+
 update_ec_template = """
   /// Initialize all error control forms, attach coefficients and
   /// (re-)set error control
@@ -137,7 +140,8 @@ update_ec_template = """
                                        %(linear)s));
   }
 """
-#-------------------------------------------------------------------------------
+
+
 def _attach(tos, froms):
 
     if not isinstance(froms, tuple):
@@ -150,34 +154,35 @@ def _attach(tos, froms):
     domains = attach_domains_template % {"to": tos[-1], "from": froms[-1]}
     return coeffs + domains
 
-#-------------------------------------------------------------------------------
+
 def generate_maps(linear):
     """
     NB: This depends on the ordering of the forms
     """
-    maps = {"a_star":           "Form_%d" % 0,
-            "L_star":           "Form_%d" % 1,
-            "residual":         "Form_%d" % 2,
-            "a_R_T":            "Form_%d" % 3,
-            "L_R_T":            "Form_%d" % 4,
-            "a_R_dT":           "Form_%d" % 5,
-            "L_R_dT":           "Form_%d" % 6,
-            "eta_T":            "Form_%d" % 7,
-            "V_Ez_h":           "CoefficientSpace_%s" % "__improved_dual",
-            "V_R_T":            "Form_%d::TestSpace" % 4,
-            "V_b_T":            "CoefficientSpace_%s" % "__cell_bubble",
-            "V_R_dT":           "Form_%d::TestSpace" % 6,
-            "V_b_e":            "CoefficientSpace_%s" % "__cell_cone",
-            "V_eta_T":          "Form_%d::TestSpace" % 7,
-            "attach_a_star":    _attach("a_star", "a"),
-            "attach_L_star":    _attach("L_star", "(*this)"),
-            "attach_residual":  _attach(("residual",)*2, ("a", "L")),
-            "attach_L_R_T":     _attach(("L_R_T",)*2, ("a", "L")),
-            "attach_L_R_dT":    _attach(("L_R_dT",)*2, ("a", "L")),
-            "linear":           "true" if linear else "false"
+    maps = {"a_star": "Form_%d" % 0,
+            "L_star": "Form_%d" % 1,
+            "residual": "Form_%d" % 2,
+            "a_R_T": "Form_%d" % 3,
+            "L_R_T": "Form_%d" % 4,
+            "a_R_dT": "Form_%d" % 5,
+            "L_R_dT": "Form_%d" % 6,
+            "eta_T": "Form_%d" % 7,
+            "V_Ez_h": "CoefficientSpace_%s" % "__improved_dual",
+            "V_R_T": "Form_%d::TestSpace" % 4,
+            "V_b_T": "CoefficientSpace_%s" % "__cell_bubble",
+            "V_R_dT": "Form_%d::TestSpace" % 6,
+            "V_b_e": "CoefficientSpace_%s" % "__cell_cone",
+            "V_eta_T": "Form_%d::TestSpace" % 7,
+            "attach_a_star": _attach("a_star", "a"),
+            "attach_L_star": _attach("L_star", "(*this)"),
+            "attach_residual": _attach(("residual",) * 2, ("a", "L")),
+            "attach_L_R_T": _attach(("L_R_T",) * 2, ("a", "L")),
+            "attach_L_R_dT": _attach(("L_R_dT",) * 2, ("a", "L")),
+            "linear": "true" if linear else "false"
             }
     return maps
-#-------------------------------------------------------------------------------
+
+
 def generate_update_ec(form):
 
     linear = "__discrete_primal_solution" in form.coefficient_names
diff --git a/ffc/backends/dolfin/includes.py b/ffc/backends/dolfin/includes.py
index f4a9a1e..d2c73e4 100644
--- a/ffc/backends/dolfin/includes.py
+++ b/ffc/backends/dolfin/includes.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Based on original implementation by Martin Alnes and Anders Logg
 #
 # Modified by Anders Logg 2015
@@ -28,37 +29,40 @@ dolfin_includes = """\
 #include <dolfin/adaptivity/ErrorControl.h>
 #include <dolfin/adaptivity/GoalFunctional.h>
 #include <dolfin/la/GenericVector.h>"""
-#-------------------------------------------------------------------------------
+
+
 snippets = {"shared_ptr_space":
-                ("std::shared_ptr<const dolfin::FunctionSpace> %s",
-                 "    _function_spaces[%d] = %s;"),
+            ("std::shared_ptr<const dolfin::FunctionSpace> %s",
+             "    _function_spaces[%d] = %s;"),
             "referenced_space":
-                ("const dolfin::FunctionSpace& %s",
-                 "    _function_spaces[%d] = reference_to_no_delete_pointer(%s);"),
+            ("const dolfin::FunctionSpace& %s",
+             "    _function_spaces[%d] = reference_to_no_delete_pointer(%s);"),
             "multimesh_shared_ptr_space":
-                ("std::shared_ptr<const dolfin::MultiMeshFunctionSpace> %s",
-                 None),
+            ("std::shared_ptr<const dolfin::MultiMeshFunctionSpace> %s",
+             None),
             "multimesh_referenced_space":
-                ("const dolfin::MultiMeshFunctionSpace& %s",
-                 None),
+            ("const dolfin::MultiMeshFunctionSpace& %s",
+             None),
             "shared_ptr_mesh":
-                ("std::shared_ptr<const dolfin::Mesh> mesh",
-                 "    _mesh = mesh;"),
+            ("std::shared_ptr<const dolfin::Mesh> mesh",
+             "    _mesh = mesh;"),
+            "shared_ptr_multimesh":
+                ("std::shared_ptr<const dolfin::MultiMesh> mesh",
+                 "    _multimesh = mesh;"),
             "referenced_mesh":
-                ("const dolfin::Mesh& mesh",
-                 "    _mesh = reference_to_no_delete_pointer(mesh);"),
+            ("const dolfin::Mesh& mesh",
+             "    _mesh = reference_to_no_delete_pointer(mesh);"),
             "shared_ptr_coefficient":
-                ("std::shared_ptr<const dolfin::GenericFunction> %s",
-                 "    this->%s = %s;"),
+            ("std::shared_ptr<const dolfin::GenericFunction> %s",
+             "    this->%s = %s;"),
             "shared_ptr_ref_coefficient":
-                ("std::shared_ptr<const dolfin::GenericFunction> %s",
-                 "    this->%s = *%s;"),
+            ("std::shared_ptr<const dolfin::GenericFunction> %s",
+             "    this->%s = *%s;"),
             "referenced_coefficient":
-                ("const dolfin::GenericFunction& %s",
-                 "    this->%s = %s;"),
+            ("const dolfin::GenericFunction& %s",
+             "    this->%s = %s;"),
             "functionspace":
-                ("TestSpace", "TrialSpace"),
+            ("TestSpace", "TrialSpace"),
             "multimeshfunctionspace":
-                ("MultiMeshTestSpace", "MultiMeshTrialSpace")
+            ("MultiMeshTestSpace", "MultiMeshTrialSpace")
             }
-#-------------------------------------------------------------------------------
diff --git a/ffc/backends/dolfin/wrappers.py b/ffc/backends/dolfin/wrappers.py
index 2ee26f6..cff6919 100644
--- a/ffc/backends/dolfin/wrappers.py
+++ b/ffc/backends/dolfin/wrappers.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright (C) 2011 Marie E. Rognes
 #
 # This file is part of DOLFIN.
@@ -32,7 +33,7 @@ __all__ = ["generate_dolfin_code"]
 # the same name in multiple forms, it is indeed the same coefficient:
 parameters = {"use_common_coefficient_names": True}
 
-#-------------------------------------------------------------------------------
+
 def generate_dolfin_code(prefix, header, forms,
                          common_function_space=False, add_guards=False,
                          error_control=False):
@@ -70,7 +71,7 @@ def generate_dolfin_code(prefix, header, forms,
     # Return code
     return "\n".join(code)
 
-#-------------------------------------------------------------------------------
+
 def generate_dolfin_namespace(prefix, forms, common_function_space=False,
                               error_control=False):
 
@@ -98,7 +99,7 @@ def generate_dolfin_namespace(prefix, forms, common_function_space=False,
     # Return code
     return code
 
-#-------------------------------------------------------------------------------
+
 def generate_single_function_space(prefix, space):
     code = apply_function_space_template("FunctionSpace",
                                          space.ufc_finite_element_classnames[0],
@@ -106,7 +107,7 @@ def generate_single_function_space(prefix, space):
     code = "\nnamespace %s\n{\n\n%s\n}" % (prefix, code)
     return code
 
-#-------------------------------------------------------------------------------
+
 def generate_namespace_typedefs(forms, common_function_space, error_control):
 
     # Generate typedefs as (fro, to) pairs of strings
@@ -115,18 +116,19 @@ def generate_namespace_typedefs(forms, common_function_space, error_control):
     # Add typedef for Functional/LinearForm/BilinearForm if only one
     # is present of each
     aliases = ["Functional", "LinearForm", "BilinearForm"]
-    extra_aliases = {"LinearForm": "ResidualForm", "BilinearForm": "JacobianForm"}
+    extra_aliases = {"LinearForm": "ResidualForm",
+                     "BilinearForm": "JacobianForm"}
     for rank in sorted(range(len(aliases)), reverse=True):
         forms_of_rank = [form for form in forms if form.rank == rank]
         if len(forms_of_rank) == 1:
             pairs += [("Form_%s" % forms_of_rank[0].name, aliases[rank])]
-            if not error_control: # FIXME: Issue #91
+            if not error_control:  # FIXME: Issue #91
                 pairs += [("MultiMeshForm_%s" % forms_of_rank[0].name,
                            "MultiMesh" + aliases[rank])]
             if aliases[rank] in extra_aliases:
                 extra_alias = extra_aliases[aliases[rank]]
                 pairs += [("Form_%s" % forms_of_rank[0].name, extra_alias)]
-                if not error_control: # FIXME: Issue #91
+                if not error_control:  # FIXME: Issue #91
                     pairs += [("MultiMeshForm_%s" % forms_of_rank[0].name,
                                "MultiMesh" + extra_alias)]
 
@@ -135,8 +137,9 @@ def generate_namespace_typedefs(forms, common_function_space, error_control):
         for i, form in enumerate(forms):
             if form.rank:
                 pairs += [("Form_%s::TestSpace" % form.name, "FunctionSpace")]
-                if not error_control: # FIXME: Issue #91
-                    pairs += [("Form_%s::MultiMeshTestSpace" % form.name, "MultiMeshFunctionSpace")]
+                if not error_control:  # FIXME: Issue #91
+                    pairs += [("Form_%s::MultiMeshTestSpace" % form.name,
+                               "MultiMeshFunctionSpace")]
                 break
 
     # Add specialized typedefs when adding error control wrapppers
@@ -151,7 +154,7 @@ def generate_namespace_typedefs(forms, common_function_space, error_control):
         return ""
     return "// Class typedefs\n" + typedefs + "\n"
 
-#-------------------------------------------------------------------------------
+
 def error_control_pairs(forms):
     assert (len(forms) == 11), "Expecting 11 error control forms"
 
diff --git a/ffc/backends/ufc/__init__.py b/ffc/backends/ufc/__init__.py
index 4e936e2..9443e04 100644
--- a/ffc/backends/ufc/__init__.py
+++ b/ffc/backends/ufc/__init__.py
@@ -1,12 +1,8 @@
 # -*- coding: utf-8 -*-
-"""Code generation format strings for UFC (Unified Form-assembly Code) v. 2016.1.0dev
+"""Code generation format strings for UFC (Unified Form-assembly Code) version 2016.2.0
 
-Three format strings are defined for each of the following UFC classes:
+Five format strings are defined for each of the following UFC classes:
 
-    function
-    finite_element
-    dofmap
-    domain
     cell_integral
     exterior_facet_integral
     interior_facet_integral
@@ -14,92 +10,65 @@ Three format strings are defined for each of the following UFC classes:
     cutcell_integral
     interface_integral
     overlap_integral
+    function
+
+    finite_element
+    dofmap
+    coordinate_mapping
     form
 
-The strings are named '<classname>_header', '<classname>_implementation',
-and '<classname>_combined'. The header and implementation contain the
-definition and declaration respectively, and are meant to be placed in
-.h and .cpp files, while the combined version is for an implementation
-within a single .h header.
+The strings are named:
+
+    '<classname>_header'
+    '<classname>_implementation'
+    '<classname>_combined'
+    '<classname>_jit_header'
+    '<classname>_jit_implementation'
+
+The header and implementation contain the definition and declaration
+of the class respectively, and are meant to be placed in .h and .cpp files,
+while the combined version is for an implementation within a single .h header.
+The _jit_ versions are used in the jit compiler and contains some additional
+factory functions exported as extern "C" to allow construction of compiled
+objects through ctypes without dealing with C++ ABI and name mangling issues.
 
-Each string has the following format variables: 'classname',
+Each string has at least the following format variables: 'classname',
 'members', 'constructor', 'destructor', plus one for each interface
 function with name equal to the function name.
 
 For more information about UFC and the FEniCS Project, visit
 
     http://www.fenicsproject.org
-    https://bitbucket.org/fenics-project/ufc
+    https://bitbucket.org/fenics-project/ffc
 
 """
 
-__author__  = "Martin Sandve Alnæs, Anders Logg, Kent-Andre Mardal, Ola Skavhaug, and Hans Petter Langtangen"
-__date__    = "2016-06-23"
-__version__ = "2016.1.0"
+__author__ = "Martin Sandve Alnæs, Anders Logg, Kent-Andre Mardal, Ola Skavhaug, and Hans Petter Langtangen"
+__date__ = "2016-11-30"
+__version__ = "2016.2.0"
 __license__ = "This code is released into the public domain"
 
-from .function import *
-from .finite_element import *
-from .dofmap import *
-from .coordinate_mapping import *
-from .integrals import *
-from .form import *
-from .factory import *
-from .build import build_ufc_module
-
-templates = {"function_header":                          function_header,
-             "function_implementation":                  function_implementation,
-             "function_combined":                        function_combined,
-             "finite_element_header":                    finite_element_header,
-             "finite_element_jit_header":                finite_element_jit_header,
-             "finite_element_implementation":            finite_element_implementation,
-             "finite_element_jit_implementation":        finite_element_jit_implementation,
-             "finite_element_combined":                  finite_element_combined,
-             "dofmap_header":                            dofmap_header,
-             "dofmap_jit_header":                        dofmap_jit_header,
-             "dofmap_implementation":                    dofmap_implementation,
-             "dofmap_jit_implementation":                dofmap_jit_implementation,
-             "dofmap_combined":                          dofmap_combined,
-             "coordinate_mapping_header":                coordinate_mapping_header,
-             "coordinate_mapping_implementation":        coordinate_mapping_implementation,
-             "coordinate_mapping_combined":              coordinate_mapping_combined,
-             "cell_integral_header":                     cell_integral_header,
-             "cell_integral_implementation":             cell_integral_implementation,
-             "cell_integral_combined":                   cell_integral_combined,
-             "exterior_facet_integral_header":           exterior_facet_integral_header,
-             "exterior_facet_integral_implementation":   exterior_facet_integral_implementation,
-             "exterior_facet_integral_combined":         exterior_facet_integral_combined,
-             "interior_facet_integral_header":           interior_facet_integral_header,
-             "interior_facet_integral_implementation":   interior_facet_integral_implementation,
-             "interior_facet_integral_combined":         interior_facet_integral_combined,
-             "vertex_integral_header":                   vertex_integral_header,
-             "vertex_integral_implementation":           vertex_integral_implementation,
-             "vertex_integral_combined":                 vertex_integral_combined,
-             "custom_integral_header":                   custom_integral_header,
-             "custom_integral_implementation":           custom_integral_implementation,
-             "custom_integral_combined":                 custom_integral_combined,
-             "cutcell_integral_header":                  cutcell_integral_header,
-             "cutcell_integral_implementation":          cutcell_integral_implementation,
-             "cutcell_integral_combined":                cutcell_integral_combined,
-             "interface_integral_header":                interface_integral_header,
-             "interface_integral_implementation":        interface_integral_implementation,
-             "interface_integral_combined":              interface_integral_combined,
-             "overlap_integral_header":                  overlap_integral_header,
-             "overlap_integral_implementation":          overlap_integral_implementation,
-             "overlap_integral_combined":                overlap_integral_combined,
-             "form_header":                              form_header,
-             "form_jit_header":                          form_jit_header,
-             "form_implementation":                      form_implementation,
-             "form_jit_implementation":                  form_jit_implementation,
-             "form_combined":                            form_combined,
-             "factory_header":                           factory_header,
-             "factory_implementation":                   factory_implementation,
-             }
-
-for integral_name in ["cell", "exterior_facet", "interior_facet", "vertex", "custom", "cutcell", "interface", "overlap"]:
-    templates[integral_name + "_integral_jit_header"] = ""
-    templates[integral_name + "_integral_jit_implementation"] = templates[integral_name + "_integral_combined"]
+import os
+from hashlib import sha1
+
+from ffc.backends.ufc.function import *
+from ffc.backends.ufc.finite_element import *
+from ffc.backends.ufc.dofmap import *
+from ffc.backends.ufc.coordinate_mapping import *
+from ffc.backends.ufc.integrals import *
+from ffc.backends.ufc.form import *
+
 
+# Get abspath on import, it can in some cases be
+# a relative path w.r.t. curdir on startup
+_include_path = os.path.dirname(os.path.abspath(__file__))
+
+def get_include_path():
+    "Return location of UFC header files"
+    return _include_path
+
+
+# Platform specific snippets for controlling visilibity of exported symbols in generated shared libraries
 visibility_snippet = """
 // Based on https://gcc.gnu.org/wiki/Visibility
 #if defined _WIN32 || defined __CYGWIN__
@@ -113,13 +82,122 @@ visibility_snippet = """
 #endif
 """
 
+
+# Generic factory function signature
 factory_decl = """
 extern "C" %(basename)s * create_%(publicname)s();
 """
 
+
+# Generic factory function implementation. Set basename to the base class,
+# and note that publicname and privatename does not need to match, allowing
+# multiple factory functions to return the same object.
 factory_impl = """
 extern "C" DLL_EXPORT %(basename)s * create_%(publicname)s()
 {
- return new %(privatename)s();
+  return new %(privatename)s();
 }
 """
+
+
+def all_ufc_classnames():
+    "Build list of all classnames."
+    integral_names = ["cell", "exterior_facet", "interior_facet", "vertex", "custom", "cutcell", "interface", "overlap"]
+    integral_classnames = [integral_name + "_integral" for integral_name in integral_names]
+    jitable_classnames = ["finite_element", "dofmap", "coordinate_mapping", "form"]
+    classnames = ["function"] + jitable_classnames + integral_classnames
+    return classnames
+
+
+def _build_templates():
+    "Build collection of all templates to store in the templates dict."
+    templates = {}
+    classnames = all_ufc_classnames()
+
+    for classname in classnames:
+        # Expect all classes to have header, implementation, and combined versions
+        header = globals()[classname + "_header"]
+        implementation = globals()[classname + "_implementation"]
+        combined = globals()[classname + "_combined"]
+
+        # Construct jit header with class and factory function signature
+        _fac_decl = factory_decl % {
+            "basename": "ufc::" + classname,
+            "publicname": "%(classname)s",
+            "privatename": "%(classname)s",
+            }
+        jit_header = header + _fac_decl
+
+        # Construct jit implementation template with class declaration,
+        # factory function implementation, and class definition
+        _fac_impl = factory_impl % {
+            "basename": "ufc::" + classname,
+            "publicname": "%(classname)s",
+            "privatename": "%(classname)s",
+            }
+        jit_implementation = implementation + _fac_impl
+
+        # Store all in templates dict
+        templates[classname + "_header"] = header
+        templates[classname + "_implementation"] = implementation
+        templates[classname + "_combined"] = combined
+        templates[classname + "_jit_header"] = jit_header
+        templates[classname + "_jit_implementation"] = jit_implementation
+
+    return templates
+
+
+def _compute_ufc_templates_signature(templates):
+    # Compute signature of jit templates
+    h = sha1()
+    for k in sorted(templates):
+        h.update(k.encode("utf-8"))
+        h.update(templates[k].encode("utf-8"))
+    return h.hexdigest()
+
+
+def _compute_ufc_signature():
+    # Compute signature of ufc header files
+    h = sha1()
+    for fn in ("ufc.h", "ufc_geometry.h"):
+        with open(os.path.join(get_include_path(), fn)) as f:
+            h.update(f.read().encode("utf-8"))
+    return h.hexdigest()
+
+
+# Build these on import
+templates = _build_templates()
+_ufc_signature = _compute_ufc_signature()
+_ufc_templates_signature = _compute_ufc_templates_signature(templates)
+
+
+def get_ufc_signature():
+    """Return SHA-1 hash of the contents of ufc.h and ufc_geometry.h.
+
+    In this implementation, the value is computed on import.
+    """
+    return _ufc_signature
+
+
+def get_ufc_templates_signature():
+    """Return SHA-1 hash of the ufc code templates.
+
+    In this implementation, the value is computed on import.
+    """
+    return _ufc_templates_signature
+
+
+def get_ufc_cxx_flags():
+    """Return C++ flags for compiling UFC C++11 code.
+
+    Return type is a list of strings.
+
+    Used internally in some tests.
+    """
+    return ["-std=c++11"]
+
+
+# ufc_signature() already introduced to FFC standard in 1.7.0dev,
+# called by the dolfin cmake build system to compare against
+# future imported ffc versions for compatibility.
+ufc_signature = get_ufc_signature
diff --git a/ffc/backends/ufc/build.py b/ffc/backends/ufc/build.py
deleted file mode 100644
index 58bcc34..0000000
--- a/ffc/backends/ufc/build.py
+++ /dev/null
@@ -1,102 +0,0 @@
-__author__ = "Johan Hake (hake at simula.no)"
-__date__ = "2009-03-06 -- 2014-05-20"
-__license__  = "This code is released into the public domain"
-
-__all__ = ['build_ufc_module']
-
-# Modified by Martin Alnes, 2009
-
-import instant
-import os, sys, re, glob
-
-from distutils import sysconfig
-
-def build_ufc_module(h_files, source_directory="", system_headers=None, \
-                     **kwargs):
-    """Build a python extension module from ufc compliant source code.
-
-    The compiled module will be imported and returned by the function.
-
-    @param h_files:
-       The name(s) of the header files that should be compiled and included in
-       the python extension module.
-    @param source_directory:
-       The directory where the source files reside.
-    @param system_headers:
-       Extra headers that will be #included in the generated wrapper file.
-
-    Any additional keyword arguments are passed on to instant.build_module.
-    """
-
-    # Check h_files argument
-    if isinstance(h_files, str):
-        h_files = [h_files]
-    assert isinstance(h_files, list) , "Provide a 'list' or a 'str' as 'h_files'."
-    assert all(isinstance(f, str) for f in h_files), \
-           "Elements of 'h_files' must be 'str'."
-
-    h_files2 = [os.path.join(source_directory, fn) for fn in h_files]
-    for f in h_files2:
-        if not os.path.isfile(f):
-            raise IOError("The file '%s' does not exist." % f)
-
-    # Check system_headers argument
-    system_headers = system_headers or []
-    assert isinstance(system_headers, list), "Provide a 'list' as 'system_headers'"
-    assert all(isinstance(header, str) for header in system_headers), \
-           "Elements of 'system_headers' must be 'str'."
-
-    system_headers.append("memory")
-
-    # Get the swig interface file declarations
-    declarations = extract_declarations(h_files2)
-    declarations += """
-
-// SWIG version
-%inline %{
-int get_swigversion() { return  SWIGVERSION; }
-%}
-
-%pythoncode %{
-tmp = hex(get_swigversion())
-swigversion = "%d.%d.%d"%(tuple(map(int, [tmp[-5], tmp[-3], tmp[-2:]])))
-del tmp, get_swigversion
-%}
-"""
-
-    # Call instant and return module
-    return instant.build_module(wrap_headers            = h_files,
-                                source_directory        = source_directory,
-                                additional_declarations = declarations,
-                                system_headers          = system_headers,
-                                cmake_packages          = ["UFC"],
-                                **kwargs)
-
-def extract_declarations(h_files):
-    "Extract information for shared_ptr"
-
-    # Swig declarations
-    declarations =r"""
-// Use std::shared_ptr
-#define SWIG_SHARED_PTR_NAMESPACE std
-%include <std_shared_ptr.i>
-
-// Swig shared_ptr macro declarations
-"""
-
-    for h_file in h_files:
-        # Read the code
-        with open(h_file) as file:
-            code = file.read()
-
-        # Extract the class names
-        derived_classes   = re.findall(r"class[ ]+([\w]+)[ ]*: public", code)
-        ufc_classes       = re.findall(r"public[ ]+(ufc::[\w]+).*", code)
-        ufc_proxy_classes = [s.replace("ufc::", "") for s in ufc_classes]
-
-        new_share_ptr_format = "%%shared_ptr(%s)"
-
-        # Write shared_ptr code for swig 2.0.0 or higher
-        declarations += "\n".join(new_share_ptr_format%c for c in derived_classes)
-        declarations += "\n"
-    return declarations
diff --git a/ffc/backends/ufc/coordinate_mapping.py b/ffc/backends/ufc/coordinate_mapping.py
index f2100e4..12296c0 100644
--- a/ffc/backends/ufc/coordinate_mapping.py
+++ b/ffc/backends/ufc/coordinate_mapping.py
@@ -1,7 +1,8 @@
+# -*- coding: utf-8 -*-
 # Code generation format strings for UFC (Unified Form-assembly Code) v. 2016.1.0dev.
 # This code is released into the public domain.
 #
-# The FEniCS Project (http://www.fenicsproject.org/) 2006-2015.
+# The FEniCS Project (http://www.fenicsproject.org/) 2006-2016.
 
 coordinate_mapping_header = """
 class %(classname)s: public ufc::coordinate_mapping
diff --git a/ffc/backends/ufc/dofmap.py b/ffc/backends/ufc/dofmap.py
index 69690f6..6215165 100644
--- a/ffc/backends/ufc/dofmap.py
+++ b/ffc/backends/ufc/dofmap.py
@@ -1,4 +1,5 @@
-# Code generation format strings for UFC (Unified Form-assembly Code) v. 2016.1.0.
+# -*- coding: utf-8 -*-
+# Code generation format strings for UFC (Unified Form-assembly Code) version 2016.2.0
 # This code is released into the public domain.
 #
 # The FEniCS Project (http://www.fenicsproject.org/) 2006-2016.
@@ -54,6 +55,11 @@ public:
 %(num_entity_dofs)s
   }
 
+  std::size_t num_entity_closure_dofs(std::size_t d) const final override
+  {
+%(num_entity_closure_dofs)s
+  }
+
   void tabulate_dofs(std::size_t * dofs,
                      const std::vector<std::size_t>& num_global_entities,
                      const std::vector<std::vector<std::size_t>>& entity_indices) const final override
@@ -73,6 +79,12 @@ public:
 %(tabulate_entity_dofs)s
   }
 
+  void tabulate_entity_closure_dofs(std::size_t * dofs,
+                                    std::size_t d, std::size_t i) const final override
+  {
+%(tabulate_entity_closure_dofs)s
+  }
+
 
   std::size_t num_sub_dofmaps() const final override
   {
@@ -116,6 +128,8 @@ public:
 
   std::size_t num_entity_dofs(std::size_t d) const final override;
 
+  std::size_t num_entity_closure_dofs(std::size_t d) const final override;
+
   void tabulate_dofs(std::size_t * dofs,
                      const std::vector<std::size_t>& num_global_entities,
                      const std::vector<std::vector<std::size_t>>& entity_indices) const final override;
@@ -126,6 +140,9 @@ public:
   void tabulate_entity_dofs(std::size_t * dofs,
                             std::size_t d, std::size_t i) const final override;
 
+  void tabulate_entity_closure_dofs(std::size_t * dofs,
+                            std::size_t d, std::size_t i) const final override;
+
   std::size_t num_sub_dofmaps() const final override;
 
   ufc::dofmap * create_sub_dofmap(std::size_t i) const final override;
@@ -182,6 +199,11 @@ std::size_t %(classname)s::num_entity_dofs(std::size_t d) const
 %(num_entity_dofs)s
 }
 
+std::size_t %(classname)s::num_entity_closure_dofs(std::size_t d) const
+{
+%(num_entity_closure_dofs)s
+}
+
 void %(classname)s::tabulate_dofs(std::size_t * dofs,
                                   const std::vector<std::size_t>& num_global_entities,
                                   const std::vector<std::vector<std::size_t>>& entity_indices) const
@@ -196,11 +218,17 @@ void %(classname)s::tabulate_facet_dofs(std::size_t * dofs,
 }
 
 void %(classname)s::tabulate_entity_dofs(std::size_t * dofs,
-                                  std::size_t d, std::size_t i) const
+                                         std::size_t d, std::size_t i) const
 {
 %(tabulate_entity_dofs)s
 }
 
+void %(classname)s::tabulate_entity_closure_dofs(std::size_t * dofs,
+                                             std::size_t d, std::size_t i) const
+{
+%(tabulate_entity_closure_dofs)s
+}
+
 std::size_t %(classname)s::num_sub_dofmaps() const
 {
 %(num_sub_dofmaps)s
@@ -216,14 +244,3 @@ ufc::dofmap * %(classname)s::create() const
 %(create)s
 }
 """
-
-dofmap_jit_header = """
-extern "C" ufc::dofmap * create_%(classname)s();
-"""
-
-dofmap_jit_implementation = dofmap_header + """
-extern "C" DLL_EXPORT ufc::dofmap * create_%(classname)s()
-{
-  return new %(classname)s();
-}
-""" + dofmap_implementation
diff --git a/ffc/backends/ufc/factory.py b/ffc/backends/ufc/factory.py
deleted file mode 100644
index 4070bec..0000000
--- a/ffc/backends/ufc/factory.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Code generation format strings for UFC (Unified Form-assembly Code) v. 2016.1.0dev.
-# This code is released into the public domain.
-#
-# The FEniCS Project (http://www.fenicsproject.org/) 2006-2015.
-
-visibility_snippet = """\
-// Based on https://gcc.gnu.org/wiki/Visibility
-#if defined _WIN32 || defined __CYGWIN__
-    #ifdef __GNUC__
-        #define DLL_EXPORT __attribute__ ((dllexport))
-    #else
-        #define DLL_EXPORT __declspec(dllexport)
-    #endif
-#else
-    #define DLL_EXPORT __attribute__ ((visibility ("default")))
-#endif
-"""
-
-factory_header = """\
-class %(namespace)s%(classname)s;
-
-extern "C" %(namespace)s%(classname)s * create_%(classname)s();
-"""
-
-factory_implementation = """\
-extern "C" DLL_EXPORT %(namespace)s%(classname)s * create_%(classname)s()
-{
-  return new %(namespace)s%(classname)s();
-}
-"""
diff --git a/ffc/backends/ufc/finite_element.py b/ffc/backends/ufc/finite_element.py
index 1f87e31..f928e46 100644
--- a/ffc/backends/ufc/finite_element.py
+++ b/ffc/backends/ufc/finite_element.py
@@ -1,4 +1,5 @@
-# Code generation format strings for UFC (Unified Form-assembly Code) v. 2016.1.0.
+# -*- coding: utf-8 -*-
+# Code generation format strings for UFC (Unified Form-assembly Code) version 2016.2.0
 # This code is released into the public domain.
 #
 # The FEniCS Project (http://www.fenicsproject.org/) 2006-2016.
@@ -331,7 +332,7 @@ public:
 };
 """
 
-finite_element_implementation= """
+finite_element_implementation = """
 %(classname)s::%(classname)s(%(constructor_arguments)s) : ufc::finite_element()%(initializer_list)s
 {
 %(constructor)s
@@ -491,14 +492,3 @@ ufc::finite_element * %(classname)s::create() const
 %(create)s
 }
 """
-
-finite_element_jit_header = """
-extern "C" ufc::finite_element * create_%(classname)s();
-"""
-
-finite_element_jit_implementation = finite_element_header + """
-extern "C" DLL_EXPORT ufc::finite_element * create_%(classname)s()
-{
-  return new %(classname)s();
-}
-""" + finite_element_implementation
diff --git a/ffc/backends/ufc/form.py b/ffc/backends/ufc/form.py
index d2419e6..0485340 100644
--- a/ffc/backends/ufc/form.py
+++ b/ffc/backends/ufc/form.py
@@ -1,4 +1,5 @@
-# Code generation format strings for UFC (Unified Form-assembly Code) v. 2016.1.0.
+# -*- coding: utf-8 -*-
+# Code generation format strings for UFC (Unified Form-assembly Code) version 2016.2.0
 # This code is released into the public domain.
 #
 # The FEniCS Project (http://www.fenicsproject.org/) 2006-2016.
@@ -536,14 +537,3 @@ ufc::overlap_integral * %(classname)s::create_default_overlap_integral() const
 %(create_default_overlap_integral)s
 }
 """
-
-form_jit_header = """
-extern "C" ufc::form * create_%(classname)s();
-"""
-
-form_jit_implementation = form_header + """
-extern "C" DLL_EXPORT ufc::form * create_%(classname)s()
-{
-  return new %(classname)s();
-}
-""" + form_implementation
diff --git a/ffc/backends/ufc/function.py b/ffc/backends/ufc/function.py
index 61a6301..721cd2b 100644
--- a/ffc/backends/ufc/function.py
+++ b/ffc/backends/ufc/function.py
@@ -1,4 +1,5 @@
-# Code generation format strings for UFC (Unified Form-assembly Code) v. 2016.1.0.
+# -*- coding: utf-8 -*-
+# Code generation format strings for UFC (Unified Form-assembly Code) version 2016.2.0
 # This code is released into the public domain.
 #
 # The FEniCS Project (http://www.fenicsproject.org/) 2006-2016
diff --git a/ffc/backends/ufc/integrals.py b/ffc/backends/ufc/integrals.py
index 13edab8..3d064f8 100644
--- a/ffc/backends/ufc/integrals.py
+++ b/ffc/backends/ufc/integrals.py
@@ -1,4 +1,5 @@
-# Code generation format strings for UFC (Unified Form-assembly Code) v. 2016.1.0.
+# -*- coding: utf-8 -*-
+# Code generation format strings for UFC (Unified Form-assembly Code) version 2016.2.0
 # This code is released into the public domain.
 #
 # The FEniCS Project (http://www.fenicsproject.org/) 2006-2016
@@ -28,6 +29,7 @@ public:
                        const double * coordinate_dofs,
                        int cell_orientation) const final override
   {
+%(tabulate_tensor_comment)s
 %(tabulate_tensor)s
   }
 
@@ -74,6 +76,7 @@ void %(classname)s::tabulate_tensor(double * A,
                                     const double * coordinate_dofs,
                                     int cell_orientation) const
 {
+%(tabulate_tensor_comment)s
 %(tabulate_tensor)s
 }
 """
@@ -104,6 +107,7 @@ public:
                        std::size_t facet,
                        int cell_orientation) const final override
   {
+%(tabulate_tensor_comment)s
 %(tabulate_tensor)s
   }
 
@@ -152,6 +156,7 @@ void %(classname)s::tabulate_tensor(double * A,
                                     std::size_t facet,
                                     int cell_orientation) const
 {
+%(tabulate_tensor_comment)s
 %(tabulate_tensor)s
 }
 """
@@ -185,6 +190,7 @@ public:
                        int cell_orientation_0,
                        int cell_orientation_1) const final override
   {
+%(tabulate_tensor_comment)s
 %(tabulate_tensor)s
   }
 
@@ -239,6 +245,7 @@ void %(classname)s::tabulate_tensor(double * A,
                                     int cell_orientation_0,
                                     int cell_orientation_1) const
 {
+%(tabulate_tensor_comment)s
 %(tabulate_tensor)s
 }
 """
@@ -269,6 +276,7 @@ public:
                        std::size_t vertex,
                        int cell_orientation) const final override
   {
+%(tabulate_tensor_comment)s
 %(tabulate_tensor)s
   }
 
@@ -317,6 +325,7 @@ void %(classname)s::tabulate_tensor(double * A,
                                     std::size_t vertex,
                                     int cell_orientation) const
 {
+%(tabulate_tensor_comment)s
 %(tabulate_tensor)s
 }
 """
@@ -355,6 +364,7 @@ public:
                        const double * facet_normals,
                        int cell_orientation) const final override
   {
+%(tabulate_tensor_comment)s
 %(tabulate_tensor)s
   }
 
@@ -416,6 +426,7 @@ void %(classname)s::tabulate_tensor(double * A,
                                     const double * facet_normals,
                                     int cell_orientation) const
 {
+%(tabulate_tensor_comment)s
 %(tabulate_tensor)s
 }
 """
@@ -448,6 +459,7 @@ public:
                        const double * quadrature_weights,
                        int cell_orientation) const final override
   {
+%(tabulate_tensor_comment)s
 %(tabulate_tensor)s
   }
 
@@ -500,6 +512,7 @@ void %(classname)s::tabulate_tensor(double * A,
                                     const double * quadrature_weights,
                                     int cell_orientation) const
 {
+%(tabulate_tensor_comment)s
 %(tabulate_tensor)s
 }
 """
@@ -533,6 +546,7 @@ public:
                        const double * facet_normals,
                        int cell_orientation) const final override
   {
+%(tabulate_tensor_comment)s
 %(tabulate_tensor)s
   }
 
@@ -587,6 +601,7 @@ void %(classname)s::tabulate_tensor(double * A,
                                     const double * facet_normals,
                                     int cell_orientation) const
 {
+%(tabulate_tensor_comment)s
 %(tabulate_tensor)s
 }
 """
@@ -619,6 +634,7 @@ public:
                        const double * quadrature_weights,
                        int cell_orientation) const final override
   {
+%(tabulate_tensor_comment)s
 %(tabulate_tensor)s
   }
 
@@ -671,6 +687,7 @@ void %(classname)s::tabulate_tensor(double * A,
                                     const double * quadrature_weights,
                                     int cell_orientation) const
 {
+%(tabulate_tensor_comment)s
 %(tabulate_tensor)s
 }
 """
diff --git a/ufc/ufc.h b/ffc/backends/ufc/ufc.h
similarity index 97%
rename from ufc/ufc.h
rename to ffc/backends/ufc/ufc.h
index 37dbc8c..ce70476 100644
--- a/ufc/ufc.h
+++ b/ffc/backends/ufc/ufc.h
@@ -1,4 +1,4 @@
-// This is UFC (Unified Form-assembly Code) v. 2016.1.0.
+// This is UFC (Unified Form-assembly Code) v. 2016.2.0
 // This code is released into the public domain.
 //
 // The FEniCS Project (http://www.fenicsproject.org/) 2006-2016.
@@ -7,7 +7,7 @@
 #define __UFC_H
 
 #define UFC_VERSION_MAJOR 2016
-#define UFC_VERSION_MINOR 1
+#define UFC_VERSION_MINOR 2
 #define UFC_VERSION_MAINTENANCE 0
 #define UFC_VERSION_RELEASE 1
 
@@ -231,10 +231,14 @@ namespace ufc
     /// Return the number of dofs on each cell facet
     virtual std::size_t num_facet_dofs() const = 0;
 
-   /// Return the number of dofs associated with each cell entity of
-    /// dimension d
+    /// Return the number of dofs associated with each cell
+    /// entity of dimension d
     virtual std::size_t num_entity_dofs(std::size_t d) const = 0;
 
+    /// Return the number of dofs associated with the closure
+    /// of each cell entity dimension d
+    virtual std::size_t num_entity_closure_dofs(std::size_t d) const = 0;
+
     /// Tabulate the local-to-global mapping of dofs on a cell
     virtual void tabulate_dofs(std::size_t * dofs,
                                const std::vector<std::size_t>& num_global_entities,
@@ -248,6 +252,10 @@ namespace ufc
     virtual void tabulate_entity_dofs(std::size_t * dofs,
                                       std::size_t d, std::size_t i) const = 0;
 
+    /// Tabulate the local-to-local mapping of dofs on the closure of entity (d, i)
+    virtual void tabulate_entity_closure_dofs(std::size_t * dofs,
+                                              std::size_t d, std::size_t i) const = 0;
+
     /// Return the number of sub dofmaps (for a mixed element)
     virtual std::size_t num_sub_dofmaps() const = 0;
 
diff --git a/ufc/ufc_geometry.h b/ffc/backends/ufc/ufc_geometry.h
similarity index 100%
rename from ufc/ufc_geometry.h
rename to ffc/backends/ufc/ufc_geometry.h
diff --git a/ffc/codegeneration.py b/ffc/codegeneration.py
index 95cb8ae..a7a3386 100644
--- a/ffc/codegeneration.py
+++ b/ffc/codegeneration.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 """
 Compiler stage 4: Code generation
 ---------------------------------
@@ -6,7 +7,8 @@ This module implements the generation of C++ code for the body of each
 UFC function from an (optimized) intermediate representation (OIR).
 """
 
-# Copyright (C) 2009-2015 Anders Logg
+# Copyright (C) 2009-2016 Anders Logg, Martin Sandve Alnæs, Marie E. Rognes,
+# Kristian B. Oelgaard, and others
 #
 # This file is part of FFC.
 #
@@ -22,15 +24,13 @@ UFC function from an (optimized) intermediate representation (OIR).
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# Modified by Mehdi Nikbakht 2010
-# Modified by Martin Alnaes, 2013-2015
 
+from itertools import chain
 from ufl import product
 
 # FFC modules
-from ffc.log import info, begin, end, debug_code
-from ffc.cpp import format, indent, make_integral_classname
+from ffc.log import info, begin, end, debug_code, dstr
+from ffc.cpp import format, indent
 from ffc.cpp import set_exception_handling, set_float_formatting
 
 # FFC code generation modules
@@ -41,6 +41,7 @@ from ffc.evaluatedof import evaluate_dof_and_dofs, affine_weights
 from ffc.interpolatevertexvalues import interpolate_vertex_values
 from ffc.representation import pick_representation, ufc_integral_types
 
+
 # Errors issued for non-implemented functions
 def _not_implemented(function_name, return_null=False):
     body = format["exception"]("%s not yet implemented." % function_name)
@@ -54,6 +55,7 @@ def generate_code(ir, parameters):
 
     begin("Compiler stage 4: Generating code")
 
+    # FIXME: This has global side effects
     # Set code generation parameters
     set_float_formatting(int(parameters["precision"]))
     set_exception_handling(parameters["convert_exceptions_to_warnings"])
@@ -75,6 +77,8 @@ def generate_code(ir, parameters):
     info("Generating code for %d coordinate_mapping(s)" % len(ir_coordinate_mappings))
     code_coordinate_mappings = [_generate_coordinate_mapping_code(ir, parameters)
                                 for ir in ir_coordinate_mappings]
+    # FIXME: This disables output of generated coordinate_mapping class, until implemented properly
+    code_coordinate_mappings = []
 
     # Generate code for integrals
     info("Generating code for integrals")
@@ -107,7 +111,7 @@ def _generate_element_code(ir, parameters):
     (evaluate_dof_code, evaluate_dofs_code) \
         = evaluate_dof_and_dofs(ir["evaluate_dof"])
 
-    element_number = ir["id"]
+    #element_number = ir["id"]
 
     # Generate code
     code = {}
@@ -150,6 +154,7 @@ def _generate_element_code(ir, parameters):
     code["num_sub_elements"] = ret(ir["num_sub_elements"])
     code["create_sub_element"] = _create_sub_element(ir)
     code["create"] = ret(create(code["classname"]))
+    code["additional_includes_set"] = _additional_includes_finite_element(ir)
 
     # Postprocess code
     _postprocess_code(code, parameters)
@@ -174,7 +179,7 @@ def _generate_dofmap_code(ir, parameters):
     f_d = format["argument dimension"]
     create = format["create foo"]
 
-    element_number = ir["id"]
+    #element_number = ir["id"]
 
     # Generate code
     code = {}
@@ -194,14 +199,20 @@ def _generate_dofmap_code(ir, parameters):
     code["num_entity_dofs"] \
         = switch(f_d, [ret(num) for num in ir["num_entity_dofs"]],
                  ret(f_int(0)))
+    code["num_entity_closure_dofs"] \
+        = switch(f_d, [ret(num) for num in ir["num_entity_closure_dofs"]],
+                 ret(f_int(0)))
     code["tabulate_dofs"] = _tabulate_dofs(ir["tabulate_dofs"])
     code["tabulate_facet_dofs"] \
         = _tabulate_facet_dofs(ir["tabulate_facet_dofs"])
     code["tabulate_entity_dofs"] \
         = _tabulate_entity_dofs(ir["tabulate_entity_dofs"])
+    code["tabulate_entity_closure_dofs"] \
+        = _tabulate_entity_closure_dofs(ir["tabulate_entity_closure_dofs"])
     code["num_sub_dofmaps"] = ret(ir["num_sub_dofmaps"])
     code["create_sub_dofmap"] = _create_sub_dofmap(ir)
     code["create"] = ret(create(code["classname"]))
+    code["additional_includes_set"] = _additional_includes_dofmap(ir)
 
     # Postprocess code
     _postprocess_code(code, parameters)
@@ -209,6 +220,53 @@ def _generate_dofmap_code(ir, parameters):
     return code
 
 
+def _additional_includes_dofmap(ir):
+    if not ir["jit"]:
+        return set()
+    dofmap_classnames = ir["create_sub_dofmap"]
+    jit_includes = [classname.split("_dofmap")[0] + ".h"
+                    for classname in dofmap_classnames]
+    return set("#include <%s>" % inc for inc in jit_includes)
+
+
+def _additional_includes_finite_element(ir):
+    if not ir["jit"]:
+        return set()
+    finite_element_classnames = ir["create_sub_element"]
+    jit_includes = [classname.split("_finite_element")[0] + ".h"
+                    for classname in finite_element_classnames]
+    return set("#include <%s>" % inc for inc in jit_includes)
+
+
+def _additional_includes_coordinate_mapping(ir):
+    if not ir["jit"]:
+        return set()
+    finite_element_classnames = [
+        ir["coordinate_finite_element_classname"],
+        ir["scalar_coordinate_finite_element_classname"]
+        ]
+    jit_includes = [classname.split("_finite_element")[0] + ".h"
+                    for classname in finite_element_classnames]
+    return set("#include <%s>" % inc for inc in jit_includes)
+
+
+def _additional_includes_form(ir):
+    if not ir["jit"]:
+        return set()
+    # Gather all header names for classes that are separately compiled
+    # For finite_element and dofmap the module and header name is the prefix,
+    # extracted here with .split, and equal for both classes so we skip dofmap here:
+    finite_element_classnames = list(chain(
+        ir["create_finite_element"],
+        ir["create_coordinate_finite_element"]
+        ))
+    jit_includes = set(classname.split("_finite_element")[0] + ".h"
+                       for classname in finite_element_classnames)
+    # FIXME: Enable when coordinate_mapping is fully generated:
+    #jit_includes.update(classname + ".h" for classname in ir["create_coordinate_mapping"])
+    return set("#include <%s>" % inc for inc in jit_includes)
+
+
 def _generate_coordinate_mapping_code(ir, parameters):
     "Generate code for coordinate_mapping from intermediate representation."
 
@@ -246,6 +304,8 @@ def _generate_coordinate_mapping_code(ir, parameters):
     code["compute_jacobian_inverses"] = ""
     code["compute_geometry"] = ""
 
+    code["additional_includes_set"] = _additional_includes_coordinate_mapping(ir)
+
     return code
 
 
@@ -260,17 +320,44 @@ def _generate_integral_code(ir, parameters):
     r = pick_representation(ir["representation"])
 
     # Generate code
-    prefix = ir["prefix"]
-    code = r.generate_integral_code(ir, prefix, parameters) # TODO: Drop prefix argument and get from ir
+    # TODO: Drop prefix argument and get from ir:
+    code = r.generate_integral_code(ir, ir["prefix"], parameters)
+
+    # Generate comment
+    code["tabulate_tensor_comment"] = _generate_tabulate_tensor_comment(ir, parameters)
 
     # Indent code (unused variables should already be removed)
     # FIXME: Remove this quick hack
     if ir["representation"] != "uflacs":
         _indent_code(code)
+    else:
+        code["tabulate_tensor_comment"] = indent(code["tabulate_tensor_comment"], 4)
 
     return code
 
 
+def _generate_tabulate_tensor_comment(ir, parameters):
+    "Generate comment for tabulate_tensor."
+
+    r = ir["representation"]
+    integrals_metadata = ir["integrals_metadata"]
+    integral_metadata = ir["integral_metadata"]
+
+    comment  = format["comment"]("This function was generated using '%s' representation" % r) + "\n"
+    comment += format["comment"]("with the following integrals metadata:") + "\n"
+    comment += format["comment"]("") + "\n"
+    comment += "\n".join([format["comment"]("  " + l) for l in dstr(integrals_metadata).split("\n")][:-1])
+    comment += "\n"
+    for i, metadata in enumerate(integral_metadata):
+        comment += format["comment"]("") + "\n"
+        comment += format["comment"]("and the following integral %d metadata:" % i) + "\n"
+        comment += format["comment"]("") + "\n"
+        comment += "\n".join([format["comment"]("  " + l) for l in dstr(metadata).split("\n")][:-1])
+        comment += "\n"
+
+    return comment
+
+
 def _generate_original_coefficient_position(original_coefficient_positions):
     # TODO: I don't know how to implement this using the format dict,
     # this will do for now:
@@ -279,7 +366,7 @@ def _generate_original_coefficient_position(original_coefficient_positions):
     code = '\n'.join([
         "static const std::vector<std::size_t> position({%s});"
         % initializer_list, "return position[i];",
-        ])
+    ])
     return code
 
 
@@ -294,9 +381,6 @@ def _generate_form_code(ir, parameters):
     ret = format["return"]
     do_nothing = format["do nothing"]
 
-    form_id = ir["id"]
-    prefix = ir["prefix"]
-
     # Generate code
     code = {}
     code["classname"] = ir["classname"]
@@ -308,7 +392,8 @@ def _generate_form_code(ir, parameters):
     code["destructor"] = do_nothing
 
     code["signature"] = ret('"%s"' % ir["signature"])
-    code["original_coefficient_position"] = _generate_original_coefficient_position(ir["original_coefficient_position"])
+    code["original_coefficient_position"] = \
+        _generate_original_coefficient_position(ir["original_coefficient_position"])
     code["rank"] = ret(ir["rank"])
     code["num_coefficients"] = ret(ir["num_coefficients"])
 
@@ -319,11 +404,17 @@ def _generate_form_code(ir, parameters):
     code["create_finite_element"] = _create_finite_element(ir)
     code["create_dofmap"] = _create_dofmap(ir)
 
+    code["additional_includes_set"] = _additional_includes_form(ir)
+
     for integral_type in ufc_integral_types:
-        code["max_%s_subdomain_id" % integral_type] = ret(ir["max_%s_subdomain_id" % integral_type])
-        code["has_%s_integrals" % integral_type] = _has_foo_integrals(ir, integral_type)
-        code["create_%s_integral" % integral_type] = _create_foo_integral(ir, integral_type, prefix)
-        code["create_default_%s_integral" % integral_type] = _create_default_foo_integral(ir, integral_type, prefix)
+        code["max_%s_subdomain_id" % integral_type] = \
+            ret(ir["max_%s_subdomain_id" % integral_type])
+        code["has_%s_integrals" % integral_type] = \
+            _has_foo_integrals(ir, integral_type)
+        code["create_%s_integral" % integral_type] = \
+            _create_foo_integral(ir, integral_type)
+        code["create_default_%s_integral" % integral_type] = \
+            _create_default_foo_integral(ir, integral_type)
 
     # Postprocess code
     _postprocess_code(code, parameters)
@@ -332,6 +423,7 @@ def _generate_form_code(ir, parameters):
 
 #--- Code generation for non-trivial functions ---
 
+
 def _value_dimension(ir):
     "Generate code for value_dimension."
     ret = format["return"]
@@ -441,7 +533,7 @@ def _tabulate_dofs(ir):
                 v = multiply([len(num[k]), component(entity_index, (dim, k))])
                 for (j, dof) in enumerate(dofs):
                     value = add([offset_name, v, j])
-                    code.append(assign(component(dofs_variable, dof+i), value))
+                    code.append(assign(component(dofs_variable, dof + i), value))
 
             # Update offset corresponding to mesh entity:
             if need_offset:
@@ -484,11 +576,11 @@ def _tabulate_dof_coordinates(ir):
         w = coefficients(coordinate)
         for j in range(gdim):
             # Compute physical coordinate
-            coords = [component(f_x(), (k*gdim + j,)) for k in range(tdim + 1)]
+            coords = [component(f_x(), (k * gdim + j,)) for k in range(tdim + 1)]
             value = inner_product(w, coords)
 
             # Assign coordinate
-            code.append(assign(component(coordinates, (i*gdim + j)), value))
+            code.append(assign(component(coordinates, (i * gdim + j)), value))
 
     return "\n".join(code)
 
@@ -510,7 +602,7 @@ def _tabulate_entity_dofs(ir):
     dim = len(num_dofs_per_entity)
     excpt = format["exception"]("%s is larger than dimension (%d)"
                                 % (f_d, dim - 1))
-    code = [format["if"]("%s > %d" % (f_d, dim-1), excpt)]
+    code = [format["if"]("%s > %d" % (f_d, dim - 1), excpt)]
 
     # Generate cases for each dimension:
     all_cases = ["" for d in range(dim)]
@@ -540,58 +632,123 @@ def _tabulate_entity_dofs(ir):
     return "\n".join(code)
 
 
+def _tabulate_entity_closure_dofs(ir):
+    "Generate code for tabulate_entity_closure_dofs."
+
+    # Extract variables from ir
+    entity_closure_dofs, entity_dofs, num_dofs_per_entity = ir
+
+    # Prefetch formats
+    assign = format["assign"]
+    component = format["component"]
+    f_d = format["argument dimension"]
+    f_i = format["argument entity"]
+    dofs = format["argument dofs"]
+
+    # Add check that dimension and number of mesh entities is valid
+    dim = len(num_dofs_per_entity)
+    excpt = format["exception"]("%s is larger than dimension (%d)"
+                                % (f_d, dim - 1))
+    code = [format["if"]("%s > %d" % (f_d, dim - 1), excpt)]
+
+    # Generate cases for each dimension:
+    all_cases = ["" for d in range(dim)]
+    for d in range(dim):
+        num_entities = len(entity_dofs[d])
+
+        # Add check that given entity is valid:
+        excpt = format["exception"]("%s is larger than number of entities (%d)"
+                                    % (f_i, num_entities - 1))
+        check = format["if"]("%s > %d" % (f_i, num_entities - 1), excpt)
+
+        # Generate cases for each mesh entity
+        cases = []
+        for entity in range(num_entities):
+            assignments = [assign(component(dofs, j), dof)
+                           for (j, dof) in enumerate(entity_closure_dofs[(d, entity)])]
+            cases.append("\n".join(assignments))
+
+        # Generate inner switch with preceding check
+        all_cases[d] = "\n".join([check, format["switch"](f_i, cases)])
+
+    # Generate outer switch
+    code.append(format["switch"](f_d, all_cases))
+
+    return "\n".join(code)
+
+
 #--- Utility functions ---
 
-def _create_bar(arg, classnames):
-    "Generate code for create_<bar>(arg) returning new <classname[arg]>."
+def _create_switch(arg, classnames, factory=False):
+    "Generate code for create_<bar>(arg) returning new <classnames[arg]>."
     ret = format["return"]
-    create = format["create foo"]
+    if factory:
+        create = format["create factory"]
+    else:
+        create = format["create foo"]
     numbers = list(range(len(classnames)))
     cases = [ret(create(name)) for name in classnames]
     default = ret(0)
     return format["switch"](arg, cases, default=default, numbers=numbers)
 
+
 def _create_coordinate_finite_element(ir):
     ret = format["return"]
-    create = format["create foo"]
+    if ir["jit"]:
+        create = format["create factory"]
+    else:
+        create = format["create foo"]
     classnames = ir["create_coordinate_finite_element"]
-    assert len(classnames) == 1 # list of length 1 until we support multiple domains
+    assert len(classnames) == 1  # list of length 1 until we support multiple domains
     return ret(create(classnames[0]))
 
+
 def _create_coordinate_dofmap(ir):
     ret = format["return"]
-    create = format["create foo"]
+    if ir["jit"]:
+        create = format["create factory"]
+    else:
+        create = format["create foo"]
     classnames = ir["create_coordinate_dofmap"]
-    assert len(classnames) == 1 # list of length 1 until we support multiple domains
+    assert len(classnames) == 1  # list of length 1 until we support multiple domains
     return ret(create(classnames[0]))
 
+
 def _create_coordinate_mapping(ir):
     ret = format["return"]
-    create = format["create foo"]
+    if ir["jit"]:
+        create = format["create factory"]
+    else:
+        create = format["create foo"]
     classnames = ir["create_coordinate_mapping"]
-    assert len(classnames) == 1 # list of length 1 until we support multiple domains
-    #return ret(create(classnames[0]))
-    return ret("nullptr") # FIXME: Disabled until we generate a functional class (work in progress)
+    assert len(classnames) == 1  # list of length 1 until we support multiple domains
+    # return ret(create(classnames[0]))
+    return ret("nullptr")  # FIXME: Disabled until we generate a functional class (work in progress)
+
 
 def _create_finite_element(ir):
     f_i = format["argument sub"]
     classnames = ir["create_finite_element"]
-    return _create_bar(f_i, classnames)
+    return _create_switch(f_i, classnames, ir["jit"])
+
 
 def _create_dofmap(ir):
     f_i = format["argument sub"]
     classnames = ir["create_dofmap"]
-    return _create_bar(f_i, classnames)
+    return _create_switch(f_i, classnames, ir["jit"])
+
 
 def _create_sub_element(ir):
     f_i = format["argument sub"]
     classnames = ir["create_sub_element"]
-    return _create_bar(f_i, classnames)
+    return _create_switch(f_i, classnames, ir["jit"])
+
 
 def _create_sub_dofmap(ir):
     f_i = format["argument sub"]
     classnames = ir["create_sub_dofmap"]
-    return _create_bar(f_i, classnames)
+    return _create_switch(f_i, classnames, ir["jit"])
+
 
 def _has_foo_integrals(ir, integral_type):
     ret = format["return"]
@@ -599,44 +756,44 @@ def _has_foo_integrals(ir, integral_type):
     i = ir["has_%s_integrals" % integral_type]
     return ret(b(i))
 
-def _create_foo_integral(ir, integral_type, prefix):
+
+def _create_foo_integral(ir, integral_type):
     "Generate code for create_<foo>_integral."
     ret = format["return"]
     create = format["create foo"]
     f_i = format["argument subdomain"]
-    form_id = ir["id"]
-    subdomain_ids = ir["create_" + integral_type + "_integral"]
-    classnames = [make_integral_classname(prefix, integral_type, form_id, subdomain_id)
-                  for subdomain_id in subdomain_ids]
+    subdomain_ids, classnames = ir["create_%s_integral" % integral_type]
     cases = [ret(create(name)) for name in classnames]
     default = ret(0)
     return format["switch"](f_i, cases, default=default, numbers=subdomain_ids)
 
-def _create_default_foo_integral(ir, integral_type, prefix):
+
+def _create_default_foo_integral(ir, integral_type):
     "Generate code for create_default_<foo>_integral."
     ret = format["return"]
-    subdomain_id = ir["create_default_" + integral_type + "_integral"]
-    if subdomain_id is None:
+    classname = ir["create_default_%s_integral" % integral_type]
+    if classname is None:
         return ret(0)
     else:
         create = format["create foo"]
-        form_id = ir["id"]
-        classname = make_integral_classname(prefix, integral_type, form_id, subdomain_id)
         return ret(create(classname))
 
+
 def _postprocess_code(code, parameters):
     "Postprocess generated code."
     _indent_code(code)
     _remove_code(code, parameters)
 
+
 def _indent_code(code):
     "Indent code that should be indented."
     for key in code:
-        if not key in ("classname", "members", "constructor_arguments",
+        if key not in ("classname", "members", "constructor_arguments",
                        "initializer_list", "additional_includes_set",
                        "class_type"):
             code[key] = indent(code[key], 4)
 
+
 def _remove_code(code, parameters):
     "Remove code that should not be generated."
     for key in code:
diff --git a/ffc/codesnippets.py b/ffc/codesnippets.py
index babd79e..0327614 100644
--- a/ffc/codesnippets.py
+++ b/ffc/codesnippets.py
@@ -1,6 +1,6 @@
-"Code snippets for code generation."
+# -*- coding: utf-8 -*-
 
-# Copyright (C) 2007-2013 Anders Logg
+# Copyright (C) 2007-2016 Anders Logg
 #
 # This file is part of FFC.
 #
@@ -20,12 +20,12 @@
 # Modified by Kristian B. Oelgaard 2010-2013
 # Modified by Marie Rognes 2007-2012
 # Modified by Peter Brune 2009
-# Modified by Martin Alnaes, 2013
+# Modified by Martin Sandve Alnæs, 2013
 #
 # First added:  2007-02-28
 # Last changed: 2014-06-10
 
-# Code snippets
+"Code snippets for code generation."
 
 __all__ = ["comment_ufc", "comment_dolfin", "header_h", "header_c", "footer",
            "compute_jacobian", "compute_jacobian_inverse",
@@ -510,6 +510,7 @@ for (unsigned int row = 1; row < %(num_derivatives)s; row++)
   }
 }"""
 
+
 def _transform_snippet(tdim, gdim):
 
     if tdim == gdim:
@@ -520,14 +521,14 @@ def _transform_snippet(tdim, gdim):
         _g = "_g"
 
     # Matricize K_ij -> {K_ij}
-    matrix = "{{" + "}, {".join([", ".join(["K[%d]" % (t*gdim + g)
+    matrix = "{{" + "}, {".join([", ".join(["K[%d]" % (t * gdim + g)
                                             for g in range(gdim)])
                                  for t in range(tdim)]) + "}};\n\n"
     snippet = """\
 // Compute inverse of Jacobian
 const double %%(K)s[%d][%d] = %s""" % (tdim, gdim, matrix)
 
-    snippet +="""// Declare transformation matrix
+    snippet += """// Declare transformation matrix
 // Declare pointer to two dimensional array and initialise
 double %%(transform)s[%%(max_g_deriv)s][%%(max_t_deriv)s];
 for (unsigned int j = 0; j < %%(num_derivatives)s%(g)s; j++)
@@ -544,7 +545,7 @@ for (unsigned int row = 0; row < %%(num_derivatives)s%(g)s; row++)
     for (unsigned int k = 0; k < %%(n)s; k++)
       %%(transform)s[row][col] *= %%(K)s[%%(combinations)s%(t)s[col][k]][%%(combinations)s%(g)s[row][k]];
   }
-}""" % {"t":_t, "g":_g}
+}""" % {"t": _t, "g": _g}
 
     return snippet
 
@@ -741,12 +742,12 @@ map_onto_physical = {1: {1: _map_onto_physical_1D,
                          3: _map_onto_physical_3D_2D},
                      3: {3: _map_onto_physical_3D}}
 
-fiat_coordinate_map = {"interval": {1:_map_coordinates_FIAT_interval,
-                                    2:_map_coordinates_FIAT_interval_in_2D,
-                                    3:_map_coordinates_FIAT_interval_in_3D},
-                       "triangle": {2:_map_coordinates_FIAT_triangle,
+fiat_coordinate_map = {"interval": {1: _map_coordinates_FIAT_interval,
+                                    2: _map_coordinates_FIAT_interval_in_2D,
+                                    3: _map_coordinates_FIAT_interval_in_3D},
+                       "triangle": {2: _map_coordinates_FIAT_triangle,
                                     3: _map_coordinates_FIAT_triangle_in_3D},
-                       "tetrahedron": {3:_map_coordinates_FIAT_tetrahedron}}
+                       "tetrahedron": {3: _map_coordinates_FIAT_tetrahedron}}
 
 transform_snippet = {"interval": {1: _transform_snippet(1, 1),
                                   2: _transform_snippet(1, 2),
@@ -816,7 +817,7 @@ eval_basis = """\
 // Get current quadrature point and compute values of basis functions
 const double* x = quadrature_points + ip*%(gdim)s;
 const double* v = coordinate_dofs + %(vertex_offset)s;
-%(form_prefix)s_finite_element_%(element_number)s::_evaluate_basis_all(%(eval_name)s, x, v, cell_orientation);"""
+%(classname)s::_evaluate_basis_all(%(eval_name)s, x, v, cell_orientation);"""
 
 eval_basis_copy = """\
 // Copy values to table %(table_name)s
@@ -834,7 +835,7 @@ eval_derivs = """\
 // Get current quadrature point and compute values of basis function derivatives
 const double* x = quadrature_points + ip*%(gdim)s;
 const double* v = coordinate_dofs + %(vertex_offset)s;
-%(form_prefix)s_finite_element_%(element_number)s::_evaluate_basis_derivatives_all(%(n)s, %(eval_name)s, x, v, cell_orientation);"""
+%(classname)s::_evaluate_basis_derivatives_all(%(n)s, %(eval_name)s, x, v, cell_orientation);"""
 
 eval_derivs_copy = """\
 // Copy values to table %(table_name)s
diff --git a/ffc/compiler.py b/ffc/compiler.py
index 9e18ddf..a4e377d 100644
--- a/ffc/compiler.py
+++ b/ffc/compiler.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 """
 This is the compiler, acting as the main interface for compilation
 of forms and breaking the compilation into several sequential stages.
@@ -89,7 +90,7 @@ The compiler stages are implemented by the following functions:
   format_code       (stage 5)
 """
 
-# Copyright (C) 2007-2015 Anders Logg
+# Copyright (C) 2007-2016 Anders Logg
 #
 # This file is part of FFC.
 #
@@ -109,7 +110,7 @@ The compiler stages are implemented by the following functions:
 # Modified by Kristian B. Oelgaard, 2010.
 # Modified by Dag Lindbo, 2008.
 # Modified by Garth N. Wells, 2009.
-# Modified by Martin Alnaes, 2013-2015
+# Modified by Martin Sandve Alnæs, 2013-2016
 
 __all__ = ["compile_form", "compile_element"]
 
@@ -118,100 +119,73 @@ from time import time
 import os
 
 # FFC modules
-from ffc.log import info, info_green, warning
+from ffc.log import info, info_green, warning, error
 from ffc.parameters import validate_parameters
-from ffc.analysis import analyze_forms, analyze_elements
+from ffc.analysis import analyze_ufl_objects
 from ffc.representation import compute_ir
 from ffc.optimization import optimize_ir
 from ffc.codegeneration import generate_code
 from ffc.formatting import format_code
 from ffc.wrappers import generate_wrapper_code
 
-def compile_form(forms, object_names=None, prefix="Form", parameters=None, jit=False):
-    """This function generates UFC code for a given UFL form or list
-    of UFL forms."""
 
-    info("Compiling form %s\n" % prefix)
-
-    # Reset timing
-    cpu_time_0 = time()
-
-    # Check input arguments
-    forms = _check_forms(forms)
-    if not forms:
-        return "", ""
-    if prefix != os.path.basename(prefix):
-        error("Invalid prefix, looks like a full path? prefix='{}'.".format(prefix))
-    if object_names is None:
-        object_names = {}
-
-    # Note that jit will always pass parameters so this is
-    # only for commandline and direct call from python
-    parameters = validate_parameters(parameters)
-
-    # Stage 1: analysis
-    cpu_time = time()
-    analysis = analyze_forms(forms, parameters)
-    _print_timing(1, time() - cpu_time)
-
-    # Stage 2: intermediate representation
-    cpu_time = time()
-    ir = compute_ir(analysis, prefix, parameters)
-    _print_timing(2, time() - cpu_time)
-
-    # Stage 3: optimization
-    cpu_time = time()
-    oir = optimize_ir(ir, parameters)
-    _print_timing(3, time() - cpu_time)
+def _print_timing(stage, timing):
+    "Print timing results."
+    info("Compiler stage %s finished in %g seconds.\n" % (str(stage), timing))
 
-    # Stage 4: code generation
-    cpu_time = time()
-    code = generate_code(oir, parameters)
-    _print_timing(4, time() - cpu_time)
 
-    # Stage 4.1: generate wrappers
-    cpu_time = time()
-    wrapper_code = generate_wrapper_code(analysis, prefix, object_names, parameters)
-    _print_timing(4.1, time() - cpu_time)
+def compile_form(forms, object_names=None,
+                 prefix="Form", parameters=None, jit=False):
+    """This function generates UFC code for a given UFL form or list of UFL forms."""
+    return compile_ufl_objects(forms, "form", object_names,
+                               prefix, parameters, jit)
 
-    # Stage 5: format code
-    cpu_time = time()
-    code_h, code_c = format_code(code, wrapper_code, prefix, parameters, jit)
-    _print_timing(5, time() - cpu_time)
 
-    info_green("FFC finished in %g seconds.", time() - cpu_time_0)
+def compile_element(elements, object_names=None,
+                    prefix="Element", parameters=None, jit=False):
+    """This function generates UFC code for a given UFL element or list of UFL elements."""
+    return compile_ufl_objects(elements, "element", object_names,
+                               prefix, parameters, jit)
 
-    return code_h, code_c
 
+def compile_coordinate_mapping(elements, object_names=None,
+                               prefix="Mesh", parameters=None, jit=False):
+    """This function generates UFC code for a given UFL element or list of UFL elements."""
+    return compile_ufl_objects(elements, "coordinate_mapping", object_names,
+                               prefix, parameters, jit)
 
-def compile_element(elements, prefix="Element", parameters=None, jit=False):
-    """This function generates UFC code for a given UFL element or
-    list of UFL elements."""
 
-    info("Compiling element %s\n" % prefix)
+def compile_ufl_objects(ufl_objects, kind, object_names=None,
+                        prefix=None, parameters=None, jit=False):
+    """This function generates UFC code for a given UFL form or list of UFL forms."""
+    info("Compiling %s %s\n" % (kind, prefix))
 
     # Reset timing
     cpu_time_0 = time()
 
+    # Note that jit will always pass validated parameters so 
+    # this is only for commandline and direct call from python
+    if not jit:
+        parameters = validate_parameters(parameters)
+
     # Check input arguments
-    elements = _check_elements(elements)
-    if not elements:
+    if not isinstance(ufl_objects, (list, tuple)):
+        ufl_objects = (ufl_objects,)
+    if not ufl_objects:
         return "", ""
-
-    object_names = {}
-
-    # Note that jit will always pass parameters so this is
-    # only for commandline and direct call from python
-    parameters = validate_parameters(parameters)
+    if prefix != os.path.basename(prefix):
+        error("Invalid prefix, looks like a full path? prefix='{}'.".format(prefix))
+    if object_names is None:
+        object_names = {}
 
     # Stage 1: analysis
     cpu_time = time()
-    analysis = analyze_elements(elements, parameters)
+    analysis = analyze_ufl_objects(ufl_objects, kind, parameters)
     _print_timing(1, time() - cpu_time)
 
     # Stage 2: intermediate representation
     cpu_time = time()
-    ir = compute_ir(analysis, prefix, parameters)
+    ir = compute_ir(analysis, prefix, parameters, jit)
     _print_timing(2, time() - cpu_time)
 
     # Stage 3: optimization
@@ -236,22 +210,21 @@ def compile_element(elements, prefix="Element", parameters=None, jit=False):
 
     info_green("FFC finished in %g seconds.", time() - cpu_time_0)
 
-    # TODO: If prefix and parameters are determined properly outside
-    #   this function they don't need to be returned here...
-    return code_h, code_c
+    if jit:
+        # Must use processed elements from analysis here
+        form_datas, unique_elements, element_numbers, unique_coordinate_elements = analysis
 
-def _check_forms(forms):
-    "Initial check of forms."
-    if not isinstance(forms, (list, tuple)):
-        forms = (forms,)
-    return forms
+        # Avoid returning self as dependency for infinite recursion
+        unique_elements = list(element for element in unique_elements
+                               if element not in ufl_objects)
 
-def _check_elements(elements):
-    "Initial check of elements."
-    if not isinstance(elements, (list, tuple)):
-        elements = (elements,)
-    return elements
+        # FIXME: May get similar recursion issue with coordinate elements
+        # but currently not used all the way
+        dependent_ufl_objects = {
+            "element": tuple(unique_elements),
+            "coordinate_mapping": tuple(unique_coordinate_elements),
+        }
 
-def _print_timing(stage, timing):
-    "Print timing results."
-    info("Compiler stage %s finished in %g seconds.\n" % (str(stage), timing))
+        return code_h, code_c, dependent_ufl_objects
+    else:
+        return code_h, code_c
diff --git a/ffc/cpp.py b/ffc/cpp.py
index 3c6a9ba..49e470f 100644
--- a/ffc/cpp.py
+++ b/ffc/cpp.py
@@ -1,6 +1,7 @@
+# -*- coding: utf-8 -*-
 "This module defines rules and algorithms for generating C++ code."
 
-# Copyright (C) 2009-2015 Anders Logg
+# Copyright (C) 2009-2016 Anders Logg
 #
 # This file is part of FFC.
 #
@@ -19,10 +20,13 @@
 #
 # Modified by Kristian B. Oelgaard 2011
 # Modified by Marie E. Rognes 2010
-# Modified by Martin Alnaes 2013-2015
+# Modified by Martin Sandve Alnæs 2013-2016
 
 # Python modules
-import re, numpy, platform
+import re
+import numpy
+import platform
+from six import string_types
 
 # UFL modules
 from ufl import custom_integral_types
@@ -32,15 +36,18 @@ from ffc.log import debug, error
 from six.moves import zip
 
 # ufc class names
+
 def make_classname(prefix, basename, signature):
     pre = prefix.lower() + "_" if prefix else ""
     sig = str(signature).lower()
     return "%s%s_%s" % (pre, basename, sig)
 
+
 def make_integral_classname(prefix, integral_type, form_id, subdomain_id):
     basename = "%s_integral_%s" % (integral_type, str(form_id).lower())
     return make_classname(prefix, basename, subdomain_id)
 
+
 # Mapping of restrictions
 _fixed_map = {None: "", "+": "_0", "-": "_1"}
 _choose_map = lambda r: _fixed_map[r] if r in _fixed_map else "_%s" % str(r)
@@ -54,89 +61,90 @@ format = {}
 
 # Program flow
 format.update({
-    "return":         lambda v: "return %s;" % str(v),
-    "grouping":       lambda v: "(%s)" % v,
-    "block":          lambda v: "{%s}" % v,
-    "block begin":    "{",
-    "block end":      "}",
-    "list":           lambda v: format["block"](format["list separator"].join([str(l) for l in v])),
-    "switch":         lambda v, cases, default=None, numbers=None: _generate_switch(v, cases, default, numbers),
-    "exception":      lambda v: "throw std::runtime_error(\"%s\");" % v,
-    "warning":        lambda v: 'std::cerr << "*** FFC warning: " << "%s" << std::endl;' % v,
-    "comment":        lambda v: "// %s" % v,
-    "if":             lambda c, v: "if (%s)\n{\n%s\n}\n" % (c, v),
-    "loop":           lambda i, j, k: "for (unsigned int %s = %s; %s < %s; %s++)"% (i, j, i, k, i),
-    "generate loop":  lambda v, w, _indent=0: _generate_loop(v, w, _indent),
-    "is equal":       " == ",
-    "not equal":      " != ",
-    "less than":      " < ",
-    "greater than":   " > ",
-    "less equal":     " <= ",
-    "greater equal":  " >= ",
-    "and":            " && ",
-    "or":             " || ",
-    "not":            lambda v: "!(%s)" % v,
-    "do nothing":     "// Do nothing"
+    "return": lambda v: "return %s;" % str(v),
+    "grouping": lambda v: "(%s)" % v,
+    "block": lambda v: "{%s}" % v,
+    "block begin": "{",
+    "block end": "}",
+    "list": lambda v: format["block"](format["list separator"].join([str(l) for l in v])),
+    "switch": lambda v, cases, default=None, numbers=None: _generate_switch(v, cases, default, numbers),
+    "exception": lambda v: "throw std::runtime_error(\"%s\");" % v,
+    "warning": lambda v: 'std::cerr << "*** FFC warning: " << "%s" << std::endl;' % v,
+    "comment": lambda v: "// %s" % v,
+    "if": lambda c, v: "if (%s)\n{\n%s\n}\n" % (c, v),
+    "loop": lambda i, j, k: "for (unsigned int %s = %s; %s < %s; %s++)" % (i, j, i, k, i),
+    "generate loop": lambda v, w, _indent=0: _generate_loop(v, w, _indent),
+    "is equal": " == ",
+    "not equal": " != ",
+    "less than": " < ",
+    "greater than": " > ",
+    "less equal": " <= ",
+    "greater equal": " >= ",
+    "and": " && ",
+    "or": " || ",
+    "not": lambda v: "!(%s)" % v,
+    "do nothing": "// Do nothing"
 })
 
 # Declarations
 format.update({
-    "declaration":                    lambda t, n, v=None: _declaration(t, n, v),
-    "float declaration":              "double",
-    "int declaration":                "int",
-    "uint declaration":               "unsigned int",
-    "static const uint declaration":  "static const unsigned int",
+    "declaration": lambda t, n, v=None: _declaration(t, n, v),
+    "float declaration": "double",
+    "int declaration": "int",
+    "uint declaration": "unsigned int",
+    "static const uint declaration": "static const unsigned int",
     "static const float declaration": "static const double",
-    "vector table declaration":       "std::vector< std::vector<double> >",
-    "double array declaration":       "double*",
+    "vector table declaration": "std::vector< std::vector<double> >",
+    "double array declaration": "double*",
     "const double array declaration": "const double*",
-    "const float declaration":        lambda v, w: "const double %s = %s;" % (v, w),
-    "const uint declaration":         lambda v, w: "const unsigned int %s = %s;" % (v, w),
-    "dynamic array":                  lambda t, n, s: "%s *%s = new %s[%s];" % (t, n, t, s),
-    "static array":                   lambda t, n, s: "static %s %s[%d];" % (t, n, s),
-    "fixed array":                    lambda t, n, s: "%s %s[%d];" % (t, n, s),
-    "delete dynamic array":           lambda n, s=None: _delete_array(n, s),
-    "create foo":                     lambda v: "new %s()" % v
+    "const float declaration": lambda v, w: "const double %s = %s;" % (v, w),
+    "const uint declaration": lambda v, w: "const unsigned int %s = %s;" % (v, w),
+    "dynamic array": lambda t, n, s: "%s *%s = new %s[%s];" % (t, n, t, s),
+    "static array": lambda t, n, s: "static %s %s[%d];" % (t, n, s),
+    "fixed array": lambda t, n, s: "%s %s[%d];" % (t, n, s),
+    "delete dynamic array": lambda n, s=None: _delete_array(n, s),
+    "create foo": lambda v: "new %s()" % v,
+    "create factory": lambda v: "create_%s()" % v
 })
 
 # Mathematical operators
 format.update({
-    "add":            lambda v: " + ".join(v),
-    "iadd":           lambda v, w: "%s += %s;" % (str(v), str(w)),
-    "sub":            lambda v: " - ".join(v),
-    "neg":            lambda v: "-%s" % v,
-    "mul":            lambda v: "*".join(v),
-    "imul":           lambda v, w: "%s *= %s;" % (str(v), str(w)),
-    "div":            lambda v, w: "%s/%s" % (str(v), str(w)),
-    "inverse":        lambda v: "(1.0/%s)" % v,
-    "std power":      lambda base, exp: "std::pow(%s, %s)" % (base, exp),
-    "exp":            lambda v: "std::exp(%s)" % str(v),
-    "ln":             lambda v: "std::log(%s)" % str(v),
-    "cos":            lambda v: "std::cos(%s)" % str(v),
-    "sin":            lambda v: "std::sin(%s)" % str(v),
-    "tan":            lambda v: "std::tan(%s)" % str(v),
-    "cosh":           lambda v: "std::cosh(%s)" % str(v),
-    "sinh":           lambda v: "std::sinh(%s)" % str(v),
-    "tanh":           lambda v: "std::tanh(%s)" % str(v),
-    "acos":           lambda v: "std::acos(%s)" % str(v),
-    "asin":           lambda v: "std::asin(%s)" % str(v),
-    "atan":           lambda v: "std::atan(%s)" % str(v),
-    "atan_2":         lambda v1,v2: "std::atan2(%s,%s)" % (str(v1),str(v2)),
-    "erf":            lambda v: "erf(%s)" % str(v),
-    "bessel_i":       lambda v, n: "boost::math::cyl_bessel_i(%s, %s)" % (str(n), str(v)),
-    "bessel_j":       lambda v, n: "boost::math::cyl_bessel_j(%s, %s)" % (str(n), str(v)),
-    "bessel_k":       lambda v, n: "boost::math::cyl_bessel_k(%s, %s)" % (str(n), str(v)),
-    "bessel_y":       lambda v, n: "boost::math::cyl_neumann(%s, %s)" % (str(n), str(v)),
+    "add": lambda v: " + ".join(v),
+    "iadd": lambda v, w: "%s += %s;" % (str(v), str(w)),
+    "sub": lambda v: " - ".join(v),
+    "neg": lambda v: "-%s" % v,
+    "mul": lambda v: "*".join(v),
+    "imul": lambda v, w: "%s *= %s;" % (str(v), str(w)),
+    "div": lambda v, w: "%s/%s" % (str(v), str(w)),
+    "inverse": lambda v: "(1.0/%s)" % v,
+    "std power": lambda base, exp: "std::pow(%s, %s)" % (base, exp),
+    "exp": lambda v: "std::exp(%s)" % str(v),
+    "ln": lambda v: "std::log(%s)" % str(v),
+    "cos": lambda v: "std::cos(%s)" % str(v),
+    "sin": lambda v: "std::sin(%s)" % str(v),
+    "tan": lambda v: "std::tan(%s)" % str(v),
+    "cosh": lambda v: "std::cosh(%s)" % str(v),
+    "sinh": lambda v: "std::sinh(%s)" % str(v),
+    "tanh": lambda v: "std::tanh(%s)" % str(v),
+    "acos": lambda v: "std::acos(%s)" % str(v),
+    "asin": lambda v: "std::asin(%s)" % str(v),
+    "atan": lambda v: "std::atan(%s)" % str(v),
+    "atan_2": lambda v1, v2: "std::atan2(%s,%s)" % (str(v1), str(v2)),
+    "erf": lambda v: "erf(%s)" % str(v),
+    "bessel_i": lambda v, n: "boost::math::cyl_bessel_i(%s, %s)" % (str(n), str(v)),
+    "bessel_j": lambda v, n: "boost::math::cyl_bessel_j(%s, %s)" % (str(n), str(v)),
+    "bessel_k": lambda v, n: "boost::math::cyl_bessel_k(%s, %s)" % (str(n), str(v)),
+    "bessel_y": lambda v, n: "boost::math::cyl_neumann(%s, %s)" % (str(n), str(v)),
     "absolute value": lambda v: "std::abs(%s)" % str(v),
-    "min value":      lambda l, r: "std::min(%s, %s)" % (str(l), str(r)),
-    "max value":      lambda l, r: "std::max(%s, %s)" % (str(l), str(r)),
-    "sqrt":           lambda v: "std::sqrt(%s)" % str(v),
-    "addition":       lambda v: _add(v),
-    "multiply":       lambda v: _multiply(v),
-    "power":          lambda base, exp: _power(base, exp),
-    "inner product":  lambda v, w: _inner_product(v, w),
-    "assign":         lambda v, w: "%s = %s;" % (v, str(w)),
-    "component":      lambda v, k: _component(v, k)
+    "min value": lambda l, r: "std::min(%s, %s)" % (str(l), str(r)),
+    "max value": lambda l, r: "std::max(%s, %s)" % (str(l), str(r)),
+    "sqrt": lambda v: "std::sqrt(%s)" % str(v),
+    "addition": lambda v: _add(v),
+    "multiply": lambda v: _multiply(v),
+    "power": lambda base, exp: _power(base, exp),
+    "inner product": lambda v, w: _inner_product(v, w),
+    "assign": lambda v, w: "%s = %s;" % (v, str(w)),
+    "component": lambda v, k: _component(v, k)
 })
 
 # Formatting used in tabulate_tensor
@@ -146,59 +154,59 @@ format.update({
 
 # Geometry related variable names (from code snippets).
 format.update({
-    "entity index":       "entity_indices",
-    "num entities":       "num_global_entities",
-    "cell":               lambda s: "ufc::shape::%s" % s,
-    "J":                  lambda i, j, m, n: "J[%d]" % _flatten(i, j, m, n),
-    "inv(J)":             lambda i, j, m, n: "K[%d]" % _flatten(i, j, m, n),
-    "det(J)":             lambda r=None: "detJ%s" % _choose_map(r),
-    "cell volume":        lambda r=None: "volume%s" % _choose_map(r),
-    "circumradius":       lambda r=None: "circumradius%s" % _choose_map(r),
-    "facet area":         "facet_area",
+    "entity index": "entity_indices",
+    "num entities": "num_global_entities",
+    "cell": lambda s: "ufc::shape::%s" % s,
+    "J": lambda i, j, m, n: "J[%d]" % _flatten(i, j, m, n),
+    "inv(J)": lambda i, j, m, n: "K[%d]" % _flatten(i, j, m, n),
+    "det(J)": lambda r=None: "detJ%s" % _choose_map(r),
+    "cell volume": lambda r=None: "volume%s" % _choose_map(r),
+    "circumradius": lambda r=None: "circumradius%s" % _choose_map(r),
+    "facet area": "facet_area",
     "min facet edge length": lambda r: "min_facet_edge_length",
     "max facet edge length": lambda r: "max_facet_edge_length",
-    "scale factor":       "det",
-    "transform":          lambda t, i, j, m, n, r: _transform(t, i, j, m, n, r),
-    "normal component":   lambda r, j: "n%s%s" % (_choose_map(r), j),
-    "x coordinate":       "X",
-    "y coordinate":       "Y",
-    "z coordinate":       "Z",
-    "ip coordinates":     lambda i, j: "X%d[%d]" % (i, j),
-    "affine map table":   lambda i, j: "FEA%d_f%d" % (i, j),
+    "scale factor": "det",
+    "transform": lambda t, i, j, m, n, r: _transform(t, i, j, m, n, r),
+    "normal component": lambda r, j: "n%s%s" % (_choose_map(r), j),
+    "x coordinate": "X",
+    "y coordinate": "Y",
+    "z coordinate": "Z",
+    "ip coordinates": lambda i, j: "X%d[%d]" % (i, j),
+    "affine map table": lambda i, j: "FEA%d_f%d" % (i, j),
     "coordinate_dofs": lambda r=None: "coordinate_dofs%s" % _choose_map(r)
 })
 
 # UFC function arguments and class members (names)
 format.update({
-    "element tensor":             lambda i: "A[%s]" % i,
-    "element tensor term":        lambda i, j: "A%d[%s]" % (j, i),
-    "coefficient":                lambda j, k: format["component"]("w", [j, k]),
-    "argument basis num":         "i",
-    "argument derivative order":  "n",
-    "argument values":            "values",
-    "argument coordinates":       "dof_coordinates",
-    "facet":                      lambda r: "facet%s" % _choose_map(r),
-    "vertex":                     "vertex",
-    "argument axis":              "i",
-    "argument dimension":         "d",
-    "argument entity":            "i",
-    "member global dimension":    "_global_dimension",
-    "argument dofs":              "dofs",
-    "argument dof num":           "i",
-    "argument dof values":        "dof_values",
-    "argument vertex values":     "vertex_values",
-    "argument sub":               "i", # sub element
-    "argument subdomain":         "subdomain_id", # sub domain
+    "element tensor": lambda i: "A[%s]" % i,
+    "element tensor term": lambda i, j: "A%d[%s]" % (j, i),
+    "coefficient": lambda j, k: format["component"]("w", [j, k]),
+    "argument basis num": "i",
+    "argument derivative order": "n",
+    "argument values": "values",
+    "argument coordinates": "dof_coordinates",
+    "facet": lambda r: "facet%s" % _choose_map(r),
+    "vertex": "vertex",
+    "argument axis": "i",
+    "argument dimension": "d",
+    "argument entity": "i",
+    "member global dimension": "_global_dimension",
+    "argument dofs": "dofs",
+    "argument dof num": "i",
+    "argument dof values": "dof_values",
+    "argument vertex values": "vertex_values",
+    "argument sub": "i",  # sub element
+    "argument subdomain": "subdomain_id",  # sub domain
 })
 
 # Formatting used in evaluatedof.
 format.update({
-    "dof vals":                 "vals",
-    "dof result":               "result",
-    "dof X":                    lambda i: "X_%d" % i,
-    "dof D":                    lambda i: "D_%d" % i,
-    "dof W":                    lambda i: "W_%d" % i,
-    "dof copy":                 lambda i: "copy_%d" % i,
+    "dof vals": "vals",
+    "dof result": "result",
+    "dof X": lambda i: "X_%d" % i,
+    "dof D": lambda i: "D_%d" % i,
+    "dof W": lambda i: "W_%d" % i,
+    "dof copy": lambda i: "copy_%d" % i,
     "dof physical coordinates": "y"
 })
 
@@ -207,115 +215,118 @@ format.update({
 # code generators.
 format.update({
     # evaluate_basis and evaluate_basis_derivatives
-    "tmp value":                  lambda i: "tmp%d" % i,
-    "tmp ref value":              lambda i: "tmp_ref%d" % i,
-    "local dof":                  "dof",
-    "basisvalues":                "basisvalues",
-    "coefficients":               lambda i: "coefficients%d" %(i),
-    "num derivatives":            lambda t_or_g :"num_derivatives" + t_or_g,
-    "derivative combinations":    lambda t_or_g :"combinations" + t_or_g,
-    "transform matrix":           "transform",
-    "transform Jinv":             "Jinv",
-    "dmats":                      lambda i: "dmats%s" %(i),
-    "dmats old":                  "dmats_old",
-    "reference derivatives":      "derivatives",
-    "dof values":                 "dof_values",
-    "dof map if":                 lambda i,j: "%d <= %s && %s <= %d"\
+    "tmp value": lambda i: "tmp%d" % i,
+    "tmp ref value": lambda i: "tmp_ref%d" % i,
+    "local dof": "dof",
+    "basisvalues": "basisvalues",
+    "coefficients": lambda i: "coefficients%d" % (i),
+    "num derivatives": lambda t_or_g: "num_derivatives" + t_or_g,
+    "derivative combinations": lambda t_or_g: "combinations" + t_or_g,
+    "transform matrix": "transform",
+    "transform Jinv": "Jinv",
+    "dmats": lambda i: "dmats%s" % (i),
+    "dmats old": "dmats_old",
+    "reference derivatives": "derivatives",
+    "dof values": "dof_values",
+    "dof map if":                 lambda i, j: "%d <= %s && %s <= %d"\
                                   % (i, format["argument basis num"], format["argument basis num"], j),
-    "dereference pointer":        lambda n: "*%s" % n,
-    "reference variable":         lambda n: "&%s" % n,
-    "call basis":                 lambda i, s: "_evaluate_basis(%s, %s, x, coordinate_dofs, cell_orientation);" % (i, s),
-    "call basis_all":             "_evaluate_basis_all(values, x, coordinate_dofs, cell_orientation);",
-    "call basis_derivatives":     lambda i, s: "_evaluate_basis_derivatives(%s, n, %s, x, coordinate_dofs, cell_orientation);" % (i, s),
+    "dereference pointer": lambda n: "*%s" % n,
+    "reference variable": lambda n: "&%s" % n,
+    "call basis": lambda i, s: "_evaluate_basis(%s, %s, x, coordinate_dofs, cell_orientation);" % (i, s),
+    "call basis_all": "_evaluate_basis_all(values, x, coordinate_dofs, cell_orientation);",
+    "call basis_derivatives": lambda i, s: "_evaluate_basis_derivatives(%s, n, %s, x, coordinate_dofs, cell_orientation);" % (i, s),
     "call basis_derivatives_all": lambda i, s: "_evaluate_basis_derivatives_all(n, %s, x, coordinate_dofs, cell_orientation);" % s,
 
     # quadrature code generators
-    "integration points":   "ip",
-    "first free index":     "j",
-    "second free index":    "k",
-    "geometry constant":    lambda i: "G[%d]" % i,
-    "ip constant":          lambda i: "I[%d]" % i,
-    "basis constant":       lambda i: "B[%d]" % i,
-    "conditional":          lambda i: "C[%d]" % i,
-    "evaluate conditional": lambda i,j,k: "(%s) ? %s : %s" % (i,j,k),
-#    "geometry constant":   lambda i: "G%d" % i,
-#    "ip constant":         lambda i: "I%d" % i,
-#    "basis constant":      lambda i: "B%d" % i,
-    "function value":       lambda i: "F%d" % i,
-    "nonzero columns":      lambda i: "nzc%d" % i,
-    "weight":               lambda i: "W" if i is None else "W%d" % (i),
-    "psi name":             lambda c, et, e, co, d, a: _generate_psi_name(c, et, e, co, d, a),
-    # both
-    "free indices":         ["r","s","t","u"],
-    "matrix index":         lambda i, j, range_j: _matrix_index(i, str(j), str(range_j)),
-    "quadrature point":     lambda i, gdim: "quadrature_points + %s*%d" % (i, gdim),
-    "facet_normal_custom":  lambda gdim: _generate_facet_normal_custom(gdim),
-})
+    "integration points": "ip",
+    "first free index": "j",
+    "second free index": "k",
+    "geometry constant": lambda i: "G[%d]" % i,
+    "ip constant": lambda i: "I[%d]" % i,
+    "basis constant": lambda i: "B[%d]" % i,
+    "conditional": lambda i: "C[%d]" % i,
+    "evaluate conditional": lambda i, j, k: "(%s) ? %s : %s" % (i, j, k),
+              #    "geometry constant":   lambda i: "G%d" % i,
+              #    "ip constant":         lambda i: "I%d" % i,
+              #    "basis constant":      lambda i: "B%d" % i,
+              "function value": lambda i: "F%d" % i,
+              "nonzero columns": lambda i: "nzc%d" % i,
+              "weight": lambda i: "W" if i is None else "W%d" % (i),
+              "psi name": lambda c, et, e, co, d, a: _generate_psi_name(c, et, e, co, d, a),
+              # both
+              "free indices": ["r", "s", "t", "u"],
+              "matrix index": lambda i, j, range_j: _matrix_index(i, str(j), str(range_j)),
+              "quadrature point": lambda i, gdim: "quadrature_points + %s*%d" % (i, gdim),
+              "facet_normal_custom": lambda gdim: _generate_facet_normal_custom(gdim),
+              })
 
 # Misc
 format.update({
-    "bool":             lambda v: {True: "true", False: "false"}[v],
-    "str":              lambda v: "%s" % v,
-    "int":              lambda v: "%d" % v,
-    "list separator":   ", ",
-    "block separator":  ",\n",
-    "new line":         "\\\n",
-    "tabulate tensor":  lambda m: _tabulate_tensor(m),
+    "bool": lambda v: {True: "true", False: "false"}[v],
+    "str": lambda v: "%s" % v,
+    "int": lambda v: "%d" % v,
+    "list separator": ", ",
+    "block separator": ",\n",
+    "new line": "\\\n",
+    "tabulate tensor": lambda m: _tabulate_tensor(m),
 })
 
 # Code snippets
 from ffc.codesnippets import *
 
 format.update({
-    "compute_jacobian":         lambda tdim, gdim, r=None: \
+    "compute_jacobian": lambda tdim, gdim, r=None:
                                 compute_jacobian[tdim][gdim] % {"restriction": _choose_map(r)},
-    "compute_jacobian_inverse": lambda tdim, gdim, r=None: \
+    "compute_jacobian_inverse": lambda tdim, gdim, r=None:
                                 compute_jacobian_inverse[tdim][gdim] % {"restriction": _choose_map(r)},
-    "orientation":              lambda tdim, gdim, r=None: orientation_snippet % {"restriction": _choose_map(r)} if tdim != gdim else "",
-    "facet determinant":        lambda tdim, gdim, r=None: facet_determinant[tdim][gdim] % {"restriction": _choose_map(r)},
-    "fiat coordinate map":      lambda cell, gdim: fiat_coordinate_map[cell][gdim],
-    "generate normal":          lambda tdim, gdim, i: _generate_normal(tdim, gdim, i),
-    "generate cell volume":     lambda tdim, gdim, i, r=None: _generate_cell_volume(tdim, gdim, i, r),
-    "generate circumradius":    lambda tdim, gdim, i, r=None: _generate_circumradius(tdim, gdim, i, r),
-    "generate facet area":      lambda tdim, gdim: facet_area[tdim][gdim],
+    "orientation": lambda tdim, gdim, r=None: orientation_snippet % {"restriction": _choose_map(r)} if tdim != gdim else "",
+    "facet determinant": lambda tdim, gdim, r=None: facet_determinant[tdim][gdim] % {"restriction": _choose_map(r)},
+    "fiat coordinate map": lambda cell, gdim: fiat_coordinate_map[cell][gdim],
+    "generate normal": lambda tdim, gdim, i: _generate_normal(tdim, gdim, i),
+    "generate cell volume": lambda tdim, gdim, i, r=None: _generate_cell_volume(tdim, gdim, i, r),
+    "generate circumradius": lambda tdim, gdim, i, r=None: _generate_circumradius(tdim, gdim, i, r),
+    "generate facet area": lambda tdim, gdim: facet_area[tdim][gdim],
     "generate min facet edge length": lambda tdim, gdim, r=None: min_facet_edge_length[tdim][gdim] % {"restriction": _choose_map(r)},
     "generate max facet edge length": lambda tdim, gdim, r=None: max_facet_edge_length[tdim][gdim] % {"restriction": _choose_map(r)},
-    "generate ip coordinates":  lambda g, t, num_ip, name, ip, r=None: (ip_coordinates[t][g][0], ip_coordinates[t][g][1] % \
-                                {"restriction": _choose_map(r), "ip": ip, "name": name, "num_ip": num_ip}),
-    "scale factor snippet":     scale_factor,
-    "map onto physical":        map_onto_physical,
-    "evaluate basis snippet":   eval_basis,
-    "combinations":             combinations_snippet,
-    "transform snippet":        transform_snippet,
-    "evaluate function":        evaluate_f,
-    "ufc comment":              comment_ufc,
-    "dolfin comment":           comment_dolfin,
-    "header_h":                 header_h,
-    "header_c":                 header_c,
-    "footer":                   footer,
-    "eval_basis_decl":          eval_basis_decl,
-    "eval_basis_init":          eval_basis_init,
-    "eval_basis":               eval_basis,
-    "eval_basis_copy":          eval_basis_copy,
-    "eval_derivs_decl":         eval_derivs_decl,
-    "eval_derivs_init":         eval_derivs_init,
-    "eval_derivs":              eval_derivs,
-    "eval_derivs_copy":         eval_derivs_copy,
-    "extract_cell_coordinates": lambda offset, r : "const double* coordinate_dofs_%d = coordinate_dofs + %d;" % (r, offset)
-    })
+    "generate ip coordinates": lambda g, t, num_ip, name, ip, r=None: (ip_coordinates[t][g][0], ip_coordinates[t][g][1] %
+                                                                       {"restriction": _choose_map(r), "ip": ip, "name": name, "num_ip": num_ip}),
+    "scale factor snippet": scale_factor,
+    "map onto physical": map_onto_physical,
+    "evaluate basis snippet": eval_basis,
+    "combinations": combinations_snippet,
+    "transform snippet": transform_snippet,
+    "evaluate function": evaluate_f,
+    "ufc comment": comment_ufc,
+    "dolfin comment": comment_dolfin,
+    "header_h": header_h,
+    "header_c": header_c,
+    "footer": footer,
+    "eval_basis_decl": eval_basis_decl,
+    "eval_basis_init": eval_basis_init,
+    "eval_basis": eval_basis,
+    "eval_basis_copy": eval_basis_copy,
+    "eval_derivs_decl": eval_derivs_decl,
+    "eval_derivs_init": eval_derivs_init,
+    "eval_derivs": eval_derivs,
+    "eval_derivs_copy": eval_derivs_copy,
+    "extract_cell_coordinates": lambda offset, r: "const double* coordinate_dofs_%d = coordinate_dofs + %d;" % (r, offset)
+})
 
 # Helper functions for formatting
 
+
 def _declaration(type, name, value=None):
     if value is None:
-        return "%s %s;" % (type, name);
-    return "%s %s = %s;" % (type, name, str(value));
+        return "%s %s;" % (type, name)
+    return "%s %s = %s;" % (type, name, str(value))
+
 
 def _component(var, k):
     if not isinstance(k, (list, tuple)):
         k = [k]
     return "%s" % var + "".join("[%s]" % str(i) for i in k)
 
+
 def _delete_array(name, size=None):
     if size is None:
         return "delete [] %s;" % name
@@ -324,6 +335,7 @@ def _delete_array(name, size=None):
     code.append("delete [] %s;" % name)
     return "\n".join(code)
 
+
 def _multiply(factors):
     """
     Generate string multiplying a list of numbers or strings.  If a
@@ -366,6 +378,7 @@ def _multiply(factors):
 
     return "*".join(non_zero_factors)
 
+
 def _add(terms):
     "Generate string summing a list of strings."
 
@@ -375,13 +388,15 @@ def _add(terms):
         return format["str"](0)
     return result
 
+
 def _power(base, exponent):
     "Generate code for base^exponent."
     if exponent >= 0:
-        return _multiply(exponent*(base,))
+        return _multiply(exponent * (base,))
     else:
         return "1.0 / (%s)" % _power(base, -exponent)
 
+
 def _inner_product(v, w):
     "Generate string for v[0]*w[0] + ... + v[n]*w[n]."
 
@@ -389,10 +404,11 @@ def _inner_product(v, w):
     assert(len(v) == len(w)), "Sizes differ in inner-product!"
 
     # Special case, zero terms
-    if len(v) == 0: return format["float"](0)
+    if len(v) == 0:
+        return format["float"](0)
 
     # Straightforward handling when we only have strings
-    if isinstance(v[0], str):
+    if isinstance(v[0], string_types):
         return _add([_multiply([v[i], w[i]]) for i in range(len(v))])
 
     # Fancy handling of negative numbers etc
@@ -402,7 +418,7 @@ def _inner_product(v, w):
     sub = format["sub"]
     neg = format["neg"]
     mul = format["mul"]
-    fl  = format["float"]
+    fl = format["float"]
     for (c, x) in zip(v, w):
         if result:
             if abs(c - 1.0) < eps:
@@ -425,11 +441,14 @@ def _inner_product(v, w):
 
     return result
 
+
 def _transform(type, i, j, m, n, r):
     map_name = {"J": "J", "JINV": "K"}[type] + _choose_map(r)
     return (map_name + "[%d]") % _flatten(i, j, m, n)
 
 # FIXME: Input to _generate_switch should be a list of tuples (i, case)
+
+
 def _generate_switch(variable, cases, default=None, numbers=None):
     "Generate switch statement from given variable and cases"
 
@@ -459,16 +478,17 @@ def _generate_switch(variable, cases, default=None, numbers=None):
 
     return code
 
+
 def _tabulate_tensor(vals):
     "Tabulate a multidimensional tensor. (Replace tabulate_matrix and tabulate_vector)."
 
     # Prefetch formats to speed up code generation
-    f_block     = format["block"]
-    f_list_sep  = format["list separator"]
+    f_block = format["block"]
+    f_list_sep = format["list separator"]
     f_block_sep = format["block separator"]
     # FIXME: KBO: Change this to "float" once issue in set_float_formatting is fixed.
-    f_float     = format["floating point"]
-    f_epsilon   = format["epsilon"]
+    f_float = format["floating point"]
+    f_epsilon = format["epsilon"]
 
     # Create numpy array and get shape.
     tensor = numpy.array(vals)
@@ -488,14 +508,15 @@ def _tabulate_tensor(vals):
     else:
         error("Not an N-dimensional array:\n%s" % tensor)
 
+
 def _generate_loop(lines, loop_vars, _indent):
     "This function generates a loop over a vector or matrix."
 
     # Prefetch formats to speed up code generation.
-    f_loop     = format["loop"]
-    f_begin    = format["block begin"]
-    f_end      = format["block end"]
-    f_comment  = format["comment"]
+    f_loop = format["loop"]
+    f_begin = format["block begin"]
+    f_end = format["block end"]
+    f_comment = format["comment"]
 
     if not loop_vars:
         return lines
@@ -525,6 +546,7 @@ def _generate_loop(lines, loop_vars, _indent):
 
     return code
 
+
 def _matrix_index(i, j, range_j):
     "Map the indices in a matrix to an index in an array i.e., m[i][j] -> a[i*range(j)+j]"
     if i == 0:
@@ -536,6 +558,7 @@ def _matrix_index(i, j, range_j):
         access = format["add"]([irj, j])
     return access
 
+
 def _generate_psi_name(counter, entity_type, entity, component, derivatives, avg):
     """Generate a name for the psi table of the form:
     FE#_f#_v#_C#_D###_A#, where '#' will be an integer value.
@@ -558,10 +581,7 @@ def _generate_psi_name(counter, entity_type, entity, component, derivatives, avg
     name = "FE%d" % counter
 
     if entity_type == "facet":
-        if entity is None:
-            name += "_f0"
-        else:
-            name += "_f%d" % entity
+        name += "_f%d" % entity
     elif entity_type == "vertex":
         name += "_v%d" % entity
 
@@ -569,7 +589,7 @@ def _generate_psi_name(counter, entity_type, entity, component, derivatives, avg
         name += "_C%d" % component
 
     if any(derivatives):
-        name += "_D" + "".join(map(str,derivatives))
+        name += "_D" + "".join(map(str, derivatives))
 
     if avg == "cell":
         name += "_AC"
@@ -578,6 +598,7 @@ def _generate_psi_name(counter, entity_type, entity, component, derivatives, avg
 
     return name
 
+
 def _generate_normal(tdim, gdim, integral_type, reference_normal=False):
     "Generate code for computing normal"
 
@@ -590,16 +611,17 @@ def _generate_normal(tdim, gdim, integral_type, reference_normal=False):
 
     # Choose restrictions
     if integral_type == "exterior_facet":
-        code = direction % {"restriction": "", "facet" : "facet"}
-        code += normal % {"direction" : "", "restriction": ""}
+        code = direction % {"restriction": "", "facet": "facet"}
+        code += normal % {"direction": "", "restriction": ""}
     elif integral_type == "interior_facet":
         code = direction % {"restriction": _choose_map("+"), "facet": "facet_0"}
-        code += normal % {"direction" : "", "restriction": _choose_map("+")}
-        code += normal % {"direction" : "!", "restriction": _choose_map("-")}
+        code += normal % {"direction": "", "restriction": _choose_map("+")}
+        code += normal % {"direction": "!", "restriction": _choose_map("-")}
     else:
         error("Unsupported integral_type: %s" % str(integral_type))
     return code
 
+
 def _generate_facet_normal_custom(gdim):
     "Generate code for setting facet normal in custom integrals"
     code = format["comment"]("Set facet normal components for current quadrature point\n")
@@ -608,6 +630,7 @@ def _generate_facet_normal_custom(gdim):
         code += "const double n_1%d = - facet_normals[%d*ip + %d];\n" % (i, gdim, i)
     return code
 
+
 def _generate_cell_volume(tdim, gdim, integral_type, r=None):
     "Generate code for computing cell volume."
 
@@ -626,6 +649,7 @@ def _generate_cell_volume(tdim, gdim, integral_type, r=None):
         error("Unsupported integral_type: %s" % str(integral_type))
     return code
 
+
 def _generate_circumradius(tdim, gdim, integral_type, r=None):
     "Generate code for computing a cell's circumradius."
 
@@ -644,31 +668,35 @@ def _generate_circumradius(tdim, gdim, integral_type, r=None):
         error("Unsupported integral_type: %s" % str(integral_type))
     return code
 
+
 def _flatten(i, j, m, n):
-    return i*n + j
+    return i * n + j
 
 # Other functions
 
+
 def indent(block, num_spaces):
     "Indent each row of the given string block with n spaces."
     indentation = " " * num_spaces
     return indentation + ("\n" + indentation).join(block.split("\n"))
 
+
 def count_ops(code):
     "Count the number of operations in code (multiply-add pairs)."
     num_add = code.count(" + ") + code.count(" - ")
     num_multiply = code.count("*") + code.count("/")
     return (num_add + num_multiply) // 2
 
+
 def set_float_formatting(precision):
     "Set floating point formatting based on precision."
 
     # Options for float formatting
-    #f1     = "%%.%df" % precision
-    #f2     = "%%.%de" % precision
-    f1     = "%%.%dg" % precision
-    f2     = "%%.%dg" % precision
-    f_int  = "%%.%df" % 1
+    # f1     = "%%.%df" % precision
+    # f2     = "%%.%de" % precision
+    f1 = "%%.%dg" % precision
+    f2 = "%%.%dg" % precision
+    f_int = "%%.%df" % 1
 
     eps = eval("1e-%s" % precision)
 
@@ -695,11 +723,11 @@ def set_float_formatting(precision):
     format["floating point"] = format["float"]
 
     # Set machine precision
-    format["epsilon"] = 10.0*eval("1e-%s" % precision)
+    format["epsilon"] = 10.0 * eval("1e-%s" % precision)
 
     # Hack to propagate precision to uflacs internals...
-    import uflacs.language.format_value
-    uflacs.language.format_value.set_float_precision(precision)
+    import ffc.uflacs.language.format_value
+    ffc.uflacs.language.format_value.set_float_precision(precision)
 
 
 def set_exception_handling(convert_exceptions_to_warnings):
@@ -721,7 +749,8 @@ types = [["double"],
          ["const", "unsigned", "int"]]
 
 # Special characters and delimiters
-special_characters = ["+", "-", "*", "/", "=", ".", " ", ";", "(", ")", "\\", "{", "}", "[","]", "!"]
+special_characters = ["+", "-", "*", "/", "=", ".", " ", ";", "(", ")", "\\", "{", "}", "[", "]", "!"]
+
 
 def remove_unused(code, used_set=set()):
     """
@@ -747,10 +776,10 @@ def remove_unused(code, used_set=set()):
             continue
 
         # Split words
-        words = [word for word in line.split(" ") if not word == ""]
+        words = [word for word in line.split(" ") if word != ""]
 
         # Remember line where variable is declared
-        for type in [type for type in types if " ".join(type) in " ".join(words)]: # Fewer matches than line below.
+        for type in [type for type in types if " ".join(type) in " ".join(words)]:  # Fewer matches than line below.
         # for type in [type for type in types if len(words) > len(type)]:
             variable_type = words[0:len(type)]
             variable_name = words[len(type)]
@@ -769,12 +798,11 @@ def remove_unused(code, used_set=set()):
                 # y[2]) for variables with separators
                 seps_present = [sep for sep in special_characters if sep in variable_name]
                 if seps_present:
-                    variable_name = [variable_name.split(sep)[0] for sep in seps_present]
-                    variable_name.sort()
+                    variable_name = sorted([variable_name.split(sep)[0] for sep in seps_present])
                     variable_name = variable_name[0]
 
                 variables[variable_name] = (line_number, [])
-                if not variable_name in variable_names:
+                if variable_name not in variable_names:
                     variable_names += [variable_name]
 
         # Mark line for used variables
@@ -794,18 +822,19 @@ def remove_unused(code, used_set=set()):
         for line in removed_lines:
             if line in used_lines:
                 used_lines.remove(line)
-        if not used_lines and not variable_name in used_set:
+        if not used_lines and variable_name not in used_set:
             debug("Removing unused variable: %s" % variable_name)
-            lines[declaration_line] = None # KBO: Need to completely remove line for evaluate_basis* to work
+            lines[declaration_line] = None  # KBO: Need to completely remove line for evaluate_basis* to work
             # lines[declaration_line] = "// " + lines[declaration_line]
             removed_lines += [declaration_line]
-    return "\n".join([line for line in lines if not line is None])
+    return "\n".join([line for line in lines if line is not None])
+
 
 def _variable_in_line(variable_name, line):
     "Check if variable name is used in line"
-    if not variable_name in line:
+    if variable_name not in line:
         return False
     for character in special_characters:
         line = line.replace(character, "\\" + character)
     delimiter = "[" + ",".join(["\\" + c for c in special_characters]) + "]"
-    return not re.search(delimiter + variable_name + delimiter, line) == None
+    return re.search(delimiter + variable_name + delimiter, line) is not None
diff --git a/ffc/enrichedelement.py b/ffc/enrichedelement.py
index 6f3486a..98b168b 100644
--- a/ffc/enrichedelement.py
+++ b/ffc/enrichedelement.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright (C) 2010 Marie E. Rognes
 #
 # This file is part of FFC.
@@ -22,7 +23,9 @@ import numpy
 from .utils import pick_first
 from .mixedelement import _combine_entity_dofs, _num_components
 
+
 class EnrichedElement:
+
     "Create the space spanned by a list of ffc elements."
 
     def __init__(self, elements):
@@ -68,7 +71,7 @@ class EnrichedElement:
             # Insert element table into table
             for dtuple in etable.keys():
 
-                if not dtuple in table:
+                if dtuple not in table:
                     if num_components == 1:
                         table[dtuple] = numpy.zeros((self.space_dimension(), len(points)))
                     else:
diff --git a/ffc/errorcontrol/__init__.py b/ffc/errorcontrol/__init__.py
index 88423ea..cbaa6c8 100644
--- a/ffc/errorcontrol/__init__.py
+++ b/ffc/errorcontrol/__init__.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 """
 This module contains functionality for working with automated
 goal-oriented error control. In particular it offers the following
diff --git a/ffc/errorcontrol/errorcontrol.py b/ffc/errorcontrol/errorcontrol.py
index b4d1c5b..3d8b8fc 100644
--- a/ffc/errorcontrol/errorcontrol.py
+++ b/ffc/errorcontrol/errorcontrol.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 """
 This module provides compilation of forms required for goal-oriented
 error control
@@ -23,15 +24,14 @@ error control
 from ufl.utils.sorting import sorted_by_key
 from ufl import Coefficient
 
-from ffc.log import info, error
+from ffc.log import error
 from ffc.compiler import compile_form
-from ffc.formatting import write_code
 
 __all__ = ["compile_with_error_control"]
 
+
 def compile_with_error_control(forms, object_names, reserved_objects,
                                prefix, parameters):
-
     """
     Compile forms and additionally generate and compile forms required
     for performing goal-oriented error control
@@ -73,9 +73,9 @@ def compile_with_error_control(forms, object_names, reserved_objects,
     # Check that there are no conflicts between user defined and
     # generated names
     ec_names = generator.ec_names
-    comment = "%s are reserved error control names." % str(sorted(ec_names.values()))
-    assert not (set(object_names.values()) & set(ec_names.values())), \
-               "Conflict between user defined and generated names: %s" % comment
+    if set(object_names.values()) & set(ec_names.values()):
+        comment = "%s are reserved error control names." % str(sorted(ec_names.values()))
+        error("Conflict between user defined and generated names: %s" % comment)
 
     # Add names generated for error control to object_names
     for (objid, name) in sorted_by_key(ec_names):
@@ -87,6 +87,7 @@ def compile_with_error_control(forms, object_names, reserved_objects,
 
     return code_h, code_c
 
+
 def prepare_input_arguments(forms, object_names, reserved_objects):
     """
     Extract required input arguments to UFLErrorControlGenerator.
@@ -119,8 +120,8 @@ def prepare_input_arguments(forms, object_names, reserved_objects):
     """
 
     # Check that we get a tuple of forms
-    expecting_tuple_msg = "Expecting tuple of forms, got %s" % str(forms)
-    assert(isinstance(forms, (list, tuple))), expecting_tuple_msg
+    if not isinstance(forms, (list, tuple)):
+        error("Expecting tuple of forms, got %s" % str(forms))
 
     def __is_nonlinear(forms):
         return len(forms) == 2
@@ -139,8 +140,8 @@ def prepare_input_arguments(forms, object_names, reserved_objects):
         (F, M) = forms
 
         # Check that forms have the expected rank
-        assert(len(F.arguments()) == 1)
-        assert(len(M.arguments()) == 0)
+        assert len(F.arguments()) == 1
+        assert len(M.arguments()) == 0
 
         # Return primal, goal and unknown
         return (F, M, u)
@@ -155,9 +156,9 @@ def prepare_input_arguments(forms, object_names, reserved_objects):
 
         # Check that forms have the expected rank
         arguments = a.arguments()
-        assert(len(arguments) == 2)
-        assert(len(L.arguments()) == 1)
-        assert(len(M.arguments()) == 1)
+        assert len(arguments) == 2
+        assert len(L.arguments()) == 1
+        assert len(M.arguments()) == 1
 
         # Standard case: create default Coefficient in trial space and
         # label it __discrete_primal_solution
diff --git a/ffc/errorcontrol/errorcontrolgenerators.py b/ffc/errorcontrol/errorcontrolgenerators.py
index 25460af..7303685 100644
--- a/ffc/errorcontrol/errorcontrolgenerators.py
+++ b/ffc/errorcontrol/errorcontrolgenerators.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 """
 This module provides an abstract ErrorControlGenerator class for
 generating forms required for goal-oriented error control and a
@@ -26,6 +27,7 @@ from ufl import inner, dx, ds, dS, avg, replace, action
 
 __all__ = ["ErrorControlGenerator", "UFLErrorControlGenerator"]
 
+
 class ErrorControlGenerator:
 
     def __init__(self, module, F, M, u):
@@ -156,8 +158,8 @@ class ErrorControlGenerator:
 
         # Define forms defining linear variational problem for cell
         # residual
-        v_T = self._b_T*v
-        a_R_T = inner(v_T, R_T)*dx(self.domain)
+        v_T = self._b_T * v
+        a_R_T = inner(v_T, R_T) * dx(self.domain)
         L_R_T = replace(self.weak_residual, {v_h: v_T})
 
         return (a_R_T, L_R_T)
@@ -177,11 +179,12 @@ class ErrorControlGenerator:
 
         # Define forms defining linear variational problem for facet
         # residual
-        v_e = self._b_e*v
-        a_R_dT = ((inner(v_e('+'), R_e('+')) + inner(v_e('-'), R_e('-')))*dS(self.domain)
-                  + inner(v_e, R_e)*ds(self.domain))
-        L_R_dT = (replace(self.weak_residual, {v_h: v_e})
-                  - inner(v_e, self._R_T)*dx(self.domain))
+        v_e = self._b_e * v
+        a_R_dT = ((inner(v_e('+'), R_e('+')) +
+                   inner(v_e('-'), R_e('-'))) * dS(self.domain) +
+                  inner(v_e, R_e) * ds(self.domain))
+        L_R_dT = (replace(self.weak_residual, {v_h: v_e}) -
+                  inner(v_e, self._R_T) * dx(self.domain))
 
         return (a_R_dT, L_R_dT)
 
@@ -205,14 +208,16 @@ class ErrorControlGenerator:
 
         # Define linear form for computing error indicators
         v = self.module.TestFunction(self._DG0)
-        eta_T = (v*inner(R_T, z - z_h)*dx(self.domain)
-                 + avg(v)*(inner(R_dT('+'), (z - z_h)('+'))
-                           + inner(R_dT('-'), (z - z_h)('-')))*dS(self.domain)
-                 + v*inner(R_dT, z - z_h)*ds(self.domain))
+        eta_T = (v * inner(R_T, z - z_h) * dx(self.domain) +
+                 avg(v)*(inner(R_dT('+'), (z - z_h)('+')) +
+                         inner(R_dT('-'), (z - z_h)('-'))) * dS(self.domain) +
+                 v * inner(R_dT, z - z_h) * ds(self.domain))
 
         return eta_T
 
+
 class UFLErrorControlGenerator(ErrorControlGenerator):
+
     """
     This class provides a realization of ErrorControlGenerator for use
     with pure UFL forms
@@ -294,4 +299,5 @@ class UFLErrorControlGenerator(ErrorControlGenerator):
         self.ec_names[id(self._z_h)] = "__discrete_dual_solution"
 
         # Piecewise constants for assembling indicators
-        self._DG0 = FunctionSpace(domain, FiniteElement("DG", domain.ufl_cell(), 0))
+        self._DG0 = FunctionSpace(domain, FiniteElement("DG",
+                                                        domain.ufl_cell(), 0))
diff --git a/ffc/evaluatebasis.py b/ffc/evaluatebasis.py
index 98dfc6a..5564a92 100644
--- a/ffc/evaluatebasis.py
+++ b/ffc/evaluatebasis.py
@@ -1,8 +1,6 @@
-"""Code generation for evaluation of finite element basis values. This
-module generates code which is more or less a C++ representation of
-the code found in FIAT."""
+# -*- coding: utf-8 -*-
 
-# Copyright (C) 2007-2010 Kristian B. Oelgaard
+# Copyright (C) 2007-2016 Kristian B. Oelgaard
 #
 # This file is part of FFC.
 #
@@ -19,13 +17,14 @@ the code found in FIAT."""
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 #
-# First added:  2007-04-04
-# Last changed: 2015-03-28
-#
 # Modified by Marie E. Rognes 2011
 # Modified by Anders Logg 2013
-# Modified by Lizao Li 2015
-#
+# Modified by Lizao Li 2015, 2016
+
+"""Code generation for evaluation of finite element basis values. This
+module generates code which is more or less a C++ representation of
+the code found in FIAT."""
+
 # MER: The original module generated code that was more or less a C++
 # representation of the code found in FIAT. I've modified this (for 2
 # and 3D) to generate code that does the same as FIAT, but with loops
@@ -35,33 +34,35 @@ the code found in FIAT."""
 # Python modules
 import math
 import numpy
+from six import string_types
 
 # FFC modules
 from ffc.log import error
 from ffc.cpp import remove_unused, indent, format
-from ffc.quadrature.symbolics import create_float, create_float, create_symbol,\
-                                     create_product, create_sum, create_fraction, CONST
+from ffc.quadrature.symbolics import create_float, create_symbol,\
+    create_product, create_sum, create_fraction, CONST
+
 
 def _evaluate_basis_all(data):
     """Like evaluate_basis, but return the values of all basis functions (dofs)."""
 
-    if isinstance(data, str):
+    if isinstance(data, string_types):
         return format["exception"]("evaluate_basis_all: %s" % data)
 
     # Prefetch formats.
-    f_assign    = format["assign"]
+    f_assign = format["assign"]
     f_component = format["component"]
-    f_comment   = format["comment"]
-    f_loop      = format["generate loop"]
-    f_r, f_s    = format["free indices"][:2]
-    f_tensor    = format["tabulate tensor"]
-    f_values    = format["argument values"]
-    f_basis     = format["call basis"]
-    f_dof_vals  = format["dof values"]
-    f_double    = format["float declaration"]
-    f_float     = format["floating point"]
-    f_decl      = format["declaration"]
-    f_ref_var   = format["reference variable"]
+    f_comment = format["comment"]
+    f_loop = format["generate loop"]
+    f_r, f_s = format["free indices"][:2]
+    f_tensor = format["tabulate tensor"]
+    f_values = format["argument values"]
+    f_basis = format["call basis"]
+    f_dof_vals = format["dof values"]
+    f_double = format["float declaration"]
+    f_float = format["floating point"]
+    f_decl = format["declaration"]
+    f_ref_var = format["reference variable"]
 
     # Initialise return code.
     code = []
@@ -94,7 +95,7 @@ def _evaluate_basis_all(data):
     else:
         code += [f_decl(f_double,
                         f_component(f_dof_vals, physical_value_size),
-                        f_tensor([0.0]*physical_value_size))]
+                        f_tensor([0.0] * physical_value_size))]
 
     # Create loop over dofs that calls evaluate_basis for a single dof and
     # inserts the values into the global array.
@@ -107,7 +108,7 @@ def _evaluate_basis_all(data):
     else:
         lines_r += [f_basis(f_r, f_dof_vals)]
 
-    if physical_value_size ==  1:
+    if physical_value_size == 1:
         lines_r += [f_assign(f_component(f_values, f_r), f_dof_vals)]
     else:
         index = format["matrix index"](f_r, f_s, physical_value_size)
@@ -120,6 +121,8 @@ def _evaluate_basis_all(data):
     return "\n".join(code)
 
 # From FIAT_NEW.polynomial_set.tabulate()
+
+
 def _evaluate_basis(data):
     """Generate run time code to evaluate an element basisfunction at an
     arbitrary point. The value(s) of the basisfunction is/are
@@ -130,14 +133,14 @@ def _evaluate_basis(data):
     The function should work for all elements supported by FIAT, but it remains
     untested for tensor valued elements."""
 
-    if isinstance(data, str):
+    if isinstance(data, string_types):
         return format["exception"]("evaluate_basis: %s" % data)
 
     # Prefetch formats.
-    f_assign    = format["assign"]
-    f_comment   = format["comment"]
-    f_values    = format["argument values"]
-    f_float     = format["floating point"]
+    f_assign = format["assign"]
+    f_comment = format["comment"]
+    f_values = format["argument values"]
+    f_float = format["floating point"]
     f_component = format["component"]
 
     # Initialise return code.
@@ -175,9 +178,10 @@ def _evaluate_basis(data):
 
     # Remove unused variables (from transformations and mappings) in code.
     code = remove_unused("\n".join(code))
-    #code = "\n".join(code)
+    # code = "\n".join(code)
     return code
 
+
 def _generate_dof_code(data, dof_data):
     """Generate code for a single basis element as the dot product of
     coefficients and basisvalues. Then apply transformation if applicable."""
@@ -194,18 +198,19 @@ def _generate_dof_code(data, dof_data):
 
     return remove_unused("\n".join(code))
 
+
 def _tabulate_coefficients(dof_data):
     """This function tabulates the element coefficients that are
     generated by FIAT at compile time."""
 
     # Prefetch formats to speed up code generation.
-    f_comment       = format["comment"]
-    f_table         = format["static const float declaration"]
-    f_coefficients  = format["coefficients"]
-    f_component     = format["component"]
-    f_decl          = format["declaration"]
-    f_tensor        = format["tabulate tensor"]
-    f_new_line      = format["new line"]
+    f_comment = format["comment"]
+    f_table = format["static const float declaration"]
+    f_coefficients = format["coefficients"]
+    f_component = format["component"]
+    f_decl = format["declaration"]
+    f_tensor = format["tabulate tensor"]
+    f_new_line = format["new line"]
 
     # Get coefficients from basis functions, computed by FIAT at compile time.
     coefficients = dof_data["coeffs"]
@@ -226,30 +231,31 @@ def _tabulate_coefficients(dof_data):
         code += [f_decl(f_table, name, f_new_line + f_tensor(coeffs))] + [""]
     return code
 
+
 def _compute_values(data, dof_data):
     """This function computes the value of the basisfunction as the dot product
     of the coefficients and basisvalues."""
 
     # Prefetch formats to speed up code generation.
-    f_values        = format["argument values"]
-    f_component     = format["component"]
-    f_comment       = format["comment"]
-    f_add           = format["add"]
-    f_coefficients  = format["coefficients"]
-    f_basisvalues   = format["basisvalues"]
-    f_r             = format["free indices"][0]
+    f_values = format["argument values"]
+    f_component = format["component"]
+    f_comment = format["comment"]
+    f_add = format["add"]
+    f_coefficients = format["coefficients"]
+    f_basisvalues = format["basisvalues"]
+    f_r = format["free indices"][0]
     f_deref_pointer = format["dereference pointer"]
-    f_detJ          = format["det(J)"]
-    f_inv           = format["inverse"]
-    f_mul           = format["mul"]
-    f_iadd          = format["iadd"]
-    f_group         = format["grouping"]
-    f_tmp_ref       = format["tmp ref value"]
-    f_assign        = format["assign"]
-    f_loop          = format["generate loop"]
-    f_const_float   = format["const float declaration"]
-    f_trans         = format["transform"]
-    f_inner         = format["inner product"]
+    f_detJ = format["det(J)"]
+    f_inv = format["inverse"]
+    f_mul = format["mul"]
+    f_iadd = format["iadd"]
+    f_group = format["grouping"]
+    f_tmp_ref = format["tmp ref value"]
+    f_assign = format["assign"]
+    f_loop = format["generate loop"]
+    f_const_float = format["const float declaration"]
+    f_trans = format["transform"]
+    f_inner = format["inner product"]
 
     tdim = data["topological_dimension"]
     gdim = data["geometric_dimension"]
@@ -261,7 +267,7 @@ def _compute_values(data, dof_data):
     num_components = dof_data["num_components"]
     reference_offset = dof_data["reference_offset"]
     physical_offset = dof_data["physical_offset"]
-    offset = reference_offset # physical_offset # FIXME: Should be physical offset but that breaks tests
+    offset = reference_offset  # physical_offset # FIXME: Should be physical offset but that breaks tests
 
     lines = []
     if data["reference_value_size"] != 1:
@@ -269,14 +275,14 @@ def _compute_values(data, dof_data):
         for i in range(num_components):
             # Generate name and value to create matrix vector multiply.
             name = f_component(f_values, i + offset)
-            value = f_mul([f_component(f_coefficients(i), f_r),\
-                    f_component(f_basisvalues, f_r)])
+            value = f_mul([f_component(f_coefficients(i), f_r),
+                           f_component(f_basisvalues, f_r)])
             lines += [f_iadd(name, value)]
     else:
         # Generate name and value to create matrix vector multiply.
         name = f_deref_pointer(f_values)
-        value = f_mul([f_component(f_coefficients(0), f_r),\
-                f_component(f_basisvalues, f_r)])
+        value = f_mul([f_component(f_coefficients(0), f_r),
+                       f_component(f_basisvalues, f_r)])
         lines = [f_iadd(name, value)]
 
     # Get number of members of the expansion set.
@@ -291,8 +297,8 @@ def _compute_values(data, dof_data):
     elif mapping == "contravariant piola":
         code += ["", f_comment("Using contravariant Piola transform to map values back to the physical element")]
         # Get temporary values before mapping.
-        code += [f_const_float(f_tmp_ref(i), f_component(f_values, i + offset))\
-                  for i in range(num_components)]
+        code += [f_const_float(f_tmp_ref(i), f_component(f_values, i + offset))
+                 for i in range(num_components)]
         # Create names for inner product.
         basis_col = [f_tmp_ref(j) for j in range(tdim)]
         for i in range(gdim):
@@ -307,8 +313,8 @@ def _compute_values(data, dof_data):
     elif mapping == "covariant piola":
         code += ["", f_comment("Using covariant Piola transform to map values back to the physical element")]
         # Get temporary values before mapping.
-        code += [f_const_float(f_tmp_ref(i), f_component(f_values, i + offset))\
-                  for i in range(num_components)]
+        code += [f_const_float(f_tmp_ref(i), f_component(f_values, i + offset))
+                 for i in range(num_components)]
         # Create names for inner product.
         tdim = data["topological_dimension"]
         gdim = data["geometric_dimension"]
@@ -321,11 +327,11 @@ def _compute_values(data, dof_data):
             value = f_group(f_inner(inv_jacobian_column, basis_col))
             name = f_component(f_values, i + offset)
             code += [f_assign(name, value)]
-    elif mapping == "pullback as metric":
-        code += ["", f_comment("Using metric pullback to map values back to the physical element")]
+    elif mapping == "double covariant piola":
+        code += ["", f_comment("Using double covariant Piola transform to map values back to the physical element")]
         # Get temporary values before mapping.
-        code += [f_const_float(f_tmp_ref(i), f_component(f_values, i + offset))\
-                  for i in range(num_components)]
+        code += [f_const_float(f_tmp_ref(i), f_component(f_values, i + offset))
+                 for i in range(num_components)]
         # Create names for inner product.
         tdim = data["topological_dimension"]
         gdim = data["geometric_dimension"]
@@ -344,57 +350,81 @@ def _compute_values(data, dof_data):
                  for k in range(tdim)]))
             name = f_component(f_values, p + offset)
             code += [f_assign(name, value)]
+    elif mapping == "double contravariant piola":
+        code += ["", f_comment("Using double contravariant Piola transform to map values back to the physical element")]
+        # Get temporary values before mapping.
+        code += [f_const_float(f_tmp_ref(i), f_component(f_values, i + offset))\
+                  for i in range(num_components)]
+        # Create names for inner product.
+        tdim = data["topological_dimension"]
+        gdim = data["geometric_dimension"]
+        basis_col = [f_tmp_ref(j) for j in range(num_components)]
+        for p in range(num_components):
+            # unflatten the indices
+            i = p // tdim
+            l = p % tdim
+            # g_il = (det J)^(-2) Jij G_jk Jlk
+            value = f_group(f_inner(
+                [f_inner([f_trans("J", i, j, tdim, gdim, None)
+                          for j in range(tdim)],
+                         [basis_col[j * tdim + k] for j in range(tdim)])
+                 for k in range(tdim)],
+                [f_trans("J", l, k, tdim, gdim, None) for k in range(tdim)]))
+            value = f_mul([f_inv(f_detJ(None)), f_inv(f_detJ(None)), value])
+            name = f_component(f_values, p + offset)
+            code += [f_assign(name, value)]
     else:
         error("Unknown mapping: %s" % mapping)
 
     return code
 
+
 def _compute_basisvalues(data, dof_data):
     """From FIAT_NEW.expansions."""
 
     UNROLL = True
 
     # Prefetch formats to speed up code generation.
-    f_comment     = format["comment"]
-    f_add         = format["add"]
-    f_mul         = format["mul"]
-    f_imul        = format["imul"]
-    f_sub         = format["sub"]
-    f_group       = format["grouping"]
-    f_assign      = format["assign"]
-    f_sqrt        = format["sqrt"]
-    f_x           = format["x coordinate"]
-    f_y           = format["y coordinate"]
-    f_z           = format["z coordinate"]
-    f_double      = format["float declaration"]
-    f_basisvalue  = format["basisvalues"]
-    f_component   = format["component"]
-    f_float       = format["floating point"]
-    f_uint        = format["uint declaration"]
-    f_tensor      = format["tabulate tensor"]
-    f_loop        = format["generate loop"]
-    f_decl        = format["declaration"]
-    f_tmp         = format["tmp value"]
-    f_int         = format["int"]
+    f_comment = format["comment"]
+    f_add = format["add"]
+    f_mul = format["mul"]
+    f_imul = format["imul"]
+    f_sub = format["sub"]
+    f_group = format["grouping"]
+    f_assign = format["assign"]
+    f_sqrt = format["sqrt"]
+    f_x = format["x coordinate"]
+    f_y = format["y coordinate"]
+    f_z = format["z coordinate"]
+    f_double = format["float declaration"]
+    f_basisvalue = format["basisvalues"]
+    f_component = format["component"]
+    f_float = format["floating point"]
+    f_uint = format["uint declaration"]
+    f_tensor = format["tabulate tensor"]
+    f_loop = format["generate loop"]
+    f_decl = format["declaration"]
+    f_tmp = format["tmp value"]
+    f_int = format["int"]
 
     f_r = format["free indices"][0]
 
     # Create temporary values.
-    f1, f2, f3, f4, f5  = [create_symbol(f_tmp(i), CONST) for i in range(0,5)]
+    f1, f2, f3, f4, f5 = [create_symbol(f_tmp(i), CONST) for i in range(0, 5)]
 
     # Get embedded degree.
     embedded_degree = dof_data["embedded_degree"]
 
     # Create helper symbols.
-    symbol_p    = create_symbol(f_r, CONST)
-    symbol_x    = create_symbol(f_x, CONST)
-    symbol_y    = create_symbol(f_y, CONST)
-    symbol_z    = create_symbol(f_z, CONST)
-    int_n1   = f_int(embedded_degree + 1)
+    symbol_p = create_symbol(f_r, CONST)
+    symbol_x = create_symbol(f_x, CONST)
+    symbol_y = create_symbol(f_y, CONST)
+    symbol_z = create_symbol(f_z, CONST)
+    int_n1 = f_int(embedded_degree + 1)
     float_1 = create_float(1)
     float_2 = create_float(2)
-    float_0_5   = create_float(0.5)
-    float_0_25  = create_float(0.25)
+    float_0_5 = create_float(0.5)
+    float_0_25 = create_float(0.25)
 
     # Initialise return code.
     code = [""]
@@ -403,19 +433,19 @@ def _compute_basisvalues(data, dof_data):
     # Get number of members of the expansion set.
     num_mem = dof_data["num_expansion_members"]
     code += [f_comment("Array of basisvalues")]
-    code += [f_decl(f_double, f_component(f_basisvalue, num_mem), f_tensor([0.0]*num_mem))]
+    code += [f_decl(f_double, f_component(f_basisvalue, num_mem), f_tensor([0.0] * num_mem))]
 
     # Declare helper variables, will be removed if not used.
-    code += ["", f_comment("Declare helper variables")] # Keeping this here to avoid changing references
+    code += ["", f_comment("Declare helper variables")]  # Keeping this here to avoid changing references
 
     # Get the element cell name
     element_cellname = data["cellname"]
 
     def _jrc(a, b, n):
-        an = float( ( 2*n+1+a+b)*(2*n+2+a+b))/ float( 2*(n+1)*(n+1+a+b))
-        bn = float( (a*a-b*b) * (2*n+1+a+b))/ float( 2*(n+1)*(2*n+a+b)*(n+1+a+b) )
-        cn = float( (n+a)*(n+b)*(2*n+2+a+b))/ float( (n+1)*(n+1+a+b)*(2*n+a+b) )
-        return (an,bn,cn)
+        an = float((2 * n + 1 + a + b) * (2 * n + 2 + a + b)) / float(2 * (n + 1) * (n + 1 + a + b))
+        bn = float((a * a - b * b) * (2 * n + 1 + a + b)) / float(2 * (n + 1) * (2 * n + a + b) * (n + 1 + a + b))
+        cn = float((n + a) * (n + b) * (2 * n + 2 + a + b)) / float((n + 1) * (n + 1 + a + b) * (2 * n + a + b))
+        return (an, bn, cn)
 
     # 1D
     if (element_cellname == "interval"):
@@ -457,16 +487,16 @@ def _compute_basisvalues(data, dof_data):
                 #        - a4 * result[k-2,:]
 
                 # The below implements the above (with a = b = apb = 0)
-                for r in range(2, embedded_degree+1):
+                for r in range(2, embedded_degree + 1):
 
                     # Define helper variables
-                    a1 = 2.0*r*r*(2.0*r - 2.0)
-                    a3 = ((2.0*r - 2.0)*(2.0*r - 1.0 )*(2.0*r))/a1
-                    a4 = (2.0*(r - 1.0)*(r - 1.0)*(2.0*r))/a1
+                    a1 = 2.0 * r * r * (2.0 * r - 2.0)
+                    a3 = ((2.0 * r - 2.0) * (2.0 * r - 1.0) * (2.0 * r)) / a1
+                    a4 = (2.0 * (r - 1.0) * (r - 1.0) * (2.0 * r)) / a1
 
                     assign_to = f_component(f_basisvalue, r)
-                    assign_from = f_sub([f_mul([f_x, f_component(f_basisvalue, r-1), f_float(a3)]),
-                                         f_mul([f_component(f_basisvalue, r-2), f_float(a4)])])
+                    assign_from = f_sub([f_mul([f_x, f_component(f_basisvalue, r - 1), f_float(a3)]),
+                                         f_mul([f_component(f_basisvalue, r - 2), f_float(a4)])])
                     code += [f_assign(assign_to, assign_from)]
 
         # Scale values.
@@ -480,7 +510,7 @@ def _compute_basisvalues(data, dof_data):
         # Create names.
         basis_k = create_symbol(f_component(f_basisvalue, str(symbol_p)), CONST)
         # Compute value.
-        fac1 = create_symbol( f_sqrt(str(symbol_p + float_0_5)), CONST )
+        fac1 = create_symbol(f_sqrt(str(symbol_p + float_0_5)), CONST)
         lines += [format["imul"](str(basis_k), str(fac1))]
         # Create loop (block of lines).
         code += f_loop(lines, loop_vars)
@@ -493,11 +523,11 @@ def _compute_basisvalues(data, dof_data):
         # f1 = (1.0+2*x+y)/2.0
         # f2 = (1.0 - y) / 2.0
         # f3 = f2**2
-        fac1 = create_fraction(float_1 + float_2*symbol_x + symbol_y, float_2)
+        fac1 = create_fraction(float_1 + float_2 * symbol_x + symbol_y, float_2)
         fac2 = create_fraction(float_1 - symbol_y, float_2)
         code += [f_decl(f_double, str(f1), fac1)]
         code += [f_decl(f_double, str(f2), fac2)]
-        code += [f_decl(f_double, str(f3), f2*f2)]
+        code += [f_decl(f_double, str(f3), f2 * f2)]
 
         code += ["", f_comment("Compute basisvalues")]
         # The initial value basisvalue 0 is always 1.0.
@@ -507,7 +537,7 @@ def _compute_basisvalues(data, dof_data):
         code += [f_assign(f_component(f_basisvalue, 0), f_float(1.0))]
 
         def _idx2d(p, q):
-            return (p + q)*(p + q + 1)//2 + q
+            return (p + q) * (p + q + 1) // 2 + q
 
         # Only continue if the embedded degree is larger than zero.
         if embedded_degree > 0:
@@ -533,8 +563,8 @@ def _compute_basisvalues(data, dof_data):
                     assign_to = f_component(f_basisvalue, rr)
                     ss = _idx2d(r, 0)
                     tt = _idx2d((r - 1), 0)
-                    A = (2*r + 1.0)/(r + 1)
-                    B = r/(1.0 + r)
+                    A = (2 * r + 1.0) / (r + 1)
+                    B = r / (1.0 + r)
                     v1 = f_mul([f_component(f_basisvalue, ss), f_float(A),
                                 str(f1)])
                     v2 = f_mul([f_component(f_basisvalue, tt), f_float(B),
@@ -552,8 +582,8 @@ def _compute_basisvalues(data, dof_data):
                 rr = _idx2d(r, 1)
                 assign_to = f_component(f_basisvalue, rr)
                 ss = _idx2d(r, 0)
-                A = 0.5*(1 + 2*r)
-                B = 0.5*(3 + 2*r)
+                A = 0.5 * (1 + 2 * r)
+                B = 0.5 * (3 + 2 * r)
                 C = f_add([f_float(A), f_mul([f_float(B), str(symbol_y)])])
                 assign_from = f_mul([f_component(f_basisvalue, ss),
                                      f_group(C)])
@@ -574,7 +604,7 @@ def _compute_basisvalues(data, dof_data):
                         rr = _idx2d(r, (s + 1))
                         ss = _idx2d(r, s)
                         tt = _idx2d(r, s - 1)
-                        A, B, C = _jrc(2*r + 1, 0, s)
+                        A, B, C = _jrc(2 * r + 1, 0, s)
                         assign_to = f_component(f_basisvalue, rr)
                         assign_from = f_sub([f_mul([f_component(f_basisvalue, ss), f_group(f_add([f_float(B), f_mul([str(symbol_y), f_float(A)])]))]),
                                              f_mul([f_component(f_basisvalue, tt), f_float(C)])])
@@ -588,7 +618,7 @@ def _compute_basisvalues(data, dof_data):
             for r in range(0, n1):
                 for s in range(0, n1 - r):
                     rr = _idx2d(r, s)
-                    A = (r + 0.5)*(r + s + 1)
+                    A = (r + 0.5) * (r + s + 1)
                     assign_to = f_component(f_basisvalue, rr)
                     code += [f_imul(assign_to, f_sqrt(A))]
 
@@ -599,7 +629,7 @@ def _compute_basisvalues(data, dof_data):
         # def idx(p,q,r):
         #     return (p+q+r)*(p+q+r+1)*(p+q+r+2)//6 + (q+r)*(q+r+1)//2 + r
         def _idx3d(p, q, r):
-            return (p+q+r)*(p+q+r+1)*(p+q+r+2)//6 + (q+r)*(q+r+1)//2 + r
+            return (p + q + r) * (p + q + r + 1) * (p + q + r + 2) // 6 + (q + r) * (q + r + 1) // 2 + r
 
         # FIAT_NEW.expansions.TetrahedronExpansionSet.
 
@@ -610,15 +640,15 @@ def _compute_basisvalues(data, dof_data):
         # factor3 = 0.5 * ( 1 + 2.0 * y + z )
         # factor4 = 0.5 * ( 1 - z )
         # factor5 = factor4 ** 2
-        fac1 = create_product([float_0_5, float_2 + float_2*symbol_x + symbol_y + symbol_z])
+        fac1 = create_product([float_0_5, float_2 + float_2 * symbol_x + symbol_y + symbol_z])
         fac2 = create_product([float_0_25, symbol_y + symbol_z, symbol_y + symbol_z])
-        fac3 = create_product([float_0_5, float_1 + float_2*symbol_y + symbol_z])
+        fac3 = create_product([float_0_5, float_1 + float_2 * symbol_y + symbol_z])
         fac4 = create_product([float_0_5, float_1 - symbol_z])
         code += [f_decl(f_double, str(f1), fac1)]
         code += [f_decl(f_double, str(f2), fac2)]
         code += [f_decl(f_double, str(f3), fac3)]
         code += [f_decl(f_double, str(f4), fac4)]
-        code += [f_decl(f_double, str(f5), f4*f4)]
+        code += [f_decl(f_double, str(f5), f4 * f4)]
 
         code += ["", f_comment("Compute basisvalues")]
         # The initial value basisvalue 0 is always 1.0.
@@ -649,8 +679,8 @@ def _compute_basisvalues(data, dof_data):
                     rr = _idx3d((r + 1), 0, 0)
                     ss = _idx3d(r, 0, 0)
                     tt = _idx3d((r - 1), 0, 0)
-                    A = (2*r + 1.0)/(r + 1)
-                    B = r/(r + 1.0)
+                    A = (2 * r + 1.0) / (r + 1)
+                    B = r / (r + 1.0)
                     assign_to = f_component(f_basisvalue, rr)
                     assign_from = f_sub([f_mul([f_float(A), str(f1), f_component(f_basisvalue, ss)]), f_mul([f_float(B), str(f2), f_component(f_basisvalue, tt)])])
                     code += [f_assign(assign_to, assign_from)]
@@ -689,7 +719,7 @@ def _compute_basisvalues(data, dof_data):
                         rr = _idx3d(r, (s + 1), 0)
                         ss = _idx3d(r, s, 0)
                         tt = _idx3d(r, s - 1, 0)
-                        (A, B, C) = _jrc(2*r + 1, 0, s)
+                        (A, B, C) = _jrc(2 * r + 1, 0, s)
                         assign_to = f_component(f_basisvalue, rr)
                         term0 = f_mul([f_group(f_add([f_mul([f_float(A), str(f3)]), f_mul([f_float(B), str(f4)])])), f_component(f_basisvalue, ss)])
                         term1 = f_mul([f_float(C), str(f5), f_component(f_basisvalue, tt)])
@@ -725,11 +755,11 @@ def _compute_basisvalues(data, dof_data):
                 for r in range(embedded_degree - 1):
                     for s in range(0, embedded_degree - r - 1):
                         for t in range(1, embedded_degree - r - s):
-                            rr = _idx3d(r, s, ( t + 1))
+                            rr = _idx3d(r, s, (t + 1))
                             ss = _idx3d(r, s, t)
                             tt = _idx3d(r, s, t - 1)
 
-                            (A, B, C) = _jrc(2*r + 2*s + 2, 0, t)
+                            (A, B, C) = _jrc(2 * r + 2 * s + 2, 0, t)
                             assign_to = f_component(f_basisvalue, rr)
                             az_b = f_group(f_add([f_float(B), f_mul([f_float(A), str(symbol_z)])]))
                             assign_from = f_sub([f_mul([f_component(f_basisvalue, ss), az_b]), f_mul([f_float(C), f_component(f_basisvalue, tt)])])
@@ -744,13 +774,13 @@ def _compute_basisvalues(data, dof_data):
                 for s in range(embedded_degree - r + 1):
                     for t in range(embedded_degree - r - s + 1):
                         rr = _idx3d(r, s, t)
-                        A = (r + 0.5)*(r + s + 1)*(r + s + t + 1.5)
+                        A = (r + 0.5) * (r + s + 1) * (r + s + t + 1.5)
                         assign_to = f_component(f_basisvalue, rr)
                         multiply_by = f_sqrt(A)
                         myline = f_imul(assign_to, multiply_by)
                         code += [myline]
 
     else:
-        error("Cannot compute basis values for shape: %d" % elemet_cell_domain)
+        error("Cannot compute basis values for shape: %d" % element_cellname)
 
     return code + [""]
diff --git a/ffc/evaluatebasisderivatives.py b/ffc/evaluatebasisderivatives.py
index 2f0d5dc..d0c993e 100644
--- a/ffc/evaluatebasisderivatives.py
+++ b/ffc/evaluatebasisderivatives.py
@@ -1,8 +1,6 @@
-"""Code generation for evaluation of derivatives of finite element
-basis values.  This module generates code which is more or less a C++
-representation of the code found in FIAT_NEW."""
+# -*- coding: utf-8 -*-
 
-# Copyright (C) 2007-2013 Kristian B. Oelgaard
+# Copyright (C) 2007-2016 Kristian B. Oelgaard
 #
 # This file is part of FFC.
 #
@@ -20,44 +18,47 @@ representation of the code found in FIAT_NEW."""
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 #
 # Modified by Anders Logg 2013
-# Modified by Lizao Li 2015
-#
-# First added:  2007-04-16
-# Last changed: 2015-03-28
+# Modified by Lizao Li 2015, 2016
+
+"""Code generation for evaluation of derivatives of finite element
+basis values.  This module generates code which is more or less a C++
+representation of the code found in FIAT_NEW."""
 
 # Python modules
 import math
 import numpy
+from six import string_types
 
 # FFC modules
-from ffc.log import error, ffc_assert
+from ffc.log import error
 from ffc.evaluatebasis import _compute_basisvalues, _tabulate_coefficients
 from ffc.cpp import remove_unused, indent, format
 
+
 def _evaluate_basis_derivatives_all(data):
     """Like evaluate_basis, but return the values of all basis
     functions (dofs)."""
 
-    if isinstance(data, str):
+    if isinstance(data, string_types):
         return format["exception"]("evaluate_basis_derivatives_all: %s" % data)
 
     # Prefetch formats.
-    f_r, f_s      = format["free indices"][:2]
-    f_assign      = format["assign"]
-    f_loop        = format["generate loop"]
-    f_array       = format["dynamic array"]
-    f_dof_vals    = format["dof values"]
-    f_comment     = format["comment"]
-    f_derivs      = format["call basis_derivatives"]
-    f_values      = format["argument values"]
-    f_int         = format["int"]
-    f_num_derivs  = format["num derivatives"]
-    f_double      = format["float declaration"]
-    f_component   = format["component"]
-    f_mul         = format["mul"]
-    f_float       = format["floating point"]
-    f_index       = format["matrix index"]
-    f_del_array   = format["delete dynamic array"]
+    f_r, f_s = format["free indices"][:2]
+    f_assign = format["assign"]
+    f_loop = format["generate loop"]
+    f_array = format["dynamic array"]
+    f_dof_vals = format["dof values"]
+    f_comment = format["comment"]
+    f_derivs = format["call basis_derivatives"]
+    f_values = format["argument values"]
+    f_int = format["int"]
+    f_num_derivs = format["num derivatives"]
+    f_double = format["float declaration"]
+    f_component = format["component"]
+    f_mul = format["mul"]
+    f_float = format["floating point"]
+    f_index = format["matrix index"]
+    f_del_array = format["delete dynamic array"]
 
     # Initialise return code
     code = []
@@ -91,7 +92,7 @@ def _evaluate_basis_derivatives_all(data):
         return "\n".join(code)
 
     # Compute number of derivatives.
-    if data["topological_dimension"]==data["geometric_dimension"]:
+    if data["topological_dimension"] == data["geometric_dimension"]:
         _g = ""
     else:
         _g = "_g"
@@ -101,7 +102,7 @@ def _evaluate_basis_derivatives_all(data):
     cond = format["argument derivative order"] + format["is equal"] + format["int"](0)
     val = [format["call basis_all"]]
     val += [format["return"]("")]
-    code += [format["if"](cond, indent("\n".join(val),2))]
+    code += [format["if"](cond, indent("\n".join(val), 2))]
 
     code += _compute_num_derivatives(data["geometric_dimension"], _g)
 
@@ -112,42 +113,43 @@ def _evaluate_basis_derivatives_all(data):
 
     # Reset values.
     code += ["", f_comment("Set values equal to zero.")]
-    name    = f_component(f_values, f_index(f_r, f_s, num_vals))
+    name = f_component(f_values, f_index(f_r, f_s, num_vals))
     lines_s = [f_assign(name, f_float(0.0))]
-    loop_s  = [(f_s, 0, num_vals)]
+    loop_s = [(f_s, 0, num_vals)]
     lines_r = f_loop(lines_s, loop_s)
-    loop_r  = [(f_r, 0, space_dimension)]
-    code    += f_loop(lines_r, loop_r)
+    loop_r = [(f_r, 0, space_dimension)]
+    code += f_loop(lines_r, loop_r)
 
     # If n > max_degree, return zeros.
     code += ["", format["comment"]("If order of derivatives is greater than the maximum polynomial degree, return zeros.")]
     cond = format["argument derivative order"] + format["greater than"] + f_int(max_degree)
-    val  = format["return"]("")
-    code += [format["if"](cond, indent(val,2))]
+    val = format["return"]("")
+    code += [format["if"](cond, indent(val, 2))]
 
     # Declare helper value to hold single dof values and reset.
     code += [f_comment("Helper variable to hold values of a single dof.")]
-    nds = data["geometric_dimension"]**max_degree*physical_value_size
+    nds = data["geometric_dimension"]**max_degree * physical_value_size
     code += [format["declaration"](f_double, f_component(f_dof_vals, f_int(nds)))]
-    line  = [f_assign(f_component(f_dof_vals, f_r), f_float(0.0))]
+    line = [f_assign(f_component(f_dof_vals, f_r), f_float(0.0))]
     code += f_loop(line, [(f_r, 0, nds)])
 
     # Create loop over dofs that calls evaluate_basis_derivatives for a single dof and
     # inserts the values into the global array.
     code += ["", f_comment("Loop dofs and call evaluate_basis_derivatives.")]
-    name  = f_component(f_values, f_index(f_r, f_s, num_vals))
+    name = f_component(f_values, f_index(f_r, f_s, num_vals))
     value = f_component(f_dof_vals, f_s)
-    lines_s  = [f_assign(name, value)]
-    loop_s   = [(f_s, 0, num_vals)]
+    lines_s = [f_assign(name, value)]
+    loop_s = [(f_s, 0, num_vals)]
 
-    lines_r  = [f_derivs(f_r, f_dof_vals)]
+    lines_r = [f_derivs(f_r, f_dof_vals)]
     lines_r += f_loop(lines_s, loop_s)
-    loop_r   = [(f_r, 0, space_dimension)]
-    code    += f_loop(lines_r, loop_r)
+    loop_r = [(f_r, 0, space_dimension)]
+    code += f_loop(lines_r, loop_r)
 
     # Generate bode (no need to remove unused).
     return "\n".join(code)
 
+
 def _evaluate_basis_derivatives(data):
     """Evaluate the derivatives of an element basisfunction at a point. The values are
     computed as in FIAT as the matrix product of the coefficients (computed at compile time),
@@ -155,7 +157,7 @@ def _evaluate_basis_derivatives(data):
     run time and combinations (depending on the order of derivative) of dmats
     tables which hold the derivatives of the expansion coefficients."""
 
-    if isinstance(data, str):
+    if isinstance(data, string_types):
         return format["exception"]("evaluate_basis_derivatives: %s" % data)
 
     # Initialise return code.
@@ -226,9 +228,10 @@ def _evaluate_basis_derivatives(data):
         dof_cases.append(_generate_dof_code(data, dof_data))
     code += [format["switch"](format["argument basis num"], dof_cases)]
     code = remove_unused("\n".join(code))
-    #code = "\n".join(code)
+    # code = "\n".join(code)
     return code
 
+
 def _handle_degree(max_degree):
     """Check value of argument 'n' against the maximum polynomial degree of the
     finite element. If user ask for n>max_degree return an appropriate number
@@ -242,7 +245,7 @@ def _handle_degree(max_degree):
     cond = format["argument derivative order"] + format["is equal"] + format["int"](0)
     val = [format["call basis"](format["argument dof num"], format["argument values"])]
     val += [format["return"]("")]
-    code += [format["if"](cond, indent("\n".join(val),2))]
+    code += [format["if"](cond, indent("\n".join(val), 2))]
 
     # If n > max_degree, derivatives are always zero. Since the appropriate number of
     # zeros have already been inserted into the 'values' array simply return.
@@ -253,6 +256,7 @@ def _handle_degree(max_degree):
 
     return code
 
+
 def _geometry_related_code(data, tdim, gdim, element_cellname):
     code = []
     # Get code snippets for Jacobian, inverse of Jacobian and mapping of
@@ -264,6 +268,7 @@ def _geometry_related_code(data, tdim, gdim, element_cellname):
     code += ["", format["fiat coordinate map"](element_cellname, gdim)]
     return code
 
+
 def _compute_num_derivatives(dimension, suffix=""):
     """Computes the number of derivatives of order 'n' as dimension()^n.
 
@@ -272,8 +277,8 @@ def _compute_num_derivatives(dimension, suffix=""):
     for the number of derivatives in phyisical coordinates.
     """
     # Prefetch formats.
-    f_int         = format["int"]
-    f_num_derivs  = format["num derivatives"](suffix)
+    f_int = format["int"]
+    f_num_derivs = format["num derivatives"](suffix)
 
     # Use loop to compute power since using std::pow() result in an
     # ambiguous call.
@@ -287,21 +292,23 @@ def _compute_num_derivatives(dimension, suffix=""):
 
     return code
 
+
 def _generate_combinations(dimension, suffix, max_degree):
     "Generate all possible combinations of derivatives of order 'n'."
 
     nds = dimension**max_degree
 
     # Use code from format.
-    code = ["", format["combinations"]\
-            % {"combinations": format["derivative combinations"](suffix),\
-               "dimension-1": dimension-1,\
-               "num_derivatives" : format["num derivatives"](suffix),\
+    code = ["", format["combinations"]
+            % {"combinations": format["derivative combinations"](suffix),
+               "dimension-1": dimension - 1,
+               "num_derivatives": format["num derivatives"](suffix),
                "n": format["argument derivative order"],
                "max_num_derivatives":format["int"](nds),
                "max_degree":format["int"](max_degree)}]
     return code
 
+
 def _generate_transform(element_cellname, gdim, tdim, max_degree):
     """Generate the transformation matrix, which is used to transform
     derivatives from reference element back to the physical element."""
@@ -310,24 +317,25 @@ def _generate_transform(element_cellname, gdim, tdim, max_degree):
     max_t_d = tdim**max_degree
     # Generate code to construct the inverse of the Jacobian
     if (element_cellname in ["interval", "triangle", "tetrahedron"]):
-        code = ["", format["transform snippet"][element_cellname][gdim]\
-        % {"transform": format["transform matrix"],\
-           "num_derivatives" : format["num derivatives"](""),\
-           "n": format["argument derivative order"],\
-           "combinations": format["derivative combinations"](""),\
-           "K":format["transform Jinv"],
-           "max_g_deriv":max_g_d, "max_t_deriv":max_t_d}]
+        code = ["", format["transform snippet"][element_cellname][gdim]
+                % {"transform": format["transform matrix"],
+                   "num_derivatives": format["num derivatives"](""),
+                   "n": format["argument derivative order"],
+                   "combinations": format["derivative combinations"](""),
+                   "K":format["transform Jinv"],
+                   "max_g_deriv":max_g_d, "max_t_deriv":max_t_d}]
     else:
         error("Cannot generate transform for shape: %s" % element_cellname)
 
     return code
 
+
 def _reset_values(data, suffix):
     "Reset all components of the 'values' array as it is a pointer to an array."
 
     # Prefetch formats.
-    f_assign  = format["assign"]
-    f_r       = format["free indices"][0]
+    f_assign = format["assign"]
+    f_r = format["free indices"][0]
 
     code = ["", format["comment"]("Reset values. Assuming that values is always an array.")]
 
@@ -347,6 +355,7 @@ def _reset_values(data, suffix):
 
     return code + [""]
 
+
 def _generate_dof_code(data, dof_data):
     "Generate code for a basis."
 
@@ -372,18 +381,19 @@ def _generate_dof_code(data, dof_data):
 
     return code
 
+
 def _tabulate_dmats(dof_data):
     "Tabulate the derivatives of the polynomial base"
 
     code = []
 
     # Prefetch formats to speed up code generation.
-    f_table     = format["static const float declaration"]
-    f_dmats     = format["dmats"]
+    f_table = format["static const float declaration"]
+    f_dmats = format["dmats"]
     f_component = format["component"]
-    f_decl      = format["declaration"]
-    f_tensor    = format["tabulate tensor"]
-    f_new_line  = format["new line"]
+    f_decl = format["declaration"]
+    f_tensor = format["tabulate tensor"]
+    f_new_line = format["new line"]
 
     # Get derivative matrices (coefficients) of basis functions, computed by FIAT at compile time.
     derivative_matrices = dof_data["dmats"]
@@ -398,7 +408,8 @@ def _tabulate_dmats(dof_data):
 
         # Get shape and check dimension (This is probably not needed).
         shape = numpy.shape(matrix)
-        ffc_assert(shape[0] == shape[1] == dof_data["num_expansion_members"], "Something is wrong with the shape of dmats.")
+        if not (shape[0] == shape[1] == dof_data["num_expansion_members"]):
+            error("Something is wrong with the shape of dmats.")
 
         # Declare varable name for coefficients.
         name = f_component(f_dmats(i), [shape[0], shape[1]])
@@ -406,26 +417,28 @@ def _tabulate_dmats(dof_data):
 
     return code
 
+
 def _reset_dmats(shape_dmats, indices):
     "Set values in dmats equal to the identity matrix."
-    f_assign  = format["assign"]
-    f_float   = format["floating point"]
-    i,j = indices
+    f_assign = format["assign"]
+    f_float = format["floating point"]
+    i, j = indices
 
     code = [format["comment"]("Resetting dmats values to compute next derivative.")]
     dmats_old = format["component"](format["dmats"](""), [i, j])
     lines = [f_assign(dmats_old, f_float(0.0))]
-    lines += [format["if"](i + format["is equal"] + j,\
+    lines += [format["if"](i + format["is equal"] + j,
               f_assign(dmats_old, f_float(1.0)))]
     loop_vars = [(i, 0, shape_dmats[0]), (j, 0, shape_dmats[1])]
     code += format["generate loop"](lines, loop_vars)
     return code
 
+
 def _update_dmats(shape_dmats, indices):
     "Update values in dmats_old with values in dmats and set values in dmats to zero."
-    f_assign    = format["assign"]
+    f_assign = format["assign"]
     f_component = format["component"]
-    i,j = indices
+    i, j = indices
 
     code = [format["comment"]("Updating dmats_old with new values and resetting dmats.")]
     dmats = f_component(format["dmats"](""), [i, j])
@@ -435,6 +448,7 @@ def _update_dmats(shape_dmats, indices):
     code += format["generate loop"](lines, loop_vars)
     return code
 
+
 def _compute_dmats(num_dmats, shape_dmats, available_indices, deriv_index, _t):
     "Compute values of dmats as a matrix product."
     f_comment = format["comment"]
@@ -459,9 +473,10 @@ def _compute_dmats(num_dmats, shape_dmats, available_indices, deriv_index, _t):
 
     return code
 
+
 def _dmats_product(shape_dmats, index, i, indices):
     "Create product to update dmats."
-    f_loop      = format["generate loop"]
+    f_loop = format["generate loop"]
     f_component = format["component"]
     t, u = indices
     tu = t + u
@@ -472,45 +487,46 @@ def _dmats_product(shape_dmats, index, i, indices):
     name = format["iadd"](dmats, value)
     lines = f_loop([name], [(tu, 0, shape_dmats[0])])
     loop_vars = [(t, 0, shape_dmats[0]), (u, 0, shape_dmats[1])]
-    code = [format["if"](index + format["is equal"] + str(i),\
+    code = [format["if"](index + format["is equal"] + str(i),
             "\n".join(f_loop(lines, loop_vars)))]
     return code
 
+
 def _compute_reference_derivatives(data, dof_data):
     """Compute derivatives on the reference element by recursively multiply coefficients with
     the relevant derivatives of the polynomial base until the requested order of derivatives
     has been reached. After this take the dot product with the basisvalues."""
 
     # Prefetch formats to speed up code generation
-    f_comment       = format["comment"]
-    f_num_derivs    = format["num derivatives"]
-    f_mul           = format["mul"]
-    f_int           = format["int"]
-    f_matrix_index  = format["matrix index"]
-    f_coefficients  = format["coefficients"]
+    f_comment = format["comment"]
+    f_num_derivs = format["num derivatives"]
+    f_mul = format["mul"]
+    f_int = format["int"]
+    f_matrix_index = format["matrix index"]
+    f_coefficients = format["coefficients"]
 #    f_dof           = format["local dof"]
-    f_basisvalues   = format["basisvalues"]
-    f_const_double  = format["const float declaration"]
-    f_group         = format["grouping"]
-    f_transform     = format["transform"]
-    f_double        = format["float declaration"]
-    f_component     = format["component"]
-    f_tmp           = format["tmp ref value"]
-    f_dmats         = format["dmats"]
-    f_dmats_old     = format["dmats old"]
-    f_assign        = format["assign"]
-    f_decl          = format["declaration"]
-    f_iadd          = format["iadd"]
-    f_add           = format["add"]
-    f_tensor        = format["tabulate tensor"]
-    f_new_line      = format["new line"]
-    f_loop          = format["generate loop"]
-    f_derivatives   = format["reference derivatives"]
-    f_array         = format["dynamic array"]
-    f_float         = format["floating point"]
-    f_inv           = format["inverse"]
-    f_detJ          = format["det(J)"]
-    f_inner         = format["inner product"]
+    f_basisvalues = format["basisvalues"]
+    f_const_double = format["const float declaration"]
+    f_group = format["grouping"]
+    f_transform = format["transform"]
+    f_double = format["float declaration"]
+    f_component = format["component"]
+    f_tmp = format["tmp ref value"]
+    f_dmats = format["dmats"]
+    f_dmats_old = format["dmats old"]
+    f_assign = format["assign"]
+    f_decl = format["declaration"]
+    f_iadd = format["iadd"]
+    f_add = format["add"]
+    f_tensor = format["tabulate tensor"]
+    f_new_line = format["new line"]
+    f_loop = format["generate loop"]
+    f_derivatives = format["reference derivatives"]
+    f_array = format["dynamic array"]
+    f_float = format["floating point"]
+    f_inv = format["inverse"]
+    f_detJ = format["det(J)"]
+    f_inner = format["inner product"]
 
     f_r, f_s, f_t, f_u = format["free indices"]
 
@@ -531,8 +547,8 @@ def _compute_reference_derivatives(data, dof_data):
     # Get shape of derivative matrix (they should all have the same shape) and
     # verify that it is a square matrix.
     shape_dmats = numpy.shape(dof_data["dmats"][0])
-    ffc_assert(shape_dmats[0] == shape_dmats[1],\
-               "Something is wrong with the dmats:\n%s" % str(dof_data["dmats"]))
+    if shape_dmats[0] != shape_dmats[1]:
+        error("Something is wrong with the dmats:\n%s" % str(dof_data["dmats"]))
 
     code = [f_comment("Compute reference derivatives.")]
 
@@ -545,9 +561,9 @@ def _compute_reference_derivatives(data, dof_data):
     else:
         num_vals = f_mul([f_int(num_components), f_num_derivs(_t)])
 
-    nds = tdim**max_degree*num_components
+    nds = tdim**max_degree * num_components
     code += [format["declaration"](f_double, f_component(f_derivatives, f_int(nds)))]
-    line  = [f_assign(f_component(f_derivatives, f_r), f_float(0.0))]
+    line = [f_assign(f_component(f_derivatives, f_r), f_float(0.0))]
     code += f_loop(line, [(f_r, 0, nds)])
     code += [""]
 
@@ -559,9 +575,9 @@ def _compute_reference_derivatives(data, dof_data):
         _p = "_p"
         num_components_p = gdim
 
-        nds = tdim**max_degree*gdim
-        code += [format["declaration"](f_double, f_component(f_derivatives+_p, f_int(nds)))]
-        line  = [f_assign(f_component(f_derivatives+_p, f_r), f_float(0.0))]
+        nds = tdim**max_degree * gdim
+        code += [format["declaration"](f_double, f_component(f_derivatives + _p, f_int(nds)))]
+        line = [f_assign(f_component(f_derivatives + _p, f_r), f_float(0.0))]
         code += f_loop(line, [(f_r, 0, nds)])
         code += [""]
     else:
@@ -589,17 +605,17 @@ def _compute_reference_derivatives(data, dof_data):
         dmats = f_component(f_dmats(""), [f_s, f_t])
         basis = f_component(f_basisvalues, f_t)
         lines_c.append(f_iadd(name, f_mul([coeffs, dmats, basis])))
-    loop_vars_c = [(f_s, 0, shape_dmats[0]),(f_t, 0, shape_dmats[1])]
+    loop_vars_c = [(f_s, 0, shape_dmats[0]), (f_t, 0, shape_dmats[1])]
     lines += f_loop(lines_c, loop_vars_c)
 
     # Apply transformation if applicable.
     if mapping == "affine":
         pass
     elif mapping == "contravariant piola":
-        lines += ["", f_comment\
-                ("Using contravariant Piola transform to map values back to the physical element.")]
+        lines += ["", f_comment
+                  ("Using contravariant Piola transform to map values back to the physical element.")]
         # Get temporary values before mapping.
-        lines += [f_const_double(f_tmp(i),\
+        lines += [f_const_double(f_tmp(i),
                   f_component(f_derivatives, f_matrix_index(i, f_r, f_num_derivs(_t)))) for i in range(num_components)]
 
         # Create names for inner product.
@@ -612,13 +628,13 @@ def _compute_reference_derivatives(data, dof_data):
             inner = [f_mul([jacobian_row[j], basis_col[j]]) for j in range(tdim)]
             sum_ = f_group(f_add(inner))
             value = f_mul([f_inv(f_detJ(None)), sum_])
-            name = f_component(f_derivatives+_p, f_matrix_index(i, f_r, f_num_derivs(_t)))
+            name = f_component(f_derivatives + _p, f_matrix_index(i, f_r, f_num_derivs(_t)))
             lines += [f_assign(name, value)]
     elif mapping == "covariant piola":
-        lines += ["", f_comment\
-                ("Using covariant Piola transform to map values back to the physical element")]
+        lines += ["", f_comment
+                  ("Using covariant Piola transform to map values back to the physical element")]
         # Get temporary values before mapping.
-        lines += [f_const_double(f_tmp(i),\
+        lines += [f_const_double(f_tmp(i),
                   f_component(f_derivatives, f_matrix_index(i, f_r, f_num_derivs(_t)))) for i in range(num_components)]
         # Create names for inner product.
         basis_col = [f_tmp(j) for j in range(tdim)]
@@ -629,13 +645,13 @@ def _compute_reference_derivatives(data, dof_data):
             # Create inner product of basis and inverse of Jacobian.
             inner = [f_mul([inv_jacobian_column[j], basis_col[j]]) for j in range(tdim)]
             value = f_group(f_add(inner))
-            name = f_component(f_derivatives+_p, f_matrix_index(i, f_r, f_num_derivs(_t)))
+            name = f_component(f_derivatives + _p, f_matrix_index(i, f_r, f_num_derivs(_t)))
             lines += [f_assign(name, value)]
-    elif mapping == "pullback as metric":
-        lines += ["", f_comment("Using metric pullback to map values back to the physical element")]
+    elif mapping == "double covariant piola":
+        lines += ["", f_comment("Using double covariant Piola transform to map values back to the physical element")]
         lines += [f_const_double(f_tmp(i),
-                                f_component(f_derivatives,
-                                            f_matrix_index(i, f_r, f_num_derivs(_t))))
+                                 f_component(f_derivatives,
+                                             f_matrix_index(i, f_r, f_num_derivs(_t))))
                   for i in range(num_components)]
         basis_col = [f_tmp(j) for j in range(num_components)]
         for p in range(num_components):
@@ -650,7 +666,31 @@ def _compute_reference_derivatives(data, dof_data):
                  for k in range(tdim)],
                 [f_transform("JINV", k, l, tdim, gdim, None)
                  for k in range(tdim)]))
-            name = f_component(f_derivatives+_p, f_matrix_index(p, f_r, f_num_derivs(_t)))
+            name = f_component(f_derivatives + _p, f_matrix_index(p, f_r, f_num_derivs(_t)))
+            lines += [f_assign(name, value)]
+    elif mapping == "double contravariant piola":
+        lines += ["", f_comment("Using double contravariant Piola transform to map values back to the physical element.")]
+        lines += [f_const_double(
+            f_tmp(i),
+            f_component(f_derivatives,
+                        f_matrix_index(i, f_r, f_num_derivs(_t))))
+                  for i in range(num_components)]
+        basis_col = [f_tmp(j) for j in range(num_components)]
+        for p in range(num_components):
+            # unflatten the indices
+            i = p // tdim
+            l = p % tdim
+            # g_il = (det J)^(-2) Jij G_jk Jlk
+            value = f_group(f_inner(
+                [f_inner([f_transform("J", i, j, tdim, gdim, None)
+                          for j in range(tdim)],
+                         [basis_col[j * tdim + k] for j in range(tdim)])
+                 for k in range(tdim)],
+                [f_transform("J", l, k, tdim, gdim, None)
+                 for k in range(tdim)]))
+            value = f_mul([f_inv(f_detJ(None)), f_inv(f_detJ(None)), value])
+            name = f_component(f_derivatives+_p,
+                               f_matrix_index(p, f_r, f_num_derivs(_t)))
             lines += [f_assign(name, value)]
     else:
         error("Unknown mapping: %s" % mapping)
@@ -664,23 +704,24 @@ def _compute_reference_derivatives(data, dof_data):
 
     return code + [""]
 
+
 def _transform_derivatives(data, dof_data):
     """Transform derivatives back to the physical element by applying the
     transformation matrix."""
 
     # Prefetch formats to speed up code generation.
-    f_loop        = format["generate loop"]
-    f_num_derivs  = format["num derivatives"]
+    f_loop = format["generate loop"]
+    f_num_derivs = format["num derivatives"]
     f_derivatives = format["reference derivatives"]
-    f_values      = format["argument values"]
-    f_mul         = format["mul"]
-    f_iadd        = format["iadd"]
-    f_component   = format["component"]
-    f_transform   = format["transform matrix"]
-    f_r, f_s      = format["free indices"][:2]
-    f_index       = format["matrix index"]
-
-    if data["topological_dimension"]==data["geometric_dimension"]:
+    f_values = format["argument values"]
+    f_mul = format["mul"]
+    f_iadd = format["iadd"]
+    f_component = format["component"]
+    f_transform = format["transform matrix"]
+    f_r, f_s = format["free indices"][:2]
+    f_index = format["matrix index"]
+
+    if data["topological_dimension"] == data["geometric_dimension"]:
         _t = ""
         _g = ""
     else:
@@ -691,7 +732,7 @@ def _transform_derivatives(data, dof_data):
     num_components = dof_data["num_components"]
     reference_offset = dof_data["reference_offset"]
     physical_offset = dof_data["physical_offset"]
-    offset = reference_offset # physical_offset # FIXME: Should be physical offset but that breaks tests
+    offset = reference_offset  # physical_offset # FIXME: Should be physical offset but that breaks tests
 
     mapping = dof_data["mapping"]
     if "piola" in mapping:
@@ -709,7 +750,7 @@ def _transform_derivatives(data, dof_data):
         access_name = f_index(offset + i, f_r, f_num_derivs(_g))
         name = f_component(f_values, access_name)
         access_val = f_index(i, f_s, f_num_derivs(_t))
-        value = f_mul([f_component(f_transform, [f_r, f_s]), f_component(f_derivatives+_p, access_val)])
+        value = f_mul([f_component(f_transform, [f_r, f_s]), f_component(f_derivatives + _p, access_val)])
         lines += [f_iadd(name, value)]
 
     loop_vars = [(f_r, 0, f_num_derivs(_g)), (f_s, 0, f_num_derivs(_t))]
diff --git a/ffc/evaluatedof.py b/ffc/evaluatedof.py
index 36cf5b2..1a959b3 100644
--- a/ffc/evaluatedof.py
+++ b/ffc/evaluatedof.py
@@ -1,3 +1,26 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2009-2016 Marie E. Rognes
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+#
+# Modified by Kristian B. Oelgaard 2010-2011
+# Modified by Anders Logg 2013
+# Modified by Lizao Li 2015, 2016
+
 """Code generation for evaluate_dof.
 
 This module generates the functions evaluate_dof and evaluate_dofs.
@@ -24,29 +47,8 @@ FIAT (functional.pt_dict) in the intermediate representation stage.
 
 """
 
-# Copyright (C) 2009 Marie E. Rognes
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# Modified by Kristian B. Oelgaard 2010-2011
-# Modified by Anders Logg 2013
-# Modified by Lizao Li 2015
-#
-# First added:  2009-xx-yy
-# Last changed: 2015-03-20
+from collections import OrderedDict
+import six
 
 from ffc.cpp import format, remove_unused
 from ffc.utils import pick_first
@@ -55,35 +57,36 @@ from ufl.permutation import build_component_numbering
 __all__ = ["evaluate_dof_and_dofs", "affine_weights"]
 
 # Prefetch formats:
-comment =   format["comment"]
-declare =   format["declaration"]
-assign =    format["assign"]
+comment = format["comment"]
+declare = format["declaration"]
+assign = format["assign"]
 component = format["component"]
-iadd =      format["iadd"]
-inner =     format["inner product"]
-add =       format["addition"]
-multiply =  format["multiply"]
-J =         format["J"]
-Jinv =      format["inv(J)"]
-detJ =      format["det(J)"](None)
-ret =       format["return"]
-f_i =       format["argument dof num"]
-f_values =  format["argument values"]
-f_double =  format["float declaration"]
-f_vals =    format["dof vals"]
-f_result =  format["dof result"]
-f_y =       format["dof physical coordinates"]
-f_x =       format["coordinate_dofs"]
-f_int =     format["int declaration"]
-f_X =       format["dof X"]
-f_D =       format["dof D"]
-f_W =       format["dof W"]
-f_copy =    format["dof copy"]
-f_r, f_s =  format["free indices"][:2]
-f_loop =    format["generate loop"]
+iadd = format["iadd"]
+inner = format["inner product"]
+add = format["addition"]
+multiply = format["multiply"]
+J = format["J"]
+Jinv = format["inv(J)"]
+detJ = format["det(J)"](None)
+ret = format["return"]
+f_i = format["argument dof num"]
+f_values = format["argument values"]
+f_double = format["float declaration"]
+f_vals = format["dof vals"]
+f_result = format["dof result"]
+f_y = format["dof physical coordinates"]
+f_x = format["coordinate_dofs"]
+f_int = format["int declaration"]
+f_X = format["dof X"]
+f_D = format["dof D"]
+f_W = format["dof W"]
+f_copy = format["dof copy"]
+f_r, f_s = format["free indices"][:2]
+f_loop = format["generate loop"]
 
 map_onto_physical = format["map onto physical"]
 
+
 def evaluate_dof_and_dofs(ir):
     "Generate code for evaluate_dof and evaluate_dof."
 
@@ -94,13 +97,22 @@ def evaluate_dof_and_dofs(ir):
     dof_cases = ["%s\n%s" % (c, ret(r)) for (c, r) in cases]
     dof_code = reqs + format["switch"](f_i, dof_cases, ret(format["float"](0.0)))
 
+    # Construct dict with eval code as keys to remove duplicate eval code
+    cases_opt = OrderedDict((case[0], []) for case in cases)
+    for i, (evl, res) in enumerate(cases):
+        cases_opt[evl].append((i, res))
+
     # Combine each case with assignments for evaluate_dofs
-    dofs_cases = "\n".join("%s\n%s" % (c, format["assign"](component(f_values, i), r))
-                           for (i, (c, r)) in enumerate(cases))
-    dofs_code = reqs + dofs_cases
+    dofs_code = reqs
+    for evl, results in six.iteritems(cases_opt):
+        dofs_code += evl + "\n"
+        for i, res in results:
+            dofs_code += format["assign"](component(f_values, i), res) + "\n"
+    dofs_code = dofs_code.rstrip("\n")
 
     return (dof_code, dofs_code)
 
+
 def _generate_common_code(ir):
 
     # Define necessary geometry information based on the ir
@@ -108,7 +120,7 @@ def _generate_common_code(ir):
 
     # Extract variables
     mappings = ir["mappings"]
-    offsets  = ir["physical_offsets"]
+    offsets = ir["physical_offsets"]
     gdim = ir["geometric_dimension"]
     tdim = ir["topological_dimension"]
 
@@ -118,6 +130,7 @@ def _generate_common_code(ir):
 
     return (reqs, cases)
 
+
 def _required_declarations(ir):
     """Generate code for declaring required variables and geometry
     information.
@@ -141,14 +154,16 @@ def _required_declarations(ir):
     # Check whether Jacobians are necessary.
     needs_inverse_jacobian = any(["contravariant piola" in m
                                   for m in ir["mappings"]])
-    needs_jacobian = any(["covariant piola" in m for m in ir["mappings"]]) or any(["pullback as metric" in m for m in ir["mappings"]])
+    needs_jacobian = any(["covariant piola" in m for m in ir["mappings"]])
 
     # Check if Jacobians are needed
     if not (needs_jacobian or needs_inverse_jacobian):
         return "\n".join(code)
 
-    # Otherwise declare intermediate result variable
-    code.append(declare(f_double, f_result))
+    # Intermediate variable needed for multiple point dofs
+    needs_temporary = any(len(dof) > 1 for dof in ir["dofs"])
+    if needs_temporary:
+        code.append(declare(f_double, f_result))
 
     # Add sufficient Jacobian information. Note: same criterion for
     # needing inverse Jacobian as for needing oriented Jacobian
@@ -161,6 +176,7 @@ def _required_declarations(ir):
 
     return "\n".join(code)
 
+
 def _generate_body(i, dof, mapping, gdim, tdim, offset=0, result=f_result):
     "Generate code for a single dof."
 
@@ -185,7 +201,7 @@ def _generate_body(i, dof, mapping, gdim, tdim, offset=0, result=f_result):
     # Map point onto physical element: y = F_K(x)
     code = []
     for j in range(gdim):
-        y = inner(w, [component(f_x(), (k*gdim + j,)) for k in range(tdim + 1)])
+        y = inner(w, [component(f_x(), (k * gdim + j,)) for k in range(tdim + 1)])
         code.append(assign(component(f_y, j), y))
 
     # Evaluate function at physical point
@@ -204,18 +220,16 @@ def _generate_body(i, dof, mapping, gdim, tdim, offset=0, result=f_result):
 
     # Take inner product between components and weights
     value = add([multiply([w, F[index_map[k]]]) for (w, k) in dof[x]])
-    
-    # Assign value to result variable
-    code.append(assign(result, value))
-    return ("\n".join(code), result)
+
+    # Return eval code and value
+    return ("\n".join(code), value)
 
 
 def _generate_multiple_points_body(i, dof, mapping, gdim, tdim,
                                    offset=0, result=f_result):
-
     "Generate c++ for-loop for multiple points (integral bodies)"
 
-    code = [assign(f_result, 0.0)]
+    code = [assign(result, 0.0)]
     points = list(dof.keys())
     n = len(points)
 
@@ -245,7 +259,7 @@ def _generate_multiple_points_body(i, dof, mapping, gdim, tdim,
     code += [comment("Loop over points")]
 
     # Map the points from the reference onto the physical element
-    #assert(gdim == tdim), \
+    # assert(gdim == tdim), \
     #    "Integral moments not supported for manifolds (yet). Please fix"
     lines_r = [map_onto_physical[tdim][gdim] % {"i": i, "j": f_r}]
 
@@ -265,7 +279,7 @@ def _generate_multiple_points_body(i, dof, mapping, gdim, tdim,
                                 component(f_D(i), (f_r, f_s))),
                       component(f_W(i), (f_r, f_s))])
     # Add value from this point to total result
-    lines_s = [iadd(f_result, value)]
+    lines_s = [iadd(result, value)]
 
     # Generate loop over s and add to r.
     loop_vars_s = [(f_s, 0, len_tokens)]
@@ -278,6 +292,7 @@ def _generate_multiple_points_body(i, dof, mapping, gdim, tdim,
     code = "\n".join(code)
     return code
 
+
 def _change_variables(mapping, gdim, tdim, offset):
     """Generate code for mapping function values according to
     'mapping' and offset.
@@ -309,9 +324,14 @@ def _change_variables(mapping, gdim, tdim, offset):
 
       G(X) = J^T g(x)          i.e  G_i(X) = J^T_ij g(x) = J_ji g_j(x)
 
-    'pullback as metric' mapping for g:
+    'double covariant piola' mapping for g:
 
       G(X) = J^T g(x) J     i.e. G_il(X) = J_ji g_jk(x) J_kl
+
+    'double contravariant piola' mapping for g:
+
+      G(X) = det(J)^2 K g(x) K^T  i.e. G_il(X)=(detJ)^2 K_ij g_jk K_lk
+
     """
 
     # meg: Various mappings must be handled both here and in
@@ -340,8 +360,8 @@ def _change_variables(mapping, gdim, tdim, offset):
             values += [inner(jacobian_column, components)]
         return values
 
-    elif mapping == "pullback as metric":
-        # physical to reference pullback as a metric
+    elif mapping == "double covariant piola":
+        # physical to reference pullback as a covariant 2-tensor
         values = []
         for i in range(tdim):
             for l in range(tdim):
@@ -351,11 +371,22 @@ def _change_variables(mapping, gdim, tdim, offset):
                             for j in range(gdim)]) for k in range(gdim)],
                     [J(k, l, gdim, tdim) for k in range(gdim)])]
         return values
-    
+
+    elif mapping == "double contravariant piola":
+        # physical to reference using double contravariant piola
+        values = []
+        for i in range(tdim):
+            for l in range(tdim):
+                values += [multiply([detJ, detJ, inner(
+                    [inner([Jinv(i, j, gdim, tdim) for j in range(gdim)],
+                           [component(f_vals, j * tdim + k + offset)
+                            for j in range(gdim)]) for k in range(gdim)],
+                    [Jinv(l, k, gdim, tdim) for k in range(gdim)])])]
+        return values
+
     else:
         raise Exception("The mapping (%s) is not allowed" % mapping)
 
-    return code
 
 def affine_weights(dim):
     "Compute coefficents for mapping from reference to physical element"
diff --git a/ffc/extras.py b/ffc/extras.py
index 0fd1c95..65f9382 100644
--- a/ffc/extras.py
+++ b/ffc/extras.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "This modules provides additional functionality for users of FFC."
 
 # Copyright (C) 2010 Anders Logg
@@ -33,6 +34,7 @@ from ffc.representation import compute_ir
 from ffc.optimization import optimize_ir
 from ffc.codegeneration import generate_code
 
+
 def compute_tensor_representation(form):
     """Compute tensor representation for given form. This function may
     be useful for those (Hi Matt!) that want to access the FFC tensor
@@ -41,7 +43,7 @@ def compute_tensor_representation(form):
     # Set parameters
     parameters = default_parameters()
     parameters["representation"] = "tensor"
-    #parameters["optimize"] = "optimize"
+    # parameters["optimize"] = "optimize"
 
     # The below steps basically duplicate the compiler process but
     # skip the code formatting step. Instead, we extract the relevant
@@ -75,13 +77,16 @@ def compute_tensor_representation(form):
     for i in ir_integrals:
         if i["integral_type"] == "cell":
             t = [A0.A0 for (A0, GK, dummy) in i["AK"]]
-            if len(t) == 1: t = t[0]
+            if len(t) == 1:
+                t = t[0]
         elif i["integral_type"] == "exterior_facet":
             t = [A0.A0 for j in i["AK"] for (A0, GK, dummy) in j]
-            if len(t) == 1: t = t[0]
+            if len(t) == 1:
+                t = t[0]
         elif i["integral_type"] == "interior_facet":
             t = [A0.A0 for j in i["AK"] for k in j for (A0, GK, dummy) in k]
-            if len(t) == 1: t = t[0]
+            if len(t) == 1:
+                t = t[0]
         else:
             raise RuntimeError("Unhandled domain type: %s" % str(i["integral_type"]))
         reference_tensors.append(t)
diff --git a/ffc/fiatinterface.py b/ffc/fiatinterface.py
index 47cfd36..9df6f06 100644
--- a/ffc/fiatinterface.py
+++ b/ffc/fiatinterface.py
@@ -1,4 +1,6 @@
-# Copyright (C) 2009-2013 Kristian B. Oelgaard and Anders Logg
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2009-2016 Kristian B. Oelgaard and Anders Logg
 #
 # This file is part of FFC.
 #
@@ -17,24 +19,23 @@
 #
 # Modified by Garth N. Wells, 2009.
 # Modified by Marie Rognes, 2009-2013.
-# Modified by Martin Alnaes, 2013
-# Modified by Lizao Li, 2015
+# Modified by Martin Sandve Alnæs, 2013
+# Modified by Lizao Li, 2015, 2016
 
 # Python modules
-from numpy import array
 import six
+import numpy
+from numpy import array
 
 # UFL and FIAT modules
 import ufl
 from ufl.utils.sorting import sorted_by_key
-import FIAT
 
-from FIAT.trace import DiscontinuousLagrangeTrace
+import FIAT
+from FIAT.hdiv_trace import HDivTrace
 
 # FFC modules
-from ffc.log import debug, error, ffc_assert
-from ffc.quadratureelement import QuadratureElement as FFCQuadratureElement
-
+from ffc.log import debug, error
 
 from ffc.mixedelement import MixedElement
 from ffc.restrictedelement import RestrictedElement
@@ -49,7 +50,7 @@ supported_families = ("Brezzi-Douglas-Marini",
                       "Crouzeix-Raviart",
                       "Discontinuous Lagrange",
                       "Discontinuous Raviart-Thomas",
-                      "Discontinuous Lagrange Trace",
+                      "HDiv Trace",
                       "Lagrange",
                       "Lobatto",
                       "Nedelec 1st kind H(curl)",
@@ -59,22 +60,26 @@ supported_families = ("Brezzi-Douglas-Marini",
                       "Real",
                       "Bubble",
                       "Quadrature",
-                      "Regge")
+                      "Regge",
+                      "Hellan-Herrmann-Johnson")
 
 # Cache for computed elements
 _cache = {}
 
+
 def reference_cell(dim):
     if isinstance(dim, int):
         return FIAT.ufc_simplex(dim)
     else:
         return FIAT.ufc_simplex(cellname2dim[dim])
 
+
 def reference_cell_vertices(dim):
     "Return dict of coordinates of reference cell vertices for this 'dim'."
     cell = reference_cell(dim)
     return cell.get_vertices()
 
+
 def create_element(ufl_element):
 
     # Create element signature for caching (just use UFL element)
@@ -111,6 +116,7 @@ def create_element(ufl_element):
 
     return element
 
+
 def _create_fiat_element(ufl_element):
     "Create FIAT element corresponding to given finite element."
 
@@ -121,8 +127,8 @@ def _create_fiat_element(ufl_element):
     degree = ufl_element.degree()
 
     # Check that FFC supports this element
-    ffc_assert(family in supported_families,
-               "This element family (%s) is not supported by FFC." % family)
+    if family not in supported_families:
+        error("This element family (%s) is not supported by FFC." % family)
 
     # Handle the space of the constant
     if family == "Real":
@@ -133,7 +139,7 @@ def _create_fiat_element(ufl_element):
     # FIXME: AL: Should this really be here?
     # Handle QuadratureElement
     elif family == "Quadrature":
-        element = FFCQuadratureElement(ufl_element)
+        element = QuadratureElement(ufl_element)
 
     else:
         # Create FIAT cell
@@ -146,7 +152,7 @@ def _create_fiat_element(ufl_element):
             return RestrictedElement(V, _indices(V, "interior", tdim), None)
 
         # Check if finite element family is supported by FIAT
-        if not family in FIAT.supported_elements:
+        if family not in FIAT.supported_elements:
             error("Sorry, finite element of type \"%s\" are not supported by FIAT.", family)
 
         # Create FIAT finite element
@@ -157,26 +163,60 @@ def _create_fiat_element(ufl_element):
             element = ElementClass(fiat_cell, degree)
 
     # Consistency check between UFL and FIAT elements.
-    ffc_assert(element.value_shape() == ufl_element.reference_value_shape(),
-               "Something went wrong in the construction of FIAT element from UFL element." +
-               "Shapes are %s and %s." % (element.value_shape(), ufl_element.reference_value_shape()))
+    if element.value_shape() != ufl_element.reference_value_shape():
+        error("Something went wrong in the construction of FIAT element from UFL element." +
+              "Shapes are %s and %s." % (element.value_shape(), ufl_element.reference_value_shape()))
 
     return element
 
-def create_quadrature(shape, num_points):
+
+def create_quadrature(shape, degree, scheme="default"):
     """
-    Generate quadrature rule (points, weights) for given shape with
-    num_points points in each direction.
+    Generate quadrature rule (points, weights) for given shape
+    that will integrate an polynomial of order 'degree' exactly.
     """
-
     if isinstance(shape, int) and shape == 0:
-        return ([()], array([1.0,]))
+        return (numpy.zeros((1, 0)), numpy.ones((1,)))
 
     if shape in cellname2dim and cellname2dim[shape] == 0:
-        return ([()], array([1.0,]))
+        return (numpy.zeros((1, 0)), numpy.ones((1,)))
+
+    if scheme == "vertex":
+        # The vertex scheme, i.e., averaging the function value in the vertices
+        # and multiplying with the simplex volume, is only of order 1 and
+        # inferior to other generic schemes in terms of error reduction.
+        # Equation systems generated with the vertex scheme have some
+        # properties that other schemes lack, e.g., the mass matrix is
+        # a simple diagonal matrix. This may be prescribed in certain cases.
+        if degree > 1:
+            from warnings import warn
+            warn(("Explicitly selected vertex quadrature (degree 1), "
+                 + "but requested degree is %d.") % degree)
+        if shape == "tetrahedron":
+            return (array([[0.0, 0.0, 0.0],
+                           [1.0, 0.0, 0.0],
+                           [0.0, 1.0, 0.0],
+                           [0.0, 0.0, 1.0]]),
+                    array([1.0 / 24.0, 1.0 / 24.0, 1.0 / 24.0, 1.0 / 24.0])
+                    )
+        elif shape == "triangle":
+            return (array([[0.0, 0.0],
+                           [1.0, 0.0],
+                           [0.0, 1.0]]),
+                    array([1.0 / 6.0, 1.0 / 6.0, 1.0 / 6.0])
+                    )
+        elif shape == "interval":
+            # Trapezoidal rule.
+            return (array([[0.0],
+                           [1.0]]),
+                    array([1.0 / 2.0, 1.0 / 2.0])
+                    )
+
+    quad_rule = FIAT.create_quadrature(reference_cell(shape), degree, scheme)
+    points = numpy.asarray(quad_rule.get_points())
+    weights = numpy.asarray(quad_rule.get_weights())
+    return points, weights
 
-    quad_rule = FIAT.make_quadrature(reference_cell(shape), num_points)
-    return quad_rule.get_points(), quad_rule.get_weights()
 
 def map_facet_points(points, facet):
     """
@@ -200,15 +240,15 @@ def map_facet_points(points, facet):
     # Extract vertex coordinates from cell and map of facet index to
     # indicent vertex indices
     coordinate_dofs = fiat_cell.get_vertices()
-    facet_vertices = fiat_cell.get_topology()[dim-1]
+    facet_vertices = fiat_cell.get_topology()[dim - 1]
 
-    #coordinate_dofs = \
+    # coordinate_dofs = \
     #    {1: ((0.,), (1.,)),
     #     2: ((0., 0.), (1., 0.), (0., 1.)),
     #     3: ((0., 0., 0.), (1., 0., 0.),(0., 1., 0.), (0., 0., 1))}
 
     # Facet vertices
-    #facet_vertices = \
+    # facet_vertices = \
     #    {2: ((1, 2), (0, 2), (0, 1)),
     #     3: ((1, 2, 3), (0, 2, 3), (0, 1, 3), (0, 1, 2))}
 
@@ -217,11 +257,12 @@ def map_facet_points(points, facet):
     new_points = []
     for point in points:
         w = (1.0 - sum(point),) + tuple(point)
-        x = tuple(sum([w[i]*array(coordinates[i]) for i in range(len(w))]))
+        x = tuple(sum([w[i] * array(coordinates[i]) for i in range(len(w))]))
         new_points += [x]
 
     return new_points
 
+
 def _extract_elements(ufl_element, restriction_domain=None):
     "Recursively extract un-nested list of (component) elements."
 
@@ -244,6 +285,7 @@ def _extract_elements(ufl_element, restriction_domain=None):
 
     return elements
 
+
 def _create_restricted_element(ufl_element):
     "Create an FFC representation for an UFL RestrictedElement."
 
@@ -266,6 +308,7 @@ def _create_restricted_element(ufl_element):
 
     error("Cannot create restricted element from %s" % str(ufl_element))
 
+
 def _indices(element, restriction_domain, tdim):
     "Extract basis functions indices that correspond to restriction_domain."
 
@@ -281,7 +324,7 @@ def _indices(element, restriction_domain, tdim):
     # the topological degree of the restriction_domain and of all lower
     # dimensions.
     if restriction_domain == "facet":
-        rdim = tdim-1
+        rdim = tdim - 1
     elif restriction_domain == "face":
         rdim = 2
     elif restriction_domain == "edge":
@@ -298,3 +341,6 @@ def _indices(element, restriction_domain, tdim):
         for (entity, index) in sorted_by_key(entities):
             indices += index
     return indices
+
+# Import FFC module with circular dependency
+from ffc.quadratureelement import QuadratureElement
diff --git a/ffc/formatting.py b/ffc/formatting.py
index 99426ad..306a3db 100644
--- a/ffc/formatting.py
+++ b/ffc/formatting.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 """
 Compiler stage 5: Code formatting
 ---------------------------------
@@ -9,7 +10,7 @@ It relies on templates for UFC code available as part of the module
 ufc_utils.
 """
 
-# Copyright (C) 2009-2015 Anders Logg
+# Copyright (C) 2009-2016 Anders Logg
 #
 # This file is part of FFC.
 #
@@ -37,6 +38,44 @@ from ffc.cpp import format, make_classname
 from ffc.backends.ufc import templates, visibility_snippet, factory_decl, factory_impl
 from ffc.parameters import compilation_relevant_parameters
 
+
+def generate_factory_functions(prefix, kind, classname):
+    publicname = make_classname(prefix, kind, "main")
+    code_h = factory_decl % {
+        "basename": "ufc::%s" % kind,
+        "publicname": publicname,
+        }
+    code_c = factory_impl % {
+        "basename": "ufc::%s" % kind,
+        "publicname": publicname,
+        "privatename": classname
+        }
+    return code_h, code_c
+
+
+def generate_jit_factory_functions(code, prefix):
+    # Extract code
+    code_elements, code_dofmaps, code_coordinate_mappings, code_integrals, code_forms = code
+
+    if code_forms:
+        # Direct jit of form
+        code_h, code_c = generate_factory_functions(
+            prefix, "form", code_forms[-1]["classname"])
+    elif code_coordinate_mappings:
+        # Direct jit of coordinate mapping
+        code_h, code_c = generate_factory_functions(
+            prefix, "coordinate_mapping", code_coordinate_mappings[-1]["classname"])
+    else:
+        # Direct jit of element
+        code_h, code_c = generate_factory_functions(
+            prefix, "finite_element", code_elements[-1]["classname"])
+        fh, fc = generate_factory_functions(
+            prefix, "dofmap", code_dofmaps[-1]["classname"])
+        code_h += fh
+        code_c += fc
+    return code_h, code_c
+
+
 def format_code(code, wrapper_code, prefix, parameters, jit=False):
     "Format given code in UFC format. Returns two strings with header and source file contents."
 
@@ -76,7 +115,6 @@ def format_code(code, wrapper_code, prefix, parameters, jit=False):
         code_c += _format_c("dofmap", code_dofmap, parameters, jit)
 
     # Generate code for coordinate_mappings
-    code_coordinate_mappings = [] # FIXME: This disables output of generated coordinate_mapping class, until implemented properly
     for code_coordinate_mapping in code_coordinate_mappings:
         code_h += _format_h("coordinate_mapping", code_coordinate_mapping, parameters)
         code_c += _format_c("coordinate_mapping", code_coordinate_mapping, parameters)
@@ -91,29 +129,12 @@ def format_code(code, wrapper_code, prefix, parameters, jit=False):
         code_h += _format_h("form", code_form, parameters, jit)
         code_c += _format_c("form", code_form, parameters, jit)
 
-    if jit and not code_forms:
-        kind = "finite_element"
-        pub = make_classname(prefix, kind, "main")
-        code_h += factory_decl % {
-            "basename": "ufc::%s" % kind,
-            "publicname": pub,
-            }
-        code_c += factory_impl % {
-            "basename": "ufc::%s" % kind,
-            "publicname": pub,
-            "privatename": code_elements[-1]["classname"]
-            }
-        kind = "dofmap"
-        pub = make_classname(prefix, kind, "main")
-        code_h += factory_decl % {
-            "basename": "ufc::%s" % kind,
-            "publicname": pub,
-            }
-        code_c += factory_impl % {
-            "basename": "ufc::%s" % kind,
-            "publicname": pub,
-            "privatename": code_dofmaps[-1]["classname"]
-            }
+    # Add factory functions named "..._main" to construct
+    # the main jit object this module
+    if 0: # jit: # should be part of templates now, right? FIXME Remove?
+        fh, fc = generate_jit_factory_functions(code, prefix)
+        code_h += fh
+        code_c += fc
 
     # Add wrappers
     if wrapper_code:
@@ -131,12 +152,14 @@ def format_code(code, wrapper_code, prefix, parameters, jit=False):
 
     return code_h, code_c
 
+
 def write_code(code_h, code_c, prefix, parameters):
     # Write file(s)
     _write_file(code_h, prefix, ".h", parameters)
     if code_c:
         _write_file(code_c, prefix, ".cpp", parameters)
 
+
 def _format_h(class_type, code, parameters, jit=False):
     "Format header code for given class type."
     if jit:
@@ -146,6 +169,7 @@ def _format_h(class_type, code, parameters, jit=False):
     else:
         return templates[class_type + "_combined"] % code + "\n"
 
+
 def _format_c(class_type, code, parameters, jit=False):
     "Format implementation code for given class type."
     if jit:
@@ -155,6 +179,7 @@ def _format_c(class_type, code, parameters, jit=False):
     else:
         return ""
 
+
 def _write_file(output, prefix, postfix, parameters):
     "Write generated code to file."
     filename = os.path.join(parameters["output_dir"], prefix + postfix)
@@ -162,6 +187,7 @@ def _write_file(output, prefix, postfix, parameters):
         hfile.write(output)
     info("Output written to " + filename + ".")
 
+
 def _generate_comment(parameters):
     "Generate code for comment on top of file."
 
@@ -186,13 +212,14 @@ def _generate_comment(parameters):
 
     return comment
 
+
 def _generate_additional_includes(code):
     s = set()
     s.add("#include <ufc.h>")
 
     for code_foo in code:
         # FIXME: Avoid adding these includes if we don't need them
-        #s.add("#include <cmath>")
+        # s.add("#include <cmath>")
         s.add("#include <stdexcept>")
 
         for c in code_foo:
diff --git a/ffc/interpolatevertexvalues.py b/ffc/interpolatevertexvalues.py
index 0fa6624..a301dda 100644
--- a/ffc/interpolatevertexvalues.py
+++ b/ffc/interpolatevertexvalues.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "Code generation for interpolate_vertex_values."
 
 # Copyright (C) 2009 Marie E. Rognes
@@ -18,31 +19,33 @@
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 #
 # Modified by Kristian B. Oelgaard 2010
-# Modified by Lizao Li 2015
+# Modified by Lizao Li 2015, 2016
 #
-# Last changed: 2015-03-25
+# Last changed: 2016-08-17
 
+from six import string_types
 from ffc.cpp import format, remove_unused
 
 # Extract code manipulation formats
-inner =     format["inner product"]
+inner = format["inner product"]
 component = format["component"]
-assign =    format["assign"]
-multiply =  format["multiply"]
+assign = format["assign"]
+multiply = format["multiply"]
 
 # Extract formats for the Jacobians
-J =       format["J"]
-Jinv =    format["inv(J)"]
+J = format["J"]
+Jinv = format["inv(J)"]
 invdetJ = format["inverse"](format["det(J)"](None))
 
-f_dof_values =    format["argument dof values"]
+f_dof_values = format["argument dof values"]
 f_vertex_values = format["argument vertex values"]
 
+
 def interpolate_vertex_values(ir):
     "Generate code for interpolate_vertex_values."
 
     # Handle unsupported elements.
-    if isinstance(ir, str):
+    if isinstance(ir, string_types):
         return format["exception"]("interpolate_vertex_values: %s" % ir)
 
     # Add code for Jacobian if necessary
@@ -105,7 +108,8 @@ def _interpolate_vertex_values_element(data, gdim, tdim, total_value_size,
         # Create code for each vertex x_j
         for (j, values_at_vertex) in enumerate(vertex_values):
 
-            if value_size == 1: values_at_vertex = [values_at_vertex]
+            if value_size == 1:
+                values_at_vertex = [values_at_vertex]
 
             # Map basis functions using appropriate mapping
             components = change_of_variables(values_at_vertex, k)
@@ -117,7 +121,7 @@ def _interpolate_vertex_values_element(data, gdim, tdim, total_value_size,
             value = inner(dof_values, components)
 
             # Assign value to correct vertex
-            index = j*total_value_size + (k + value_offset)
+            index = j * total_value_size + (k + value_offset)
             code.append(assign(component(f_vertex_values, index), value))
 
     return "\n".join(code)
@@ -155,10 +159,14 @@ def _change_variables(mapping, gdim, tdim, space_dim):
 
       g(x) = K^T G(X)              i.e   g_i(x) = K^T_ij G_j(X) = K_ji G_j(X)
 
-    'pullback as metric' mapping for f:
-    
+    'double covariant piola' mapping for f:
+
       g_il(x) = K_{ji} G_{jk} K_{kl}
 
+    'double contravariant piola' mapping for g:
+
+      g_il(x) = (det(J))^(-2) J_ij G_jk(X) J_lk
+
     """
 
     if mapping is "affine":
@@ -171,13 +179,21 @@ def _change_variables(mapping, gdim, tdim, space_dim):
         change_of_variables = lambda G, i: [inner([Jinv(j, i, tdim, gdim) for j in range(tdim)],
                                                   [G[j][index] for j in range(tdim)])
                                             for index in range(space_dim)]
-    elif mapping == "pullback as metric":
+    elif mapping == "double covariant piola":
         change_of_variables = lambda G, i: [
             inner([inner([Jinv(j, i // tdim, tdim, gdim) for j in range(tdim)],
                          [G[j][k][index] for j in range(tdim)])
                    for k in range(tdim)],
                   [Jinv(k, i % tdim, tdim, gdim) for k in range(tdim)])
             for index in range(space_dim)]
+    elif mapping == "double contravariant piola":
+        change_of_variables = lambda G, i: [
+            multiply([invdetJ, invdetJ, inner(
+                [inner([J(i // tdim, j, tdim, gdim) for j in range(tdim)],
+                         [G[j][k][index] for j in range(tdim)])
+                   for k in range(tdim)],
+                  [J(i % tdim, k, tdim, gdim) for k in range(tdim)])])
+            for index in range(space_dim)]
     else:
         raise Exception("No such mapping: %s accepted" % mapping)
     return change_of_variables
diff --git a/ffc/jitcompiler.py b/ffc/jitcompiler.py
index 0de1168..0ef7415 100644
--- a/ffc/jitcompiler.py
+++ b/ffc/jitcompiler.py
@@ -1,7 +1,8 @@
+# -*- coding: utf-8 -*-
 """This module provides a just-in-time (JIT) form compiler.
-It uses Instant to wrap the generated code into a Python module."""
+It uses dijitso to wrap the generated code into a Python module."""
 
-# Copyright (C) 2007-2015 Anders Logg
+# Copyright (C) 2007-2016 Anders Logg
 #
 # This file is part of FFC.
 #
@@ -22,150 +23,159 @@ It uses Instant to wrap the generated code into a Python module."""
 # Modified by Ilmar Wilbers, 2008
 # Modified by Kristian B. Oelgaard, 2009
 # Modified by Joachim Haga, 2011.
-# Modified by Martin Alnaes, 2013-2015
+# Modified by Martin Sandve Alnæs, 2013-2016
 
 # Python modules
-import os, sys
-import instant
+import os
+import sys
+from hashlib import sha1
 
-# UFL modules
-from ufl import TestFunction, ds, dx
-from ufl.classes import Form, FiniteElementBase
-from ufl.algorithms import extract_elements, extract_sub_elements, compute_form_data
+# FEniCS modules
+import ufl
+
+# Not importing globally to keep dijitso optional if jit is not used
+#import dijitso
 
 # FFC modules
 from ffc.log import log
-from ffc.log import info
-from ffc.log import warning
-from ffc.log import debug
 from ffc.log import error
 from ffc.log import set_level
 from ffc.log import set_prefix
 from ffc.log import INFO
-from ffc.parameters import validate_jit_parameters
-from ffc.mixedelement import MixedElement
-from ffc.compiler import compile_form, compile_element
-from ffc.formatting import write_code
-from ffc.jitobject import JITObject
-from ffc.quadratureelement import default_quadrature_degree
-from ffc.backends.ufc import build_ufc_module
-from ffc.ufc_include import get_ufc_include
-
-# Set debug level for Instant
-instant.set_log_level("warning")
-
-
-def jit_generate(ufl_object, module_name, parameters):
-    "Generate code and return as strings."
-    if isinstance(ufl_object, Form):
-        code_h, code_c = compile_form(ufl_object, prefix=module_name,
-                                      parameters=parameters, jit=True)
-    elif isinstance(ufl_object, FiniteElementBase):
-        code_h, code_c = compile_element(ufl_object, prefix=module_name,
-                                         parameters=parameters, jit=True)
-    return code_h, code_c
-
-
-def jit_build_with_instant(ufl_object, module_name, parameters):
-    # Use Instant cache if possible
-    cache_dir = parameters.get("cache_dir") or None  # Important that this is None and not "", use "." for curdir
-    module = instant.import_module(module_name, cache_dir=cache_dir)
-    if module:
-        debug("Reusing form from cache.")
-        return module
-
-    if parameters["cpp_optimize"]:
-        cppargs = parameters["cpp_optimize_flags"].split()
-    else:
-        cppargs = ["-O0"]
-
-    # Take lock to serialise file removal.
-    # Need to add "_0" to lock as instant.import_module acquire
-    # lock with name: module_name
-    with instant.file_lock(instant.get_default_cache_dir(),
-                           module_name + "_0") as lock:
-
-        # Retry Instant cache. The module may have been created while we waited
-        # for the lock, even if it didn't exist before.
-        module = instant.import_module(module_name, cache_dir=cache_dir)
-        if module:
-            debug("Reusing form from cache.")
-            return module
-
-        # Write a message
-        log(INFO + 5,
-            "Calling FFC just-in-time (JIT) compiler, this may take some time.")
-        code_h, code_c = jit_generate(ufl_object, module_name, parameters)
-
-        # Write to file
-        write_code(code_h, code_c, module_name, parameters)
-
-        # Build module using Instant
-        debug("Compiling and linking Python extension module, this may take some time.")
-        hfile   = module_name + ".h"
-        cppfile = module_name + ".cpp"
-        module = build_ufc_module(
-            hfile,
-            source_directory = os.curdir,
-            signature = module_name,
-            sources = [cppfile],
-            cppargs = cppargs,
-            cache_dir = cache_dir)
-
-        # Remove code
-        if os.path.isfile(hfile):
-            os.unlink(hfile)
-        if os.path.isfile(cppfile):
-            os.unlink(cppfile)
-
-    return module
+from ffc.parameters import validate_jit_parameters, compute_jit_parameters_signature
+from ffc.compiler import compile_form, compile_element, compile_coordinate_mapping
+from ffc.backends.ufc import get_include_path as get_ufc_include_path
+from ffc.backends.ufc import get_ufc_signature, get_ufc_templates_signature
+from ffc import __version__ as FFC_VERSION
+from ffc.cpp import make_classname
 
 
-def jit_build_with_dijitso(ufl_object, module_name, parameters):
+def jit_generate(ufl_object, module_name, signature, parameters):
+    "Callback function passed to dijitso.jit: generate code and return as strings."
+    log(INFO + 5, "Calling FFC just-in-time (JIT) compiler, this may take some time.")
+
+    # Generate actual code for this object
+    if isinstance(ufl_object, ufl.Form):
+        compile_object = compile_form
+    elif isinstance(ufl_object, ufl.FiniteElementBase):
+        compile_object = compile_element
+    elif isinstance(ufl_object, ufl.Mesh):
+        compile_object = compile_coordinate_mapping
+
+    code_h, code_c, dependent_ufl_objects = compile_object(ufl_object,
+            prefix=module_name, parameters=parameters, jit=True)
+
+    # Jit compile dependent objects separately,
+    # but pass indirect=True to skip instantiating objects.
+    # (this is done in here such that it's only triggered
+    # if parent jit module is missing, and it's done after
+    # compile_object because some misformed ufl objects may
+    # require analysis to determine (looking at you Expression...))
+    dependencies = []
+    for dep in dependent_ufl_objects["element"]:
+        dep_module_name = jit(dep, parameters, indirect=True)
+        dependencies.append(dep_module_name)
+    if 0:  # FIXME: Enable coordinate mapping generation when ready
+        for dep in dependent_ufl_objects["coordinate_mapping"]:
+            dep_module_name = jit(ufl.Mesh(dep, ufl_id=0), parameters, indirect=True)
+            dependencies.append(dep_module_name)
+
+    return code_h, code_c, dependencies
+
+
+def jit_build(ufl_object, module_name, parameters):
+    "Wraps dijitso jit with some parameter conversion etc."
     import dijitso
 
-    def _generate(ufl_object, module_name, signature, parameters):
-        # Write a message
-        log(INFO + 5,
-            "Calling FFC just-in-time (JIT) compiler, this may take some time.")
-        code_h, code_c = jit_generate(ufl_object, module_name, parameters)
-        dependencies = ()
-        return code_h, code_c, dependencies
+    # FIXME: Expose more dijitso parameters?
+    # FIXME: dijitso build params are not part of module_name here.
+    #        Currently dijitso doesn't add to the module signature.
 
     # Translating the C++ flags from ffc parameters to dijitso
     # to get equivalent behaviour to instant code
-    if parameters["cpp_optimize"]:
-        build_params = {
-            "cxxflags_opt": tuple(parameters["cpp_optimize_flags"].split()),
-            "debug": False
-            }
-    else:
-        build_params = {
-            "cxxflags_debug": ("-O0",),
-            "debug": True
-            }
-
-    # Add path to UFC include dir
-    build_params["include_dirs"] = get_ufc_include()
+    build_params = {}
+    build_params["debug"] = not parameters["cpp_optimize"]
+    build_params["cxxflags_opt"] = tuple(parameters["cpp_optimize_flags"].split())
+    build_params["cxxflags_debug"] = ("-O0",)
+    build_params["include_dirs"] = get_ufc_include_path()
 
-    # FFC default is "", use "." to point to curdir
+    # Interpreting FFC default "" as None, use "." if you want to point to curdir
     cache_dir = parameters.get("cache_dir") or None
     if cache_dir:
-        cache_params = { "cache_dir": cache_dir }
+        cache_params = {"cache_dir": cache_dir}
     else:
         cache_params = {}
 
+    # This will do some rudimenrary checking of the params and fill in dijitso defaults
     params = dijitso.validate_params({
         "cache": cache_params,
         "build": build_params,
-        "generator": parameters,
-        })
-
-    module, signature = dijitso.jit(ufl_object, module_name, params, _generate)
+        "generator": parameters,  # ffc parameters, just passed on to jit_generate
+    })
+
+    # Carry out jit compilation, calling jit_generate only if needed
+    module, signature = dijitso.jit(jitable=ufl_object,
+                                    name=module_name,
+                                    params=params,
+                                    generate=jit_generate)
     return module
 
 
-def jit(ufl_object, parameters=None):
+def compute_jit_prefix(ufl_object, parameters, kind=None):
+    "Compute the prefix (module name) for jit modules."
+
+    # Get signature from ufl object
+    if isinstance(ufl_object, ufl.Form):
+        kind = "form"
+        object_signature = ufl_object.signature()
+    elif isinstance(ufl_object, ufl.Mesh):
+        # When coordinate mapping is represented by a Mesh, just getting its coordinate element
+        kind = "coordinate_mapping"
+        ufl_object = ufl_object.ufl_coordinate_element()
+        object_signature = repr(ufl_object)  # ** must match below
+    elif kind == "coordinate_mapping" and isinstance(ufl_object, ufl.FiniteElementBase):
+        # When coordinate mapping is represented by its coordinate element
+        object_signature = repr(ufl_object)  # ** must match above
+    elif isinstance(ufl_object, ufl.FiniteElementBase):
+        kind = "element"
+        object_signature = repr(ufl_object)
+    else:
+        error("Unknown ufl object type %s" % (ufl_object.__class__.__name__,))
+
+    # Compute deterministic string of relevant parameters
+    parameters_signature = compute_jit_parameters_signature(parameters)
+
+    # Build combined signature
+    signatures = [
+        object_signature,
+        parameters_signature,
+        str(FFC_VERSION),
+        get_ufc_signature(),
+        get_ufc_templates_signature(),
+        kind,
+        ]
+    string = ";".join(signatures)
+    signature = sha1(string.encode('utf-8')).hexdigest()
+
+    # Optionally shorten signature
+    max_signature_length = parameters["max_signature_length"]
+    if max_signature_length:
+        signature = signature[:max_signature_length]
+
+    # Combine into prefix with some info including kind
+    prefix = ("ffc_%s_%s" % (kind, signature)).lower()
+    return kind, prefix
+
+
+class FFCError(Exception):
+    pass
+
+
+class FFCJitError(FFCError):
+    pass
+
+
+def jit(ufl_object, parameters=None, indirect=False):
     """Just-in-time compile the given form or element
 
     Parameters:
@@ -173,14 +183,6 @@ def jit(ufl_object, parameters=None):
       ufl_object : The UFL object to be compiled
       parameters : A set of parameters
     """
-    # Check that we get a form or element
-    if isinstance(ufl_object, Form):
-        kind = "form"
-    elif isinstance(ufl_object, FiniteElementBase):
-        kind = "element"
-    else:
-        error("Expecting a UFL form or element, got: %s" % repr(ufl_object))
-
     # Check parameters
     parameters = validate_jit_parameters(parameters)
 
@@ -189,49 +191,59 @@ def jit(ufl_object, parameters=None):
     set_level(parameters["log_level"])
     set_prefix(parameters["log_prefix"])
 
-    # Wrap input
-    jit_object = JITObject(ufl_object, parameters)
-
-    # Set prefix for generated code
-    module_name = "ffc_%s_%s" % (kind, jit_object.signature())
+    # Make unique module name for generated code
+    kind, module_name = compute_jit_prefix(ufl_object, parameters)
 
     # Inspect cache and generate+build if necessary
-    use_ctypes = os.environ.get("FFC_USE_CTYPES")
-    if not use_ctypes:
-        module = jit_build_with_instant(ufl_object, module_name, parameters)
-    else:
-        module = jit_build_with_dijitso(ufl_object, module_name, parameters)
-
-    # Construct instance of compiled form
-    if isinstance(ufl_object, Form):
-        compiled_form = _instantiate_form(module, module_name, use_ctypes)
-        return compiled_form, module, module_name
-    elif isinstance(ufl_object, FiniteElementBase):
-        return _instantiate_element_and_dofmap(module, module_name, use_ctypes)
+    module = jit_build(ufl_object, module_name, parameters)
 
+    # Raise exception on failure to build or import module
+    if module is None:
+        # TODO: To communicate directory name here, need dijitso params to call
+        #fail_dir = dijitso.cache.create_fail_dir_path(signature, dijitso_cache_params)
+        raise FFCJitError("A directory with files to reproduce the jit build failure has been created.")
 
-from ffc.cpp import make_classname
-def _instantiate_form(module, prefix, use_ctypes):
-    "Extract the form from module with only one form."
-    form_id = 0
-    classname = make_classname(prefix, "form", form_id)
-    if use_ctypes:
-        import dijitso
-        form = dijitso.extract_factory_function(module, "create_" + classname)()
-        return form
+    # Construct instance of object from compiled code unless indirect
+    if indirect:
+        return module_name
     else:
-        return getattr(module, "create_" + classname)()
+        # FIXME: Streamline number of return arguments here across kinds
+        if kind == "form":
+            compiled_form = _instantiate_form(module, module_name)
+            return (compiled_form, module, module_name)
+            # TODO: module, module_name are never used in dolfin, drop?
+            #return _instantiate_form(module, module_name)
+        elif kind == "element":
+            fe, dm = _instantiate_element_and_dofmap(module, module_name)
+            return fe, dm
+        elif kind == "coordinate_mapping":
+            cm = _instantiate_coordinate_mapping(module, module_name)
+            return cm
+        else:
+            error("Unknown kind %s" % (kind,))
+
+
+def _instantiate_form(module, prefix):
+    "Instantiate an object of the jit-compiled form."
+    import dijitso
+    classname = make_classname(prefix, "form", "main")
+    form = dijitso.extract_factory_function(module, "create_" + classname)()
+    return form
+
 
-def _instantiate_element_and_dofmap(module, prefix, use_ctypes):
-    """Extract element and dofmap from module."""
+def _instantiate_element_and_dofmap(module, prefix):
+    "Instantiate objects of the jit-compiled finite_element and dofmap."
+    import dijitso
     fe_classname = make_classname(prefix, "finite_element", "main")
     dm_classname = make_classname(prefix, "dofmap", "main")
-    if use_ctypes:
-        import dijitso
-        fe = dijitso.extract_factory_function(module, "create_" + fe_classname)()
-        dm = dijitso.extract_factory_function(module, "create_" + dm_classname)()
-    else:
-        fe = getattr(module, "create_" + fe_classname)()
-        dm = getattr(module, "create_" + dm_classname)()
-
+    fe = dijitso.extract_factory_function(module, "create_" + fe_classname)()
+    dm = dijitso.extract_factory_function(module, "create_" + dm_classname)()
     return (fe, dm)
+
+
+def _instantiate_coordinate_mapping(module, prefix):
+    "Instantiate an object of the jit-compiled coordinate_mapping."
+    import dijitso
+    classname = make_classname(prefix, "coordinate_mapping", "main")
+    form = dijitso.extract_factory_function(module, "create_" + classname)()
+    return form
diff --git a/ffc/jitobject.py b/ffc/jitobject.py
deleted file mode 100644
index dfd5a1b..0000000
--- a/ffc/jitobject.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (C) 2008-2013 Anders Logg
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# Modified by Martin Alnaes, 2013
-
-# Python modules.
-from hashlib import sha1
-
-# UFL modules.
-import ufl
-
-# FFC modules.
-from ffc import __version__ as FFC_VERSION
-from ffc.ufc_signature import ufc_signature
-from ffc.parameters import compute_jit_parameters_signature
-
-# UFC modules.
-from ffc.backends import ufc
-
-
-class JITObject:
-    """This class is a wrapper for a compiled object in the context of
-    specific compiler parameters. A JITObject is identified either by its
-    hash value or by its signature. The hash value is valid only in a
-    single instance of an application (at runtime). The signature is
-    persistent and may be used for caching modules on disk."""
-
-    def __init__(self, ufl_object, parameters):
-        "Create JITObject for given form and parameters"
-        assert isinstance(ufl_object, (ufl.Form, ufl.FiniteElementBase))
-
-        # Store data
-        self.ufl_object = ufl_object
-        self.parameters = parameters
-        self._hash = None
-        self._signature = None
-
-    def __hash__(self):
-        "Return unique integer for form + parameters"
-        # Check if we have computed the hash before
-        if self._hash is None:
-            # Compute hash based on signature
-            self._hash = int(self.signature(), 16)
-        return self._hash
-
-    def __eq__(self, other):
-        "Check for equality"
-        return hash(self) == hash(other)
-
-    def signature(self):
-        "Return unique string for form + parameters"
-
-        # Check if we have computed the signature before
-        if not self._signature is None:
-            return self._signature
-
-        # Get signature from form
-        if isinstance(self.ufl_object, ufl.Form):
-            form_signature = self.ufl_object.signature()
-        elif isinstance(self.ufl_object, ufl.FiniteElementBase):
-            form_signature = repr(self.ufl_object)
-
-        # Compute deterministic string of relevant parameters
-        parameters_signature = compute_jit_parameters_signature(self.parameters)
-
-        # Build common signature
-        signatures = [form_signature,
-                      parameters_signature,
-                      str(FFC_VERSION),
-                      ufc_signature()]
-        string = ";".join(signatures)
-
-        self._signature = sha1(string.encode('utf-8')).hexdigest()
-
-        # Uncomment for debugging
-        #print "form_signature       =", form_signature
-        #print "parameters_signature =", parameters_signature
-        #print "ffc_signature        =", ffc_signature
-        #print "signature            =", self._signature
-
-        return self._signature
-
diff --git a/ffc/log.py b/ffc/log.py
index 7582d66..150c2c1 100644
--- a/ffc/log.py
+++ b/ffc/log.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 """This module provides functions used by the FFC implementation to
 output messages. These may be redirected by the user of FFC.
 
@@ -27,7 +28,7 @@ is a wrapper for the standard Python logging module.
 # UFL modules
 from ufl.log import Logger
 from ufl.log import log_functions
-from ufl.log import INFO, DEBUG, ERROR, CRITICAL
+from ufl.log import INFO, DEBUG, WARNING, ERROR, CRITICAL
 from ufl.utils.sorting import sorted_by_key
 from ufl.utils.formatting import dstr, tstr
 
@@ -39,6 +40,8 @@ for foo in log_functions:
     exec("%s = lambda *message : ffc_logger.%s(*message)" % (foo, foo))
 
 # Assertion, copied from UFL
+
+
 def ffc_assert(condition, *message):
     "Assert that condition is true and otherwise issue an error with given message."
     condition or error(*message)
@@ -48,27 +51,33 @@ set_level(INFO)
 
 #--- Specialized FFC debugging tools ---
 
+
 def debug_dict(d, title=""):
     "Pretty-print dictionary."
-    if not title: title = "Dictionary"
+    if not title:
+        title = "Dictionary"
     info("")
     begin(title)
     info("")
     for (key, value) in sorted_by_key(d):
         info(key)
-        info("-"*len(key))
+        info("-" * len(key))
         info(str(value))
         info("")
     end()
 
+
 def debug_ir(ir, name=""):
     "Debug intermediate representation."
     title = "Intermediate representation"
-    if name: title += " (%s)" % str(name)
+    if name:
+        title += " (%s)" % str(name)
     debug_dict(ir, title)
 
+
 def debug_code(code, name=""):
     "Debug generated code."
     title = "Generated code"
-    if name: title += " (%s)" % str(name)
+    if name:
+        title += " (%s)" % str(name)
     debug_dict(code, title)
diff --git a/ffc/mixedelement.py b/ffc/mixedelement.py
index 1e35611..44721fa 100644
--- a/ffc/mixedelement.py
+++ b/ffc/mixedelement.py
@@ -1,4 +1,6 @@
-# Copyright (C) 2005-2010 Anders Logg
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2005-2016 Anders Logg
 #
 # This file is part of FFC.
 #
@@ -19,8 +21,6 @@
 # Modified by Marie E. Rognes, 2007-2010
 # Modified by Kristian B. Oelgaard, 2010
 # Modified by Lizao Li, 2015
-#
-# Last changed: 2015-04-25
 
 # Python modules
 import numpy
@@ -31,7 +31,9 @@ from ffc.log import error
 # UFL utils
 from ufl.utils.sequences import product
 
+
 class MixedElement:
+
     "Create a FFC mixed element from a list of FFC/FIAT elements."
 
     def __init__(self, elements):
@@ -46,7 +48,7 @@ class MixedElement:
 
     def value_shape(self):
         # Values of Tensor elements are flattened in MixedElements
-        num_comps = lambda x: numpy.prod(x) if x else 1 
+        num_comps = lambda x: numpy.prod(x) if x else 1
         return (sum(num_comps(e.value_shape()) or 1 for e in self._elements),)
 
     def entity_dofs(self):
@@ -102,7 +104,7 @@ class MixedElement:
             for dtuple in table.keys():
 
                 # Insert zeros if necessary (should only happen first time)
-                if not dtuple in mixed_table:
+                if dtuple not in mixed_table:
                     # NOTE: It is super important to create a new numpy.zeros
                     # instance to avoid manipulating a numpy reference in case
                     # it is created outside the loop.
@@ -118,6 +120,7 @@ class MixedElement:
 
 #--- Utility functions ---
 
+
 def _combine_entity_dofs(elements):
     """
     Combine the entity_dofs from a list of elements into a combined
@@ -152,6 +155,7 @@ def _combine_entity_dofs(elements):
         offset += e.space_dimension()
     return entity_dofs
 
+
 def _num_components(element):
     "Compute number of components for element."
     return product(element.value_shape())
diff --git a/ffc/optimization.py b/ffc/optimization.py
index a095a8b..62c1048 100644
--- a/ffc/optimization.py
+++ b/ffc/optimization.py
@@ -1,12 +1,6 @@
-"""
-Compiler stage 5: optimization
-------------------------------
+# -*- coding: utf-8 -*-
 
-This module implements the optimization of an intermediate code
-representation.
-"""
-
-# Copyright (C) 2009-2013 Anders Logg
+# Copyright (C) 2009-2016 Anders Logg
 #
 # This file is part of FFC.
 #
@@ -23,15 +17,21 @@ representation.
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 #
-# Modified by Martin Alnaes, 2013
-#
-# First added:  2009-12-22
-# Last changed: 2013-02-10
+# Modified by Martin Sandve Alnæs, 2013
+
+"""
+Compiler stage 5: optimization
+------------------------------
+
+This module implements the optimization of an intermediate code
+representation.
+"""
 
 # FFC modules
 from ffc.log import info, begin, end
 from ffc.representation import pick_representation
 
+
 def optimize_ir(ir, parameters):
     "Optimize intermediate form representation."
 
@@ -53,6 +53,7 @@ def optimize_ir(ir, parameters):
 
     return ir_elements, ir_dofmaps, ir_coordinate_mappings, oir_integrals, ir_forms
 
+
 def _optimize_integral_ir(ir, parameters):
     "Compute optimized intermediate represention of integral."
 
diff --git a/ffc/parameters.py b/ffc/parameters.py
index 2a065d2..72b346d 100644
--- a/ffc/parameters.py
+++ b/ffc/parameters.py
@@ -1,4 +1,5 @@
-# Copyright (C) 2005-2015 Anders Logg
+# -*- coding: utf-8 -*-
+# Copyright (C) 2005-2016 Anders Logg
 #
 # This file is part of FFC.
 #
@@ -23,40 +24,42 @@ from ffc.log import INFO
 # FIXME: Document option -fconvert_exceptions_to_warnings
 # FIXME: Remove option epsilon and just rely on precision?
 
-
+# NB! Parameters in the generate and build sets are
+# included in jit signature, cache and log are not.
 _FFC_GENERATE_PARAMETERS = {
-  "format":                         "ufc",   # code generation format
-  "representation":                 "auto",  # form representation / code
-                                             # generation strategy
-  "quadrature_rule":                "auto",  # quadrature rule used for
-                                             # integration of element tensors
-  "quadrature_degree":              -1,      # quadrature degree used for
-                                             # computing integrals
-  "precision":                      15,      # precision used when writing
-                                             # numbers
-  "epsilon":                        1e-14,   # machine precision, used for
-                                             # dropping zero terms
-  "split":                          False,   # split generated code into .h and
-                                             # .cpp file
-  "form_postfix":                   True,    # postfix form name with "Function",
-                                             # "LinearForm" or BilinearForm
-  "convert_exceptions_to_warnings": False,   # convert all exceptions to warning
-                                             # in generated code
-  "error_control":                  False,   # with error control
-  "optimize":                       False,   # optimise the code generation
-  }
+    "format": "ufc",           # code generation format
+    "representation": "auto",  # form representation / code
+                               # generation strategy
+    "quadrature_rule": "auto", # quadrature rule used for
+                               # integration of element tensors
+    "quadrature_degree": -1,   # quadrature degree used for
+                               # computing integrals
+    "precision": 15,           # precision used when writing
+                               # numbers
+    "epsilon": 1e-14,          # machine precision, used for
+                               # dropping zero terms
+    "split": False,            # split generated code into .h and
+                               # .cpp file
+    "form_postfix": True,      # postfix form name with "Function",
+                               # "LinearForm" or BilinearForm
+    "convert_exceptions_to_warnings": False,   # convert all exceptions to warning
+                                               # in generated code
+    "error_control": False,   # with error control
+    "optimize": False,        # optimise the code generation
+    "max_signature_length": 0,  # set to positive integer to shorten signatures
+}
 _FFC_BUILD_PARAMETERS = {
-  "cpp_optimize":                   True,    # optimization for the JIT compiler
-  "cpp_optimize_flags":             "-O2",   # optimization flags for the JIT compiler
-  }
+    "cpp_optimize": True,          # optimization for the JIT compiler
+    "cpp_optimize_flags": "-O2",   # optimization flags for the JIT compiler
+}
 _FFC_CACHE_PARAMETERS = {
-  "cache_dir":                      "",      # cache dir used by Instant
-  "output_dir":                     ".",     # output directory for generated code
-  }
+    "cache_dir": "",        # cache dir used by Instant
+    "output_dir": ".",      # output directory for generated code
+}
 _FFC_LOG_PARAMETERS = {
-  "log_level":                      INFO+5,  # log level, displaying only
-                                             # messages with level >= log_level
-  "log_prefix":                     "",      # log prefix
+    "log_level": INFO + 5,  # log level, displaying only
+                            # messages with level >= log_level
+    "log_prefix": "",       # log prefix
 }
 FFC_PARAMETERS = {}
 FFC_PARAMETERS.update(_FFC_BUILD_PARAMETERS)
@@ -70,11 +73,11 @@ def split_parameters(parameters):
 
     """
     params = {
-        "cache":     {k: parameters[k] for k in _FFC_CACHE_PARAMETERS.keys()},
-        "build":     {k: parameters[k] for k in _FFC_BUILD_PARAMETERS.keys()},
-        "generate":  {k: parameters[k] for k in _FFC_GENERATE_PARAMETERS.keys()},
-        "log":       {k: parameters[k] for k in _FFC_LOG_PARAMETERS.keys()},
-        }
+        "cache": {k: parameters[k] for k in _FFC_CACHE_PARAMETERS.keys()},
+        "build": {k: parameters[k] for k in _FFC_BUILD_PARAMETERS.keys()},
+        "generate": {k: parameters[k] for k in _FFC_GENERATE_PARAMETERS.keys()},
+        "log": {k: parameters[k] for k in _FFC_LOG_PARAMETERS.keys()},
+    }
     return params
 
 
@@ -131,10 +134,10 @@ def compilation_relevant_parameters(parameters):
 
     # This doesn't work because some parameters may not be among the defaults above.
     # That is somewhat confusing but we'll just have to live with it at least for now.
-    #sp = split_parameters(parameters)
-    #p = {}
-    #p.update(sp["generate"])
-    #p.update(sp["build"])
+    # sp = split_parameters(parameters)
+    # p = {}
+    # p.update(sp["generate"])
+    # p.update(sp["build"])
 
     return p
 
@@ -144,3 +147,4 @@ def compute_jit_parameters_signature(parameters):
     from ufl.utils.sorting import canonicalize_metadata
     parameters = compilation_relevant_parameters(parameters)
     return str(canonicalize_metadata(parameters))
+
diff --git a/ffc/plot.py b/ffc/plot.py
index 305cb6c..c1adc33 100644
--- a/ffc/plot.py
+++ b/ffc/plot.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "This module provides functionality for plotting finite elements."
 
 # Copyright (C) 2010 Anders Logg
@@ -40,19 +41,20 @@ except:
     _soya_imported = False
 
 # Colors for elements
-element_colors = {"Argyris":                      (0.45, 0.70, 0.80),
-                  "Arnold-Winther":               (0.00, 0.00, 1.00),
-                  "Brezzi-Douglas-Marini":        (1.00, 1.00, 0.00),
-                  "Crouzeix-Raviart":             (1.00, 0.25, 0.25),
-                  "Discontinuous Lagrange":       (0.00, 0.25, 0.00),
+element_colors = {"Argyris": (0.45, 0.70, 0.80),
+                  "Arnold-Winther": (0.00, 0.00, 1.00),
+                  "Brezzi-Douglas-Marini": (1.00, 1.00, 0.00),
+                  "Crouzeix-Raviart": (1.00, 0.25, 0.25),
+                  "Discontinuous Lagrange": (0.00, 0.25, 0.00),
                   "Discontinuous Raviart-Thomas": (0.90, 0.90, 0.30),
-                  "Hermite":                      (0.50, 1.00, 0.50),
-                  "Lagrange":                     (0.00, 1.00, 0.00),
-                  "Mardal-Tai-Winther":           (1.00, 0.10, 0.90),
-                  "Morley":                       (0.40, 0.40, 0.40),
-                  "Nedelec 1st kind H(curl)":     (0.90, 0.30, 0.00),
-                  "Nedelec 2nd kind H(curl)":     (0.70, 0.20, 0.00),
-                  "Raviart-Thomas":               (0.90, 0.60, 0.00)}
+                  "Hermite": (0.50, 1.00, 0.50),
+                  "Lagrange": (0.00, 1.00, 0.00),
+                  "Mardal-Tai-Winther": (1.00, 0.10, 0.90),
+                  "Morley": (0.40, 0.40, 0.40),
+                  "Nedelec 1st kind H(curl)": (0.90, 0.30, 0.00),
+                  "Nedelec 2nd kind H(curl)": (0.70, 0.20, 0.00),
+                  "Raviart-Thomas": (0.90, 0.60, 0.00)}
+
 
 def plot(element, rotate=True):
     "Plot finite element."
@@ -76,7 +78,7 @@ def plot(element, rotate=True):
         # Create cell model
         cell, is3d = create_cell_model(element)
 
-        cellname = element.cell().cellname() # Assuming single cell
+        cellname = element.cell().cellname()  # Assuming single cell
 
         # Create dof models
         dofs, num_moments = create_dof_models(element)
@@ -90,6 +92,7 @@ def plot(element, rotate=True):
         # Render plot window
         render([cell] + dofs, title, num_moments, is3d, rotate)
 
+
 def render(models, title, num_moments, is3d, rotate):
     "Render given list of models."
 
@@ -110,17 +113,19 @@ def render(models, title, num_moments, is3d, rotate):
         scene.atmosphere.bg_color = (1.0, 1.0, 1.0, 1.0)
 
     # Not used, need to manually handle rotation
-    #label = Label3D(scene, text=str(num_moments), size=0.005)
-    #label.set_xyz(1.0, 1.0, 1.0)
-    #label.set_color((0.0, 0.0, 0.0, 1.0))
+    # label = Label3D(scene, text=str(num_moments), size=0.005)
+    # label.set_xyz(1.0, 1.0, 1.0)
+    # label.set_color((0.0, 0.0, 0.0, 1.0))
 
     # Define rotation
     if is3d:
         class RotatingBody(soya.Body):
+
             def advance_time(self, proportion):
                 self.rotate_y(2.0 * proportion)
     else:
         class RotatingBody(soya.Body):
+
             def advance_time(self, proportion):
                 self.rotate_z(2.0 * proportion)
 
@@ -170,6 +175,7 @@ def render(models, title, num_moments, is3d, rotate):
 
     # Handle exit
     class Idler(soya.Idler):
+
         def end_round(self):
             for event in self.events:
                 if event[0] == QUIT:
@@ -180,6 +186,7 @@ def render(models, title, num_moments, is3d, rotate):
     idler = Idler(scene)
     idler.idle()
 
+
 def tangents(n):
     "Return normalized tangent vectors for plane defined by given vector."
 
@@ -197,6 +204,7 @@ def tangents(n):
 
     return t0, t1
 
+
 def Cylinder(scene, p0, p1, r, color=(0.0, 0.0, 0.0, 1.0)):
     "Return model for cylinder from p0 to p1 with radius r."
 
@@ -215,16 +223,16 @@ def Cylinder(scene, p0, p1, r, color=(0.0, 0.0, 0.0, 1.0)):
 
     # Traverse the circles
     num_steps = 10
-    dtheta = 2.0*pi / float(num_steps)
+    dtheta = 2.0 * pi / float(num_steps)
     for i in range(num_steps):
 
         # Compute coordinates for square
-        dx0 = cos(i*dtheta)*t0 + sin(i*dtheta)*t1
-        dx1 = cos((i + 1)*dtheta)*t0 + sin((i + 1)*dtheta)*t1
-        x0 = p0 + r*dx0
-        x1 = p0 + r*dx1
-        x2 = p1 + r*dx0
-        x3 = p1 + r*dx1
+        dx0 = cos(i * dtheta) * t0 + sin(i * dtheta) * t1
+        dx1 = cos((i + 1) * dtheta) * t0 + sin((i + 1) * dtheta) * t1
+        x0 = p0 + r * dx0
+        x1 = p0 + r * dx1
+        x2 = p1 + r * dx0
+        x3 = p1 + r * dx1
 
         # Cover square by two triangles
         v0 = soya.Vertex(scene, x0[0], x0[1], x0[2], diffuse=color)
@@ -241,6 +249,7 @@ def Cylinder(scene, p0, p1, r, color=(0.0, 0.0, 0.0, 1.0)):
 
     return model
 
+
 def Cone(scene, p0, p1, r, color=(0.0, 0.0, 0.0, 1.0)):
     "Return model for cone from p0 to p1 with radius r."
 
@@ -259,20 +268,20 @@ def Cone(scene, p0, p1, r, color=(0.0, 0.0, 0.0, 1.0)):
 
     # Traverse the circles
     num_steps = 10
-    dtheta = 2.0*pi / float(num_steps)
+    dtheta = 2.0 * pi / float(num_steps)
     v2 = soya.Vertex(scene, p1[0], p1[1], p1[2], diffuse=color)
     for i in range(num_steps):
 
         # Compute coordinates for bottom of face
-        dx0 = cos(i*dtheta)*t0 + sin(i*dtheta)*t1
-        dx1 = cos((i + 1)*dtheta)*t0 + sin((i + 1)*dtheta)*t1
-        x0 = p0 + r*dx0
-        x1 = p0 + r*dx1
+        dx0 = cos(i * dtheta) * t0 + sin(i * dtheta) * t1
+        dx1 = cos((i + 1) * dtheta) * t0 + sin((i + 1) * dtheta) * t1
+        x0 = p0 + r * dx0
+        x1 = p0 + r * dx1
 
         # Create face
         v0 = soya.Vertex(scene, x0[0], x0[1], x0[2], diffuse=color)
         v1 = soya.Vertex(scene, x1[0], x1[1], x1[2], diffuse=color)
-        f  = soya.Face(scene, (v0, v1, v2))
+        f = soya.Face(scene, (v0, v1, v2))
         f.double_sided = 1
 
     # Extract model
@@ -280,6 +289,7 @@ def Cone(scene, p0, p1, r, color=(0.0, 0.0, 0.0, 1.0)):
 
     return model
 
+
 def Arrow(scene, x, n, center=False):
     "Return model for arrow from x in direction n."
 
@@ -292,22 +302,23 @@ def Arrow(scene, x, n, center=False):
 
     # Dimensions for arrow
     L = 0.3
-    l = 0.35*L
-    r = 0.04*L
-    R = 0.125*L
+    l = 0.35 * L
+    r = 0.04 * L
+    R = 0.125 * L
 
     # Center arrow
     if center:
         print("Centering!")
-        x -= 0.5*(L + l)*n
+        x -= 0.5 * (L + l) * n
 
     # Create cylinder and cone
-    cylinder = Cylinder(scene, x, x + L*n, r)
-    cone = Cone(scene, x + L*n, x + (L + l)*n, R)
+    cylinder = Cylinder(scene, x, x + L * n, r)
+    cone = Cone(scene, x + L * n, x + (L + l) * n, R)
 
     # Extract model
     return scene.to_model()
 
+
 def UnitTetrahedron(color=(0.0, 1.0, 0.0, 0.5)):
     "Return model for unit tetrahedron."
 
@@ -347,6 +358,7 @@ def UnitTetrahedron(color=(0.0, 1.0, 0.0, 0.5)):
 
     return model
 
+
 def UnitTriangle(color=(0.0, 1.0, 0.0, 0.5)):
     "Return model for unit tetrahedron."
 
@@ -376,6 +388,7 @@ def UnitTriangle(color=(0.0, 1.0, 0.0, 0.5)):
 
     return model
 
+
 def PointEvaluation(x):
     "Return model for point evaluation at given point."
 
@@ -405,6 +418,7 @@ def PointEvaluation(x):
 
     return model
 
+
 def PointDerivative(x):
     "Return model for evaluation of derivatives at given point."
 
@@ -434,6 +448,7 @@ def PointDerivative(x):
 
     return model
 
+
 def PointSecondDerivative(x):
     "Return model for evaluation of second derivatives at given point."
 
@@ -463,6 +478,7 @@ def PointSecondDerivative(x):
 
     return model
 
+
 def DirectionalEvaluation(x, n, flip=False, center=False):
     "Return model for directional evaluation at given point in given direction."
 
@@ -492,6 +508,7 @@ def DirectionalEvaluation(x, n, flip=False, center=False):
 
     return model
 
+
 def DirectionalDerivative(x, n):
     "Return model for directional derivative at given point in given direction."
 
@@ -509,13 +526,14 @@ def DirectionalDerivative(x, n):
     n = 0.75 * n / norm(n)
 
     # Create line
-    line = Cylinder(scene, x - 0.07*n, x + 0.07*n, 0.005)
+    line = Cylinder(scene, x - 0.07 * n, x + 0.07 * n, 0.005)
 
     # Extract model
     model = scene.to_model()
 
     return model
 
+
 def IntegralMoment(cellname, num_moments, x=None):
     "Return model for integral moment for given element."
 
@@ -523,10 +541,10 @@ def IntegralMoment(cellname, num_moments, x=None):
 
     # Set position
     if x is None and cellname == "triangle":
-        a = 1.0 / (2 + sqrt(2)) # this was a fun exercise
+        a = 1.0 / (2 + sqrt(2))  # this was a fun exercise
         x = (a, a, 0.0)
     elif x is None:
-        a = 1.0 / (3 + sqrt(3)) # so was this
+        a = 1.0 / (3 + sqrt(3))  # so was this
         x = (a, a, a)
 
     # Make sure point is 3D
@@ -560,12 +578,13 @@ def IntegralMoment(cellname, num_moments, x=None):
 
     return model
 
+
 def create_cell_model(element):
     "Create Soya3D model for cell."
 
     # Get color
     family = element.family()
-    if not family in element_colors:
+    if family not in element_colors:
         warning("Don't know a good color for elements of type '%s', using default color." % family)
         family = "Lagrange"
     color = element_colors[family]
@@ -580,24 +599,25 @@ def create_cell_model(element):
 
     error("Unable to plot element, unhandled cell type: %s" % str(cellname))
 
+
 def create_dof_models(element):
     "Create Soya3D models for dofs."
 
     # Flags for whether to flip and center arrows
-    directional = {"PointScaledNormalEval": (True,  False),
-                   "PointEdgeTangent":      (False, True),
-                   "PointFaceTangent":      (False, True)}
+    directional = {"PointScaledNormalEval": (True, False),
+                   "PointEdgeTangent": (False, True),
+                   "PointFaceTangent": (False, True)}
 
     # Elements not supported fully by FIAT
-    unsupported = {"Argyris":            argyris_dofs,
-                   "Arnold-Winther":     arnold_winther_dofs,
-                   "Hermite":            hermite_dofs,
+    unsupported = {"Argyris": argyris_dofs,
+                   "Arnold-Winther": arnold_winther_dofs,
+                   "Hermite": hermite_dofs,
                    "Mardal-Tai-Winther": mardal_tai_winther_dofs,
-                   "Morley":             morley_dofs}
+                   "Morley": morley_dofs}
 
     # Check if element is supported
     family = element.family()
-    if not family in unsupported:
+    if family not in unsupported:
         # Create FIAT element and get dofs
         fiat_element = create_element(element)
         dofs = [(dof.get_type_tag(), dof.get_point_dict()) for dof in fiat_element.dual_basis()]
@@ -684,6 +704,7 @@ def create_dof_models(element):
 
     return models, num_moments
 
+
 def create_notation_models():
     "Create Soya 3D models for notation."
 
@@ -723,49 +744,53 @@ def create_notation_models():
 
     return models
 
+
 def pointing_outwards(x, n):
     "Check if n is pointing inwards, used for flipping dofs."
     eps = 1e-10
-    x = array(x) + 0.1*array(n)
+    x = array(x) + 0.1 * array(n)
     return x[0] < -eps or x[1] < -eps or x[2] < -eps or x[2] > 1.0 - x[0] - x[1] + eps
 
+
 def to3d(x):
     "Make sure point is 3D."
     if len(x) == 2:
         x = (x[0], x[1], 0.0)
     return x
 
+
 def arnold_winther_dofs(element):
     "Special fix for Arnold-Winther elements until Rob fixes in FIAT."
 
     if not element.cell().cellname() == "triangle":
         error("Unable to plot element, only know how to plot Mardal-Tai-Winther on triangles.")
 
-    return [("PointEval",        {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-            ("PointEval",        {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-            ("PointEval",        {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-            ("PointEval",        {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-            ("PointEval",        {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-            ("PointEval",        {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-            ("PointEval",        {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times
-            ("PointEval",        {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times
-            ("PointEval",        {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times
-            ("PointScaledNormalEval", {(1.0/5, 0.0):     [  (0.0, (0,)), (-1.0, (1,))]}),
-            ("PointScaledNormalEval", {(2.0/5, 0.0):     [  (0.0, (0,)), (-1.0, (1,))]}),
-            ("PointScaledNormalEval", {(3.0/5, 0.0):     [  (0.0, (0,)), (-1.0, (1,))]}),
-            ("PointScaledNormalEval", {(4.0/5, 0.0):     [  (0.0, (0,)), (-1.0, (1,))]}),
-            ("PointScaledNormalEval", {(4.0/5, 1.0/5.0): [  (1.0, (0,)),  (1.0, (1,))]}),
-            ("PointScaledNormalEval", {(3.0/5, 2.0/5.0): [  (1.0, (0,)),  (1.0, (1,))]}),
-            ("PointScaledNormalEval", {(2.0/5, 3.0/5.0): [  (1.0, (0,)),  (1.0, (1,))]}),
-            ("PointScaledNormalEval", {(1.0/5, 4.0/5.0): [  (1.0, (0,)),  (1.0, (1,))]}),
-            ("PointScaledNormalEval", {(0.0,   1.0/5.0): [ (-1.0, (0,)),  (0.0, (1,))]}),
-            ("PointScaledNormalEval", {(0.0,   2.0/5.0): [ (-1.0, (0,)),  (0.0, (1,))]}),
-            ("PointScaledNormalEval", {(0.0,   3.0/5.0): [ (-1.0, (0,)),  (0.0, (1,))]}),
-            ("PointScaledNormalEval", {(0.0,   4.0/5.0): [ (-1.0, (0,)),  (0.0, (1,))]}),
+    return [("PointEval", {(0.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+            ("PointEval", {(0.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+            ("PointEval", {(0.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+            ("PointEval", {(1.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+            ("PointEval", {(1.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+            ("PointEval", {(1.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+            ("PointEval", {(0.0, 1.0): [(1.0, ())]}),  # hack, same dof three times
+            ("PointEval", {(0.0, 1.0): [(1.0, ())]}),  # hack, same dof three times
+            ("PointEval", {(0.0, 1.0): [(1.0, ())]}),  # hack, same dof three times
+            ("PointScaledNormalEval", {(1.0 / 5, 0.0): [(0.0, (0,)), (-1.0, (1,))]}),
+            ("PointScaledNormalEval", {(2.0 / 5, 0.0): [(0.0, (0,)), (-1.0, (1,))]}),
+            ("PointScaledNormalEval", {(3.0 / 5, 0.0): [(0.0, (0,)), (-1.0, (1,))]}),
+            ("PointScaledNormalEval", {(4.0 / 5, 0.0): [(0.0, (0,)), (-1.0, (1,))]}),
+            ("PointScaledNormalEval", {(4.0 / 5, 1.0 / 5.0): [(1.0, (0,)), (1.0, (1,))]}),
+            ("PointScaledNormalEval", {(3.0 / 5, 2.0 / 5.0): [(1.0, (0,)), (1.0, (1,))]}),
+            ("PointScaledNormalEval", {(2.0 / 5, 3.0 / 5.0): [(1.0, (0,)), (1.0, (1,))]}),
+            ("PointScaledNormalEval", {(1.0 / 5, 4.0 / 5.0): [(1.0, (0,)), (1.0, (1,))]}),
+            ("PointScaledNormalEval", {(0.0, 1.0 / 5.0): [(-1.0, (0,)), (0.0, (1,))]}),
+            ("PointScaledNormalEval", {(0.0, 2.0 / 5.0): [(-1.0, (0,)), (0.0, (1,))]}),
+            ("PointScaledNormalEval", {(0.0, 3.0 / 5.0): [(-1.0, (0,)), (0.0, (1,))]}),
+            ("PointScaledNormalEval", {(0.0, 4.0 / 5.0): [(-1.0, (0,)), (0.0, (1,))]}),
             ("IntegralMoment", None),
             ("IntegralMoment", None),
             ("IntegralMoment", None)]
 
+
 def argyris_dofs(element):
     "Special fix for Hermite elements until Rob fixes in FIAT."
 
@@ -775,83 +800,86 @@ def argyris_dofs(element):
     if not element.cell().cellname() == "triangle":
         error("Unable to plot element, only know how to plot Argyris on triangles.")
 
-    return [("PointEval",        {(0.0, 0.0): [ (1.0, ()) ]}),
-            ("PointEval",        {(1.0, 0.0): [ (1.0, ()) ]}),
-            ("PointEval",        {(0.0, 1.0): [ (1.0, ()) ]}),
-            ("PointDeriv",       {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice
-            ("PointDeriv",       {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice
-            ("PointDeriv",       {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice
-            ("PointDeriv",       {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice
-            ("PointDeriv",       {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof twice
-            ("PointDeriv",       {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof twice
-            ("PointSecondDeriv", {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-            ("PointSecondDeriv", {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-            ("PointSecondDeriv", {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-            ("PointSecondDeriv", {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-            ("PointSecondDeriv", {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-            ("PointSecondDeriv", {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-            ("PointSecondDeriv", {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times
-            ("PointSecondDeriv", {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times
-            ("PointSecondDeriv", {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times
-            ("PointNormalDeriv", {(0.5, 0.0): [ (0.0, (0,)), (-1.0,  (1,))]}),
-            ("PointNormalDeriv", {(0.5, 0.5): [ (1.0, (0,)), ( 1.0,  (1,))]}),
-            ("PointNormalDeriv", {(0.0, 0.5): [(-1.0, (0,)), ( 0.0,  (1,))]})]
+    return [("PointEval", {(0.0, 0.0): [(1.0, ())]}),
+            ("PointEval", {(1.0, 0.0): [(1.0, ())]}),
+            ("PointEval", {(0.0, 1.0): [(1.0, ())]}),
+            ("PointDeriv", {(0.0, 0.0): [(1.0, ())]}),  # hack, same dof twice
+            ("PointDeriv", {(0.0, 0.0): [(1.0, ())]}),  # hack, same dof twice
+            ("PointDeriv", {(1.0, 0.0): [(1.0, ())]}),  # hack, same dof twice
+            ("PointDeriv", {(1.0, 0.0): [(1.0, ())]}),  # hack, same dof twice
+            ("PointDeriv", {(0.0, 1.0): [(1.0, ())]}),  # hack, same dof twice
+            ("PointDeriv", {(0.0, 1.0): [(1.0, ())]}),  # hack, same dof twice
+            ("PointSecondDeriv", {(0.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+            ("PointSecondDeriv", {(0.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+            ("PointSecondDeriv", {(0.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+            ("PointSecondDeriv", {(1.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+            ("PointSecondDeriv", {(1.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+            ("PointSecondDeriv", {(1.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+            ("PointSecondDeriv", {(0.0, 1.0): [(1.0, ())]}),  # hack, same dof three times
+            ("PointSecondDeriv", {(0.0, 1.0): [(1.0, ())]}),  # hack, same dof three times
+            ("PointSecondDeriv", {(0.0, 1.0): [(1.0, ())]}),  # hack, same dof three times
+            ("PointNormalDeriv", {(0.5, 0.0): [(0.0, (0,)), (-1.0, (1,))]}),
+            ("PointNormalDeriv", {(0.5, 0.5): [(1.0, (0,)), (1.0, (1,))]}),
+            ("PointNormalDeriv", {(0.0, 0.5): [(-1.0, (0,)), (0.0, (1,))]})]
+
 
 def hermite_dofs(element):
     "Special fix for Hermite elements until Rob fixes in FIAT."
 
-    dofs_2d = [("PointEval",  {(0.0, 0.0): [ (1.0, ()) ]}),
-               ("PointEval",  {(1.0, 0.0): [ (1.0, ()) ]}),
-               ("PointEval",  {(0.0, 1.0): [ (1.0, ()) ]}),
-               ("PointDeriv", {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice
-               ("PointDeriv", {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice
-               ("PointDeriv", {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice
-               ("PointDeriv", {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice
-               ("PointDeriv", {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof twice
-               ("PointDeriv", {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof twice
-               ("PointEval",  {(1.0/3, 1.0/3): [ (1.0, ()) ]})]
-
-    dofs_3d = [("PointEval",  {(0.0, 0.0, 0.0): [ (1.0, ()) ]}),
-               ("PointEval",  {(1.0, 0.0, 0.0): [ (1.0, ()) ]}),
-               ("PointEval",  {(0.0, 1.0, 0.0): [ (1.0, ()) ]}),
-               ("PointEval",  {(0.0, 0.0, 1.0): [ (1.0, ()) ]}),
-               ("PointDeriv", {(0.0, 0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-               ("PointDeriv", {(0.0, 0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-               ("PointDeriv", {(0.0, 0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-               ("PointDeriv", {(1.0, 0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-               ("PointDeriv", {(1.0, 0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-               ("PointDeriv", {(1.0, 0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-               ("PointDeriv", {(0.0, 1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-               ("PointDeriv", {(0.0, 1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-               ("PointDeriv", {(0.0, 1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times
-               ("PointDeriv", {(0.0, 0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times
-               ("PointDeriv", {(0.0, 0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times
-               ("PointDeriv", {(0.0, 0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times
-               ("PointEval",  {(1.0/3, 1.0/3, 1.0/3): [ (1.0, ()) ]}),
-               ("PointEval",  {(0.0,   1.0/3, 1.0/3): [ (1.0, ()) ]}),
-               ("PointEval",  {(1.0/3, 0.0,   1.0/3): [ (1.0, ()) ]}),
-               ("PointEval",  {(1.0/3, 1.0/3, 0.0):   [ (1.0, ()) ]})]
+    dofs_2d = [("PointEval", {(0.0, 0.0): [(1.0, ())]}),
+               ("PointEval", {(1.0, 0.0): [(1.0, ())]}),
+               ("PointEval", {(0.0, 1.0): [(1.0, ())]}),
+               ("PointDeriv", {(0.0, 0.0): [(1.0, ())]}),  # hack, same dof twice
+               ("PointDeriv", {(0.0, 0.0): [(1.0, ())]}),  # hack, same dof twice
+               ("PointDeriv", {(1.0, 0.0): [(1.0, ())]}),  # hack, same dof twice
+               ("PointDeriv", {(1.0, 0.0): [(1.0, ())]}),  # hack, same dof twice
+               ("PointDeriv", {(0.0, 1.0): [(1.0, ())]}),  # hack, same dof twice
+               ("PointDeriv", {(0.0, 1.0): [(1.0, ())]}),  # hack, same dof twice
+               ("PointEval", {(1.0 / 3, 1.0 / 3): [(1.0, ())]})]
+
+    dofs_3d = [("PointEval", {(0.0, 0.0, 0.0): [(1.0, ())]}),
+               ("PointEval", {(1.0, 0.0, 0.0): [(1.0, ())]}),
+               ("PointEval", {(0.0, 1.0, 0.0): [(1.0, ())]}),
+               ("PointEval", {(0.0, 0.0, 1.0): [(1.0, ())]}),
+               ("PointDeriv", {(0.0, 0.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+               ("PointDeriv", {(0.0, 0.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+               ("PointDeriv", {(0.0, 0.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+               ("PointDeriv", {(1.0, 0.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+               ("PointDeriv", {(1.0, 0.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+               ("PointDeriv", {(1.0, 0.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+               ("PointDeriv", {(0.0, 1.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+               ("PointDeriv", {(0.0, 1.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+               ("PointDeriv", {(0.0, 1.0, 0.0): [(1.0, ())]}),  # hack, same dof three times
+               ("PointDeriv", {(0.0, 0.0, 1.0): [(1.0, ())]}),  # hack, same dof three times
+               ("PointDeriv", {(0.0, 0.0, 1.0): [(1.0, ())]}),  # hack, same dof three times
+               ("PointDeriv", {(0.0, 0.0, 1.0): [(1.0, ())]}),  # hack, same dof three times
+               ("PointEval", {(1.0 / 3, 1.0 / 3, 1.0 / 3): [(1.0, ())]}),
+               ("PointEval", {(0.0, 1.0 / 3, 1.0 / 3): [(1.0, ())]}),
+               ("PointEval", {(1.0 / 3, 0.0, 1.0 / 3): [(1.0, ())]}),
+               ("PointEval", {(1.0 / 3, 1.0 / 3, 0.0): [(1.0, ())]})]
 
     if element.cell().cellname() == "triangle":
         return dofs_2d
     else:
         return dofs_3d
 
+
 def mardal_tai_winther_dofs(element):
     "Special fix for Mardal-Tai-Winther elements until Rob fixes in FIAT."
 
     if not element.cell().cellname() == "triangle":
         error("Unable to plot element, only know how to plot Mardal-Tai-Winther on triangles.")
 
-    return [("PointScaledNormalEval", {(1.0/3, 0.0):     [  (0.0, (0,)), (-1.0, (1,))]}),
-            ("PointScaledNormalEval", {(2.0/3, 0.0):     [  (0.0, (0,)), (-1.0, (1,))]}),
-            ("PointScaledNormalEval", {(2.0/3, 1.0/3.0): [  (1.0, (0,)),  (1.0, (1,))]}),
-            ("PointScaledNormalEval", {(1.0/3, 2.0/3.0): [  (1.0, (0,)),  (1.0, (1,))]}),
-            ("PointScaledNormalEval", {(0.0,   1.0/3.0): [ (-1.0, (0,)),  (0.0, (1,))]}),
-            ("PointScaledNormalEval", {(0.0,   2.0/3.0): [ (-1.0, (0,)),  (0.0, (1,))]}),
-            ("PointEdgeTangent", {(0.5, 0.0): [ (-1.0, (0,)),  (0.0, (1,))]}),
-            ("PointEdgeTangent", {(0.5, 0.5): [ (-1.0, (0,)),  (1.0, (1,))]}),
-            ("PointEdgeTangent", {(0.0, 0.5): [  (0.0, (0,)), (-1.0, (1,))]})]
+    return [("PointScaledNormalEval", {(1.0 / 3, 0.0): [(0.0, (0,)), (-1.0, (1,))]}),
+            ("PointScaledNormalEval", {(2.0 / 3, 0.0): [(0.0, (0,)), (-1.0, (1,))]}),
+            ("PointScaledNormalEval", {(2.0 / 3, 1.0 / 3.0): [(1.0, (0,)), (1.0, (1,))]}),
+            ("PointScaledNormalEval", {(1.0 / 3, 2.0 / 3.0): [(1.0, (0,)), (1.0, (1,))]}),
+            ("PointScaledNormalEval", {(0.0, 1.0 / 3.0): [(-1.0, (0,)), (0.0, (1,))]}),
+            ("PointScaledNormalEval", {(0.0, 2.0 / 3.0): [(-1.0, (0,)), (0.0, (1,))]}),
+            ("PointEdgeTangent", {(0.5, 0.0): [(-1.0, (0,)), (0.0, (1,))]}),
+            ("PointEdgeTangent", {(0.5, 0.5): [(-1.0, (0,)), (1.0, (1,))]}),
+            ("PointEdgeTangent", {(0.0, 0.5): [(0.0, (0,)), (-1.0, (1,))]})]
+
 
 def morley_dofs(element):
     "Special fix for Morley elements until Rob fixes in FIAT."
@@ -859,9 +887,9 @@ def morley_dofs(element):
     if not element.cell().cellname() == "triangle":
         error("Unable to plot element, only know how to plot Morley on triangles.")
 
-    return [("PointEval",        {(0.0, 0.0): [ (1.0, ()) ]}),
-            ("PointEval",        {(1.0, 0.0): [ (1.0, ()) ]}),
-            ("PointEval",        {(0.0, 1.0): [ (1.0, ()) ]}),
-            ("PointNormalDeriv", {(0.5, 0.0): [ (0.0, (0,)), (-1.0,  (1,))]}),
-            ("PointNormalDeriv", {(0.5, 0.5): [ (1.0, (0,)), ( 1.0,  (1,))]}),
-            ("PointNormalDeriv", {(0.0, 0.5): [(-1.0, (0,)), ( 0.0,  (1,))]})]
+    return [("PointEval", {(0.0, 0.0): [(1.0, ())]}),
+            ("PointEval", {(1.0, 0.0): [(1.0, ())]}),
+            ("PointEval", {(0.0, 1.0): [(1.0, ())]}),
+            ("PointNormalDeriv", {(0.5, 0.0): [(0.0, (0,)), (-1.0, (1,))]}),
+            ("PointNormalDeriv", {(0.5, 0.5): [(1.0, (0,)), (1.0, (1,))]}),
+            ("PointNormalDeriv", {(0.0, 0.5): [(-1.0, (0,)), (0.0, (1,))]})]
diff --git a/ffc/quadrature/__init__.py b/ffc/quadrature/__init__.py
index dfb9f86..0757341 100644
--- a/ffc/quadrature/__init__.py
+++ b/ffc/quadrature/__init__.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 from .quadraturerepresentation import compute_integral_ir
 from .quadratureoptimization import optimize_integral_ir
 from .quadraturegenerator import generate_integral_code
diff --git a/ffc/quadrature/expr.py b/ffc/quadrature/expr.py
index 3b45946..b7bded3 100644
--- a/ffc/quadrature/expr.py
+++ b/ffc/quadrature/expr.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "This file implements a base class to represent an expression."
 
 # Copyright (C) 2009-2010 Kristian B. Oelgaard
@@ -23,8 +24,10 @@
 # FFC quadrature modules.
 from .symbolics import create_float
 
+
 class Expr(object):
     __slots__ = ("val", "t", "_prec", "_repr", "_hash")
+
     def __init__(self):
         """An Expr object contains:
 
@@ -62,7 +65,8 @@ class Expr(object):
         return True
 
     def __lt__(self, other):
-        """<, compare precedence and _repr if two objects have the same precedence."""
+        """<, compare precedence and _repr if two objects have the same
+precedence."""
         if not isinstance(other, Expr):
             return False
         if self._prec < other._prec:
@@ -81,7 +85,8 @@ class Expr(object):
             return self._repr > other._repr
         return False
 
-    # Public functions (for FloatValue, other classes should overload as needed)
+    # Public functions (for FloatValue, other classes should overload
+    # as needed)
     def expand(self):
         """Expand the expression.
         (FloatValue and Symbol are expanded by construction)."""
@@ -94,13 +99,15 @@ class Expr(object):
         return set()
 
     def get_var_occurrences(self):
-        """Determine the number of times all variables occurs in the expression.
-        Returns a dictionary of variables and the number of times they occur.
-        Works for FloatValue and Symbol."""
+        """Determine the number of times all variables occurs in the
+        expression.  Returns a dictionary of variables and the number
+        of times they occur.  Works for FloatValue and Symbol.
+
+        """
         # There is only one float value (if it is not -1 or 1).
         if self.val == 1.0 or self.val == -1.0:
             return {}
-        return {self:1}
+        return {self: 1}
 
     def ops(self):
         """Return number of operations to compute the expression.
@@ -110,24 +117,29 @@ class Expr(object):
         return 0
 
     def reduce_ops(self):
-        """Reduce number of operations to evaluate the expression.
-        There is nothing to be done for FloatValue and Symbol."""
+        """Reduce number of operations to evaluate the expression.  There is
+        nothing to be done for FloatValue and Symbol.
+
+        """
         # Nothing to be done.
         return self
 
     def reduce_var(self, var):
-        """Reduce the expression by another variable by using division.
-        This works for FloatValue, Symbol and Product."""
+        """Reduce the expression by another variable by using division.  This
+        works for FloatValue, Symbol and Product.
+
+        """
         return self/var
 
     def reduce_vartype(self, var_type):
-        """Reduce expression with given var_type. It returns a tuple
-        (found, remain), where 'found' is an expression that only has variables
-        of type == var_type. If no variables are found, found=(). The 'remain'
-        part contains the leftover after division by 'found' such that:
-        self = found*remain.
-        Works for FloatValue and Symbol."""
+        """Reduce expression with given var_type. It returns a tuple (found,
+        remain), where 'found' is an expression that only has
+        variables of type == var_type. If no variables are found,
+        found=(). The 'remain' part contains the leftover after
+        division by 'found' such that: self = found*remain.  Works for
+        FloatValue and Symbol.
+
+        """
         if self.t == var_type:
             return [(self, create_float(1))]
         return [((), self)]
-
diff --git a/ffc/quadrature/floatvalue.py b/ffc/quadrature/floatvalue.py
index 6f8a328..5cbe796 100644
--- a/ffc/quadrature/floatvalue.py
+++ b/ffc/quadrature/floatvalue.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "This file implements a class to represent a float."
 
 # Copyright (C) 2009-2010 Kristian B. Oelgaard
@@ -26,19 +27,22 @@ from ffc.cpp import format
 
 # FFC quadrature modules.
 from .symbolics import CONST
-#from symbolics import format
 from .symbolics import create_float
 from .symbolics import create_product
 from .symbolics import create_sum
 from .symbolics import create_fraction
 from .expr import Expr
 
+
 class FloatValue(Expr):
+
     def __init__(self, value):
         """Initialise a FloatValue object, it derives from Expr and contains
         no additional variables.
 
-        NOTE: self._prec = 0."""
+        NOTE: self._prec = 0.
+
+        """
 
         # Initialise value, type and class.
         self.val = float(value)
@@ -47,16 +51,16 @@ class FloatValue(Expr):
 
         # Handle 0.0, 1.0 and -1.0 values explicitly.
         EPS = format["epsilon"]
-        if abs(value) <  EPS:
+        if abs(value) < EPS:
             self.val = 0.0
-        elif abs(value - 1.0) <  EPS:
+        elif abs(value - 1.0) < EPS:
             self.val = 1.0
-        elif abs(value + 1.0) <  EPS:
+        elif abs(value + 1.0) < EPS:
             self.val = -1.0
 
-        # Compute the representation now, such that we can use it directly
-        # in the __eq__ and __ne__ methods (improves performance a bit, but
-        # only when objects are cached).
+        # Compute the representation now, such that we can use it
+        # directly in the __eq__ and __ne__ methods (improves
+        # performance a bit, but only when objects are cached).
         self._repr = "FloatValue(%s)" % format["float"](self.val)
 
         # Use repr as hash value
@@ -72,8 +76,8 @@ class FloatValue(Expr):
         "Addition by other objects."
         # NOTE: We expect expanded objects here.
         # This is only well-defined if other is a float or if self.val == 0.
-        if other._prec == 0: # float
-            return create_float(self.val+other.val)
+        if other._prec == 0:  # float
+            return create_float(self.val + other.val)
         elif self.val == 0.0:
             return other
         # Return a new sum
@@ -82,8 +86,8 @@ class FloatValue(Expr):
     def __sub__(self, other):
         "Subtract other objects."
         # NOTE: We expect expanded objects here.
-        if other._prec == 0: # float
-            return create_float(self.val-other.val)
+        if other._prec == 0:  # float
+            return create_float(self.val - other.val)
         # Multiply other by -1
         elif self.val == 0.0:
             return create_product([create_float(-1), other])
@@ -92,12 +96,13 @@ class FloatValue(Expr):
 
     def __mul__(self, other):
         "Multiplication by other objects."
-        # NOTE: We expect expanded objects here i.e., Product([FloatValue])
+        # NOTE: We expect expanded objects here i.e.,
+        # Product([FloatValue])
         # should not be present.
         # Only handle case where other is a float, else let the other
         # object handle the multiplication.
-        if other._prec == 0: # float
-            return create_float(self.val*other.val)
+        if other._prec == 0:  # float
+            return create_float(self.val * other.val)
         return other.__mul__(self)
 
     def __truediv__(self, other):
@@ -106,56 +111,54 @@ class FloatValue(Expr):
         if other.val == 0.0:
             error("Division by zero")
 
-        # TODO: Should we also support division by fraction for generality?
+        # TODO: Should we also support division by fraction for
+        # generality?
         # It should not be needed by this module.
-        if other._prec == 4: # frac
+        if other._prec == 4:  # frac
             error("Did not expected to divide by fraction")
 
         # If fraction will be zero.
         if self.val == 0.0:
             return self
 
-        # NOTE: We expect expanded objects here i.e., Product([FloatValue])
+        # NOTE: We expect expanded objects here i.e.,
+        # Product([FloatValue])
         # should not be present.
         # Handle types appropriately.
-        if other._prec == 0: # float
-            return create_float(self.val/other.val)
+        if other._prec == 0:  # float
+            return create_float(self.val / other.val)
         # If other is a symbol, return a simple fraction.
-        elif other._prec == 1: # sym
+        elif other._prec == 1:  # sym
             return create_fraction(self, other)
         # Don't handle division by sum.
-        elif other._prec == 3: # sum
+        elif other._prec == 3:  # sum
             # TODO: Here we could do: 4 / (2*x + 4*y) -> 2/(x + 2*y).
             return create_fraction(self, other)
 
-        # If other is a product, remove any float value to avoid
-        # 4 / (2*x), this will return 2/x.
+        # If other is a product, remove any float value to avoid 4 /
+        # (2*x), this will return 2/x.
         val = 1.0
         for v in other.vrs:
-            if v._prec == 0: # float
+            if v._prec == 0:  # float
                 val *= v.val
 
-        # If we had any floats, create new numerator and only use 'real' variables
-        # from the product in the denominator.
+        # If we had any floats, create new numerator and only use
+        # 'real' variables from the product in the denominator.
         if val != 1.0:
             # Check if we need to create a new denominator.
             # TODO: Just use other.vrs[1:] instead.
             if len(other.get_vrs()) > 1:
-                return create_fraction(create_float(self.val/val), create_product(other.get_vrs()))
-            # TODO: Because we expect all products to be expanded we shouldn't need
-            # to check for this case, just use other.vrs[1].
+                return create_fraction(create_float(self.val / val),
+                                       create_product(other.get_vrs()))
+            # TODO: Because we expect all products to be expanded we
+            # shouldn't need to check for this case, just use
+            # other.vrs[1].
             elif len(other.get_vrs()) == 1:
-                return create_fraction(create_float(self.val/val), other.vrs[1])
+                return create_fraction(create_float(self.val / val),
+                                       other.vrs[1])
             error("No variables left in denominator")
 
         # Nothing left to do.
         return create_fraction(self, other)
-        
-    __div__ = __truediv__
-
-# FFC quadrature modules.
-from .symbol     import Symbol
-from .product    import Product
-from .sumobj    import Sum
-from .fraction   import Fraction
 
+    __div__ = __truediv__
diff --git a/ffc/quadrature/fraction.py b/ffc/quadrature/fraction.py
index 5cabcfb..e31fb46 100644
--- a/ffc/quadrature/fraction.py
+++ b/ffc/quadrature/fraction.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "This file implements a class to represent a fraction."
 
 # Copyright (C) 2009-2010 Kristian B. Oelgaard
@@ -31,8 +32,13 @@ from .symbolics import create_sum
 from .symbolics import create_fraction
 from .expr import Expr
 
+# FFC quadrature modules.
+from .floatvalue import FloatValue
+
+
 class Fraction(Expr):
     __slots__ = ("num", "denom", "_expanded", "_reduced")
+
     def __init__(self, numerator, denominator):
         """Initialise a Fraction object, it derives from Expr and contains
         the additional variables:
@@ -59,31 +65,32 @@ class Fraction(Expr):
         self._reduced = False
 
         # Only try to eliminate scalar values.
-        # TODO: If we divide by a float, we could add the inverse to the
-        # numerator as a product, but I don't know if this is efficient
-        # since it will involve creating a new object.
-        if denominator._prec == 0 and numerator._prec == 0: # float
-            self.num = create_float(numerator.val/denominator.val)
-            # Remove denominator, such that it will be excluded when printing.
+        # TODO: If we divide by a float, we could add the inverse to
+        # the numerator as a product, but I don't know if this is
+        # efficient since it will involve creating a new object.
+        if denominator._prec == 0 and numerator._prec == 0:  # float
+            self.num = create_float(numerator.val / denominator.val)
+            # Remove denominator, such that it will be excluded when
+            # printing.
             self.denom = None
 
         # Handle zero.
         if self.val == 0.0:
-            # Remove denominator, such that it will be excluded when printing
+            # Remove denominator, such that it will be excluded when
+            # printing
             self.denom = None
 
-        # Compute the representation now, such that we can use it directly
-        # in the __eq__ and __ne__ methods (improves performance a bit, but
-        # only when objects are cached).
+        # Compute the representation now, such that we can use it
+        # directly in the __eq__ and __ne__ methods (improves
+        # performance a bit, but only when objects are cached).
         if self.denom:
-            self._repr = "Fraction(%s, %s)" %(self.num._repr, self.denom._repr)
+            self._repr = "Fraction(%s, %s)" % (self.num._repr, self.denom._repr)
         else:
-            self._repr = "Fraction(%s, %s)" %(self.num._repr, create_float(1)._repr)
+            self._repr = "Fraction(%s, %s)" % (self.num._repr, create_float(1)._repr)
 
         # Use repr as hash value.
         self._hash = hash(self._repr)
 
-
     # Print functions.
     def __str__(self):
         "Simple string representation which will appear in the generated code."
@@ -94,15 +101,17 @@ class Fraction(Expr):
         num = str(self.num)
         denom = str(self.denom)
 
-        # Group numerator if it is a fraction, otherwise it should be handled already.
-        if self.num._prec == 4: # frac
+        # Group numerator if it is a fraction, otherwise it should be
+        # handled already.
+        if self.num._prec == 4:  # frac
             num = format["grouping"](num)
 
-        # Group denominator if it is a fraction or product, or if the value is negative.
-        # NOTE: This will be removed by the optimisations later before writing any code.
-        if self.denom._prec in (2, 4) or self.denom.val < 0.0: # prod or frac
+        # Group denominator if it is a fraction or product, or if the
+        # value is negative.
+        # NOTE: This will be removed by the optimisations later before
+        # writing any code.
+        if self.denom._prec in (2, 4) or self.denom.val < 0.0:  # prod or frac
             denom = format["grouping"](denom)
-#        return num + format["division"] + denom
         return format["div"](num, denom)
 
     # Binary operators.
@@ -110,19 +119,20 @@ class Fraction(Expr):
         "Addition by other objects."
         # Add two fractions if their denominators are equal by creating
         # (expanded) sum of their numerators.
-        if other._prec == 4 and self.denom == other.denom: # frac
-            return create_fraction(create_sum([self.num, other.num]).expand(), self.denom)
+        if other._prec == 4 and self.denom == other.denom:  # frac
+            return create_fraction(create_sum([self.num, other.num]).expand(),
+                                   self.denom)
         return create_sum([self, other])
 
     def __sub__(self, other):
         "Subtract other objects."
         # Return a new sum
-        if other._prec == 4 and self.denom == other.denom: # frac
-            num = create_sum([self.num, create_product([FloatValue(-1), other.num])]).expand()
+        if other._prec == 4 and self.denom == other.denom:  # frac
+            num = create_sum([self.num, create_product([FloatValue(-1),
+                                                        other.num])]).expand()
             return create_fraction(num, self.denom)
         return create_sum([self, create_product([FloatValue(-1), other])])
 
-
     def __mul__(self, other):
         "Multiplication by other objects."
         # NOTE: assuming that we get expanded variables.
@@ -130,11 +140,11 @@ class Fraction(Expr):
         if self.val == 0.0 or other.val == 0.0:
             return create_float(0)
         # Create new expanded numerator and denominator and use '/' to reduce.
-        if other._prec != 4: # frac
-            return (self.num*other)/self.denom
+        if other._prec != 4:  # frac
+            return (self.num * other) / self.denom
         # If we have a fraction, create new numerator and denominator and use
         # '/' to reduce expression.
-        return create_product([self.num, other.num]).expand()/create_product([self.denom, other.denom]).expand()
+        return create_product([self.num, other.num]).expand() / create_product([self.denom, other.denom]).expand()
 
     def __truediv__(self, other):
         "Division by other objects."
@@ -158,7 +168,7 @@ class Fraction(Expr):
         # if a Fraction is the return value, then multiply the denominator of
         # that value by denominator of self. Otherwise the reduction was
         # successful and we just use the denom of self as denominator.
-        return self.num/(other*self.denom)
+        return self.num / (other * self.denom)
 
     __div__ = __truediv__
 
@@ -182,23 +192,23 @@ class Fraction(Expr):
         # If both the numerator and denominator are fractions, create new
         # numerator and denominator and use division to possibly reduce the
         # expression.
-        if num._prec == 4 and denom._prec == 4: # frac
+        if num._prec == 4 and denom._prec == 4:  # frac
             new_num = create_product([num.num, denom.denom]).expand()
             new_denom = create_product([num.denom, denom.num]).expand()
-            self._expanded = new_num/new_denom
+            self._expanded = new_num / new_denom
         # If the numerator is a fraction, multiply denominators and use
         # division to reduce expression.
-        elif num._prec == 4: # frac
+        elif num._prec == 4:  # frac
             new_denom = create_product([num.denom, denom]).expand()
-            self._expanded = num.num/new_denom
+            self._expanded = num.num / new_denom
         # If the denominator is a fraction multiply by the inverse and
         # use division to reduce expression.
-        elif denom._prec == 4: # frac
+        elif denom._prec == 4:  # frac
             new_num = create_product([num, denom.denom]).expand()
-            self._expanded = new_num/denom.num
+            self._expanded = new_num / denom.num
         # Use division to reduce the expression, no need to call expand().
         else:
-            self._expanded = num/denom
+            self._expanded = num / denom
         return self._expanded
 
     def get_unique_vars(self, var_type):
@@ -209,8 +219,11 @@ class Fraction(Expr):
         return var
 
     def get_var_occurrences(self):
-        """Determine the number of minimum number of times all variables occurs
-        in the expression simply by calling the function on the numerator."""
+        """Determine the number of minimum number of times all variables
+        occurs in the expression simply by calling the function on the
+        numerator.
+
+        """
         return self.num.get_var_occurrences()
 
     def ops(self):
@@ -218,14 +231,17 @@ class Fraction(Expr):
         # If we have a denominator, add the operations and +1 for '/'.
         if self.denom:
             return self.num.ops() + self.denom.ops() + 1
-        # Else we just return the number of operations for the numerator.
+        # Else we just return the number of operations for the
+        # numerator.
         return self.num.ops()
 
     def reduce_ops(self):
-        # Try to reduce operations by reducing the numerator and denominator.
-        # FIXME: We assume expanded variables here, so any common variables in
-        # the numerator and denominator are already removed i.e, there is no
-        # risk of encountering (x + x*y) / x -> x*(1 + y)/x -> (1 + y).
+        # Try to reduce operations by reducing the numerator and
+        # denominator.
+        # FIXME: We assume expanded variables here, so any common
+        # variables in the numerator and denominator are already
+        # removed i.e, there is no risk of encountering (x + x*y) / x
+        # -> x*(1 + y)/x -> (1 + y).
         if self._reduced:
             return self._reduced
         num = self.num.reduce_ops()
@@ -240,86 +256,78 @@ class Fraction(Expr):
         "Reduce the fraction by another variable through division of numerator."
         # We assume that this function is only called by reduce_ops, such that
         # we just need to consider the numerator.
-        return create_fraction(self.num/var, self.denom)
+        return create_fraction(self.num / var, self.denom)
 
     def reduce_vartype(self, var_type):
-        """Reduce expression with given var_type. It returns a tuple
-        (found, remain), where 'found' is an expression that only has variables
-        of type == var_type. If no variables are found, found=(). The 'remain'
-        part contains the leftover after division by 'found' such that:
-        self = found*remain."""
+        """Reduce expression with given var_type. It returns a tuple (found,
+        remain), where 'found' is an expression that only has
+        variables of type == var_type. If no variables are found,
+        found=(). The 'remain' part contains the leftover after
+        division by 'found' such that: self = found*remain.
+
+        """
 
         # Reduce the numerator by the var type.
-#        print "self.num._prec: ", self.num._prec
-#        print "self.num: ", self.num
         if self.num._prec == 3:
             foo = self.num.reduce_vartype(var_type)
             if len(foo) == 1:
                 num_found, num_remain = foo[0]
-#                num_found, num_remain = self.num.reduce_vartype(var_type)[0]
             else:
-                # meg: I have only a marginal idea of what I'm doing here!
-#                print "here: "
+                # meg: I have only a marginal idea of what I'm doing
+                # here!
                 new_sum = []
                 for num_found, num_remain in foo:
                     if num_found == ():
                         new_sum.append(create_fraction(num_remain, self.denom))
                     else:
-                        new_sum.append(create_fraction(create_product([num_found, num_remain]), self.denom))
+                        new_sum.append(create_fraction(create_product([num_found, num_remain]),
+                                                       self.denom))
                 return create_sum(new_sum).expand().reduce_vartype(var_type)
         else:
-#            num_found, num_remain = self.num.reduce_vartype(var_type)
             foo = self.num.reduce_vartype(var_type)
             if len(foo) != 1:
                 raise RuntimeError("This case is not handled")
             num_found, num_remain = foo[0]
 
-#        # TODO: Remove this test later, expansion should have taken care of
-#        # no denominator.
-#        if not self.denom:
-#            error("This fraction should have been expanded.")
-
         # If the denominator is not a Sum things are straightforward.
         denom_found = None
         denom_remain = None
-#        print "self.denom: ", self.denom
-#        print "self.denom._prec: ", self.denom._prec
-        if self.denom._prec != 3: # sum
-#            denom_found, denom_remain = self.denom.reduce_vartype(var_type)
+        if self.denom._prec != 3:  # sum
             foo = self.denom.reduce_vartype(var_type)
             if len(foo) != 1:
                 raise RuntimeError("This case is not handled")
             denom_found, denom_remain = foo[0]
 
-        # If we have a Sum in the denominator, all terms must be reduced by
-        # the same terms to make sense
+        # If we have a Sum in the denominator, all terms must be
+        # reduced by the same terms to make sense
         else:
             remain = []
             for m in self.denom.vrs:
-#                d_found, d_remain = m.reduce_vartype(var_type)
                 foo = m.reduce_vartype(var_type)
                 d_found, d_remain = foo[0]
-                # If we've found a denom, but the new found is different from
-                # the one already found, terminate loop since it wouldn't make
-                # sense to reduce the fraction.
-                # TODO: handle I0/((I0 + I1)/(G0 + G1) + (I1 + I2)/(G1 + G2))
+                # If we've found a denom, but the new found is
+                # different from the one already found, terminate loop
+                # since it wouldn't make sense to reduce the fraction.
+                # TODO: handle I0/((I0 + I1)/(G0 + G1) + (I1 + I2)/(G1
+                # + G2))
                 # better than just skipping.
-#                if len(foo) != 1:
-#                    raise RuntimeError("This case is not handled")
                 if len(foo) != 1 or (denom_found is not None and repr(d_found) != repr(denom_found)):
-                    # If the denominator of the entire sum has a type which is
-                    # lower than or equal to the vartype that we are currently
-                    # reducing for, we have to move it outside the expression
-                    # as well.
-                    # TODO: This is quite application specific, but I don't see
-                    # how we can do it differently at the moment.
+                    # If the denominator of the entire sum has a type
+                    # which is lower than or equal to the vartype that
+                    # we are currently reducing for, we have to move
+                    # it outside the expression as well.
+                    # TODO: This is quite application specific, but I
+                    # don't see how we can do it differently at the
+                    # moment.
                     if self.denom.t <= var_type:
                         if not num_found:
                             num_found = create_float(1)
-                        return [(create_fraction(num_found, self.denom), num_remain)]
+                        return [(create_fraction(num_found, self.denom),
+                                 num_remain)]
                     else:
                         # The remainder is always a fraction
-                        return [(num_found, create_fraction(num_remain, self.denom))]
+                        return [(num_found, create_fraction(num_remain,
+                                                            self.denom))]
 
                 # Update denom found and add remainder.
                 denom_found = d_found
@@ -347,13 +355,4 @@ class Fraction(Expr):
                 found = create_fraction(create_float(1), denom_found)
             else:
                 found = ()
-#        print "found: ", found
-#        print len((found, remain))
         return [(found, remain)]
-
-# FFC quadrature modules.
-from .floatvalue import FloatValue
-from .symbol     import Symbol
-from .product    import Product
-from .sumobj    import Sum
-
diff --git a/ffc/quadrature/optimisedquadraturetransformer.py b/ffc/quadrature/optimisedquadraturetransformer.py
index 10e5246..965017d 100644
--- a/ffc/quadrature/optimisedquadraturetransformer.py
+++ b/ffc/quadrature/optimisedquadraturetransformer.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "QuadratureTransformer (optimised) for quadrature code generation to translate UFL expressions."
 
 # Copyright (C) 2009-2011 Kristian B. Oelgaard
@@ -19,61 +20,59 @@
 #
 # Modified by Anders Logg, 2009
 
-# Python modules.
-from numpy import shape
 
-from six import iteritems, iterkeys
+from six import iterkeys
 from six.moves import xrange as range
 from six import advance_iterator as next
-def firstkey(d):
-    return next(iterkeys(d))
 
 # UFL common.
-from ufl import product
 from ufl.utils.sorting import sorted_by_key
 from ufl import custom_integral_types
 
 # UFL Classes.
-from ufl.classes import FixedIndex
 from ufl.classes import IntValue
 from ufl.classes import FloatValue
 from ufl.classes import Coefficient
 from ufl.classes import Operator
 
 # FFC modules.
-from ffc.log import info, debug, error, ffc_assert
+from ffc.log import error, ffc_assert
 from ffc.cpp import format
 from ffc.quadrature.quadraturetransformerbase import QuadratureTransformerBase
 from ffc.quadrature.quadratureutils import create_permutations
 
 # Symbolics functions
-#from ffc.quadrature.symbolics import set_format
-from ffc.quadrature.symbolics import create_float, create_symbol, create_product,\
-                                     create_sum, create_fraction, BASIS, IP, GEO, CONST
+from ffc.quadrature.symbolics import (create_float, create_symbol,
+                                      create_product, create_sum,
+                                      create_fraction, BASIS, IP, GEO)
+
+
+def firstkey(d):
+    return next(iterkeys(d))
+
 
 class QuadratureTransformerOpt(QuadratureTransformerBase):
+
     "Transform UFL representation to quadrature code."
 
     def __init__(self, *args):
 
         # Initialise base class.
         QuadratureTransformerBase.__init__(self, *args)
-#        set_format(format)
 
     # -------------------------------------------------------------------------
     # Start handling UFL classes.
     # -------------------------------------------------------------------------
+
     # -------------------------------------------------------------------------
     # AlgebraOperators (algebra.py).
     # -------------------------------------------------------------------------
     def sum(self, o, *operands):
-        #print("Visiting Sum: " + repr(o) + "\noperands: " + "\n".join(map(repr, operands)))
-
         code = {}
         # Loop operands that has to be summend.
         for op in operands:
-            # If entries does already exist we can add the code, otherwise just
-            # dump them in the element tensor.
+            # If entries does already exist we can add the code,
+            # otherwise just dump them in the element tensor.
             for key, val in sorted(op.items()):
                 if key in code:
                     code[key].append(val)
@@ -95,14 +94,15 @@ class QuadratureTransformerOpt(QuadratureTransformerBase):
         return code
 
     def product(self, o, *operands):
-        #print("\n\nVisiting Product:\n" + str(tree_format(o)))
 
         permute = []
         not_permute = []
 
-        # Sort operands in objects that needs permutation and objects that does not.
+        # Sort operands into objects that need permutation and
+        # objects that do not.
         for op in operands:
-            # If we get an empty dict, something was zero and so is the product.
+            # If we get an empty dict, something was zero and so is
+            # the product.
             if not op:
                 return {}
             if len(op) > 1 or (op and firstkey(op) != ()):
@@ -111,39 +111,38 @@ class QuadratureTransformerOpt(QuadratureTransformerBase):
                 not_permute.append(op[()])
 
         # Create permutations.
-        # TODO: After all indices have been expanded I don't think that we'll
-        # ever get more than a list of entries and values.
-        #print("\npermute: " + repr(permute))
-        #print("\nnot_permute: " + repr(not_permute))
+        # TODO: After all indices have been expanded I don't think
+        # that we'll ever get more than a list of entries and values.
         permutations = create_permutations(permute)
-        #print("\npermutations: " + repr(permutations))
 
         # Create code.
-        code ={}
+        code = {}
         if permutations:
             for key, val in permutations.items():
                 # Sort key in order to create a unique key.
                 l = sorted(key)
 
-                # TODO: I think this check can be removed for speed since we
-                # just have a list of objects we should never get any conflicts here.
-                ffc_assert(tuple(l) not in code, "This key should not be in the code.")
+                # TODO: I think this check can be removed for speed;
+                # since we just have a list of objects, we should
+                # never get any conflicts here.
+                ffc_assert(tuple(l) not in code,
+                           "This key should not be in the code.")
 
                 code[tuple(l)] = create_product(val + not_permute)
         else:
-            return {():create_product(not_permute)}
+            return {(): create_product(not_permute)}
         return code
 
     def division(self, o, *operands):
-        #print("\n\nVisiting Division: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
 
-        ffc_assert(len(operands) == 2, "Expected exactly two operands (numerator and denominator): " + repr(operands))
+        ffc_assert(len(operands) == 2,
+                   "Expected exactly two operands (numerator and denominator): " + repr(operands))
 
         # Get the code from the operands.
         numerator_code, denominator_code = operands
 
         # TODO: Are these safety checks needed?
-        ffc_assert(() in denominator_code and len(denominator_code) == 1, \
+        ffc_assert(() in denominator_code and len(denominator_code) == 1,
                    "Only support function type denominator: " + repr(denominator_code))
 
         code = {}
@@ -155,8 +154,6 @@ class QuadratureTransformerOpt(QuadratureTransformerBase):
         return code
 
     def power(self, o):
-        #print("\n\nVisiting Power: " + repr(o))
-
         # Get base and exponent.
         base, expo = o.ufl_operands
 
@@ -164,170 +161,175 @@ class QuadratureTransformerOpt(QuadratureTransformerBase):
         base_code = self.visit(base)
 
         # TODO: Are these safety checks needed?
-        ffc_assert(() in base_code and len(base_code) == 1, "Only support function type base: " + repr(base_code))
+        ffc_assert(() in base_code and len(base_code) == 1,
+                   "Only support function type base: " + repr(base_code))
 
         # Get the base code and create power.
         val = base_code[()]
 
         # Handle different exponents
         if isinstance(expo, IntValue):
-            return {(): create_product([val]*expo.value())}
+            return {(): create_product([val] * expo.value())}
         elif isinstance(expo, FloatValue):
             exp = format["floating point"](expo.value())
-            sym = create_symbol(format["std power"](str(val), exp), val.t, val, 1)
+            sym = create_symbol(format["std power"](str(val), exp), val.t,
+                                val, 1)
             return {(): sym}
         elif isinstance(expo, (Coefficient, Operator)):
             exp = self.visit(expo)[()]
-#            print "pow exp: ", exp
-#            print "pow val: ", val
-            sym = create_symbol(format["std power"](str(val), exp), val.t, val, 1)
+            sym = create_symbol(format["std power"](str(val), exp), val.t,
+                                val, 1)
             return {(): sym}
         else:
             error("power does not support this exponent: " + repr(expo))
 
     def abs(self, o, *operands):
-        #print("\n\nVisiting Abs: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
 
         # TODO: Are these safety checks needed?
-        ffc_assert(len(operands) == 1 and () in operands[0] and len(operands[0]) == 1, \
+        ffc_assert(len(operands) == 1 and () in operands[0] and len(operands[0]) == 1,
                    "Abs expects one operand of function type: " + repr(operands))
 
         # Take absolute value of operand.
         val = operands[0][()]
-        new_val = create_symbol(format["absolute value"](str(val)), val.t, val, 1)
-        return {():new_val}
+        new_val = create_symbol(format["absolute value"](str(val)), val.t,
+                                val, 1)
+        return {(): new_val}
 
     def min_value(self, o, *operands):
         # Take minimum value of operands.
         val0 = operands[0][()]
         val1 = operands[1][()]
         t = min(val0.t, val1.t)
-        # FIXME: I don't know how to implement this the optimized way. Is this right?
+        # FIXME: I don't know how to implement this the optimized
+        # way. Is this right?
         new_val = create_symbol(format["min value"](str(val0), str(val1)), t)
-        return {():new_val}
+        return {(): new_val}
 
     def max_value(self, o, *operands):
         # Take maximum value of operands.
         val0 = operands[0][()]
         val1 = operands[1][()]
         t = min(val0.t, val1.t)
-        # FIXME: I don't know how to implement this the optimized way. Is this right?
+        # FIXME: I don't know how to implement this the optimized
+        # way. Is this right?
         new_val = create_symbol(format["max value"](str(val0), str(val1)), t)
-        return {():new_val}
+        return {(): new_val}
 
     # -------------------------------------------------------------------------
     # Condition, Conditional (conditional.py).
     # -------------------------------------------------------------------------
     def not_condition(self, o, *operands):
-        # This is a Condition but not a BinaryCondition, and the operand will be another Condition
+        # This is a Condition but not a BinaryCondition, and the
+        # operand will be another Condition.
         # Get condition expression and do safety checks.
         # Might be a bit too strict?
         c, = operands
-        ffc_assert(len(c) == 1 and firstkey(c) == (),\
-            "Condition for NotCondition should only be one function: " + repr(c))
-        sym = create_symbol(format["not"](str(c[()])), c[()].t, base_op=c[()].ops()+1)
+        ffc_assert(len(c) == 1 and firstkey(c) == (),
+                   "Condition for NotCondition should only be one function: " + repr(c))
+        sym = create_symbol(format["not"](str(c[()])), c[()].t, base_op=c[()].ops() + 1)
         return {(): sym}
 
     def binary_condition(self, o, *operands):
 
-        # Get LHS and RHS expressions and do safety checks.
-        # Might be a bit too strict?
+        # Get LHS and RHS expressions and do safety checks.  Might be
+        # a bit too strict?
         lhs, rhs = operands
-        ffc_assert(len(lhs) == 1 and firstkey(lhs) == (),\
-            "LHS of Condtion should only be one function: " + repr(lhs))
-        ffc_assert(len(rhs) == 1 and firstkey(rhs) == (),\
-            "RHS of Condtion should only be one function: " + repr(rhs))
+        ffc_assert(len(lhs) == 1 and firstkey(lhs) == (),
+                   "LHS of Condtion should only be one function: " + repr(lhs))
+        ffc_assert(len(rhs) == 1 and firstkey(rhs) == (),
+                   "RHS of Condtion should only be one function: " + repr(rhs))
 
         # Map names from UFL to cpp.py.
-        name_map = {"==":"is equal", "!=":"not equal",\
-                    "<":"less than", ">":"greater than",\
-                    "<=":"less equal", ">=":"greater equal",\
+        name_map = {"==": "is equal", "!=": "not equal",
+                    "<": "less than", ">": "greater than",
+                    "<=": "less equal", ">=": "greater equal",
                     "&&": "and", "||": "or"}
 
         # Get the minimum type
         t = min(lhs[()].t, rhs[()].t)
         ops = lhs[()].ops() + rhs[()].ops() + 1
-        cond = str(lhs[()])+format[name_map[o._name]]+str(rhs[()])
+        cond = str(lhs[()]) + format[name_map[o._name]] + str(rhs[()])
         sym = create_symbol(format["grouping"](cond), t, base_op=ops)
         return {(): sym}
 
     def conditional(self, o, *operands):
         # Get condition and return values; and do safety check.
         cond, true, false = operands
-        ffc_assert(len(cond) == 1 and firstkey(cond) == (),\
-            "Condtion should only be one function: " + repr(cond))
-        ffc_assert(len(true) == 1 and firstkey(true) == (),\
-            "True value of Condtional should only be one function: " + repr(true))
-        ffc_assert(len(false) == 1 and firstkey(false) == (),\
-            "False value of Condtional should only be one function: " + repr(false))
+        ffc_assert(len(cond) == 1 and firstkey(cond) == (),
+                   "Condtion should only be one function: " + repr(cond))
+        ffc_assert(len(true) == 1 and firstkey(true) == (),
+                   "True value of Condtional should only be one function: " + repr(true))
+        ffc_assert(len(false) == 1 and firstkey(false) == (),
+                   "False value of Condtional should only be one function: " + repr(false))
 
         # Get values and test for None
         t_val = true[()]
         f_val = false[()]
 
         # Get the minimum type and number of operations
-        # TODO: conditionals are currently always located inside the ip loop,
-        # therefore the type has to be at least IP (fix bug #1082048). This can
-        # be optimised.
+        # TODO: conditionals are currently always located inside the
+        # ip loop, therefore the type has to be at least IP (fix bug
+        # #1082048). This can be optimised.
         t = min([cond[()].t, t_val.t, f_val.t, IP])
         ops = sum([cond[()].ops(), t_val.ops(), f_val.ops()])
 
         # Create expression for conditional
-        # TODO: Handle this differently to expose the variables which are used
-        # to create the expressions.
-        expr = create_symbol(format["evaluate conditional"](cond[()], t_val, f_val), t)
+        # TODO: Handle this differently to expose the variables which
+        # are used to create the expressions.
+        expr = create_symbol(format["evaluate conditional"](cond[()], t_val,
+                                                            f_val), t)
         num = len(self.conditionals)
         name = create_symbol(format["conditional"](num), t)
-        if not expr in self.conditionals:
+        if expr not in self.conditionals:
             self.conditionals[expr] = (t, ops, num)
         else:
             num = self.conditionals[expr][2]
             name = create_symbol(format["conditional"](num), t)
-        return {():name}
+        return {(): name}
 
     # -------------------------------------------------------------------------
     # FacetNormal, CellVolume, Circumradius, FacetArea (geometry.py).
     # -------------------------------------------------------------------------
-    def cell_coordinate(self, o): # FIXME
+    def cell_coordinate(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def facet_coordinate(self, o): # FIXME
+    def facet_coordinate(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def cell_origin(self, o): # FIXME
+    def cell_origin(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def facet_origin(self, o): # FIXME
+    def facet_origin(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def cell_facet_origin(self, o): # FIXME
+    def cell_facet_origin(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def jacobian(self, o): # FIXME
+    def jacobian(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def jacobian_determinant(self, o): # FIXME
+    def jacobian_determinant(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def jacobian_inverse(self, o): # FIXME
+    def jacobian_inverse(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def facet_jacobian(self, o): # FIXME
+    def facet_jacobian(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def facet_jacobian_determinant(self, o): # FIXME
+    def facet_jacobian_determinant(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def facet_jacobian_inverse(self, o): # FIXME
+    def facet_jacobian_inverse(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def cell_facet_jacobian(self, o): # FIXME
+    def cell_facet_jacobian(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def cell_facet_jacobian_determinant(self, o): # FIXME
+    def cell_facet_jacobian_determinant(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def cell_facet_jacobian_inverse(self, o): # FIXME
+    def cell_facet_jacobian_inverse(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
     def facet_normal(self, o):
@@ -338,49 +340,52 @@ class QuadratureTransformerOpt(QuadratureTransformerBase):
                    "FacetNormal expects 1 component index: " + repr(components))
 
         # Handle 1D as a special case.
-        # FIXME: KBO: This has to change for mD elements in R^n : m < n
-        if self.gdim == 1: # FIXME: MSA UFL uses shape (1,) now, can we remove the special case here then?
+        # FIXME: KBO: This has to change for mD elements in
+        # R^n : m < n
+        if self.gdim == 1:  # FIXME: MSA UFL uses shape (1,) now, can we remove the special case here then?
             normal_component = format["normal component"](self.restriction, "")
         else:
-            normal_component = format["normal component"](self.restriction, components[0])
+            normal_component = format["normal component"](self.restriction,
+                                                          components[0])
         self.trans_set.add(normal_component)
 
         return {(): create_symbol(normal_component, GEO)}
 
-    def cell_normal(self, o): # FIXME
+    def cell_normal(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
     def cell_volume(self, o):
         # FIXME: KBO: This has to change for higher order elements
-        #detJ = format["det(J)"](self.restriction)
-        #volume = format["absolute value"](detJ)
-        #self.trans_set.add(detJ)
+        # detJ = format["det(J)"](self.restriction)
+        # volume = format["absolute value"](detJ)
+        # self.trans_set.add(detJ)
 
         volume = format["cell volume"](self.restriction)
         self.trans_set.add(volume)
 
-        return {():create_symbol(volume, GEO)}
+        return {(): create_symbol(volume, GEO)}
 
     def circumradius(self, o):
         # FIXME: KBO: This has to change for higher order elements
         circumradius = format["circumradius"](self.restriction)
         self.trans_set.add(circumradius)
 
-        return {():create_symbol(circumradius, GEO)}
+        return {(): create_symbol(circumradius, GEO)}
 
     def facet_area(self, o):
         # FIXME: KBO: This has to change for higher order elements
-        # NOTE: Omitting restriction because the area of a facet is the same
-        # on both sides.
-        # FIXME: Since we use the scale factor, facet area has no meaning
-        # for cell integrals. (Need check in FFC or UFL).
+        # NOTE: Omitting restriction because the area of a facet is
+        # the same on both sides.
+        # FIXME: Since we use the scale factor, facet area has no
+        # meaning for cell integrals. (Need check in FFC or UFL).
         area = format["facet area"]
         self.trans_set.add(area)
 
-        return {():create_symbol(area, GEO)}
+        return {(): create_symbol(area, GEO)}
 
     def min_facet_edge_length(self, o):
-        # FIXME: this has no meaning for cell integrals. (Need check in FFC or UFL).
+        # FIXME: this has no meaning for cell integrals. (Need check
+        # in FFC or UFL).
 
         tdim = self.tdim
         if tdim < 3:
@@ -389,10 +394,11 @@ class QuadratureTransformerOpt(QuadratureTransformerBase):
         edgelen = format["min facet edge length"](self.restriction)
         self.trans_set.add(edgelen)
 
-        return {():create_symbol(edgelen, GEO)}
+        return {(): create_symbol(edgelen, GEO)}
 
     def max_facet_edge_length(self, o):
-        # FIXME: this has no meaning for cell integrals. (Need check in FFC or UFL).
+        # FIXME: this has no meaning for cell integrals. (Need check
+        # in FFC or UFL).
 
         tdim = self.tdim
         if tdim < 3:
@@ -401,24 +407,22 @@ class QuadratureTransformerOpt(QuadratureTransformerBase):
         edgelen = format["max facet edge length"](self.restriction)
         self.trans_set.add(edgelen)
 
-        return {():create_symbol(edgelen, GEO)}
+        return {(): create_symbol(edgelen, GEO)}
 
-    def cell_orientation(self, o): # FIXME
+    def cell_orientation(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def quadrature_weight(self, o): # FIXME
+    def quadrature_weight(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    # -------------------------------------------------------------------------
-
     def create_argument(self, ufl_argument, derivatives, component, local_comp,
-                        local_offset, ffc_element, transformation, multiindices,
-                        tdim, gdim, avg):
+                        local_offset, ffc_element, transformation,
+                        multiindices, tdim, gdim, avg):
         "Create code for basis functions, and update relevant tables of used basis."
 
         # Prefetch formats to speed up code generation.
-        f_transform     = format["transform"]
-        f_detJ          = format["det(J)"]
+        f_transform = format["transform"]
+        f_detJ = format["det(J)"]
 
         # Reset code
         code = {}
@@ -432,17 +436,23 @@ class QuadratureTransformerOpt(QuadratureTransformerBase):
                     deriv = []
 
                 # Create mapping and basis name.
-                mapping, basis = self._create_mapping_basis(component, deriv, avg, ufl_argument, ffc_element)
-                if not mapping in code:
+                mapping, basis = self._create_mapping_basis(component, deriv,
+                                                            avg, ufl_argument,
+                                                            ffc_element)
+                if mapping not in code:
                     code[mapping] = []
 
                 if basis is not None:
                     # Add transformation if needed.
-                    code[mapping].append(self.__apply_transform(basis, derivatives, multi, tdim, gdim))
+                    code[mapping].append(self.__apply_transform(basis,
+                                                                derivatives,
+                                                                multi, tdim,
+                                                                gdim))
 
         # Handle non-affine mappings.
         else:
-            ffc_assert(avg is None, "Taking average is not supported for non-affine mappings.")
+            ffc_assert(avg is None,
+                       "Taking average is not supported for non-affine mappings.")
 
             # Loop derivatives and get multi indices.
             for multi in multiindices:
@@ -450,26 +460,94 @@ class QuadratureTransformerOpt(QuadratureTransformerBase):
                 if not any(deriv):
                     deriv = []
 
-                for c in range(tdim):
-                    # Create mapping and basis name.
-                    mapping, basis = self._create_mapping_basis(c + local_offset, deriv, avg, ufl_argument, ffc_element)
-                    if not mapping in code:
-                        code[mapping] = []
-
-                    if basis is not None:
-                        # Multiply basis by appropriate transform.
-                        if transformation == "covariant piola":
-                            dxdX = create_symbol(f_transform("JINV", c, local_comp, tdim, gdim, self.restriction), GEO)
-                            basis = create_product([dxdX, basis])
-                        elif transformation == "contravariant piola":
-                            detJ = create_fraction(create_float(1), create_symbol(f_detJ(self.restriction), GEO))
-                            dXdx = create_symbol(f_transform("J", local_comp, c, gdim, tdim, self.restriction), GEO)
-                            basis = create_product([detJ, dXdx, basis])
-                        else:
-                            error("Transformation is not supported: " + repr(transformation))
-
+                if transformation in ["covariant piola",
+                                      "contravariant piola"]:
+                    for c in range(tdim):
+                        # Create mapping and basis name.
+                        mapping, basis = self._create_mapping_basis(c + local_offset, deriv, avg, ufl_argument, ffc_element)
+                        if mapping not in code:
+                            code[mapping] = []
+
+                        if basis is not None:
+                            # Multiply basis by appropriate transform.
+                            if transformation == "covariant piola":
+                                dxdX = create_symbol(f_transform("JINV", c,
+                                                                 local_comp, tdim,
+                                                                 gdim,
+                                                                 self.restriction),
+                                                     GEO)
+                                basis = create_product([dxdX, basis])
+                            elif transformation == "contravariant piola":
+                                detJ = create_fraction(create_float(1),
+                                                       create_symbol(f_detJ(self.restriction), GEO))
+                                dXdx = create_symbol(f_transform("J", local_comp,
+                                                                 c, gdim, tdim,
+                                                                 self.restriction),
+                                                     GEO)
+                                basis = create_product([detJ, dXdx, basis])
                         # Add transformation if needed.
-                        code[mapping].append(self.__apply_transform(basis, derivatives, multi, tdim, gdim))
+                        code[mapping].append(self.__apply_transform(basis,
+                                                                    derivatives,
+                                                                    multi, tdim,
+                                                                    gdim))
+                elif transformation == "double covariant piola":
+                    # g_ij = (Jinv)_ki G_kl (Jinv)_lj
+                    i = local_comp // tdim
+                    j = local_comp % tdim
+                    for k in range(tdim):
+                        for l in range(tdim):
+                            # Create mapping and basis name.
+                            mapping, basis = self._create_mapping_basis(
+                                k * tdim + l + local_offset,
+                                deriv, avg, ufl_argument, ffc_element)
+                            if mapping not in code:
+                                code[mapping] = []
+                            if basis is not None:
+                                J1 = create_symbol(
+                                    f_transform("JINV", k, i, tdim, gdim,
+                                                self.restriction), GEO)
+                                J2 = create_symbol(
+                                    f_transform("JINV", l, j, tdim, gdim,
+                                                 self.restriction), GEO)
+                                basis = create_product([J1, basis, J2])
+                                # Add transformation if needed.
+                                code[mapping].append(
+                                    self.__apply_transform(
+                                        basis, derivatives, multi,
+                                        tdim, gdim))
+                elif transformation == "double contravariant piola":
+                    # g_ij = (detJ)^(-2) J_ik G_kl J_jl
+                    i = local_comp // tdim
+                    j = local_comp % tdim
+                    for k in range(tdim):
+                        for l in range(tdim):
+                            # Create mapping and basis name.
+                            mapping, basis = self._create_mapping_basis(
+                                k * tdim + l + local_offset,
+                                deriv, avg, ufl_argument, ffc_element)
+                            if mapping not in code:
+                                code[mapping] = []
+                            if basis is not None:
+                                J1 = create_symbol(
+                                    f_transform("J", i, k, gdim, tdim,
+                                                self.restriction), GEO)
+                                J2 = create_symbol(
+                                    f_transform("J", j, l, gdim, tdim,
+                                                self.restriction), GEO)
+                                invdetJ = create_fraction(
+                                    create_float(1),
+                                    create_symbol(f_detJ(self.restriction),
+                                                  GEO))
+                                basis = create_product([invdetJ, invdetJ, J1,
+                                                        basis, J2])
+                                # Add transformation if needed.
+                                code[mapping].append(
+                                    self.__apply_transform(
+                                        basis, derivatives, multi,
+                                        tdim, gdim))
+                else:
+                    error("Transformation is not supported: " + repr(transformation))
+
 
         # Add sums and group if necessary.
         for key, val in list(code.items()):
@@ -481,14 +559,15 @@ class QuadratureTransformerOpt(QuadratureTransformerBase):
         return code
 
     def create_function(self, ufl_function, derivatives, component, local_comp,
-                       local_offset, ffc_element, is_quad_element, transformation, multiindices,
-                       tdim, gdim, avg):
+                        local_offset, ffc_element, is_quad_element,
+                        transformation, multiindices, tdim, gdim, avg):
         "Create code for basis functions, and update relevant tables of used basis."
-        ffc_assert(ufl_function in self._function_replace_values, "Expecting ufl_function to have been mapped prior to this call.")
+        ffc_assert(ufl_function in self._function_replace_values,
+                   "Expecting ufl_function to have been mapped prior to this call.")
 
         # Prefetch formats to speed up code generation.
-        f_transform     = format["transform"]
-        f_detJ          = format["det(J)"]
+        f_transform = format["transform"]
+        f_detJ = format["det(J)"]
 
         # Reset code
         code = []
@@ -502,14 +581,19 @@ class QuadratureTransformerOpt(QuadratureTransformerBase):
                     deriv = []
 
                 # Create function name.
-                function_name = self._create_function_name(component, deriv, avg, is_quad_element, ufl_function, ffc_element)
+                function_name = self._create_function_name(component, deriv,
+                                                           avg, is_quad_element,
+                                                           ufl_function,
+                                                           ffc_element)
                 if function_name:
                     # Add transformation if needed.
-                    code.append(self.__apply_transform(function_name, derivatives, multi, tdim, gdim))
+                    code.append(self.__apply_transform(function_name,
+                                                       derivatives, multi, tdim, gdim))
 
         # Handle non-affine mappings.
         else:
-            ffc_assert(avg is None, "Taking average is not supported for non-affine mappings.")
+            ffc_assert(avg is None,
+                       "Taking average is not supported for non-affine mappings.")
 
             # Loop derivatives and get multi indices.
             for multi in multiindices:
@@ -517,22 +601,85 @@ class QuadratureTransformerOpt(QuadratureTransformerBase):
                 if not any(deriv):
                     deriv = []
 
-                for c in range(tdim):
-                    function_name = self._create_function_name(c + local_offset, deriv, avg, is_quad_element, ufl_function, ffc_element)
-                    if function_name:
-                        # Multiply basis by appropriate transform.
-                        if transformation == "covariant piola":
-                            dxdX = create_symbol(f_transform("JINV", c, local_comp, tdim, gdim, self.restriction), GEO)
-                            function_name = create_product([dxdX, function_name])
-                        elif transformation == "contravariant piola":
-                            detJ = create_fraction(create_float(1), create_symbol(f_detJ(self.restriction), GEO))
-                            dXdx = create_symbol(f_transform("J", local_comp, c, gdim, tdim, self.restriction), GEO)
-                            function_name = create_product([detJ, dXdx, function_name])
-                        else:
-                            error("Transformation is not supported: ", repr(transformation))
-
-                        # Add transformation if needed.
-                        code.append(self.__apply_transform(function_name, derivatives, multi, tdim, gdim))
+                if transformation in ["covariant piola",
+                                      "contravariant piola"]:
+                    for c in range(tdim):
+                        function_name = self._create_function_name(c + local_offset, deriv, avg, is_quad_element, ufl_function, ffc_element)
+                        if function_name:
+                            # Multiply basis by appropriate transform.
+                            if transformation == "covariant piola":
+                                dxdX = create_symbol(f_transform("JINV", c,
+                                                                 local_comp, tdim,
+                                                                 gdim,
+                                                                 self.restriction),
+                                                     GEO)
+                                function_name = create_product([dxdX, function_name])
+                            elif transformation == "contravariant piola":
+                                detJ = create_fraction(create_float(1),
+                                                       create_symbol(f_detJ(self.restriction),
+                                                                     GEO))
+                                dXdx = create_symbol(f_transform("J", local_comp,
+                                                                 c, gdim, tdim,
+                                                                 self.restriction),
+                                                     GEO)
+                                function_name = create_product([detJ, dXdx,
+                                                                function_name])
+                            # Add transformation if needed.
+                            code.append(self.__apply_transform(function_name,
+                                                               derivatives, multi,
+                                                               tdim, gdim))
+                elif transformation == "double covariant piola":
+                    # g_ij = (Jinv)_ki G_kl (Jinv)lj
+                    i = local_comp // tdim
+                    j = local_comp % tdim
+                    for k in range(tdim):
+                        for l in range(tdim):
+                            # Create mapping and basis name.
+                            function_name = self._create_function_name(
+                                k * tdim + l + local_offset, deriv, avg,
+                                is_quad_element, ufl_function, ffc_element)
+                            J1 = create_symbol(
+                                f_transform("JINV", k, i, tdim, gdim,
+                                            self.restriction), GEO)
+                            J2 = create_symbol(
+                                f_transform("JINV", l, j, tdim, gdim,
+                                            self.restriction), GEO)
+                            function_name = create_product([J1, function_name,
+                                                            J2])
+                            # Add transformation if needed.
+                            code.append(self.__apply_transform(
+                                function_name, derivatives, multi, tdim, gdim))
+                elif transformation == "double contravariant piola":
+                    # g_ij = (detJ)^(-2) J_ik G_kl J_jl
+                    i = local_comp // tdim
+                    j = local_comp % tdim
+                    for k in range(tdim):
+                        for l in range(tdim):
+                            # Create mapping and basis name.
+                            function_name = self._create_function_name(
+                                k * tdim + l + local_offset,
+                                deriv, avg, is_quad_element,
+                                ufl_function, ffc_element)
+                            J1 = create_symbol(
+                                f_transform("J", i, k, tdim, gdim,
+                                            self.restriction), GEO)
+                            J2 = create_symbol(
+                                f_transform("J", j, l, tdim, gdim,
+                                            self.restriction), GEO)
+                            invdetJ = create_fraction(
+                                create_float(1),
+                                create_symbol(f_detJ(self.restriction), GEO))
+                            function_name = create_product([invdetJ, invdetJ,
+                                                            J1, function_name,
+                                                            J2])
+                            # Add transformation if needed.
+                            code.append(self.__apply_transform(function_name,
+                                                               derivatives,
+                                                               multi, tdim,
+                                                               gdim))
+                else:
+                    error("Transformation is not supported: ",
+                          repr(transformation))
 
         if not code:
             return create_float(0.0)
@@ -548,14 +695,15 @@ class QuadratureTransformerOpt(QuadratureTransformerBase):
     # -------------------------------------------------------------------------
     def __apply_transform(self, function, derivatives, multi, tdim, gdim):
         "Apply transformation (from derivatives) to basis or function."
-        f_transform     = format["transform"]
+        f_transform = format["transform"]
 
         # Add transformation if needed.
         transforms = []
-        if not self.integral_type in custom_integral_types:
+        if self.integral_type not in custom_integral_types:
             for i, direction in enumerate(derivatives):
                 ref = multi[i]
-                t = f_transform("JINV", ref, direction, tdim, gdim, self.restriction)
+                t = f_transform("JINV", ref, direction, tdim, gdim,
+                                self.restriction)
                 transforms.append(create_symbol(t, GEO))
 
         transforms.append(function)
@@ -565,40 +713,40 @@ class QuadratureTransformerOpt(QuadratureTransformerBase):
     # Helper functions for transformation of UFL objects in base class
     # -------------------------------------------------------------------------
     def _create_symbol(self, symbol, domain):
-        return {():create_symbol(symbol, domain)}
+        return {(): create_symbol(symbol, domain)}
 
     def _create_product(self, symbols):
         return create_product(symbols)
 
     def _format_scalar_value(self, value):
-        #print("format_scalar_value: %d" % value)
+        # print("format_scalar_value: %d" % value)
         if value is None:
-            return {():create_float(0.0)}
-        return {():create_float(value)}
+            return {(): create_float(0.0)}
+        return {(): create_float(value)}
 
     def _math_function(self, operands, format_function):
-        #print("Calling _math_function() of optimisedquadraturetransformer.")
         # TODO: Are these safety checks needed?
-        ffc_assert(len(operands) == 1 and () in operands[0] and len(operands[0]) == 1, \
+        ffc_assert(len(operands) == 1 and () in operands[0] and len(operands[0]) == 1,
                    "MathFunctions expect one operand of function type: " + repr(operands))
         # Use format function on value of operand.
         operand = operands[0]
         for key, val in list(operand.items()):
             new_val = create_symbol(format_function(str(val)), val.t, val, 1)
             operand[key] = new_val
-        #raise Exception("pause")
+        # raise Exception("pause")
         return operand
 
     def _bessel_function(self, operands, format_function):
         # TODO: Are these safety checks needed?
-        # TODO: work on reference instead of copies? (like math_function)
-        ffc_assert(len(operands) == 2,\
-          "BesselFunctions expect two operands of function type: " + repr(operands))
+        # TODO: work on reference instead of copies? (like
+        # math_function)
+        ffc_assert(len(operands) == 2,
+                   "BesselFunctions expect two operands of function type: " + repr(operands))
         nu, x = operands
-        ffc_assert(len(nu) == 1 and () in nu,\
-          "Expecting one operand of function type as first argument to BesselFunction : " + repr(nu))
-        ffc_assert(len(x) == 1 and () in x,\
-          "Expecting one operand of function type as second argument to BesselFunction : " + repr(x))
+        ffc_assert(len(nu) == 1 and () in nu,
+                   "Expecting one operand of function type as first argument to BesselFunction : " + repr(nu))
+        ffc_assert(len(x) == 1 and () in x,
+                   "Expecting one operand of function type as second argument to BesselFunction : " + repr(x))
         nu = nu[()]
         x = x[()]
         if nu is None:
@@ -606,8 +754,8 @@ class QuadratureTransformerOpt(QuadratureTransformerBase):
         if x is None:
             x = format["floating point"](0.0)
 
-        sym = create_symbol(format_function(x,nu), x.t, x, 1)
-        return {():sym}
+        sym = create_symbol(format_function(x, nu), x.t, x, 1)
+        return {(): sym}
 
     # -------------------------------------------------------------------------
     # Helper functions for code_generation()
@@ -616,7 +764,6 @@ class QuadratureTransformerOpt(QuadratureTransformerBase):
         return expression.ops()
 
     def _create_entry_data(self, val, integral_type):
-#        zero = False
         # Multiply value by weight and determinant
         ACCESS = GEO
         weight = format["weight"](self.points)
@@ -635,8 +782,8 @@ class QuadratureTransformerOpt(QuadratureTransformerBase):
             value = create_product([val, weight,
                                     create_symbol(f_scale_factor, GEO)])
 
-        # Update sets of used variables (if they will not be used because of
-        # optimisations later, they will be reset).
+        # Update sets of used variables (if they will not be used
+        # because of optimisations later, they will be reset).
         trans_set.update([str(x) for x in value.get_unique_vars(GEO)])
         used_points = set([self.points])
         ops = self._count_operations(value)
diff --git a/ffc/quadrature/parameters.py b/ffc/quadrature/parameters.py
index 0fc66ac..21dd180 100644
--- a/ffc/quadrature/parameters.py
+++ b/ffc/quadrature/parameters.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "Quadrature representation class for UFL"
 
 # Copyright (C) 2009-2014 Kristian B. Oelgaard
@@ -18,7 +19,7 @@
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 #
 # Modified by Anders Logg 2009, 2014
-# Modified by Martin Alnaes 2013-2014
+# Modified by Martin Sandve Alnæs 2013-2014
 
 # UFL modules
 from ufl import custom_integral_types
@@ -26,27 +27,27 @@ from ufl import custom_integral_types
 # FFC modules
 from ffc.log import warning
 
+
 def parse_optimise_parameters(parameters, itg_data):
 
     # Initialize parameters
-    optimise_parameters = {"eliminate zeros":     False,
-                           "optimisation":        False,
-                           "ignore ones":         False,
-                           "remove zero terms":   False,
-                           "ignore zero tables":  False}
-
+    optimise_parameters = {"eliminate zeros": False,
+                           "optimisation": False,
+                           "ignore ones": False,
+                           "remove zero terms": False,
+                           "ignore zero tables": False}
 
     # Set optimized parameters
     if parameters["optimize"] and itg_data.integral_type in custom_integral_types:
         warning("Optimization not available for custom integrals, skipping optimization.")
     elif parameters["optimize"]:
-        optimise_parameters["ignore ones"]        = True
-        optimise_parameters["remove zero terms"]  = True
+        optimise_parameters["ignore ones"] = True
+        optimise_parameters["remove zero terms"] = True
         optimise_parameters["ignore zero tables"] = True
 
-        # Do not include this in below if/else clause since we want to be
-        # able to switch on this optimisation in addition to the other
-        # optimisations.
+        # Do not include this in below if/else clause since we want to
+        # be able to switch on this optimisation in addition to the
+        # other optimisations.
         if "eliminate_zeros" in parameters:
             optimise_parameters["eliminate zeros"] = True
 
@@ -59,10 +60,10 @@ def parse_optimise_parameters(parameters, itg_data):
         # The current default optimisation (for -O) is equal to
         # '-feliminate_zeros -fsimplify_expressions'.
         else:
-            # If '-O -feliminate_zeros' was given on the command line, do not
-            # simplify expressions
-            if not "eliminate_zeros" in parameters:
+            # If '-O -feliminate_zeros' was given on the command line,
+            # do not simplify expressions
+            if "eliminate_zeros" not in parameters:
                 optimise_parameters["eliminate zeros"] = True
-                optimise_parameters["optimisation"]    = "simplify_expressions"
+                optimise_parameters["optimisation"] = "simplify_expressions"
 
     return optimise_parameters
diff --git a/ffc/quadrature/product.py b/ffc/quadrature/product.py
index 6eccbca..21fd5ba 100644
--- a/ffc/quadrature/product.py
+++ b/ffc/quadrature/product.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "This file implements a class to represent a product."
 
 # Copyright (C) 2009-2010 Kristian B. Oelgaard
@@ -20,21 +21,26 @@
 # First added:  2009-07-12
 # Last changed: 2010-03-11
 
-# FFC modules.
+from functools import reduce
+
+# FFC modules
 from ffc.log import error
 from ffc.cpp import format
 
-# FFC quadrature modules.
+# FFC quadrature modules
 from .symbolics import create_float
 from .symbolics import create_product
 from .symbolics import create_sum
 from .symbolics import create_fraction
 from .expr import Expr
-from functools import reduce
 
-#class Product(object):
+# FFC quadrature modules
+from .floatvalue import FloatValue
+
+
 class Product(Expr):
     __slots__ = ("vrs", "_expanded")
+
     def __init__(self, variables):
         """Initialise a Product object, it derives from Expr and contains
         the additional variables:
@@ -66,55 +72,57 @@ class Product(Expr):
                     break
 
                 # Collect floats into one variable
-                if var._prec == 0: # float
+                if var._prec == 0:  # float
                     float_val *= var.val
                     continue
-                # Take care of product such that we don't create nested products.
-                elif var._prec == 2: # prod
-#                    if var.vrs[0]._prec == 0:
-#                        float_val *= var.vrs[0].val
-#                        self.vrs += var.vrs[1:]
-#                        continue
-#                    self.vrs += var.vrs
-#                    continue
+                # Take care of product such that we don't create
+                # nested products.
+                elif var._prec == 2:  # prod
                     # If expanded product is a float, just add it.
                     if var._expanded and var._expanded._prec == 0:
                         float_val *= var._expanded.val
-                    # If expanded product is symbol, this product is still expanded and add symbol.
+                    # If expanded product is symbol, this product is
+                    # still expanded and add symbol.
                     elif var._expanded and var._expanded._prec == 1:
                         self.vrs.append(var._expanded)
-                    # If expanded product is still a product, add the variables.
+                    # If expanded product is still a product, add the
+                    # variables.
                     elif var._expanded and var._expanded._prec == 2:
-#                        self.vrs.append(var)
-                        # Add copies of the variables of other product (collect floats).
+                        # Add copies of the variables of other product
+                        # (collect floats).
                         if var._expanded.vrs[0]._prec == 0:
                             float_val *= var._expanded.vrs[0].val
                             self.vrs += var._expanded.vrs[1:]
                             continue
                         self.vrs += var._expanded.vrs
-                    # If expanded product is a sum or fraction, we must expand this product later.
+                    # If expanded product is a sum or fraction, we
+                    # must expand this product later.
                     elif var._expanded and var._expanded._prec in (3, 4):
                         self._expanded = False
                         self.vrs.append(var._expanded)
-                    # Else the product is not expanded, and we must expand this one later
+                    # Else the product is not expanded, and we must
+                    # expand this one later
                     else:
                         self._expanded = False
-                        # Add copies of the variables of other product (collect floats).
+                        # Add copies of the variables of other product
+                        # (collect floats).
                         if var.vrs[0]._prec == 0:
                             float_val *= var.vrs[0].val
                             self.vrs += var.vrs[1:]
                             continue
                         self.vrs += var.vrs
                     continue
-                # If we have sums or fractions in the variables the product is not expanded.
-                elif var._prec in (3, 4): # sum or frac
+                # If we have sums or fractions in the variables the
+                # product is not expanded.
+                elif var._prec in (3, 4):  # sum or frac
                     self._expanded = False
 
-                # Just add any variable at this point to list of new vars.
+                # Just add any variable at this point to list of new
+                # vars.
                 self.vrs.append(var)
 
-            # If value is 1 there is no need to include it, unless it is the
-            # only parameter left i.e., 2*0.5 = 1.
+            # If value is 1 there is no need to include it, unless it
+            # is the only parameter left i.e., 2*0.5 = 1.
             if float_val and float_val != 1.0:
                 self.val = float_val
                 self.vrs.append(create_float(float_val))
@@ -138,25 +146,27 @@ class Product(Expr):
         # Sort the variables such that comparisons work.
         self.vrs.sort()
 
-        # Compute the representation now, such that we can use it directly
-        # in the __eq__ and __ne__ methods (improves performance a bit, but
-        # only when objects are cached).
+        # Compute the representation now, such that we can use it
+        # directly in the __eq__ and __ne__ methods (improves
+        # performance a bit, but only when objects are cached).
         self._repr = "Product([%s])" % ", ".join([v._repr for v in self.vrs])
 
         # Use repr as hash value.
         self._hash = hash(self._repr)
 
-        # Store self as expanded value, if we did not encounter any sums or fractions.
+        # Store self as expanded value, if we did not encounter any
+        # sums or fractions.
         if self._expanded:
             self._expanded = self
 
     # Print functions.
     def __str__(self):
         "Simple string representation which will appear in the generated code."
-        # If we have more than one variable and the first float is -1 exlude the 1.
+        # If we have more than one variable and the first float is -1
+        # exlude the 1.
         if len(self.vrs) > 1 and self.vrs[0]._prec == 0 and self.vrs[0].val == -1.0:
             # Join string representation of members by multiplication
-            return   format["sub"](["", format["mul"]([str(v) for v in self.vrs[1:]])])
+            return format["sub"](["", format["mul"]([str(v) for v in self.vrs[1:]])])
         return format["mul"]([str(v) for v in self.vrs])
 
     # Binary operators.
@@ -167,24 +177,28 @@ class Product(Expr):
         if other._prec == 2 and self.get_vrs() == other.get_vrs():
             # Return expanded product, to get rid of 3*x + -2*x -> x, not 1*x.
             return create_product([create_float(self.val + other.val)] + list(self.get_vrs())).expand()
-        # if self == 2*x and other == x return 3*x.
-        elif other._prec == 1: # sym
+
+        elif other._prec == 1:  # sym
             if self.get_vrs() == (other,):
-                # Return expanded product, to get rid of -x + x -> 0, not product(0).
-                return create_product([create_float(self.val + 1.0), other]).expand()
+                # Return expanded product, to get rid of -x + x -> 0,
+                # not product(0).
+                return create_product([create_float(self.val + 1.0),
+                                       other]).expand()
         # Return sum
         return create_sum([self, other])
 
     def __sub__(self, other):
         "Subtract other objects."
         if other._prec == 2 and self.get_vrs() == other.get_vrs():
-            # Return expanded product, to get rid of 3*x + -2*x -> x, not 1*x.
+            # Return expanded product, to get rid of 3*x + -2*x -> x,
+            # not 1*x.
             return create_product([create_float(self.val - other.val)] + list(self.get_vrs())).expand()
-        # if self == 2*x and other == x return 3*x.
-        elif other._prec == 1: # sym
+        elif other._prec == 1:  # sym
             if self.get_vrs() == (other,):
-                # Return expanded product, to get rid of -x + x -> 0, not product(0).
-                return create_product([create_float(self.val - 1.0), other]).expand()
+                # Return expanded product, to get rid of -x + x -> 0,
+                # not product(0).
+                return create_product([create_float(self.val - 1.0),
+                                       other]).expand()
         # Return sum
         return create_sum([self, create_product([FloatValue(-1), other])])
 
@@ -195,12 +209,12 @@ class Product(Expr):
             return create_float(0)
 
         # If other is a Sum or Fraction let them handle it.
-        if other._prec in (3, 4): # sum or frac
+        if other._prec in (3, 4):  # sum or frac
             return other.__mul__(self)
 
-        # NOTE: We expect expanded sub-expressions with no nested operators.
-        # Create new product adding float or symbol.
-        if other._prec in (0, 1): # float or sym
+        # NOTE: We expect expanded sub-expressions with no nested
+        # operators.  Create new product adding float or symbol.
+        if other._prec in (0, 1):  # float or sym
             return create_product(self.vrs + [other])
         # Create new product adding all variables from other Product.
         return create_product(self.vrs + other.vrs)
@@ -216,10 +230,11 @@ class Product(Expr):
             return self.vrs[0]
 
         # If other is a Sum we can only return a fraction.
-        # NOTE: Expect that other is expanded i.e., x + x -> 2*x which can be handled
+        # NOTE: Expect that other is expanded i.e., x + x -> 2*x which
+        # can be handled
         # TODO: Fix x / (x + x*y) -> 1 / (1 + y).
         # Or should this be handled when reducing a fraction?
-        if other._prec == 3: # sum
+        if other._prec == 3:  # sum
             return create_fraction(self, other)
 
         # Handle division by FloatValue, Symbol, Product and Fraction.
@@ -229,20 +244,21 @@ class Product(Expr):
         num = self.vrs[:]
         denom = []
         # Add floatvalue, symbol and products to the list of denominators.
-        if other._prec in (0, 1): # float or sym
+        if other._prec in (0, 1):  # float or sym
             denom = [other]
-        elif other._prec == 2: # prod
+        elif other._prec == 2:  # prod
             # Get copy.
             denom = other.vrs[:]
         # fraction.
         else:
             error("Did not expected to divide by fraction.")
 
-        # Loop entries in denominator and remove from numerator (and denominator).
+        # Loop entries in denominator and remove from numerator (and
+        # denominator).
         for d in denom[:]:
             # Add the inverse of a float to the numerator and continue.
-            if d._prec == 0: # float
-                num.append(create_float(1.0/d.val))
+            if d._prec == 0:  # float
+                num.append(create_float(1.0 / d.val))
                 denom.remove(d)
                 continue
             if d in num:
@@ -257,7 +273,8 @@ class Product(Expr):
             num = create_product(num).expand()
         elif num:
             num = num[0]
-        # If all variables in the numerator has been eliminated we need to add '1'.
+        # If all variables in the numerator has been eliminated we
+        # need to add '1'.
         else:
             num = create_float(1)
 
@@ -265,7 +282,8 @@ class Product(Expr):
             return create_fraction(num, create_product(denom))
         elif denom:
             return create_fraction(num, denom[0])
-        # If we no longer have a denominater, just return the numerator.
+        # If we no longer have a denominater, just return the
+        # numerator.
         return num
 
     __div__ = __truediv__
@@ -274,8 +292,8 @@ class Product(Expr):
     def expand(self):
         "Expand all members of the product."
         # If we just have one variable, compute the expansion of it
-        # (it is not a Product, so it should be safe). We need this to get
-        # rid of Product([Symbol]) type expressions.
+        # (it is not a Product, so it should be safe). We need this to
+        # get rid of Product([Symbol]) type expressions.
         if len(self.vrs) == 1:
             self._expanded = self.vrs[0].expand()
             return self._expanded
@@ -284,34 +302,35 @@ class Product(Expr):
         if self._expanded:
             return self._expanded
 
-        # Sort variables such that we don't call the '*' operator more than we have to.
+        # Sort variables such that we don't call the '*' operator more
+        # than we have to.
         float_syms = []
         sum_fracs = []
         for v in self.vrs:
-            if v._prec in (0, 1): # float or sym
+            if v._prec in (0, 1):  # float or sym
                 float_syms.append(v)
                 continue
             exp = v.expand()
 
             # If the expanded expression is a float, sym or product,
             # we can add the variables.
-            if exp._prec in (0, 1): # float or sym
+            if exp._prec in (0, 1):  # float or sym
                 float_syms.append(exp)
-            elif exp._prec == 2: # prod
+            elif exp._prec == 2:  # prod
                 float_syms += exp.vrs
             else:
                 sum_fracs.append(exp)
         # If we have floats or symbols add the symbols to the rest as a single
         # product (for speed).
         if len(float_syms) > 1:
-            sum_fracs.append( create_product(float_syms) )
+            sum_fracs.append(create_product(float_syms))
         elif float_syms:
             sum_fracs.append(float_syms[0])
 
         # Use __mult__ to reduce list to one single variable.
-        # TODO: Can this be done more efficiently without creating all the
-        # intermediate variables?
-        self._expanded = reduce(lambda x,y: x*y, sum_fracs)
+        # TODO: Can this be done more efficiently without creating all
+        # the intermediate variables?
+        self._expanded = reduce(lambda x, y: x * y, sum_fracs)
         return self._expanded
 
     def get_unique_vars(self, var_type):
@@ -323,11 +342,15 @@ class Product(Expr):
         return var
 
     def get_var_occurrences(self):
-        """Determine the number of times all variables occurs in the expression.
-        Returns a dictionary of variables and the number of times they occur."""
-        # TODO: The product should be expanded at this stage, should we check
-        # this?
-        # Create dictionary and count number of occurrences of each variable.
+        """Determine the number of times all variables occurs in the
+        expression.  Returns a dictionary of variables and the number
+        of times they occur.
+
+        """
+        # TODO: The product should be expanded at this stage, should
+        # we check this?
+        # Create dictionary and count number of occurrences of each
+        # variable.
         d = {}
         for v in self.vrs:
             if v in d:
@@ -338,9 +361,10 @@ class Product(Expr):
 
     def get_vrs(self):
         "Return all 'real' variables."
-        # A product should only have one float value after initialisation.
+        # A product should only have one float value after
+        # initialisation.
         # TODO: Use this knowledge directly in other classes?
-        if self.vrs[0]._prec == 0: # float
+        if self.vrs[0]._prec == 0:  # float
             return tuple(self.vrs[1:])
         return tuple(self.vrs)
 
@@ -353,29 +377,30 @@ class Product(Expr):
         for v in self.vrs:
             op += v.ops()
 
-        # Subtract 1, if the first member is -1 i.e., -1*x*y -> x*y is only 1 op.
+        # Subtract 1, if the first member is -1 i.e., -1*x*y -> x*y is
+        # only 1 op.
         if self.vrs[0]._prec == 0 and self.vrs[0].val == -1.0:
             op -= 1
         return op
 
     def reduce_ops(self):
         "Reduce the number of operations to evaluate the product."
-        # It's not possible to reduce a product if it is already expanded and
-        # it should be at this stage.
-        # TODO: Is it safe to return self.expand().reduce_ops() if product is
-        # not expanded? And do we want to?
-#        if self._expanded:
-#            return self._expanded
-#        error("Product must be expanded first before we can reduce the number of operations.")
-        # TODO: This should crash if it goes wrong (the above is more correct but slower).
+        # It's not possible to reduce a product if it is already
+        # expanded and it should be at this stage.
+        # TODO: Is it safe to return self.expand().reduce_ops() if
+        # product is not expanded? And do we want to?
+
+        # TODO: This should crash if it goes wrong
         return self._expanded
 
     def reduce_vartype(self, var_type):
-        """Reduce expression with given var_type. It returns a tuple
-        (found, remain), where 'found' is an expression that only has variables
-        of type == var_type. If no variables are found, found=(). The 'remain'
-        part contains the leftover after division by 'found' such that:
-        self = found*remain."""
+        """Reduce expression with given var_type. It returns a tuple (found,
+        remain), where 'found' is an expression that only has
+        variables of type == var_type. If no variables are found,
+        found=(). The 'remain' part contains the leftover after
+        division by 'found' such that: self = found*remain.
+
+        """
         # Sort variables according to type.
         found = []
         remains = []
@@ -405,10 +430,3 @@ class Product(Expr):
 
         # Return whatever we found.
         return [(found, remains)]
-
-# FFC quadrature modules.
-from .floatvalue import FloatValue
-from .symbol     import Symbol
-from .sumobj    import Sum
-from .fraction   import Fraction
-
diff --git a/ffc/quadrature/quadraturegenerator.py b/ffc/quadrature/quadraturegenerator.py
index 137420f..fa6abd0 100644
--- a/ffc/quadrature/quadraturegenerator.py
+++ b/ffc/quadrature/quadraturegenerator.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "Code generator for quadrature representation."
 
 # Copyright (C) 2009-2014 Kristian B. Oelgaard
@@ -19,10 +20,10 @@
 #
 # Modified by Mehdi Nikbakht 2010
 # Modified by Anders Logg 2013-2014
-# Modified by Martin Alnaes 2013-2014
+# Modified by Martin Sandve Alnæs 2013-2014
 
 # Python modules
-import functools, itertools
+import functools
 import numpy
 
 # UFL modules
@@ -31,7 +32,7 @@ from ufl.utils.derivativetuples import compute_derivative_tuples
 from ufl import custom_integral_types
 
 # FFC modules
-from ffc.log import info, debug, ffc_assert, error, warning
+from ffc.log import ffc_assert, error, warning
 from ffc.cpp import format, remove_unused
 
 from ffc.representationutils import initialize_integral_code
@@ -39,6 +40,7 @@ from ffc.representationutils import initialize_integral_code
 # Utility and optimization functions for quadraturegenerator
 from ffc.quadrature.symbolics import generate_aux_constants
 
+
 def generate_integral_code(ir, prefix, parameters):
     "Generate code for integral from intermediate representation."
 
@@ -53,45 +55,46 @@ def generate_integral_code(ir, prefix, parameters):
 
     return code
 
+
 def _tabulate_tensor(ir, prefix, parameters):
     "Generate code for a single integral (tabulate_tensor())."
 
     # Prefetch formatting to speedup code generation
-    f_comment      = format["comment"]
-    f_G            = format["geometry constant"]
+    f_comment = format["comment"]
+    f_G = format["geometry constant"]
     f_const_double = format["assign"]
-    f_switch       = format["switch"]
-    f_float        = format["float"]
-    f_assign       = format["assign"]
-    f_A            = format["element tensor"]
-    f_r            = format["free indices"][0]
-    f_loop         = format["generate loop"]
-    f_int          = format["int"]
-    f_facet        = format["facet"]
+    f_switch = format["switch"]
+    f_float = format["float"]
+    f_assign = format["assign"]
+    f_A = format["element tensor"]
+    f_r = format["free indices"][0]
+    f_loop = format["generate loop"]
+    f_int = format["int"]
+    f_facet = format["facet"]
 
     # Get data
-    opt_par       = ir["optimise_parameters"]
+    opt_par = ir["optimise_parameters"]
     integral_type = ir["integral_type"]
-    gdim          = ir["geometric_dimension"]
-    tdim          = ir["topological_dimension"]
-    num_facets    = ir["num_facets"]
-    num_vertices  = ir["num_vertices"]
-    prim_idims    = ir["prim_idims"]
-    integrals     = ir["trans_integrals"]
-    geo_consts    = ir["geo_consts"]
-    oriented      = ir["needs_oriented"]
-    element_data  = ir["element_data"]
-    num_cells     = ir["num_cells"]
+    gdim = ir["geometric_dimension"]
+    tdim = ir["topological_dimension"]
+    num_facets = ir["num_facets"]
+    num_vertices = ir["num_vertices"]
+    prim_idims = ir["prim_idims"]
+    integrals = ir["trans_integrals"]
+    geo_consts = ir["geo_consts"]
+    oriented = ir["needs_oriented"]
+    element_data = ir["element_data"]
+    num_cells = ir["num_cells"]
 
     # Create sets of used variables
-    used_weights    = set()
+    used_weights = set()
     used_psi_tables = set()
-    used_nzcs       = set()
-    trans_set       = set()
+    used_nzcs = set()
+    trans_set = set()
     sets = [used_weights, used_psi_tables, used_nzcs, trans_set]
 
-    affine_tables = {} # TODO: This is not populated anywhere, remove?
-    quadrature_weights = ir["quadrature_weights"]
+    affine_tables = {}  # TODO: This is not populated anywhere, remove?
+    quadrature_rules = ir["quadrature_rules"]
 
     operations = []
     if integral_type == "cell":
@@ -108,7 +111,7 @@ def _tabulate_tensor(ir, prefix, parameters):
         operations.append([num_ops])
 
         # Generate code for basic geometric quantities
-        jacobi_code  = ""
+        jacobi_code = ""
         jacobi_code += format["compute_jacobian"](tdim, gdim)
         jacobi_code += "\n"
         jacobi_code += format["compute_jacobian_inverse"](tdim, gdim)
@@ -118,28 +121,33 @@ def _tabulate_tensor(ir, prefix, parameters):
         jacobi_code += format["scale factor snippet"]
 
         # Generate code for cell volume and circumradius
-        jacobi_code += "\n\n" + format["generate cell volume"](tdim, gdim, integral_type)
-        jacobi_code += "\n\n" + format["generate circumradius"](tdim, gdim, integral_type)
+        jacobi_code += "\n\n" + format["generate cell volume"](tdim, gdim,
+                                                               integral_type)
+        jacobi_code += "\n\n" + format["generate circumradius"](tdim, gdim,
+                                                                integral_type)
 
     elif integral_type == "exterior_facet":
 
         # Iterate over facets
         cases = [None for i in range(num_facets)]
         for i in range(num_facets):
-            # Update transformer with facets and generate case code + set of used geometry terms.
-            c, mem_code, ops = _generate_element_tensor(integrals[i], sets, opt_par, gdim, tdim)
+            # Update transformer with facets and generate case code +
+            # set of used geometry terms.
+            c, mem_code, ops = _generate_element_tensor(integrals[i], sets,
+                                                        opt_par, gdim, tdim)
             case = [f_comment("Total number of operations to compute element tensor (from this point): %d" % ops)]
             case += c
             cases[i] = "\n".join(case)
 
-            # Save number of operations (for printing info on operations).
+            # Save number of operations (for printing info on
+            # operations).
             operations.append([i, ops])
 
         # Generate tensor code for all cases using a switch.
         tensor_code = f_switch(f_facet(None), cases)
 
         # Generate code for basic geometric quantities
-        jacobi_code  = ""
+        jacobi_code = ""
         jacobi_code += format["compute_jacobian"](tdim, gdim)
         jacobi_code += "\n"
         jacobi_code += format["compute_jacobian_inverse"](tdim, gdim)
@@ -147,43 +155,49 @@ def _tabulate_tensor(ir, prefix, parameters):
             jacobi_code += format["orientation"](tdim, gdim)
         jacobi_code += "\n"
         jacobi_code += "\n\n" + format["facet determinant"](tdim, gdim)
-        jacobi_code += "\n\n" + format["generate normal"](tdim, gdim, integral_type)
+        jacobi_code += "\n\n" + format["generate normal"](tdim, gdim,
+                                                          integral_type)
         jacobi_code += "\n\n" + format["generate facet area"](tdim, gdim)
         if tdim == 3:
             jacobi_code += "\n\n" + format["generate min facet edge length"](tdim, gdim)
             jacobi_code += "\n\n" + format["generate max facet edge length"](tdim, gdim)
 
         # Generate code for cell volume and circumradius
-        jacobi_code += "\n\n" + format["generate cell volume"](tdim, gdim, integral_type)
-        jacobi_code += "\n\n" + format["generate circumradius"](tdim, gdim, integral_type)
+        jacobi_code += "\n\n" + format["generate cell volume"](tdim, gdim,
+                                                               integral_type)
+        jacobi_code += "\n\n" + format["generate circumradius"](tdim, gdim,
+                                                                integral_type)
 
     elif integral_type == "interior_facet":
 
-        # Modify the dimensions of the primary indices because we have a macro element
-        prim_idims = [d*2 for d in prim_idims]
+        # Modify the dimensions of the primary indices because we have
+        # a macro element
+        prim_idims = [d * 2 for d in prim_idims]
 
         # Iterate over combinations of facets
         cases = [[None for j in range(num_facets)] for i in range(num_facets)]
         for i in range(num_facets):
             for j in range(num_facets):
-                # Update transformer with facets and generate case code + set of used geometry terms.
+                # Update transformer with facets and generate case
+                # code + set of used geometry terms.
                 c, mem_code, ops = _generate_element_tensor(integrals[i][j],
-                                                            sets,
-                                                            opt_par,
-                                                            gdim,
-                                                            tdim)
+                                                            sets, opt_par,
+                                                            gdim, tdim)
                 case = [f_comment("Total number of operations to compute element tensor (from this point): %d" % ops)]
                 case += c
                 cases[i][j] = "\n".join(case)
 
-                # Save number of operations (for printing info on operations).
+                # Save number of operations (for printing info on
+                # operations).
                 operations.append([i, j, ops])
 
         # Generate tensor code for all cases using a switch.
-        tensor_code = f_switch(f_facet("+"), [f_switch(f_facet("-"), cases[i]) for i in range(len(cases))])
+        tensor_code = f_switch(f_facet("+"),
+                               [f_switch(f_facet("-"),
+                                         cases[i]) for i in range(len(cases))])
 
         # Generate code for basic geometric quantities
-        jacobi_code  = ""
+        jacobi_code = ""
         for _r in ["+", "-"]:
             jacobi_code += format["compute_jacobian"](tdim, gdim, r=_r)
             jacobi_code += "\n"
@@ -192,23 +206,26 @@ def _tabulate_tensor(ir, prefix, parameters):
                 jacobi_code += format["orientation"](tdim, gdim, r=_r)
             jacobi_code += "\n"
         jacobi_code += "\n\n" + format["facet determinant"](tdim, gdim, r="+")
-        jacobi_code += "\n\n" + format["generate normal"](tdim, gdim, integral_type)
+        jacobi_code += "\n\n" + format["generate normal"](tdim, gdim,
+                                                          integral_type)
         jacobi_code += "\n\n" + format["generate facet area"](tdim, gdim)
         if tdim == 3:
             jacobi_code += "\n\n" + format["generate min facet edge length"](tdim, gdim, r="+")
             jacobi_code += "\n\n" + format["generate max facet edge length"](tdim, gdim, r="+")
 
         # Generate code for cell volume and circumradius
-        jacobi_code += "\n\n" + format["generate cell volume"](tdim, gdim, integral_type)
-        jacobi_code += "\n\n" + format["generate circumradius"](tdim, gdim, integral_type)
+        jacobi_code += "\n\n" + format["generate cell volume"](tdim, gdim,
+                                                               integral_type)
+        jacobi_code += "\n\n" + format["generate circumradius"](tdim, gdim,
+                                                                integral_type)
 
     elif integral_type == "vertex":
 
         # Iterate over vertices
         cases = [None for i in range(num_vertices)]
         for i in range(num_vertices):
-            # Update transformer with vertices and generate case code +
-            # set of used geometry terms.
+            # Update transformer with vertices and generate case code
+            # + set of used geometry terms.
             c, mem_code, ops = _generate_element_tensor(integrals[i],
                                                         sets,
                                                         opt_par,
@@ -218,21 +235,22 @@ def _tabulate_tensor(ir, prefix, parameters):
             case += c
             cases[i] = "\n".join(case)
 
-            # Save number of operations (for printing info on operations).
+            # Save number of operations (for printing info on
+            # operations).
             operations.append([i, ops])
 
         # Generate tensor code for all cases using a switch.
         tensor_code = f_switch(format["vertex"], cases)
 
         # Generate code for basic geometric quantities
-        jacobi_code  = ""
+        jacobi_code = ""
         jacobi_code += format["compute_jacobian"](tdim, gdim)
         jacobi_code += "\n"
         jacobi_code += format["compute_jacobian_inverse"](tdim, gdim)
         if oriented:
             jacobi_code += format["orientation"](tdim, gdim)
         jacobi_code += "\n"
-        jacobi_code += "\n\n" + format["facet determinant"](tdim, gdim) # FIXME: This is not defined in a point???
+        jacobi_code += "\n\n" + format["facet determinant"](tdim, gdim)  # FIXME: This is not defined in a point???
 
     elif integral_type in custom_integral_types:
 
@@ -243,7 +261,6 @@ def _tabulate_tensor(ir, prefix, parameters):
             num_cells = 2
         elif integral_type == "overlap":
             num_cells = 2
-        #else: num_cells = ir["num_cells"] at the top
 
         # Warning that more than two cells in only partly supported.
         # The missing piece is to couple multiple cells to
@@ -253,7 +270,7 @@ def _tabulate_tensor(ir, prefix, parameters):
 
         # Modify the dimensions of the primary indices because we have a macro element
         if num_cells == 2:
-            prim_idims = [d*2 for d in prim_idims]
+            prim_idims = [d * 2 for d in prim_idims]
 
         # Check whether we need to generate facet normals
         generate_custom_facet_normal = num_cells == 2
@@ -268,10 +285,12 @@ def _tabulate_tensor(ir, prefix, parameters):
 
         tensor_code = "\n".join(tensor_code)
 
-        # Set operations equal to num_ops (for printing info on operations).
+        # Set operations equal to num_ops (for printing info on
+        # operations).
         operations.append([num_ops])
 
-        # FIXME: Jacobi code is only needed when we use cell volume or circumradius.
+        # FIXME: Jacobi code is only needed when we use cell volume or
+        # circumradius.
         # FIXME: Does not seem to be removed by removed_unused.
 
         # Generate code for basic geometric quantities
@@ -283,22 +302,26 @@ def _tabulate_tensor(ir, prefix, parameters):
             jacobi_code += "\n\n"
             if num_cells > 1:
                 jacobi_code += f_comment("Extract vertex coordinates\n")
-                jacobi_code += format["extract_cell_coordinates"]((tdim + 1)*gdim*i, r=i)
+                jacobi_code += format["extract_cell_coordinates"]((tdim + 1) * gdim * i, r=i)
                 jacobi_code += "\n\n"
             jacobi_code += format["compute_jacobian"](tdim, gdim, r=r)
             jacobi_code += "\n"
             jacobi_code += format["compute_jacobian_inverse"](tdim, gdim, r=r)
             jacobi_code += "\n"
-            jacobi_code += format["generate cell volume"](tdim, gdim, integral_type, r=r if num_cells > 1 else None)
+            jacobi_code += format["generate cell volume"](tdim, gdim,
+                                                          integral_type,
+                                                          r=r if num_cells > 1 else None)
             jacobi_code += "\n"
-            jacobi_code += format["generate circumradius"](tdim, gdim, integral_type, r=r if num_cells > 1 else None)
+            jacobi_code += format["generate circumradius"](tdim, gdim,
+                                                           integral_type,
+                                                           r=r if num_cells > 1 else None)
             jacobi_code += "\n"
 
     else:
         error("Unhandled integral type: " + str(integral_type))
 
-    # After we have generated the element code for all facets we can remove
-    # the unused transformations.
+    # After we have generated the element code for all facets we can
+    # remove the unused transformations.
     common = [remove_unused(jacobi_code, trans_set)]
 
     # FIXME: After introduction of custom integrals, the common code
@@ -307,13 +330,14 @@ def _tabulate_tensor(ir, prefix, parameters):
 
     # Add common code except for custom integrals
     if integral_type not in custom_integral_types:
-        common += _tabulate_weights([quadrature_weights[p] for p in sorted(used_weights)])
+        common += _tabulate_weights([quadrature_rules[p] for p in sorted(used_weights)])
 
         # Add common code for updating tables
         name_map = ir["name_map"]
         tables = ir["unique_tables"]
-        tables.update(affine_tables) # TODO: This is not populated anywhere, remove?
-        common += _tabulate_psis(tables, used_psi_tables, name_map, used_nzcs, opt_par, integral_type, gdim)
+        tables.update(affine_tables)  # TODO: This is not populated anywhere, remove?
+        common += _tabulate_psis(tables, used_psi_tables, name_map, used_nzcs,
+                                 opt_par, integral_type, gdim)
 
     # Add special tabulation code for custom integral
     else:
@@ -324,17 +348,18 @@ def _tabulate_tensor(ir, prefix, parameters):
                                                        num_vertices,
                                                        num_cells)
 
-    # Reset the element tensor (array 'A' given as argument to tabulate_tensor() by assembler)
+    # Reset the element tensor (array 'A' given as argument to
+    # tabulate_tensor() by assembler)
     # Handle functionals.
     common += [f_comment("Reset values in the element tensor.")]
-    value = f_float(0)
     if prim_idims == []:
         common += [f_assign(f_A(f_int(0)), f_float(0))]
     else:
-        dim = functools.reduce(lambda v, u: v*u, prim_idims)
+        dim = functools.reduce(lambda v, u: v * u, prim_idims)
         common += f_loop([f_assign(f_A(f_r), f_float(0))], [(f_r, 0, dim)])
 
-    # Create the constant geometry declarations (only generated if simplify expressions are enabled).
+    # Create the constant geometry declarations (only generated if
+    # simplify expressions are enabled).
     geo_ops, geo_code = generate_aux_constants(geo_consts, f_G, f_const_double)
     if geo_code:
         common += [f_comment("Number of operations to compute geometry constants: %d." % geo_ops)]
@@ -343,15 +368,9 @@ def _tabulate_tensor(ir, prefix, parameters):
 
     # Add comments.
     common += ["", f_comment("Compute element tensor using UFL quadrature representation")]
-    common += [f_comment("Optimisations: %s" % ", ".join([str((k, opt_par[k]))\
-                for k in sorted(opt_par.keys())]))]
-
-    # Print info on operation count.
-    message = {"cell":           "Cell, number of operations to compute tensor: %s",
-               "exterior_facet": "Exterior facet %d, number of operations to compute tensor: %s",
-               "interior_facet": "Interior facets (%d, %d), number of operations to compute tensor: %s",
-               "vertex":         "Vertex %s, number of operations to compute tensor: %s",
-               "custom":         "Custom domain, number of operations to compute tensor: %s"}
+    common += [f_comment("Optimisations: %s" % ", ".join([str((k, opt_par[k]))
+                                                          for k in sorted(opt_par.keys())]))]
+
     for ops in operations:
         # Add geo ops count to integral ops count for writing info.
         if isinstance(ops[-1], int):
@@ -359,30 +378,32 @@ def _tabulate_tensor(ir, prefix, parameters):
 
     return "\n".join(common) + "\n" + tensor_code
 
-def _generate_element_tensor(integrals, sets, optimise_parameters, gdim, tdim, generate_custom_facet_normal=False):
+
+def _generate_element_tensor(integrals, sets, optimise_parameters, gdim, tdim,
+                             generate_custom_facet_normal=False):
     "Construct quadrature code for element tensors."
 
     # Prefetch formats to speed up code generation.
-    f_comment   = format["comment"]
-    f_ip        = format["integration points"]
-    f_I         = format["ip constant"]
-    f_loop      = format["generate loop"]
+    f_comment = format["comment"]
+    f_ip = format["integration points"]
+    f_I = format["ip constant"]
+    f_loop = format["generate loop"]
     f_ip_coords = format["generate ip coordinates"]
-    f_coords    = format["coordinate_dofs"]
-    f_double    = format["float declaration"]
-    f_decl      = format["declaration"]
-    f_X         = format["ip coordinates"]
-    f_C         = format["conditional"]
+    f_coords = format["coordinate_dofs"]
+    f_double = format["float declaration"]
+    f_decl = format["declaration"]
+    f_X = format["ip coordinates"]
+    f_C = format["conditional"]
 
     # Initialise return values.
-    element_code     = []
+    element_code = []
     tensor_ops_count = 0
 
-    # TODO: KBO: The members_code was used when I generated the load_table.h
-    # file which could load tables of basisfunction. This feature has not
-    # been reimplemented. However, with the new design where we only
-    # tabulate unique tables (and only non-zero entries) it doesn't seem to
-    # be necessary. Should it be deleted?
+    # TODO: KBO: The members_code was used when I generated the
+    # load_table.h file which could load tables of basisfunction. This
+    # feature has not been reimplemented. However, with the new design
+    # where we only tabulate unique tables (and only non-zero entries)
+    # it doesn't seem to be necessary. Should it be deleted?
     members_code = ""
 
     # We receive a dictionary {num_points: form,}.
@@ -429,28 +450,27 @@ def _generate_element_tensor(integrals, sets, optimise_parameters, gdim, tdim, g
                 num_ops += ops
 
         # Generate code for ip constant declarations.
-#        ip_const_ops, ip_const_code = generate_aux_constants(ip_consts, f_I,\
-#                                        format["const float declaration"], True)
-        ip_const_ops, ip_const_code = generate_aux_constants(ip_consts, f_I,\
-                                        format["assign"], True)
+        ip_const_ops, ip_const_code = generate_aux_constants(ip_consts, f_I,
+                                                             format["assign"], True)
         num_ops += ip_const_ops
         if ip_const_code:
-            ip_code += ["", f_comment("Number of operations to compute ip constants: %d" %ip_const_ops)]
+            ip_code += ["", f_comment("Number of operations to compute ip constants: %d" % ip_const_ops)]
             ip_code += [format["declaration"](format["float declaration"], f_I(len(ip_consts)))]
             ip_code += ip_const_code
 
         # Generate code to evaluate the element tensor.
-        integral_code, ops = _generate_integral_code(points, terms, sets, optimise_parameters)
+        integral_code, ops = _generate_integral_code(points, terms, sets,
+                                                     optimise_parameters)
         num_ops += ops
         if points is None:
             quadrature_ops = "unknown"
             tensor_ops_count = "unknown"
         else:
-            quadrature_ops = num_ops*points
+            quadrature_ops = num_ops * points
             tensor_ops_count += quadrature_ops
         ip_code += integral_code
-        element_code.append(f_comment\
-            ("Number of operations to compute element tensor for following IP loop = %s" % str(quadrature_ops)))
+        element_code.append(f_comment
+                            ("Number of operations to compute element tensor for following IP loop = %s" % str(quadrature_ops)))
 
         # Generate code for custom facet normal if necessary
         if generate_custom_facet_normal:
@@ -471,17 +491,18 @@ def _generate_element_tensor(integrals, sets, optimise_parameters, gdim, tdim, g
 
     return (element_code, members_code, tensor_ops_count)
 
+
 def _generate_functions(functions, sets):
     "Generate declarations for functions and code to compute values."
 
     f_comment = format["comment"]
-    f_double  = format["float declaration"]
-    f_F       = format["function value"]
-    f_float   = format["floating point"]
-    f_decl    = format["declaration"]
-    f_r       = format["free indices"][0]
-    f_iadd    = format["iadd"]
-    f_loop    = format["generate loop"]
+    f_double = format["float declaration"]
+    f_F = format["function value"]
+    f_float = format["floating point"]
+    f_decl = format["declaration"]
+    f_r = format["free indices"][0]
+    f_iadd = format["iadd"]
+    f_loop = format["generate loop"]
 
     # Create the function declarations.
     code = ["", f_comment("Coefficient declarations.")]
@@ -503,7 +524,6 @@ def _generate_functions(functions, sets):
     # Loop ranges and get list of functions.
     for loop_range, list_of_functions in sorted(function_list.items()):
         function_expr = {}
-        function_numbers = []
         # Loop functions.
         func_ops = 0
         for function in list_of_functions:
@@ -517,35 +537,36 @@ def _generate_functions(functions, sets):
             # TODO: This check can be removed for speed later.
             ffc_assert(number not in function_expr, "This is definitely not supposed to happen!")
 
-            # Convert function (that might be a symbol) to a simple string and save.
+            # Convert function (that might be a symbol) to a simple
+            # string and save.
             function = str(function)
             function_expr[number] = function
 
-            # Get number of operations to compute entry and add to function operations count.
-            func_ops += (ops + 1)*range_i
+            # Get number of operations to compute entry and add to
+            # function operations count.
+            func_ops += (ops + 1) * range_i
 
         # Add function operations to total count
         total_ops += func_ops
         code += ["", f_comment("Total number of operations to compute function values = %d" % func_ops)]
 
-        # Sort the functions according to name and create loop to compute the function values.
+        # Sort the functions according to name and create loop to
+        # compute the function values.
         lines = [f_iadd(f_F(n), function_expr[n]) for n in sorted(function_expr.keys())]
-        code += f_loop(lines, [(f_r, 0, loop_range)]) # TODO: If loop_range == 1, this loop may be unneccessary. Not sure if it's safe to just skip it.
+        code += f_loop(lines, [(f_r, 0, loop_range)])  # TODO: If loop_range == 1, this loop may be unneccessary. Not sure if it's safe to just skip it.
 
     return code, total_ops
 
+
 def _generate_integral_code(points, terms, sets, optimise_parameters):
     "Generate code to evaluate the element tensor."
 
     # Prefetch formats to speed up code generation.
-    f_comment       = format["comment"]
-    f_mul           = format["mul"]
-    f_scale_factor  = format["scale factor"]
-    f_iadd          = format["iadd"]
-    f_add           = format["add"]
-    f_A             = format["element tensor"]
-    f_loop          = format["generate loop"]
-    f_B             = format["basis constant"]
+    f_comment = format["comment"]
+    f_iadd = format["iadd"]
+    f_A = format["element tensor"]
+    f_loop = format["generate loop"]
+    f_B = format["basis constant"]
 
     # Initialise return values.
     code = []
@@ -558,33 +579,34 @@ def _generate_integral_code(points, terms, sets, optimise_parameters):
     # Loop terms and create code.
     for loop, (data, entry_vals) in sorted(terms.items()):
 
-        # If we don't have any entry values, there's no need to generate the
-        # loop.
+        # If we don't have any entry values, there's no need to
+        # generate the loop.
         if not entry_vals:
             continue
 
         # Get data.
         t_set, u_weights, u_psi_tables, u_nzcs, basis_consts = data
 
-        # If we have a value, then we also need to update the sets of used variables.
+        # If we have a value, then we also need to update the sets of
+        # used variables.
         trans_set.update(t_set)
         used_weights.update(u_weights)
         used_psi_tables.update(u_psi_tables)
         used_nzcs.update(u_nzcs)
 
         # Generate code for basis constant declarations.
-#        basis_const_ops, basis_const_code = generate_aux_constants(basis_consts, f_B,\
-#                                        format["const float declaration"], True)
-        basis_const_ops, basis_const_code = generate_aux_constants(basis_consts, f_B,\
-                                        format["assign"], True)
+        basis_const_ops, basis_const_code = generate_aux_constants(basis_consts,
+                                                                   f_B,
+                                                                   format["assign"], True)
         decl_code = []
         if basis_consts:
-            decl_code = [format["declaration"](format["float declaration"], f_B(len(basis_consts)))]
+            decl_code = [format["declaration"](format["float declaration"],
+                                               f_B(len(basis_consts)))]
         loops[loop] = [basis_const_ops, decl_code + basis_const_code]
 
         for entry, value, ops in entry_vals:
-            # Compute number of operations to compute entry
-            # (add 1 because of += in assignment).
+            # Compute number of operations to compute entry (add 1
+            # because of += in assignment).
             entry_ops = ops + 1
 
             # Create comment for number of operations
@@ -596,7 +618,7 @@ def _generate_integral_code(points, terms, sets, optimise_parameters):
     # Write all the loops of basis functions.
     for loop, ops_lines in sorted(loops.items()):
         ops, lines = ops_lines
-        prim_ops = functools.reduce(lambda i, j: i*j, [ops] + [l[2] for l in loop])
+        prim_ops = functools.reduce(lambda i, j: i * j, [ops] + [l[2] for l in loop])
         # Add number of operations for current loop to total count.
         num_ops += prim_ops
         code += ["", f_comment("Number of operations for primary indices: %d" % prim_ops)]
@@ -604,25 +626,26 @@ def _generate_integral_code(points, terms, sets, optimise_parameters):
 
     return code, num_ops
 
-def _tabulate_weights(quadrature_weights):
+
+def _tabulate_weights(quadrature_rules):
     "Generate table of quadrature weights."
 
     # Prefetch formats to speed up code generation.
-    f_float     = format["floating point"]
-    f_table     = format["static const float declaration"]
-    f_sep       = format["list separator"]
-    f_weight    = format["weight"]
+    f_float = format["floating point"]
+    f_table = format["static const float declaration"]
+    f_sep = format["list separator"]
+    f_weight = format["weight"]
     f_component = format["component"]
-    f_group     = format["grouping"]
-    f_decl      = format["declaration"]
-    f_tensor    = format["tabulate tensor"]
-    f_comment   = format["comment"]
-    f_int       = format["int"]
+    f_group = format["grouping"]
+    f_decl = format["declaration"]
+    f_tensor = format["tabulate tensor"]
+    f_comment = format["comment"]
+    f_int = format["int"]
 
     code = ["", f_comment("Array of quadrature weights.")]
 
     # Loop tables of weights and create code.
-    for weights, points in quadrature_weights:
+    for weights, points in quadrature_rules:
         # FIXME: For now, raise error if we don't have weights.
         # We might want to change this later.
         ffc_assert(weights.any(), "No weights.")
@@ -636,9 +659,10 @@ def _tabulate_weights(quadrature_weights):
             value = f_tensor(weights)
         code += [f_decl(f_table, name, value)]
 
-        # Tabulate the quadrature points (uncomment for different parameters).
-        # 1) Tabulate the points as: p0, p1, p2, with p0 = (x0, y0, z0) etc.
-        # Use f_float to format the value (enable variable precision).
+        # Tabulate the quadrature points (uncomment for different
+        # parameters).  1) Tabulate the points as: p0, p1, p2, with p0
+        # = (x0, y0, z0) etc.  Use f_float to format the value (enable
+        # variable precision).
         formatted_points = [f_group(f_sep.join([f_float(val) for val in point]))
                             for point in points]
 
@@ -654,22 +678,22 @@ def _tabulate_weights(quadrature_weights):
 #            comment = "Quadrature coordinates on the UFC reference element: "
 #            code += [format["comment"](comment)]
 
-#            # All points have the same number of coordinates.
+# All points have the same number of coordinates.
 #            num_coord = len(points[0])
 
-#            # All points have x-coordinates.
+# All points have x-coordinates.
 #            xs = [f_float(p[0]) for p in points]
 #            comment = "X: " + f_sep.join(xs)
 #            code += [format["comment"](comment)]
 
 #            ys = []
 #            zs = []
-#            # Tabulate y-coordinate if we have 2 or more coordinates.
+# Tabulate y-coordinate if we have 2 or more coordinates.
 #            if num_coord >= 2:
 #                ys = [f_float(p[1]) for p in points]
 #                comment = "Y: " + f_sep.join(ys)
 #                code += [format["comment"](comment)]
-#            # Only tabulate z-coordinate if we have 3 coordinates.
+# Only tabulate z-coordinate if we have 3 coordinates.
 #            if num_coord == 3:
 #                zs = [f_float(p[2]) for p in points]
 #                comment = "Z: " + f_sep.join(zs)
@@ -679,49 +703,45 @@ def _tabulate_weights(quadrature_weights):
 
     return code
 
-def _tabulate_psis(tables, used_psi_tables, inv_name_map, used_nzcs, optimise_parameters, integral_type, gdim):
+
+def _tabulate_psis(tables, used_psi_tables, inv_name_map, used_nzcs,
+                   optimise_parameters, integral_type, gdim):
     "Tabulate values of basis functions and their derivatives at quadrature points."
 
     # Prefetch formats to speed up code generation.
-    f_comment      = format["comment"]
-    f_table        = format["static const float declaration"]
-    f_vector_table = format["vector table declaration"]
-    f_double_array = format["const double array declaration"]
-    f_component    = format["component"]
-    f_const_uint   = format["static const uint declaration"]
-    f_nzcolumns    = format["nonzero columns"]
-    f_list         = format["list"]
-    f_decl         = format["declaration"]
-    f_tensor       = format["tabulate tensor"]
-    f_new_line     = format["new line"]
-    f_int          = format["int"]
-    f_eval_basis   = format["call basis_all"]
-    f_eval_derivs  = format["call basis_derivatives_all"]
-    f_loop         = format["generate loop"]
-    f_quad_point   = format["quadrature point"]
-    f_eval_basis   = format["evaluate basis snippet"]
+    f_comment = format["comment"]
+    f_table = format["static const float declaration"]
+    f_component = format["component"]
+    f_const_uint = format["static const uint declaration"]
+    f_nzcolumns = format["nonzero columns"]
+    f_list = format["list"]
+    f_decl = format["declaration"]
+    f_tensor = format["tabulate tensor"]
+    f_new_line = format["new line"]
+    f_int = format["int"]
 
     # FIXME: Check if we can simplify the tabulation
     code = []
     code += [f_comment("Values of basis functions at quadrature points.")]
 
-    # Get list of non zero columns, if we ignore ones, ignore columns with one component.
+    # Get list of non zero columns, if we ignore ones, ignore columns
+    # with one component.
     if optimise_parameters["ignore ones"]:
         nzcs = []
         for key, val in sorted(inv_name_map.items()):
-            # Check if we have a table of ones or if number of non-zero columns
-            # is larger than one.
+            # Check if we have a table of ones or if number of
+            # non-zero columns is larger than one.
             if val[1] and len(val[1][1]) > 1 or not val[3]:
                 nzcs.append(val[1])
     else:
-        nzcs = [val[1] for key, val in sorted(inv_name_map.items())\
-                                        if val[1]]
+        nzcs = [val[1] for key, val in sorted(inv_name_map.items())
+                if val[1]]
 
     # TODO: Do we get arrays that are not unique?
     new_nzcs = []
     for nz in nzcs:
         # Only get unique arrays.
-        if not nz in new_nzcs:
+        if nz not in new_nzcs:
             new_nzcs.append(nz)
 
     # Construct name map.
@@ -736,10 +756,11 @@ def _tabulate_psis(tables, used_psi_tables, inv_name_map, used_nzcs, optimise_pa
     # Loop items in table and tabulate.
     for name in sorted(used_psi_tables):
 
-        # Only proceed if values are still used (if they're not remapped).
+        # Only proceed if values are still used (if they're not
+        # remapped).
         vals = tables[name]
 
-        if not vals is None:
+        if vals is not None:
 
             # Add declaration to name.
             ip, dofs = numpy.shape(vals)
@@ -755,7 +776,7 @@ def _tabulate_psis(tables, used_psi_tables, inv_name_map, used_nzcs, optimise_pa
                 for n in name_map[name]:
                     if inv_name_map[n][1] and inv_name_map[n][1] in new_nzcs:
                         i, cols = inv_name_map[n][1]
-                        if not i in used_nzcs:
+                        if i not in used_nzcs:
                             continue
                         code += [f_comment("Array of non-zero columns")]
                         value = f_list([f_int(c) for c in list(cols)])
@@ -766,26 +787,23 @@ def _tabulate_psis(tables, used_psi_tables, inv_name_map, used_nzcs, optimise_pa
                         new_nzcs.remove(inv_name_map[n][1])
     return code
 
-def _evaluate_basis_at_quadrature_points(psi_tables,
-                                         gdim,
-                                         element_data,
-                                         form_prefix,
-                                         num_vertices,
-                                         num_cells):
+
+def _evaluate_basis_at_quadrature_points(psi_tables, gdim, element_data,
+                                         form_prefix, num_vertices, num_cells):
     "Generate code for calling evaluate basis (derivatives) at quadrature points"
 
     # Prefetch formats to speed up code generation
-    f_comment          = format["comment"]
-    f_declaration      = format["declaration"]
-    f_static_array     = format["static array"]
-    f_loop             = format["generate loop"]
-    f_eval_basis_decl  = format["eval_basis_decl"]
-    f_eval_basis_init  = format["eval_basis_init"]
-    f_eval_basis       = format["eval_basis"]
-    f_eval_basis_copy  = format["eval_basis_copy"]
+    f_comment = format["comment"]
+    f_declaration = format["declaration"]
+    f_static_array = format["static array"]
+    f_loop = format["generate loop"]
+    f_eval_basis_decl = format["eval_basis_decl"]
+    f_eval_basis_init = format["eval_basis_init"]
+    f_eval_basis = format["eval_basis"]
+    f_eval_basis_copy = format["eval_basis_copy"]
     f_eval_derivs_decl = format["eval_derivs_decl"]
     f_eval_derivs_init = format["eval_derivs_init"]
-    f_eval_derivs      = format["eval_derivs"]
+    f_eval_derivs = format["eval_derivs"]
     f_eval_derivs_copy = format["eval_derivs_copy"]
 
     code = []
@@ -805,17 +823,19 @@ def _evaluate_basis_at_quadrature_points(psi_tables,
     # data and fill in the psi_tables. We therefore need to extract
     # for each prefix, which tables need to be filled in.
 
-    # For each unique prefix, check which derivatives and components are used
+    # For each unique prefix, check which derivatives and components
+    # are used
     used_derivatives_and_components = {}
     for prefix in prefixes:
         used_derivatives_and_components[prefix] = {}
         for table in psi_tables:
-            if not prefix in table: continue
+            if prefix not in table:
+                continue
 
             # Check for derivative
             if "_D" in table:
                 d = table.split("_D")[1].split("_")[0]
-                n = sum([int(_d) for _d in d]) # FIXME: Assume at most 9 derivatives...
+                n = sum([int(_d) for _d in d])  # FIXME: Assume at most 9 derivatives...
             else:
                 n = 0
 
@@ -826,7 +846,7 @@ def _evaluate_basis_at_quadrature_points(psi_tables,
                 c = None
 
             # Note that derivative has been used
-            if not n in used_derivatives_and_components[prefix]:
+            if n not in used_derivatives_and_components[prefix]:
                 used_derivatives_and_components[prefix][n] = set()
             used_derivatives_and_components[prefix][n].add(c)
 
@@ -839,17 +859,18 @@ def _evaluate_basis_at_quadrature_points(psi_tables,
     for prefix in prefixes:
 
         # Get element data for current element
-        counter        = int(prefix.split("FE")[1])
-        element_number = element_data[counter]["element_number"]
-        space_dim      = element_data[counter]["num_element_dofs"]
-        value_size     = element_data[counter]["value_size"]
+        counter = int(prefix.split("FE")[1])
+        space_dim = element_data[counter]["num_element_dofs"]
+        value_size = element_data[counter]["physical_value_size"]
+        element_classname = element_data[counter]["classname"]
 
         # Iterate over derivative orders
         for n, components in sorted_by_key(used_derivatives_and_components[prefix]):
             # components are a set and need to be sorted
             components = sorted(components)
 
-            # Code for evaluate_basis_all (n = 0 which means it's not a derivative)
+            # Code for evaluate_basis_all (n = 0 which means it's not
+            # a derivative)
             if n == 0:
 
                 code += [f_comment("--- Evaluation of basis functions ---")]
@@ -857,8 +878,8 @@ def _evaluate_basis_at_quadrature_points(psi_tables,
 
                 # Compute variables for code generation
                 eval_stride = value_size
-                eval_size   = space_dim*eval_stride
-                table_size  = num_cells*space_dim
+                eval_size = space_dim * eval_stride
+                table_size = num_cells * space_dim
 
                 # Iterate over components and initialize tables
                 for c in components:
@@ -880,19 +901,18 @@ def _evaluate_basis_at_quadrature_points(psi_tables,
                 for cell_number in range(num_cells):
 
                     # Compute variables for code generation
-                    eval_name     = "%s_values_%d" % (prefix, cell_number)
-                    table_offset  = cell_number*space_dim
-                    vertex_offset = cell_number*num_vertices*gdim
+                    eval_name = "%s_values_%d" % (prefix, cell_number)
+                    table_offset = cell_number * space_dim
+                    vertex_offset = cell_number * num_vertices * gdim
 
                     # Generate block of code for loop
                     block = []
 
                     # Generate code for calling evaluate_basis_all
-                    block += [f_eval_basis % {"form_prefix":    form_prefix,
-                                              "element_number": element_number,
-                                              "eval_name":      eval_name,
-                                              "gdim":           gdim,
-                                              "vertex_offset":  vertex_offset}]
+                    block += [f_eval_basis % {"classname": element_classname,
+                                              "eval_name": eval_name,
+                                              "gdim": gdim,
+                                              "vertex_offset": vertex_offset}]
 
                     # Iterate over components and extract values
                     for c in components:
@@ -907,11 +927,11 @@ def _evaluate_basis_at_quadrature_points(psi_tables,
 
                         # Generate code for copying values
                         block += [""]
-                        block += [f_eval_basis_copy % {"table_name":   table_name,
-                                                       "eval_name":    eval_name,
-                                                       "eval_stride":  eval_stride,
-                                                       "eval_offset":  eval_offset,
-                                                       "space_dim":    space_dim,
+                        block += [f_eval_basis_copy % {"table_name": table_name,
+                                                       "eval_name": eval_name,
+                                                       "eval_stride": eval_stride,
+                                                       "eval_offset": eval_offset,
+                                                       "space_dim": space_dim,
                                                        "table_offset": table_offset}]
 
                     # Generate code
@@ -923,12 +943,15 @@ def _evaluate_basis_at_quadrature_points(psi_tables,
             # Code for evaluate_basis_derivatives_all (derivative of degree n > 0)
             else:
 
-                code += [f_comment("--- Evaluation of basis function derivatives of order %d ---" % n) ]
+                code += [f_comment("--- Evaluation of basis function derivatives of order %d ---" % n)]
                 code += [""]
 
-                # FIXME: We extract values for all possible derivatives, even
-                # FIXME: if not all are used. (For components, we extract only
-                # FIXME: components that are actually used.) This may be optimized
+                # FIXME: We extract values for all possible
+                # derivatives, even
+                # FIXME: if not all are used. (For components, we
+                # extract only
+                # FIXME: components that are actually used.) This may
+                # be optimized
                 # FIXME: but the extra cost is likely small.
 
                 # Get derivative tuples
@@ -938,16 +961,17 @@ def _evaluate_basis_at_quadrature_points(psi_tables,
                 derivs = ["".join(str(_d) for _d in d) for d in deriv_tuples]
 
                 # Compute variables for code generation
-                eval_stride = value_size*len(derivs)
-                eval_size   = space_dim*eval_stride
-                table_size  = num_cells*space_dim
+                eval_stride = value_size * len(derivs)
+                eval_size = space_dim * eval_stride
+                table_size = num_cells * space_dim
 
                 # Iterate over derivatives and initialize tables
                 seen_derivs = set()
                 for d in derivs:
 
                     # Skip derivative if seen before (d^2/dxdy = d^2/dydx)
-                    if d in seen_derivs: continue
+                    if d in seen_derivs:
+                        continue
                     seen_derivs.add(d)
 
                     # Iterate over components
@@ -970,27 +994,27 @@ def _evaluate_basis_at_quadrature_points(psi_tables,
                 for cell_number in range(num_cells):
 
                     # Compute variables for code generation
-                    eval_name     = "%s_dvalues_%d_%d" % (prefix, n, cell_number)
-                    table_offset  = cell_number*space_dim
-                    vertex_offset = cell_number*num_vertices*gdim
+                    eval_name = "%s_dvalues_%d_%d" % (prefix, n, cell_number)
+                    table_offset = cell_number * space_dim
+                    vertex_offset = cell_number * num_vertices * gdim
 
                     # Generate block of code for loop
                     block = []
 
                     # Generate code for calling evaluate_basis_derivatives_all
-                    block += [f_eval_derivs % {"form_prefix":    form_prefix,
-                                               "element_number": element_number,
-                                               "eval_name":      eval_name,
-                                               "gdim":           gdim,
-                                               "vertex_offset":  vertex_offset,
-                                               "n":              n}]
+                    block += [f_eval_derivs % {"classname": element_classname,
+                                               "eval_name": eval_name,
+                                               "gdim": gdim,
+                                               "vertex_offset": vertex_offset,
+                                               "n": n}]
 
                     # Iterate over derivatives and extract values
                     seen_derivs = set()
                     for i, d in enumerate(derivs):
 
                         # Skip derivative if seen before (d^2/dxdy = d^2/dydx)
-                        if d in seen_derivs: continue
+                        if d in seen_derivs:
+                            continue
                         seen_derivs.add(d)
 
                         # Iterate over components
@@ -1002,15 +1026,15 @@ def _evaluate_basis_at_quadrature_points(psi_tables,
                                 eval_offset = i
                             else:
                                 table_name = prefix + "_C%s_D%s" % (c, d)
-                                eval_offset = len(derivs)*int(c) + i
+                                eval_offset = len(derivs) * int(c) + i
 
                             # Generate code for copying values
                             block += [""]
-                            block += [(f_eval_derivs_copy % {"table_name":   table_name,
-                                                             "eval_name":    eval_name,
-                                                             "eval_stride":  eval_stride,
-                                                             "eval_offset":  eval_offset,
-                                                             "space_dim":    space_dim,
+                            block += [(f_eval_derivs_copy % {"table_name": table_name,
+                                                             "eval_name": eval_name,
+                                                             "eval_stride": eval_stride,
+                                                             "eval_offset": eval_offset,
+                                                             "space_dim": space_dim,
                                                              "table_offset": table_offset})]
 
                     # Generate code
diff --git a/ffc/quadrature/quadratureoptimization.py b/ffc/quadrature/quadratureoptimization.py
index cc857b3..a46dab4 100644
--- a/ffc/quadrature/quadratureoptimization.py
+++ b/ffc/quadrature/quadratureoptimization.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright (C) 2013 Kristian B. Oelgaard
 #
 # This file is part of FFC.
@@ -16,16 +17,17 @@
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 #
 # Modified by Marie E. Rognes, 2013
-# Modified by Martin Alnaes, 2013-2014
+# Modified by Martin Sandve Alnæs, 2013-2014
 
 from ufl.utils.sorting import sorted_by_key
 
 # FFC modules
-from ffc.log import info, error, warning
+from ffc.log import info, error
 from ffc.cpp import format
-from ffc.quadrature.symbolics import optimise_code, BASIS, IP, GEO, CONST
+from ffc.quadrature.symbolics import optimise_code, BASIS, IP, GEO
 from ffc.quadrature.symbolics import create_product, create_sum, create_symbol, create_fraction
 
+
 def optimize_integral_ir(ir, parameters):
     "Compute optimized intermediate representation of integral."
 
@@ -40,11 +42,11 @@ def optimize_integral_ir(ir, parameters):
     if parameters["optimisation"]:
 
         # Get parameters
-        integrals      = ir["trans_integrals"]
-        integral_type  = ir["integral_type"]
-        num_facets     = ir["num_facets"]
-        num_vertices   = ir["num_vertices"]
-        geo_consts     = ir["geo_consts"]
+        integrals = ir["trans_integrals"]
+        integral_type = ir["integral_type"]
+        num_facets = ir["num_facets"]
+        num_vertices = ir["num_vertices"]
+        geo_consts = ir["geo_consts"]
         psi_tables_map = ir["psi_tables_map"]
 
         # Optimize based on integral type
@@ -66,7 +68,7 @@ def optimize_integral_ir(ir, parameters):
                 for j in range(num_facets):
                     info("Optimising expressions for facet integral (%d, %d)" % (i, j))
                     if parameters["optimisation"] in ("precompute_ip_const", "precompute_basis_const"):
-                        _precompute_expressions(integrals[i][j], geo_consts,parameters["optimisation"])
+                        _precompute_expressions(integrals[i][j], geo_consts, parameters["optimisation"])
                     else:
                         _simplify_expression(integrals[i][j], geo_consts, psi_tables_map)
         elif integral_type == "vertex":
@@ -81,6 +83,7 @@ def optimize_integral_ir(ir, parameters):
 
     return ir
 
+
 def _simplify_expression(integral, geo_consts, psi_tables_map):
     for points, terms, functions, ip_consts, coordinate, conditionals in integral:
         # NOTE: sorted is needed to pass the regression tests on the buildbots
@@ -105,6 +108,7 @@ def _simplify_expression(integral, geo_consts, psi_tables_map):
             terms[loop][0][2] = psi_tables
             terms[loop][1] = new_entry_vals
 
+
 def _precompute_expressions(integral, geo_consts, optimisation):
     for points, terms, functions, ip_consts, coordinate, conditionals in integral:
         for loop, (data, entry_vals) in sorted_by_key(terms):
@@ -117,6 +121,7 @@ def _precompute_expressions(integral, geo_consts, optimisation):
                     new_entry_vals.append((entry, value, value.ops()))
             terms[loop][1] = new_entry_vals
 
+
 def _extract_variables(val, basis_consts, ip_consts, geo_consts, t_set, optimisation):
     f_G = format["geometry constant"]
     f_I = format["ip constant"]
@@ -217,7 +222,7 @@ def _extract_variables(val, basis_consts, ip_consts, geo_consts, t_set, optimisa
 #            new_sum = create_sum(vrs)
 #            if new_sum.t == BASIS:
 #                return new_sum
-##                return _reduce_expression(new_sum, [], basis_consts, f_B, True)
+# return _reduce_expression(new_sum, [], basis_consts, f_B, True)
 #            elif new_sum.t == IP:
 #                return _reduce_expression(new_sum, [], ip_consts, f_I, True)
 #            elif new_sum.t == GEO:
@@ -226,6 +231,7 @@ def _extract_variables(val, basis_consts, ip_consts, geo_consts, t_set, optimisa
 #    else:
 #        error("Must have product or sum here: %s" % repr(new_val))
 
+
 def _reduce_expression(expr, symbols, const_dict, f_name, use_expr_type=False):
     if use_expr_type:
         if expr not in const_dict:
diff --git a/ffc/quadrature/quadraturerepresentation.py b/ffc/quadrature/quadraturerepresentation.py
index 0a89d2a..695c230 100644
--- a/ffc/quadrature/quadraturerepresentation.py
+++ b/ffc/quadrature/quadraturerepresentation.py
@@ -1,6 +1,7 @@
+# -*- coding: utf-8 -*-
 "Quadrature representation class for UFL"
 
-# Copyright (C) 2009-2015 Kristian B. Oelgaard
+# Copyright (C) 2009-2016 Kristian B. Oelgaard
 #
 # This file is part of FFC.
 #
@@ -18,18 +19,19 @@
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 #
 # Modified by Anders Logg 2009, 2014
-# Modified by Martin Alnaes 2013-2015
+# Modified by Martin Sandve Alnæs 2013-2016
 
 # Python modules
-import numpy, itertools, collections
+import six
+import collections
 
 # UFL modules
-from ufl.classes import Form, Integral
+from ufl.classes import Integral
 from ufl.sorting import sorted_expr_sum
 from ufl import custom_integral_types
 
 # FFC modules
-from ffc.log import ffc_assert, info, error, warning
+from ffc.log import ffc_assert, info, error
 from ffc.utils import product
 from ffc.fiatinterface import create_element
 
@@ -39,12 +41,13 @@ from ffc.quadrature.parameters import parse_optimise_parameters
 
 from ffc.quadrature.quadraturetransformer import QuadratureTransformer
 from ffc.quadrature.optimisedquadraturetransformer import QuadratureTransformerOpt
-import six
+
 
 def compute_integral_ir(itg_data,
                         form_data,
                         form_id,
                         element_numbers,
+                        classnames,
                         parameters):
     "Compute intermediate represention of integral."
 
@@ -56,20 +59,22 @@ def compute_integral_ir(itg_data,
     # Create and save the optisation parameters.
     ir["optimise_parameters"] = parse_optimise_parameters(parameters, itg_data)
 
-    # Sort integrals into a dict with quadrature degree and rule as key
+    # Sort integrals into a dict with quadrature degree and rule as
+    # key
     sorted_integrals = sort_integrals(itg_data.integrals,
                                       itg_data.metadata["quadrature_degree"],
                                       itg_data.metadata["quadrature_rule"])
 
-    # Tabulate quadrature points and basis function values in these points
+    # Tabulate quadrature points and basis function values in these
+    # points
     integrals_dict, psi_tables, quadrature_rules = \
         tabulate_basis(sorted_integrals, form_data, itg_data)
 
     # Save tables for quadrature weights and points
-    ir["quadrature_weights"] = quadrature_rules # TODO: Rename this ir entry to quadrature_rules
+    ir["quadrature_rules"] = quadrature_rules
 
-    # Create dimensions of primary indices, needed to reset the argument 'A'
-    # given to tabulate_tensor() by the assembler.
+    # Create dimensions of primary indices, needed to reset the
+    # argument 'A' given to tabulate_tensor() by the assembler.
     ir["prim_idims"] = [create_element(ufl_element).space_dimension()
                         for ufl_element in form_data.argument_elements]
 
@@ -90,28 +95,34 @@ def compute_integral_ir(itg_data,
 
     # Transform integrals.
     cell = itg_data.domain.ufl_cell()
-    ir["trans_integrals"] = _transform_integrals_by_type(ir, transformer, integrals_dict,
-                                                         itg_data.integral_type, cell)
+    ir["trans_integrals"] = _transform_integrals_by_type(ir, transformer,
+                                                         integrals_dict,
+                                                         itg_data.integral_type,
+                                                         cell)
 
     # Save tables populated by transformer
     ir["name_map"] = transformer.name_map
     ir["unique_tables"] = transformer.unique_tables  # Basis values?
 
-    # Save tables map, to extract table names for optimisation option -O.
+    # Save tables map, to extract table names for optimisation option
+    # -O.
     ir["psi_tables_map"] = transformer.psi_tables_map
     ir["additional_includes_set"] = transformer.additional_includes_set
 
-    # Insert empty data which will be populated if optimization is turned on
+    # Insert empty data which will be populated if optimization is
+    # turned on
     ir["geo_consts"] = {}
 
-    # Extract element data for psi_tables, needed for runtime quadrature.
-    # This is used by integral type custom_integral.
-    ir["element_data"] = _extract_element_data(transformer.element_map, element_numbers)
+    # Extract element data for psi_tables, needed for runtime
+    # quadrature.  This is used by integral type custom_integral.
+    ir["element_data"] = _extract_element_data(transformer.element_map, classnames)
 
     return ir
 
-def sort_integrals(integrals, default_quadrature_degree, default_quadrature_rule):
-    """Sort and accumulate integrals according to the number of quadrature points needed per axis.
+
+def sort_integrals(integrals, default_scheme, default_degree):
+    """Sort and accumulate integrals according to the number of quadrature
+    points needed per axis.
 
     All integrals should be over the same (sub)domain.
     """
@@ -119,10 +130,11 @@ def sort_integrals(integrals, default_quadrature_degree, default_quadrature_rule
     if not integrals:
         return {}
 
-    # Get domain properties from first integral, assuming all are the same
-    integral_type  = integrals[0].integral_type()
-    subdomain_id   = integrals[0].subdomain_id()
-    domain         = integrals[0].ufl_domain()
+    # Get domain properties from first integral, assuming all are the
+    # same
+    integral_type = integrals[0].integral_type()
+    subdomain_id = integrals[0].subdomain_id()
+    domain = integrals[0].ufl_domain()
     ffc_assert(all(integral_type == itg.integral_type() for itg in integrals),
                "Expecting only integrals of the same type.")
     ffc_assert(all(domain == itg.ufl_domain() for itg in integrals),
@@ -132,24 +144,28 @@ def sort_integrals(integrals, default_quadrature_degree, default_quadrature_rule
 
     sorted_integrands = collections.defaultdict(list)
     for integral in integrals:
-        # Override default degree and rule if specified in integral metadata
+        # Override default degree and rule if specified in integral
+        # metadata
         integral_metadata = integral.metadata() or {}
-        degree = integral_metadata.get("quadrature_degree", default_quadrature_degree)
-        rule = integral_metadata.get("quadrature_rule", default_quadrature_rule)
+        degree = integral_metadata.get("quadrature_degree", default_degree)
+        scheme = integral_metadata.get("quadrature_rule", default_scheme)
         assert isinstance(degree, int)
-        # Add integrand to dictionary according to degree and rule.
-        key = (degree, rule)
-        sorted_integrands[key].append(integral.integrand())
+        # Add integrand to dictionary according to degree and scheme.
+        rule = (scheme, degree)
+        sorted_integrands[rule].append(integral.integrand())
 
     # Create integrals from accumulated integrands.
     sorted_integrals = {}
-    for key, integrands in list(sorted_integrands.items()):
+    for rule, integrands in list(sorted_integrands.items()):
         # Summing integrands in a canonical ordering defined by UFL
         integrand = sorted_expr_sum(integrands)
-        sorted_integrals[key] = Integral(integrand, integral_type, domain, subdomain_id, {}, None)
+        sorted_integrals[rule] = Integral(integrand, integral_type, domain,
+                                          subdomain_id, {}, None)
     return sorted_integrals
 
-def _transform_integrals_by_type(ir, transformer, integrals_dict, integral_type, cell):
+
+def _transform_integrals_by_type(ir, transformer, integrals_dict,
+                                 integral_type, cell):
     num_facets = cell.num_facets()
     num_vertices = cell.num_vertices()
 
@@ -165,7 +181,8 @@ def _transform_integrals_by_type(ir, transformer, integrals_dict, integral_type,
         for i in range(num_facets):
             info("Transforming exterior facet integral %d" % i)
             transformer.update_facets(i, None)
-            terms[i] = _transform_integrals(transformer, integrals_dict, integral_type)
+            terms[i] = _transform_integrals(transformer, integrals_dict,
+                                            integral_type)
 
     elif integral_type == "interior_facet":
         # Compute transformed integrals.
@@ -174,7 +191,8 @@ def _transform_integrals_by_type(ir, transformer, integrals_dict, integral_type,
             for j in range(num_facets):
                 info("Transforming interior facet integral (%d, %d)" % (i, j))
                 transformer.update_facets(i, j)
-                terms[i][j] = _transform_integrals(transformer, integrals_dict, integral_type)
+                terms[i][j] = _transform_integrals(transformer, integrals_dict,
+                                                   integral_type)
 
     elif integral_type == "vertex":
         # Compute transformed integrals.
@@ -182,7 +200,8 @@ def _transform_integrals_by_type(ir, transformer, integrals_dict, integral_type,
         for i in range(num_vertices):
             info("Transforming vertex integral (%d)" % i)
             transformer.update_vertex(i)
-            terms[i] = _transform_integrals(transformer, integrals_dict, integral_type)
+            terms[i] = _transform_integrals(transformer, integrals_dict,
+                                            integral_type)
 
     elif integral_type in custom_integral_types:
 
@@ -195,6 +214,7 @@ def _transform_integrals_by_type(ir, transformer, integrals_dict, integral_type,
         error("Unhandled domain type: " + str(integral_type))
     return terms
 
+
 def _transform_integrals(transformer, integrals, integral_type):
     "Transform integrals from UFL expression to quadrature representation."
     transformed_integrals = []
@@ -202,10 +222,12 @@ def _transform_integrals(transformer, integrals, integral_type):
         transformer.update_points(point)
         terms = transformer.generate_terms(integral.integrand(), integral_type)
         transformed_integrals.append((point, terms, transformer.function_data,
-                                      {}, transformer.coordinate, transformer.conditionals))
+                                      {}, transformer.coordinate,
+                                      transformer.conditionals))
     return transformed_integrals
 
-def _extract_element_data(element_map, element_numbers):
+
+def _extract_element_data(element_map, classnames):
     "Extract element data for psi_tables"
 
     # Iterate over map
@@ -219,16 +241,12 @@ def _extract_element_data(element_map, element_numbers):
             # Compute value size
             value_size = product(ufl_element.value_shape())
 
-            # Get element number
-            element_number = element_numbers.get(ufl_element)
-            if element_number is None:
-                # FIXME: Should not be necessary, we should always know the element number
-                #warning("Missing element number, likely because vector elements are not yet supported in custom integrals.")
-                pass
-
+            # Get element classname
+            element_classname = classnames["finite_element"][ufl_element]
+    
             # Store data
-            element_data[counter] = {"value_size":      value_size,
+            element_data[counter] = {"physical_value_size": value_size,
                                      "num_element_dofs": fiat_element.space_dimension(),
-                                     "element_number":  element_number}
+                                     "classname": element_classname}
 
     return element_data
diff --git a/ffc/quadrature/quadraturetransformer.py b/ffc/quadrature/quadraturetransformer.py
index 75ec7b8..e65e88a 100644
--- a/ffc/quadrature/quadraturetransformer.py
+++ b/ffc/quadrature/quadraturetransformer.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "QuadratureTransformer for quadrature code generation to translate UFL expressions."
 
 # Copyright (C) 2009-2011 Kristian B. Oelgaard
@@ -19,31 +20,24 @@
 #
 # Modified by Peter Brune 2009
 # Modified by Anders Logg 2009, 2013
-# Modified by Lizao Li, 2015
+# Modified by Lizao Li, 2015, 2016
 
-# Python modules.
-from numpy import shape
-
-from six import iteritems, iterkeys
+from six import iterkeys
 from six.moves import xrange as range
 from six import advance_iterator as next
-def firstkey(d):
-    return next(iterkeys(d))
 
 # UFL common.
-from ufl.utils.stacks import StackDict, Stack
 from ufl.utils.sorting import sorted_by_key
 from ufl import custom_integral_types
 
 # UFL Classes.
-from ufl.classes import FixedIndex
 from ufl.classes import IntValue
 from ufl.classes import FloatValue
 from ufl.classes import Coefficient
 from ufl.classes import Operator
 
 # FFC modules.
-from ffc.log import info, debug, error, ffc_assert
+from ffc.log import error, ffc_assert
 from ffc.cpp import format
 
 # Utility and optimisation functions for quadraturegenerator.
@@ -52,7 +46,13 @@ from ffc.quadrature.quadratureutils import create_permutations
 from ffc.quadrature.reduce_operations import operation_count
 from ffc.quadrature.symbolics import IP
 
+
+def firstkey(d):
+    return next(iterkeys(d))
+
+
 class QuadratureTransformer(QuadratureTransformerBase):
+
     "Transform UFL representation to quadrature code."
 
     def __init__(self, *args):
@@ -67,13 +67,13 @@ class QuadratureTransformer(QuadratureTransformerBase):
     # AlgebraOperators (algebra.py).
     # -------------------------------------------------------------------------
     def sum(self, o, *operands):
-        #print("Visiting Sum: " + "\noperands: \n" + "\n".join(map(repr, operands)))
+        # print("Visiting Sum: " + "\noperands: \n" + "\n".join(map(repr, operands)))
 
         # Prefetch formats to speed up code generation.
-        f_group  = format["grouping"]
-        f_add    = format["add"]
-        f_mult   = format["multiply"]
-        f_float  = format["floating point"]
+        f_group = format["grouping"]
+        f_add = format["add"]
+        f_mult = format["multiply"]
+        f_float = format["floating point"]
         code = {}
 
         # Loop operands that has to be summed and sort according to map (j,k).
@@ -90,7 +90,7 @@ class QuadratureTransformer(QuadratureTransformerBase):
         for key, val in sorted_by_key(code):
 
             # Exclude all zero valued terms from sum
-            value = [v for v in val if not v is None]
+            value = [v for v in val if v is not None]
 
             if len(value) > 1:
                 # NOTE: Since we no longer call expand_indices, the following
@@ -130,7 +130,7 @@ class QuadratureTransformer(QuadratureTransformerBase):
         return code
 
     def product(self, o, *operands):
-        #print("Visiting Product with operands: \n" + "\n".join(map(repr,operands)))
+        # print("Visiting Product with operands: \n" + "\n".join(map(repr,operands)))
 
         # Prefetch formats to speed up code generation.
         f_mult = format["multiply"]
@@ -148,13 +148,13 @@ class QuadratureTransformer(QuadratureTransformerBase):
                 not_permute.append(op[()])
 
         # Create permutations.
-        #print("\npermute: " + repr(permute))
-        #print("\nnot_permute: " + repr(not_permute))
+        # print("\npermute: " + repr(permute))
+        # print("\nnot_permute: " + repr(not_permute))
         permutations = create_permutations(permute)
-        #print("\npermutations: " + repr(permutations))
+        # print("\npermutations: " + repr(permutations))
 
         # Create code.
-        code ={}
+        code = {}
         if permutations:
             for key, val in sorted(permutations.items()):
                 # Sort key in order to create a unique key.
@@ -209,20 +209,20 @@ class QuadratureTransformer(QuadratureTransformerBase):
         return code
 
     def division(self, o, *operands):
-        #print("Visiting Division with operands: \n" + "\n".join(map(repr,operands)))
+        # print("Visiting Division with operands: \n" + "\n".join(map(repr,operands)))
 
         # Prefetch formats to speed up code generation.
-        f_div      = format["div"]
+        f_div = format["div"]
         f_grouping = format["grouping"]
 
-        ffc_assert(len(operands) == 2, \
+        ffc_assert(len(operands) == 2,
                    "Expected exactly two operands (numerator and denominator): " + repr(operands))
 
         # Get the code from the operands.
         numerator_code, denominator_code = operands
 
         # TODO: Are these safety checks needed? Need to check for None?
-        ffc_assert(() in denominator_code and len(denominator_code) == 1, \
+        ffc_assert(() in denominator_code and len(denominator_code) == 1,
                    "Only support function type denominator: " + repr(denominator_code))
 
         code = {}
@@ -244,7 +244,7 @@ class QuadratureTransformer(QuadratureTransformerBase):
         return code
 
     def power(self, o):
-        #print("\n\nVisiting Power: " + repr(o))
+        # print("\n\nVisiting Power: " + repr(o))
 
         # Get base and exponent.
         base, expo = o.ufl_operands
@@ -270,25 +270,25 @@ class QuadratureTransformer(QuadratureTransformerBase):
             error("power does not support this exponent: " + repr(expo))
 
     def abs(self, o, *operands):
-        #print("\n\nVisiting Abs: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
+        # print("\n\nVisiting Abs: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
 
         # Prefetch formats to speed up code generation.
         f_abs = format["absolute value"]
 
         # TODO: Are these safety checks needed? Need to check for None?
-        ffc_assert(len(operands) == 1 and () in operands[0] and len(operands[0]) == 1, \
+        ffc_assert(len(operands) == 1 and () in operands[0] and len(operands[0]) == 1,
                    "Abs expects one operand of function type: " + repr(operands))
 
         # Take absolute value of operand.
-        return {():f_abs(operands[0][()])}
+        return {(): f_abs(operands[0][()])}
 
     def min_value(self, o, *operands):
         f_min = format["min value"]
-        return {():f_min(operands[0][()], operands[1][()])}
+        return {(): f_min(operands[0][()], operands[1][()])}
 
     def max_value(self, o, *operands):
         f_max = format["max value"]
-        return {():f_max(operands[0][()], operands[1][()])}
+        return {(): f_max(operands[0][()], operands[1][()])}
 
     # -------------------------------------------------------------------------
     # Condition, Conditional (conditional.py).
@@ -298,8 +298,8 @@ class QuadratureTransformer(QuadratureTransformerBase):
         # Get condition expression and do safety checks.
         # Might be a bit too strict?
         cond, = operands
-        ffc_assert(len(cond) == 1 and firstkey(cond) == (),\
-            "Condition for NotCondition should only be one function: " + repr(cond))
+        ffc_assert(len(cond) == 1 and firstkey(cond) == (),
+                   "Condition for NotCondition should only be one function: " + repr(cond))
         return {(): format["not"](cond[()])}
 
     def binary_condition(self, o, *operands):
@@ -307,16 +307,16 @@ class QuadratureTransformer(QuadratureTransformerBase):
         # Get LHS and RHS expressions and do safety checks.
         # Might be a bit too strict?
         lhs, rhs = operands
-        ffc_assert(len(lhs) == 1 and firstkey(lhs) == (),\
-            "LHS of Condition should only be one function: " + repr(lhs))
-        ffc_assert(len(rhs) == 1 and firstkey(rhs) == (),\
-            "RHS of Condition should only be one function: " + repr(rhs))
+        ffc_assert(len(lhs) == 1 and firstkey(lhs) == (),
+                   "LHS of Condition should only be one function: " + repr(lhs))
+        ffc_assert(len(rhs) == 1 and firstkey(rhs) == (),
+                   "RHS of Condition should only be one function: " + repr(rhs))
 
         # Map names from UFL to cpp.py.
-        name_map = {"==":"is equal", "!=":"not equal",\
-                    "<":"less than", ">":"greater than",\
-                    "<=":"less equal", ">=":"greater equal",\
-                    "&&":"and", "||": "or"}
+        name_map = {"==": "is equal", "!=": "not equal",
+                    "<": "less than", ">": "greater than",
+                    "<=": "less equal", ">=": "greater equal",
+                    "&&": "and", "||": "or"}
 
         # Get values and test for None
         l_val = lhs[()]
@@ -332,12 +332,12 @@ class QuadratureTransformer(QuadratureTransformerBase):
 
         # Get condition and return values; and do safety check.
         cond, true, false = operands
-        ffc_assert(len(cond) == 1 and firstkey(cond) == (),\
-            "Condtion should only be one function: " + repr(cond))
-        ffc_assert(len(true) == 1 and firstkey(true) == (),\
-            "True value of Condtional should only be one function: " + repr(true))
-        ffc_assert(len(false) == 1 and firstkey(false) == (),\
-            "False value of Condtional should only be one function: " + repr(false))
+        ffc_assert(len(cond) == 1 and firstkey(cond) == (),
+                   "Condtion should only be one function: " + repr(cond))
+        ffc_assert(len(true) == 1 and firstkey(true) == (),
+                   "True value of Condtional should only be one function: " + repr(true))
+        ffc_assert(len(false) == 1 and firstkey(false) == (),
+                   "False value of Condtional should only be one function: " + repr(false))
 
         # Get values and test for None
         t_val = true[()]
@@ -351,60 +351,60 @@ class QuadratureTransformer(QuadratureTransformerBase):
         expr = format["evaluate conditional"](cond[()], t_val, f_val)
         num = len(self.conditionals)
         name = format["conditional"](num)
-        if not expr in self.conditionals:
+        if expr not in self.conditionals:
             self.conditionals[expr] = (IP, operation_count(expr, format), num)
         else:
             num = self.conditionals[expr][2]
             name = format["conditional"](num)
-        return {():name}
+        return {(): name}
 
     # -------------------------------------------------------------------------
     # FacetNormal, CellVolume, Circumradius, FacetArea (geometry.py).
     # -------------------------------------------------------------------------
-    def cell_coordinate(self, o): # FIXME
+    def cell_coordinate(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def facet_coordinate(self, o): # FIXME
+    def facet_coordinate(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def cell_origin(self, o): # FIXME
+    def cell_origin(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def facet_origin(self, o): # FIXME
+    def facet_origin(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def cell_facet_origin(self, o): # FIXME
+    def cell_facet_origin(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def jacobian(self, o): # FIXME
+    def jacobian(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def jacobian_determinant(self, o): # FIXME
+    def jacobian_determinant(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def jacobian_inverse(self, o): # FIXME
+    def jacobian_inverse(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def facet_jacobian(self, o): # FIXME
+    def facet_jacobian(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def facet_jacobian_determinant(self, o): # FIXME
+    def facet_jacobian_determinant(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def facet_jacobian_inverse(self, o): # FIXME
+    def facet_jacobian_inverse(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def cell_facet_jacobian(self, o): # FIXME
+    def cell_facet_jacobian(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def cell_facet_jacobian_determinant(self, o): # FIXME
+    def cell_facet_jacobian_determinant(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def cell_facet_jacobian_inverse(self, o): # FIXME
+    def cell_facet_jacobian_inverse(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
     def facet_normal(self, o):
-        #print("Visiting FacetNormal:")
+        # print("Visiting FacetNormal:")
 
         # Get the component
         components = self.component()
@@ -414,15 +414,15 @@ class QuadratureTransformer(QuadratureTransformerBase):
 
         # Handle 1D as a special case.
         # FIXME: KBO: This has to change for mD elements in R^n : m < n
-        if self.gdim == 1: # FIXME: MSA: UFL uses shape (1,) now, can we remove the special case here then?
+        if self.gdim == 1:  # FIXME: MSA: UFL uses shape (1,) now, can we remove the special case here then?
             normal_component = format["normal component"](self.restriction, "")
         else:
             normal_component = format["normal component"](self.restriction, components[0])
         self.trans_set.add(normal_component)
 
-        return {():normal_component}
+        return {(): normal_component}
 
-    def cell_normal(self, o): # FIXME
+    def cell_normal(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
     def cell_volume(self, o):
@@ -430,14 +430,14 @@ class QuadratureTransformer(QuadratureTransformerBase):
         volume = format["cell volume"](self.restriction)
         self.trans_set.add(volume)
 
-        return {():volume}
+        return {(): volume}
 
     def circumradius(self, o):
         # FIXME: KBO: This has to change for higher order elements
         circumradius = format["circumradius"](self.restriction)
         self.trans_set.add(circumradius)
 
-        return {():circumradius}
+        return {(): circumradius}
 
     def facet_area(self, o):
         # FIXME: KBO: This has to change for higher order elements
@@ -448,7 +448,7 @@ class QuadratureTransformer(QuadratureTransformerBase):
         area = format["facet area"]
         self.trans_set.add(area)
 
-        return {():area}
+        return {(): area}
 
     def min_facet_edge_length(self, o):
         # FIXME: this has no meaning for cell integrals. (Need check in FFC or UFL).
@@ -460,7 +460,7 @@ class QuadratureTransformer(QuadratureTransformerBase):
         edgelen = format["min facet edge length"](self.restriction)
         self.trans_set.add(edgelen)
 
-        return {():edgelen}
+        return {(): edgelen}
 
     def max_facet_edge_length(self, o):
         # FIXME: this has no meaning for cell integrals. (Need check in FFC or UFL).
@@ -472,12 +472,12 @@ class QuadratureTransformer(QuadratureTransformerBase):
         edgelen = format["max facet edge length"](self.restriction)
         self.trans_set.add(edgelen)
 
-        return {():edgelen}
+        return {(): edgelen}
 
-    def cell_orientation(self, o): # FIXME
+    def cell_orientation(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
-    def quadrature_weight(self, o): # FIXME
+    def quadrature_weight(self, o):  # FIXME
         error("This object should be implemented by the child class.")
 
     # -------------------------------------------------------------------------
@@ -488,12 +488,12 @@ class QuadratureTransformer(QuadratureTransformerBase):
         "Create code for basis functions, and update relevant tables of used basis."
 
         # Prefetch formats to speed up code generation.
-        f_group         = format["grouping"]
-        f_add           = format["add"]
-        f_mult          = format["multiply"]
-        f_transform     = format["transform"]
-        f_detJ          = format["det(J)"]
-        f_inv           = format["inverse"]
+        f_group = format["grouping"]
+        f_add = format["add"]
+        f_mult = format["multiply"]
+        f_transform = format["transform"]
+        f_detJ = format["det(J)"]
+        f_inv = format["inverse"]
 
         # Reset code
         code = {}
@@ -508,18 +508,24 @@ class QuadratureTransformer(QuadratureTransformerBase):
                     deriv = []
 
                 # Create mapping and basis name.
-                #print "component = ", component
-                mapping, basis = self._create_mapping_basis(component, deriv, avg, ufl_argument, ffc_element)
-                if not mapping in code:
+                # print "component = ", component
+                mapping, basis = self._create_mapping_basis(component, deriv,
+                                                            avg, ufl_argument,
+                                                            ffc_element)
+                if mapping not in code:
                     code[mapping] = []
 
                 if basis is not None:
                     # Add transformation
-                    code[mapping].append(self.__apply_transform(basis, derivatives, multi, tdim, gdim))
+                    code[mapping].append(self.__apply_transform(basis,
+                                                                derivatives,
+                                                                multi, tdim,
+                                                                gdim))
 
         # Handle non-affine mappings.
         else:
-            ffc_assert(avg is None, "Taking average is not supported for non-affine mappings.")
+            ffc_assert(avg is None,
+                       "Taking average is not supported for non-affine mappings.")
 
             # Loop derivatives and get multi indices.
             for multi in multiindices:
@@ -527,11 +533,12 @@ class QuadratureTransformer(QuadratureTransformerBase):
                 if not any(deriv):
                     deriv = []
 
-                if 'piola' in transformation:
+                if transformation in ["covariant piola",
+                                      "contravariant piola"]:
                     for c in range(tdim):
                         # Create mapping and basis name.
                         mapping, basis = self._create_mapping_basis(c + local_offset, deriv, avg, ufl_argument, ffc_element)
-                        if not mapping in code:
+                        if mapping not in code:
                             code[mapping] = []
                         if basis is not None:
                             # Multiply basis by appropriate transform.
@@ -547,21 +554,23 @@ class QuadratureTransformer(QuadratureTransformerBase):
                                 basis = f_mult([detJ, dXdx, basis])
                             # Add transformation if needed.
                             code[mapping].append(self.__apply_transform(basis, derivatives, multi, tdim, gdim))
-                elif transformation == "pullback as metric":
+                elif transformation == "double covariant piola":
                     # g_ij = (Jinv)_ki G_kl (Jinv)lj
                     i = local_comp // tdim
-                    j = local_comp %  tdim
+                    j = local_comp % tdim
                     for k in range(tdim):
                         for l in range(tdim):
                             # Create mapping and basis name.
                             mapping, basis = self._create_mapping_basis(
                                 k * tdim + l + local_offset,
                                 deriv, avg, ufl_argument, ffc_element)
-                            if not mapping in code:
+                            if mapping not in code:
                                 code[mapping] = []
                             if basis is not None:
-                                J1 = f_transform("JINV", k, i, tdim, gdim, self.restriction)
-                                J2 = f_transform("JINV", l, j, tdim, gdim, self.restriction)
+                                J1 = f_transform("JINV", k, i, tdim, gdim,
+                                                 self.restriction)
+                                J2 = f_transform("JINV", l, j, tdim, gdim,
+                                                 self.restriction)
                                 self.trans_set.add(J1)
                                 self.trans_set.add(J2)
                                 basis = f_mult([J1, basis, J2])
@@ -570,8 +579,37 @@ class QuadratureTransformer(QuadratureTransformerBase):
                                     self.__apply_transform(
                                         basis, derivatives, multi,
                                         tdim, gdim))
+                elif transformation == "double contravariant piola":
+                    # g_ij = (detJ)^(-2) J_ik G_kl J_jl
+                    i = local_comp // tdim
+                    j = local_comp % tdim
+                    for k in range(tdim):
+                        for l in range(tdim):
+                            # Create mapping and basis name.
+                            mapping, basis = self._create_mapping_basis(
+                                k * tdim + l + local_offset,
+                                deriv, avg, ufl_argument, ffc_element)
+                            if mapping not in code:
+                                code[mapping] = []
+                            if basis is not None:
+                                J1 = f_transform("J", i, k, gdim, tdim,
+                                                 self.restriction)
+                                J2 = f_transform("J", j, l, gdim, tdim,
+                                                 self.restriction)
+                                self.trans_set.add(J1)
+                                self.trans_set.add(J2)
+                                self.trans_set.add(f_detJ(self.restriction))
+                                invdetJ = f_inv(f_detJ(self.restriction))
+                                basis = f_mult([invdetJ, invdetJ, J1, basis,
+                                                J2])
+                                # Add transformation if needed.
+                                code[mapping].append(
+                                    self.__apply_transform(
+                                        basis, derivatives, multi,
+                                        tdim, gdim))
                 else:
-                    error("Transformation is not supported: " + repr(transformation))
+                    error("Transformation is not supported: " +
+                          repr(transformation))
 
         # Add sums and group if necessary.
         for key, val in list(code.items()):
@@ -592,10 +630,10 @@ class QuadratureTransformer(QuadratureTransformerBase):
         ffc_assert(ufl_function in self._function_replace_values, "Expecting ufl_function to have been mapped prior to this call.")
 
         # Prefetch formats to speed up code generation.
-        f_mult          = format["multiply"]
-        f_transform     = format["transform"]
-        f_detJ          = format["det(J)"]
-        f_inv           = format["inverse"]
+        f_mult = format["multiply"]
+        f_transform = format["transform"]
+        f_detJ = format["det(J)"]
+        f_inv = format["inverse"]
 
         # Reset code
         code = []
@@ -624,7 +662,7 @@ class QuadratureTransformer(QuadratureTransformerBase):
                 if not any(deriv):
                     deriv = []
 
-                if 'piola' in transformation:
+                if transformation in ["covariant piola", "contravariant piola"]:
                     # Vectors
                     for c in range(tdim):
                         function_name = self._create_function_name(c + local_offset, deriv, avg, is_quad_element, ufl_function, ffc_element)
@@ -645,10 +683,10 @@ class QuadratureTransformer(QuadratureTransformerBase):
 
                             # Add transformation if needed.
                             code.append(self.__apply_transform(function_name, derivatives, multi, tdim, gdim))
-                elif transformation == "pullback as metric":
+                elif transformation == "double covariant piola":
                     # g_ij = (Jinv)_ki G_kl (Jinv)lj
                     i = local_comp // tdim
-                    j = local_comp %  tdim
+                    j = local_comp % tdim
                     for k in range(tdim):
                         for l in range(tdim):
                             # Create mapping and basis name.
@@ -660,6 +698,31 @@ class QuadratureTransformer(QuadratureTransformerBase):
                             function_name = f_mult([J1, function_name, J2])
                             # Add transformation if needed.
                             code.append(self.__apply_transform(function_name, derivatives, multi, tdim, gdim))
+                elif transformation == "double contravariant piola":
+                    # g_ij = (detJ)^(-2) J_ik G_kl J_jl
+                    i = local_comp // tdim
+                    j = local_comp % tdim
+                    for k in range(tdim):
+                        for l in range(tdim):
+                            # Create mapping and basis name.
+                            function_name = self._create_function_name(
+                                k * tdim + l + local_offset,
+                                deriv, avg, is_quad_element,
+                                ufl_function, ffc_element)
+                            J1 = f_transform("J", i, k, tdim, gdim,
+                                             self.restriction)
+                            J2 = f_transform("J", j, l, tdim, gdim,
+                                             self.restriction)
+                            invdetJ = f_inv(f_detJ(self.restriction))
+                            self.trans_set.add(J1)
+                            self.trans_set.add(J2)
+                            function_name = f_mult([invdetJ, invdetJ, J1,
+                                                    function_name, J2])
+                            # Add transformation if needed.
+                            code.append(self.__apply_transform(function_name,
+                                                               derivatives,
+                                                               multi, tdim,
+                                                               gdim))
                 else:
                     error("Transformation is not supported: " + repr(transformation))
 
@@ -675,10 +738,9 @@ class QuadratureTransformer(QuadratureTransformerBase):
     # -------------------------------------------------------------------------
     # Helper functions for Argument and Coefficient
     # -------------------------------------------------------------------------
-    def __apply_transform(self, function, derivatives, multi, tdim, gdim): # XXX UFLACS REUSE
+    def __apply_transform(self, function, derivatives, multi, tdim, gdim):  # XXX UFLACS REUSE
         "Apply transformation (from derivatives) to basis or function."
-        f_mult          = format["multiply"]
-        f_transform     = format["transform"]
+        f_transform = format["transform"]
 
         # Add transformation if needed.
         transforms = []
@@ -709,21 +771,21 @@ class QuadratureTransformer(QuadratureTransformerBase):
     # Helper functions for transformation of UFL objects in base class
     # -------------------------------------------------------------------------
     def _create_symbol(self, symbol, domain):
-        return {():symbol}
+        return {(): symbol}
 
     def _create_product(self, symbols):
         return format["multiply"](symbols)
 
     def _format_scalar_value(self, value):
-        #print("format_scalar_value: %d" % value)
+        # print("format_scalar_value: %d" % value)
         if value is None:
-            return {():None}
+            return {(): None}
         # TODO: Handle value < 0 better such that we don't have + -2 in the code.
-        return {():format["floating point"](value)}
+        return {(): format["floating point"](value)}
 
     def _math_function(self, operands, format_function):
         # TODO: Are these safety checks needed?
-        ffc_assert(len(operands) == 1 and () in operands[0] and len(operands[0]) == 1, \
+        ffc_assert(len(operands) == 1 and () in operands[0] and len(operands[0]) == 1,
                    "MathFunctions expect one operand of function type: " + repr(operands))
         # Use format function on value of operand.
         new_operand = {}
@@ -740,17 +802,17 @@ class QuadratureTransformer(QuadratureTransformerBase):
             x1 = format["floating point"](0.0)
         if x2 is None:
             x2 = format["floating point"](0.0)
-        return {():format_function(x1, x2)}
+        return {(): format_function(x1, x2)}
 
     def _bessel_function(self, operands, format_function):
         # TODO: Are these safety checks needed?
-        ffc_assert(len(operands) == 2,\
-          "BesselFunctions expect two operands of function type: " + repr(operands))
+        ffc_assert(len(operands) == 2,
+                   "BesselFunctions expect two operands of function type: " + repr(operands))
         nu, x = operands
-        ffc_assert(len(nu) == 1 and () in nu,\
-          "Expecting one operand of function type as first argument to BesselFunction : " + repr(nu))
-        ffc_assert(len(x) == 1 and () in x,\
-          "Expecting one operand of function type as second argument to BesselFunction : " + repr(x))
+        ffc_assert(len(nu) == 1 and () in nu,
+                   "Expecting one operand of function type as first argument to BesselFunction : " + repr(nu))
+        ffc_assert(len(x) == 1 and () in x,
+                   "Expecting one operand of function type as second argument to BesselFunction : " + repr(x))
         nu = nu[()]
         x = x[()]
         if nu is None:
@@ -762,7 +824,7 @@ class QuadratureTransformer(QuadratureTransformerBase):
         # NOTE: Order of nu and x is reversed compared to the UFL and C++
         # function calls because of how Symbol treats exponents.
         # this will change once quadrature optimisations has been cleaned up.
-        return {():format_function(x, nu)}
+        return {(): format_function(x, nu)}
 
     # -------------------------------------------------------------------------
     # Helper functions for code_generation()
diff --git a/ffc/quadrature/quadraturetransformerbase.py b/ffc/quadrature/quadraturetransformerbase.py
index fe2903b..1b3a875 100644
--- a/ffc/quadrature/quadraturetransformerbase.py
+++ b/ffc/quadrature/quadraturetransformerbase.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 """QuadratureTransformerBase, a common class for quadrature
 transformers to translate UFL expressions."""
 
@@ -18,7 +19,7 @@ transformers to translate UFL expressions."""
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 #
-# Modified by Martin Alnaes, 2013
+# Modified by Martin Sandve Alnæs, 2013
 # Modified by Garth N. Wells, 2013
 # Modified by Lizao Li, 2015
 # Modified by Anders Logg, 2015
@@ -37,7 +38,7 @@ from ufl import custom_integral_types
 from ufl.algorithms import Transformer
 
 # FFC modules.
-from ffc.log import ffc_assert, error, info
+from ffc.log import ffc_assert, error
 from ffc.fiatinterface import create_element, map_facet_points
 from ffc.mixedelement import MixedElement
 from ffc.cpp import format
@@ -48,9 +49,11 @@ from ffc.representationutils import transform_component
 
 # Utility and optimisation functions for quadraturegenerator.
 from ffc.quadrature.quadratureutils import create_psi_tables
-from ffc.quadrature.symbolics import BASIS, IP, GEO, CONST
+from ffc.quadrature.symbolics import BASIS, IP, GEO
+
 
 class QuadratureTransformerBase(Transformer):
+
     "Transform UFL representation to quadrature code."
 
     def __init__(self,
@@ -67,10 +70,10 @@ class QuadratureTransformerBase(Transformer):
         # Save optimise_parameters, weights and fiat_elements_map.
         self.optimise_parameters = optimise_parameters
 
-        # Map from original functions with possibly incomplete elements
-        # to functions with properly completed elements
+        # Map from original functions with possibly incomplete
+        # elements to functions with properly completed elements
         self._function_replace_map = function_replace_map
-        self._function_replace_values = set(function_replace_map.values()) # For assertions
+        self._function_replace_values = set(function_replace_map.values())  # For assertions
 
         # Create containers and variables.
         self.used_psi_tables = set()
@@ -93,7 +96,6 @@ class QuadratureTransformerBase(Transformer):
         self.coordinate = None
         self.conditionals = {}
         self.additional_includes_set = set()
-        self.__psi_tables = psi_tables # TODO: Unused? Remove?
 
         # Stacks.
         self._derivatives = []
@@ -101,14 +103,17 @@ class QuadratureTransformerBase(Transformer):
         self._components = Stack()
 
         self.element_map, self.name_map, self.unique_tables =\
-            create_psi_tables(psi_tables, self.optimise_parameters["eliminate zeros"], self.entity_type)
+            create_psi_tables(psi_tables,
+                              self.optimise_parameters["eliminate zeros"],
+                              self.entity_type)
 
         # Cache.
         self.argument_cache = {}
         self.function_cache = {}
 
     def update_cell(self):
-        ffc_assert(self.entity_type == "cell", "Not expecting update_cell on a %s." % self.entity_type)
+        ffc_assert(self.entity_type == "cell",
+                   "Not expecting update_cell on a %s." % self.entity_type)
         self.facet0 = None
         self.facet1 = None
         self.vertex = None
@@ -116,7 +121,8 @@ class QuadratureTransformerBase(Transformer):
         self.conditionals = {}
 
     def update_facets(self, facet0, facet1):
-        ffc_assert(self.entity_type == "facet", "Not expecting update_facet on a %s." % self.entity_type)
+        ffc_assert(self.entity_type == "facet",
+                   "Not expecting update_facet on a %s." % self.entity_type)
         self.facet0 = facet0
         self.facet1 = facet1
         self.vertex = None
@@ -124,7 +130,8 @@ class QuadratureTransformerBase(Transformer):
         self.conditionals = {}
 
     def update_vertex(self, vertex):
-        ffc_assert(self.entity_type == "vertex", "Not expecting update_vertex on a %s." % self.entity_type)
+        ffc_assert(self.entity_type == "vertex",
+                   "Not expecting update_vertex on a %s." % self.entity_type)
         self.facet0 = None
         self.facet1 = None
         self.vertex = vertex
@@ -147,7 +154,8 @@ class QuadratureTransformerBase(Transformer):
         print("\nQuadratureTransformer, element_map:\n", self.element_map)
         print("\nQuadratureTransformer, name_map:\n", self.name_map)
         print("\nQuadratureTransformer, unique_tables:\n", self.unique_tables)
-        print("\nQuadratureTransformer, used_psi_tables:\n", self.used_psi_tables)
+        print("\nQuadratureTransformer, used_psi_tables:\n",
+              self.used_psi_tables)
         print("\nQuadratureTransformer, psi_tables_map:\n", self.psi_tables_map)
         print("\nQuadratureTransformer, used_weights:\n", self.used_weights)
 
@@ -166,20 +174,23 @@ class QuadratureTransformerBase(Transformer):
     # -------------------------------------------------------------------------
     # Start handling UFL classes.
     # -------------------------------------------------------------------------
+
     # Nothing in expr.py is handled. Can only handle children of these clases.
     def expr(self, o):
         print("\n\nVisiting basic Expr:", repr(o), "with operands:")
         error("This expression is not handled: " + repr(o))
 
-    # Nothing in terminal.py is handled. Can only handle children of these clases.
+    # Nothing in terminal.py is handled. Can only handle children of
+    # these clases.
     def terminal(self, o):
         print("\n\nVisiting basic Terminal:", repr(o), "with operands:")
         error("This terminal is not handled: " + repr(o))
 
     # -------------------------------------------------------------------------
     # Things which should not be here (after expansion etc.) from:
-    # algebra.py, differentiation.py, finiteelement.py,
-    # form.py, geometry.py, indexing.py, integral.py, tensoralgebra.py, variable.py.
+    # algebra.py, differentiation.py, finiteelement.py, form.py,
+    # geometry.py, indexing.py, integral.py, tensoralgebra.py,
+    # variable.py.
     # -------------------------------------------------------------------------
     def derivative(self, o, *operands):
         print("\n\nVisiting Derivative: ", repr(o))
@@ -229,6 +240,7 @@ class QuadratureTransformerBase(Transformer):
     # -------------------------------------------------------------------------
     # Handlers that should be implemented by child classes.
     # -------------------------------------------------------------------------
+
     # -------------------------------------------------------------------------
     # AlgebraOperators (algebra.py).
     # -------------------------------------------------------------------------
@@ -327,11 +339,12 @@ class QuadratureTransformerBase(Transformer):
     # -------------------------------------------------------------------------
     # Things that can be handled by the base class.
     # -------------------------------------------------------------------------
+
     # -------------------------------------------------------------------------
     # Argument (basisfunction.py).
     # -------------------------------------------------------------------------
     def argument(self, o):
-        #print("\nVisiting Argument:" + repr(o))
+        # print("\nVisiting Argument:" + repr(o))
 
         # Create aux. info.
         components = self.component()
@@ -343,13 +356,17 @@ class QuadratureTransformerBase(Transformer):
 
         tdim = self.tdim
 
-        # FIXME: Why does using a code dict from cache make the expression manipulations blow (MemoryError) up later?
+        # FIXME: Why does using a code dict from cache make the
+        # expression manipulations blow (MemoryError) up later?
         if basis is None or self.optimise_parameters["optimisation"]:
             # Get auxiliary variables to generate basis
-            (component, local_elem, local_comp, local_offset,
-             ffc_element, transformation, multiindices) = self._get_auxiliary_variables(o, components, derivatives)
+            (component, local_elem, local_comp, local_offset, ffc_element,
+             transformation,
+             multiindices) = self._get_auxiliary_variables(o, components,
+                                                           derivatives)
 
-            # Create mapping and code for basis function and add to dict.
+            # Create mapping and code for basis function and add to
+            # dict.
             basis = self.create_argument(o, derivatives, component, local_comp,
                                          local_offset, ffc_element,
                                          transformation, multiindices,
@@ -362,7 +379,6 @@ class QuadratureTransformerBase(Transformer):
     # Constant values (constantvalue.py).
     # -------------------------------------------------------------------------
     def identity(self, o):
-        #print "\n\nVisiting Identity: ", repr(o)
 
         # Get components
         i, j = self.component()
@@ -375,18 +391,15 @@ class QuadratureTransformerBase(Transformer):
 
     def scalar_value(self, o):
         "ScalarValue covers IntValue and FloatValue"
-        #print "\n\nVisiting ScalarValue: ", repr(o)
         return self._format_scalar_value(o.value())
 
     def zero(self, o):
-        #print "\n\nVisiting Zero:", repr(o)
         return self._format_scalar_value(None)
 
     # -------------------------------------------------------------------------
     # Grad (differentiation.py).
     # -------------------------------------------------------------------------
     def grad(self, o):
-        #print("\n\nVisiting Grad: " + repr(o))
 
         # Get expression
         derivative_expr, = o.ufl_operands
@@ -396,14 +409,16 @@ class QuadratureTransformerBase(Transformer):
 
         en = len(derivative_expr.ufl_shape)
         cn = len(components)
-        ffc_assert(len(o.ufl_shape) == cn, "Expecting rank of grad expression to match components length.")
+        ffc_assert(len(o.ufl_shape) == cn,
+                   "Expecting rank of grad expression to match components length.")
 
         # Get direction of derivative
-        if cn == en+1:
+        if cn == en + 1:
             der = components[en]
             self._components.push(components[:en])
         elif cn == en:
-            # This happens in 1D, sligtly messy result of defining grad(f) == f.dx(0)
+            # This happens in 1D, sligtly messy result of defining
+            # grad(f) == f.dx(0)
             der = 0
         else:
             error("Unexpected rank %d and component length %d in grad expression." % (en, cn))
@@ -416,7 +431,7 @@ class QuadratureTransformerBase(Transformer):
 
         # Remove the direction from list of derivatives
         self._derivatives.pop()
-        if cn == en+1:
+        if cn == en + 1:
             self._components.pop()
         return code
 
@@ -424,7 +439,7 @@ class QuadratureTransformerBase(Transformer):
     # Coefficient and Constants (function.py).
     # -------------------------------------------------------------------------
     def coefficient(self, o):
-        #print("\nVisiting Coefficient: " + repr(o))
+        # print("\nVisiting Coefficient: " + repr(o))
 
         # Map o to object with proper element and count
         o = self._function_replace_map[o]
@@ -437,23 +452,32 @@ class QuadratureTransformerBase(Transformer):
         key = (o, components, derivatives, self.restriction, self.avg)
         function_code = self.function_cache.get(key)
 
-        # FIXME: Why does using a code dict from cache make the expression manipulations blow (MemoryError) up later?
+        # FIXME: Why does using a code dict from cache make the
+        # expression manipulations blow (MemoryError) up later?
         if function_code is None or self.optimise_parameters["optimisation"]:
             # Get auxiliary variables to generate function
             (component, local_elem, local_comp, local_offset,
-             ffc_element, transformation, multiindices) = self._get_auxiliary_variables(o, components, derivatives)
+             ffc_element, transformation,
+             multiindices) = self._get_auxiliary_variables(o, components,
+                                                           derivatives)
 
-            # Check that we don't take derivatives of QuadratureElements.
+            # Check that we don't take derivatives of
+            # QuadratureElements.
             is_quad_element = local_elem.family() == "Quadrature"
-            ffc_assert(not (derivatives and is_quad_element), \
+            ffc_assert(not (derivatives and is_quad_element),
                        "Derivatives of Quadrature elements are not supported: " + repr(o))
 
             tdim = self.tdim
 
-            # Create code for function and add empty tuple to cache dict.
+            # Create code for function and add empty tuple to cache
+            # dict.
             function_code = {(): self.create_function(o, derivatives, component,
-                                                      local_comp, local_offset, ffc_element, is_quad_element,
-                                                      transformation, multiindices, tdim, self.gdim, self.avg)}
+                                                      local_comp, local_offset,
+                                                      ffc_element,
+                                                      is_quad_element,
+                                                      transformation,
+                                                      multiindices, tdim,
+                                                      self.gdim, self.avg)}
 
             self.function_cache[key] = function_code
 
@@ -463,15 +487,13 @@ class QuadratureTransformerBase(Transformer):
     # SpatialCoordinate (geometry.py).
     # -------------------------------------------------------------------------
     def spatial_coordinate(self, o):
-        #print "\n\nVisiting SpatialCoordinate:", repr(o)
-        #print "\n\nVisiting SpatialCoordinate:", repr(operands)
 
         # Get the component.
         components = self.component()
         c, = components
 
         if self.vertex is not None:
-            error("Spatial coordinates (x) not implemented for point measure (dP)") # TODO: Implement this, should be just the point.
+            error("Spatial coordinates (x) not implemented for point measure (dP)")  # TODO: Implement this, should be just the point.
         else:
             # Generate the appropriate coordinate and update tables.
             coordinate = format["ip coordinates"](self.points, c)
@@ -482,7 +504,6 @@ class QuadratureTransformerBase(Transformer):
     # Indexed (indexed.py).
     # -------------------------------------------------------------------------
     def indexed(self, o):
-        #print("\n\nVisiting Indexed:" + repr(o))
 
         # Get indexed expression and index, map index to current value
         # and update components
@@ -501,7 +522,6 @@ class QuadratureTransformerBase(Transformer):
     # MultiIndex (indexing.py).
     # -------------------------------------------------------------------------
     def multi_index(self, o):
-        #print("\n\nVisiting MultiIndex:" + repr(o))
 
         # Loop all indices in MultiIndex and get current values
         subcomp = []
@@ -517,7 +537,6 @@ class QuadratureTransformerBase(Transformer):
     # IndexSum (indexsum.py).
     # -------------------------------------------------------------------------
     def index_sum(self, o):
-        #print("\n\nVisiting IndexSum: " + str(tree_format(o)))
 
         # Get expression and index that we're summing over
         summand, multiindex = o.ufl_operands
@@ -539,96 +558,74 @@ class QuadratureTransformerBase(Transformer):
     # MathFunctions (mathfunctions.py).
     # -------------------------------------------------------------------------
     def sqrt(self, o, *operands):
-        #print("\n\nVisiting Sqrt: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
         self.additional_includes_set.add("#include <cmath>")
         return self._math_function(operands, format["sqrt"])
 
     def exp(self, o, *operands):
-        #print("\n\nVisiting Exp: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
         self.additional_includes_set.add("#include <cmath>")
         return self._math_function(operands, format["exp"])
 
     def ln(self, o, *operands):
-        #print("\n\nVisiting Ln: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
         self.additional_includes_set.add("#include <cmath>")
         return self._math_function(operands, format["ln"])
 
     def cos(self, o, *operands):
-        #print("\n\nVisiting Cos: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
         self.additional_includes_set.add("#include <cmath>")
         return self._math_function(operands, format["cos"])
 
     def sin(self, o, *operands):
-        #print("\n\nVisiting Sin: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
         self.additional_includes_set.add("#include <cmath>")
         return self._math_function(operands, format["sin"])
 
     def tan(self, o, *operands):
-        #print("\n\nVisiting Tan: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
         self.additional_includes_set.add("#include <cmath>")
         return self._math_function(operands, format["tan"])
 
     def cosh(self, o, *operands):
-        #print("\n\nVisiting Cosh: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
         self.additional_includes_set.add("#include <cmath>")
         return self._math_function(operands, format["cosh"])
 
     def sinh(self, o, *operands):
-        #print("\n\nVisiting Sinh: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
         self.additional_includes_set.add("#include <cmath>")
         return self._math_function(operands, format["sinh"])
 
     def tanh(self, o, *operands):
-        #print("\n\nVisiting Tanh: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
         self.additional_includes_set.add("#include <cmath>")
         return self._math_function(operands, format["tanh"])
 
     def acos(self, o, *operands):
-        #print("\n\nVisiting Acos: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
         self.additional_includes_set.add("#include <cmath>")
         return self._math_function(operands, format["acos"])
 
     def asin(self, o, *operands):
-        #print("\n\nVisiting Asin: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
         self.additional_includes_set.add("#include <cmath>")
         return self._math_function(operands, format["asin"])
 
     def atan(self, o, *operands):
-        #print("\n\nVisiting Atan: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
         self.additional_includes_set.add("#include <cmath>")
         return self._math_function(operands, format["atan"])
 
     def atan_2(self, o, *operands):
-        #print("\n\nVisiting Atan2: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
         self.additional_includes_set.add("#include <cmath>")
         return self._atan_2_function(operands, format["atan_2"])
 
     def erf(self, o, *operands):
-        #print("\n\nVisiting Erf: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
         self.additional_includes_set.add("#include <cmath>")
         return self._math_function(operands, format["erf"])
 
     def bessel_i(self, o, *operands):
-        #print("\n\nVisiting Bessel_I: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
-        #self.additional_includes_set.add("#include <tr1/cmath>")
         self.additional_includes_set.add("#include <boost/math/special_functions.hpp>")
         return self._bessel_function(operands, format["bessel_i"])
 
     def bessel_j(self, o, *operands):
-        #print("\n\nVisiting Bessel_J: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
-        #self.additional_includes_set.add("#include <tr1/cmath>")
         self.additional_includes_set.add("#include <boost/math/special_functions.hpp>")
         return self._bessel_function(operands, format["bessel_j"])
 
     def bessel_k(self, o, *operands):
-        #print("\n\nVisiting Bessel_K: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
-        #self.additional_includes_set.add("#include <tr1/cmath>")
         self.additional_includes_set.add("#include <boost/math/special_functions.hpp>")
         return self._bessel_function(operands, format["bessel_k"])
 
     def bessel_y(self, o, *operands):
-        #print("\n\nVisiting Bessel_Y: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
-        #self.additional_includes_set.add("#include <tr1/cmath>")
         self.additional_includes_set.add("#include <boost/math/special_functions.hpp>")
         return self._bessel_function(operands, format["bessel_y"])
 
@@ -636,12 +633,13 @@ class QuadratureTransformerBase(Transformer):
     # PositiveRestricted and NegativeRestricted (restriction.py).
     # -------------------------------------------------------------------------
     def positive_restricted(self, o):
-        #print("\n\nVisiting PositiveRestricted: " + repr(o))
 
         # Just get the first operand, there should only be one.
         restricted_expr = o.ufl_operands
-        ffc_assert(len(restricted_expr) == 1, "Only expected one operand for restriction: " + repr(restricted_expr))
-        ffc_assert(self.restriction is None, "Expression is restricted twice: " + repr(restricted_expr))
+        ffc_assert(len(restricted_expr) == 1,
+                   "Only expected one operand for restriction: " + repr(restricted_expr))
+        ffc_assert(self.restriction is None,
+                   "Expression is restricted twice: " + repr(restricted_expr))
 
         # Set restriction, visit operand and reset restriction
         self.restriction = "+"
@@ -651,12 +649,13 @@ class QuadratureTransformerBase(Transformer):
         return code
 
     def negative_restricted(self, o):
-        #print("\n\nVisiting NegativeRestricted: " + repr(o))
 
         # Just get the first operand, there should only be one.
         restricted_expr = o.ufl_operands
-        ffc_assert(len(restricted_expr) == 1, "Only expected one operand for restriction: " + repr(restricted_expr))
-        ffc_assert(self.restriction is None, "Expression is restricted twice: " + repr(restricted_expr))
+        ffc_assert(len(restricted_expr) == 1,
+                   "Only expected one operand for restriction: " + repr(restricted_expr))
+        ffc_assert(self.restriction is None,
+                   "Expression is restricted twice: " + repr(restricted_expr))
 
         # Set restriction, visit operand and reset restriction
         self.restriction = "-"
@@ -680,7 +679,8 @@ class QuadratureTransformerBase(Transformer):
 
     def facet_avg(self, o):
         ffc_assert(self.avg is None, "Not expecting nested averages.")
-        ffc_assert(self.entity_type != "cell", "Cannot take facet_avg in a cell integral.")
+        ffc_assert(self.entity_type != "cell",
+                   "Cannot take facet_avg in a cell integral.")
 
         # Just get the first operand, there should only be one.
         expr, = o.ufl_operands
@@ -696,7 +696,6 @@ class QuadratureTransformerBase(Transformer):
     # ComponentTensor (tensors.py).
     # -------------------------------------------------------------------------
     def component_tensor(self, o):
-        #print("\n\nVisiting ComponentTensor:\n" + str(tree_format(o)))
 
         # Get expression and indices
         component_expr, indices = o.ufl_operands
@@ -704,11 +703,11 @@ class QuadratureTransformerBase(Transformer):
         # Get current component(s)
         components = self.component()
 
-        ffc_assert(len(components) == len(indices), \
+        ffc_assert(len(components) == len(indices),
                    "The number of known components must be equal to the number of components of the ComponentTensor for this to work.")
 
-        # Update the index dict (map index values of current known indices to
-        # those of the component tensor)
+        # Update the index dict (map index values of current known
+        # indices to those of the component tensor)
         for i, v in zip(indices._indices, components):
             self._index2value.push(i, v)
 
@@ -728,7 +727,6 @@ class QuadratureTransformerBase(Transformer):
         return code
 
     def list_tensor(self, o):
-        #print("\n\nVisiting ListTensor: " + repr(o))
 
         # Get the component
         component = self.component()
@@ -750,8 +748,6 @@ class QuadratureTransformerBase(Transformer):
     # Variable (variable.py).
     # -------------------------------------------------------------------------
     def variable(self, o):
-        #print("\n\nVisiting Variable: " + repr(o))
-        # Just get the expression associated with the variable
         return self.visit(o.expression())
 
     # -------------------------------------------------------------------------
@@ -769,8 +765,8 @@ class QuadratureTransformerBase(Transformer):
         # Get formatting
         f_nzc = format["nonzero columns"](0).split("0")[0]
 
-        # Loop code and add weight and scale factor to value and sort after
-        # loop ranges.
+        # Loop code and add weight and scale factor to value and sort
+        # after loop ranges.
         new_terms = {}
         for key, val in sorted(terms.items()):
             # If value was zero continue.
@@ -782,9 +778,10 @@ class QuadratureTransformerBase(Transformer):
             used_nzcs = set([int(k[1].split(f_nzc)[1].split("[")[0]) for k in key if f_nzc in k[1]])
             sets.append(used_nzcs)
 
-            # Create loop information and entry from key info and insert into dict.
+            # Create loop information and entry from key info and
+            # insert into dict.
             loop, entry = self._create_loop_entry(key, f_nzc)
-            if not loop in new_terms:
+            if loop not in new_terms:
                 sets.append({})
                 new_terms[loop] = [sets, [(entry, value, ops)]]
             else:
@@ -796,7 +793,8 @@ class QuadratureTransformerBase(Transformer):
 
     def _create_loop_entry(self, key, f_nzc):
 
-        indices = {0: format["first free index"],  1: format["second free index"]}
+        indices = {0: format["first free index"],
+                   1: format["second free index"]}
 
         # Create appropriate entries.
         # FIXME: We only support rank 0, 1 and 2.
@@ -807,19 +805,21 @@ class QuadratureTransformerBase(Transformer):
         elif len(key) == 1:
             key = key[0]
             # Checking if the basis was a test function.
-            # TODO: Make sure test function indices are always rearranged to 0.
-            ffc_assert(key[0] == -2 or key[0] == 0, \
-                        "Linear forms must be defined using test functions only: " + repr(key))
+            # TODO: Make sure test function indices are always
+            # rearranged to 0.
+            ffc_assert(key[0] == -2 or key[0] == 0,
+                       "Linear forms must be defined using test functions only: " + repr(key))
             index_j, entry, range_j, space_dim_j = key
             loop = ((indices[index_j], 0, range_j),)
             if range_j == 1 and self.optimise_parameters["ignore ones"] and not (f_nzc in entry):
                 loop = ()
         elif len(key) == 2:
-            # Extract test and trial loops in correct order and check if for is legal.
+            # Extract test and trial loops in correct order and check
+            # if for is legal.
             key0, key1 = (0, 0)
             for k in key:
-                ffc_assert(k[0] in indices, \
-                "Bilinear forms must be defined using test and trial functions (index -2, -1, 0, 1): " + repr(k))
+                ffc_assert(k[0] in indices,
+                           "Bilinear forms must be defined using test and trial functions (index -2, -1, 0, 1): " + repr(k))
                 if k[0] == -2 or k[0] == 0:
                     key0 = k
                 else:
@@ -828,7 +828,8 @@ class QuadratureTransformerBase(Transformer):
             index_k, entry_k, range_k, space_dim_k = key1
 
             loop = []
-            if not (range_j == 1 and self.optimise_parameters["ignore ones"]) or f_nzc in entry_j:
+            if not (range_j == 1 and
+                    self.optimise_parameters["ignore ones"]) or f_nzc in entry_j:
                 loop.append((indices[index_j], 0, range_j))
             if not (range_k == 1 and self.optimise_parameters["ignore ones"]) or f_nzc in entry_k:
                 loop.append((indices[index_k], 0, range_k))
@@ -862,31 +863,34 @@ class QuadratureTransformerBase(Transformer):
     def _atan_2_function(self, operands, format_function):
         error("This function should be implemented by the child class.")
 
-    def _get_auxiliary_variables(self,
-                                 ufl_function,
-                                 component,
-                                 derivatives):
+    def _get_auxiliary_variables(self, ufl_function, component, derivatives):
         "Helper function for both Coefficient and Argument."
 
         # Get UFL element.
         ufl_element = ufl_function.ufl_element()
 
-        # Get subelement and the relative (flattened) component (in case we have mixed elements).
+        # Get subelement and the relative (flattened) component (in
+        # case we have mixed elements).
         local_comp, local_elem = ufl_element.extract_component(component)
 
         # For basic tensor elements, local_comp should be flattened
         if len(local_comp) and len(local_elem.value_shape()) > 0:
-            # Map component using component map from UFL. (TODO: inefficient use of this function)
-            comp_map, _ = build_component_numbering(local_elem.value_shape(), local_elem.symmetry())
+            # Map component using component map from UFL. (TODO:
+            # inefficient use of this function)
+            comp_map, _ = build_component_numbering(local_elem.value_shape(),
+                                                    local_elem.symmetry())
             local_comp = comp_map[local_comp]
 
         # Set local_comp to 0 if it is ()
-        if not local_comp: local_comp = 0
+        if not local_comp:
+            local_comp = 0
 
-        # Check that component != not () since the UFL component map will turn
-        # it into 0, and () does not mean zeroth component in this context.
+        # Check that component != not () since the UFL component map
+        # will turn it into 0, and () does not mean zeroth component
+        # in this context.
         if len(component):
-            # Map component using component map from UFL. (TODO: inefficient use of this function)
+            # Map component using component map from UFL. (TODO:
+            # inefficient use of this function)
             comp_map, comp_num = build_component_numbering(ufl_element.value_shape(), ufl_element.symmetry())
             component = comp_map[component]
 
@@ -910,23 +914,27 @@ class QuadratureTransformerBase(Transformer):
 
         # Generate FFC multi index for derivatives.
         tdim = self.tdim
-        multiindices = FFCMultiIndex([list(range(tdim))]*len(derivatives)).indices
+        multiindices = FFCMultiIndex([list(range(tdim))] * len(derivatives)).indices
 
-        return (component, local_elem, local_comp, local_offset, ffc_element, transformation, multiindices)
+        return (component, local_elem, local_comp, local_offset, ffc_element,
+                transformation, multiindices)
 
     def _get_current_entity(self):
         if self.entity_type == "cell":
-            # If we add macro cell integration, I guess the 'current cell number' would go here?
+            # If we add macro cell integration, I guess the 'current
+            # cell number' would go here?
             return 0
         elif self.entity_type == "facet":
             # Handle restriction through facet.
-            return {"+": self.facet0, "-": self.facet1, None: self.facet0}[self.restriction]
+            return {"+": self.facet0, "-": self.facet1,
+                    None: self.facet0}[self.restriction]
         elif self.entity_type == "vertex":
             return self.vertex
         else:
             error("Unknown entity type %s." % self.entity_type)
 
-    def _create_mapping_basis(self, component, deriv, avg, ufl_argument, ffc_element):
+    def _create_mapping_basis(self, component, deriv, avg, ufl_argument,
+                              ffc_element):
         "Create basis name and mapping from given basis_info."
 
         # Get string for integration points.
@@ -947,8 +955,9 @@ class QuadratureTransformerBase(Transformer):
         element_counter = self.element_map[1 if avg else self.points][ufl_argument.ufl_element()]
         loop_index = indices[ufl_argument.number()]
 
-        # Offset element space dimension in case of negative restriction,
-        # need to use the complete element for offset in case of mixed element.
+        # Offset element space dimension in case of negative
+        # restriction, need to use the complete element for offset in
+        # case of mixed element.
         space_dim = ffc_element.space_dimension()
         offset = {"+": "", "-": str(space_dim), None: ""}[self.restriction]
 
@@ -956,10 +965,12 @@ class QuadratureTransformerBase(Transformer):
         if self.restriction in ("+", "-"):
             space_dim *= 2
 
-        # Create basis access, we never need to map the entry in the basis table
-        # since we will either loop the entire space dimension or the non-zeros.
+        # Create basis access, we never need to map the entry in the
+        # basis table since we will either loop the entire space
+        # dimension or the non-zeros.
         if self.restriction in ("+", "-") and self.integral_type in custom_integral_types and offset != "":
-            # Special case access for custom integrals (all basis functions stored in flattened array)
+            # Special case access for custom integrals (all basis
+            # functions stored in flattened array)
             basis_access = format["component"]("", [f_ip, format["add"]([loop_index, offset])])
         else:
             # Normal basis function access
@@ -967,21 +978,23 @@ class QuadratureTransformerBase(Transformer):
 
         # Get current cell entity, with current restriction considered
         entity = self._get_current_entity()
-        name = generate_psi_name(element_counter, self.entity_type, entity, component, deriv, avg)
+        name = generate_psi_name(element_counter, self.entity_type, entity,
+                                 component, deriv, avg)
         name, non_zeros, zeros, ones = self.name_map[name]
         loop_index_range = shape(self.unique_tables[name])[1]
 
         # If domain type is custom, then special-case set loop index
         # range since table is empty
         if self.integral_type in custom_integral_types:
-            loop_index_range = ffc_element.space_dimension() # different from `space_dimension`...
+            loop_index_range = ffc_element.space_dimension()  # different from `space_dimension`...
 
         basis = ""
         # Ignore zeros if applicable
         if zeros and (self.optimise_parameters["ignore zero tables"] or self.optimise_parameters["remove zero terms"]):
             basis = self._format_scalar_value(None)[()]
-        # If the loop index range is one we can look up the first component
-        # in the psi array. If we only have ones we don't need the basis.
+        # If the loop index range is one we can look up the first
+        # component in the psi array. If we only have ones we don't
+        # need the basis.
         elif self.optimise_parameters["ignore ones"] and loop_index_range == 1 and ones:
             loop_index = "0"
             basis = self._format_scalar_value(1.0)[()]
@@ -990,7 +1003,8 @@ class QuadratureTransformerBase(Transformer):
             basis = self._create_symbol(name + basis_access, BASIS)[()]
             self.psi_tables_map[basis] = name
 
-        # Create the correct mapping of the basis function into the local element tensor.
+        # Create the correct mapping of the basis function into the
+        # local element tensor.
         basis_map = loop_index
         if non_zeros and basis_map == "0":
             basis_map = str(non_zeros[1][0])
@@ -1009,11 +1023,13 @@ class QuadratureTransformerBase(Transformer):
         # Example dx and ds: (0, j, 3, 3)
         # Example dS: (0, (j + 3), 3, 6), 6=2*space_dim
         # Example dS optimised: (0, (nz2[j] + 3), 2, 6), 6=2*space_dim
-        mapping = ((ufl_argument.number(), basis_map, loop_index_range, space_dim),)
+        mapping = ((ufl_argument.number(), basis_map, loop_index_range,
+                    space_dim),)
 
         return (mapping, basis)
 
-    def _create_function_name(self, component, deriv, avg, is_quad_element, ufl_function, ffc_element):
+    def _create_function_name(self, component, deriv, avg, is_quad_element,
+                              ufl_function, ffc_element):
         ffc_assert(ufl_function in self._function_replace_values,
                    "Expecting ufl_function to have been mapped prior to this call.")
 
@@ -1031,7 +1047,8 @@ class QuadratureTransformerBase(Transformer):
 
         # Create basis name and map to correct basis and get info.
         generate_psi_name = format["psi name"]
-        psi_name = generate_psi_name(element_counter, self.entity_type, entity, component, deriv, avg)
+        psi_name = generate_psi_name(element_counter, self.entity_type, entity,
+                                     component, deriv, avg)
         psi_name, non_zeros, zeros, ones = self.name_map[psi_name]
 
         # If all basis are zero we just return None.
@@ -1048,8 +1065,8 @@ class QuadratureTransformerBase(Transformer):
 
         # Create loop index
         if loop_index_range > 1:
-            # Pick first free index of secondary type
-            # (could use primary indices, but it's better to avoid confusion).
+            # Pick first free index of secondary type (could use
+            # primary indices, but it's better to avoid confusion).
             loop_index = format["free indices"][0]
 
         # If we have a quadrature element we can use the ip number to look
@@ -1057,7 +1074,8 @@ class QuadratureTransformerBase(Transformer):
         if is_quad_element:
             quad_offset = 0
             if component:
-                # FIXME: Should we add a member function elements() to FiniteElement?
+                # FIXME: Should we add a member function elements() to
+                # FiniteElement?
                 if isinstance(ffc_element, MixedElement):
                     for i in range(component):
                         quad_offset += ffc_element.elements()[i].space_dimension()
@@ -1069,32 +1087,37 @@ class QuadratureTransformerBase(Transformer):
                 coefficient_access = format["add"]([f_ip, str(quad_offset)])
             else:
                 if non_zeros and f_ip == "0":
-                    # If we have non zero column mapping but only one value just pick it.
-                    # MSA: This should be an exact refactoring of the previous logic,
-                    #      but I'm not sure if these lines were originally intended
-                    #      here in the quad_element section, or what this even does:
+                    # If we have non zero column mapping but only one
+                    # value just pick it.
+                    # MSA: This should be an exact refactoring of the
+                    #      previous logic, but I'm not sure if these
+                    #      lines were originally intended here in the
+                    #      quad_element section, or what this even
+                    #      does:
                     coefficient_access = str(non_zeros[1][0])
                 else:
                     coefficient_access = f_ip
 
         elif non_zeros:
             if loop_index_range == 1:
-                # If we have non zero column mapping but only one value just pick it.
+                # If we have non zero column mapping but only one
+                # value just pick it.
                 coefficient_access = str(non_zeros[1][0])
             else:
                 used_nzcs.add(non_zeros[0])
                 coefficient_access = format["component"](format["nonzero columns"](non_zeros[0]), loop_index)
 
         elif loop_index_range == 1:
-            # If the loop index range is one we can look up the first component
-            # in the coefficient array.
+            # If the loop index range is one we can look up the first
+            # component in the coefficient array.
             coefficient_access = "0"
 
         else:
             # Or just set default coefficient access.
             coefficient_access = loop_index
 
-        # Offset by element space dimension in case of negative restriction.
+        # Offset by element space dimension in case of negative
+        # restriction.
         offset = {"+": "", "-": str(ffc_element.space_dimension()), None: ""}[self.restriction]
         if offset:
             coefficient_access = format["add"]([coefficient_access, offset])
@@ -1106,25 +1129,30 @@ class QuadratureTransformerBase(Transformer):
         except:
             C_ACCESS = IP
         # Format coefficient access
-        coefficient = format["coefficient"](str(ufl_function.count()), coefficient_access)
+        coefficient = format["coefficient"](str(ufl_function.count()),
+                                            coefficient_access)
 
         # Build and cache some function data only if we need the basis
-        # MSA: I don't understand the mix of loop index range check and ones check here, but that's how it was.
+        # MSA: I don't understand the mix of loop index range check
+        # and ones check here, but that's how it was.
         if is_quad_element or (loop_index_range == 1 and ones and self.optimise_parameters["ignore ones"]):
-            # If we only have ones or if we have a quadrature element we don't need the basis.
+            # If we only have ones or if we have a quadrature element
+            # we don't need the basis.
             function_symbol_name = coefficient
             F_ACCESS = C_ACCESS
 
         else:
-            # Add basis name to set of used tables and add matrix access.
-            # TODO: We should first add this table if the function is used later
-            # in the expressions. If some term is multiplied by zero and it falls
-            # away there is no need to compute the function value
+            # Add basis name to set of used tables and add matrix
+            # access.
+            # TODO: We should first add this table if the function is
+            # used later in the expressions. If some term is
+            # multiplied by zero and it falls away there is no need to
+            # compute the function value
             self.used_psi_tables.add(psi_name)
 
-            # Create basis access, we never need to map the entry in the basis
-            # table since we will either loop the entire space dimension or the
-            # non-zeros.
+            # Create basis access, we never need to map the entry in
+            # the basis table since we will either loop the entire
+            # space dimension or the non-zeros.
             basis_index = "0" if loop_index_range == 1 else loop_index
             basis_access = format["component"]("", [f_ip, basis_index])
             basis_name = psi_name + basis_access
@@ -1140,8 +1168,9 @@ class QuadratureTransformerBase(Transformer):
             function_expr = self._create_product([self._create_symbol(basis_name, B_ACCESS)[()],
                                                   self._create_symbol(coefficient, C_ACCESS)[()]])
 
-            # Check if the expression to compute the function value is already in
-            # the dictionary of used function. If not, generate a new name and add.
+            # Check if the expression to compute the function value is
+            # already in the dictionary of used function. If not,
+            # generate a new name and add.
             data = self.function_data.get(function_expr)
             if data is None:
                 function_count = len(self.function_data)
@@ -1151,21 +1180,25 @@ class QuadratureTransformerBase(Transformer):
                 self.function_data[function_expr] = data
             function_symbol_name = format["function value"](data[0])
 
-        # TODO: This access stuff was changed subtly during my refactoring, the
-        # X_ACCESS vars is an attempt at making it right, make sure it is correct now!
+        # TODO: This access stuff was changed subtly during my
+        # refactoring, the
+        # X_ACCESS vars is an attempt at making it right, make sure it
+        # is correct now!
         return self._create_symbol(function_symbol_name, F_ACCESS)[()]
 
     def _generate_affine_map(self):
-        """Generate psi table for affine map, used by spatial coordinate to map
-        integration point to physical element."""
+        """Generate psi table for affine map, used by spatial coordinate to
+        map integration point to physical element.
+
+        """
 
-        # TODO: KBO: Perhaps it is better to create a fiat element and tabulate
-        # the values at the integration points?
+        # TODO: KBO: Perhaps it is better to create a fiat element and
+        # tabulate the values at the integration points?
         f_FEA = format["affine map table"]
-        f_ip  = format["integration points"]
+        f_ip = format["integration points"]
 
-        affine_map = {1: lambda x: [1.0 - x[0],               x[0]],
-                      2: lambda x: [1.0 - x[0] - x[1],        x[0], x[1]],
+        affine_map = {1: lambda x: [1.0 - x[0], x[0]],
+                      2: lambda x: [1.0 - x[0] - x[1], x[0], x[1]],
                       3: lambda x: [1.0 - x[0] - x[1] - x[2], x[0], x[1], x[2]]}
 
         num_ip = self.points
@@ -1175,8 +1208,7 @@ class QuadratureTransformerBase(Transformer):
             points = map_facet_points(points, self.facet0)
             name = f_FEA(num_ip, self.facet0)
         elif self.vertex is not None:
-            error("Spatial coordinates (x) not implemented for point measure (dP)") # TODO: Implement this, should be just the point.
-            #name = f_FEA(num_ip, self.vertex)
+            error("Spatial coordinates (x) not implemented for point measure (dP)")  # TODO: Implement this, should be just the point.
         else:
             name = f_FEA(num_ip, 0)
 
diff --git a/ffc/quadrature/quadratureutils.py b/ffc/quadrature/quadratureutils.py
index b4f3885..2e19526 100644
--- a/ffc/quadrature/quadratureutils.py
+++ b/ffc/quadrature/quadratureutils.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "Utility functions for quadrature representation."
 
 # Copyright (C) 2007-2010 Kristian B. Oelgaard
@@ -28,31 +29,34 @@
 import numpy
 
 # FFC modules.
-from ffc.log import debug, error, ffc_assert
+from ffc.log import debug, error
 from ffc.cpp import format
 
+
 def create_psi_tables(tables, eliminate_zeros, entity_type):
     "Create names and maps for tables and non-zero entries if appropriate."
 
     debug("\nQG-utils, psi_tables:\n" + str(tables))
 
-    # Create element map {points:{element:number,},}
-    # and a plain dictionary {name:values,}.
+    # Create element map {points:{element:number,},} and a plain
+    # dictionary {name:values,}.
     element_map, flat_tables = flatten_psi_tables(tables, entity_type)
     debug("\nQG-utils, psi_tables, flat_tables:\n" + str(flat_tables))
 
-    # Reduce tables such that we only have those tables left with unique values
-    # Create a name map for those tables that are redundant.
+    # Reduce tables such that we only have those tables left with
+    # unique values. Create a name map for those tables that are
+    # redundant.
     name_map, unique_tables = unique_psi_tables(flat_tables, eliminate_zeros)
     debug("\nQG-utils, psi_tables, unique_tables:\n" + str(unique_tables))
     debug("\nQG-utils, psi_tables, name_map:\n" + str(name_map))
 
     return (element_map, name_map, unique_tables)
 
+
 def flatten_psi_tables(tables, entity_type):
     """Create a 'flat' dictionary of tables with unique names and a name
-    map that maps number of quadrature points and element name to a unique
-    element number.
+    map that maps number of quadrature points and element name to a
+    unique element number.
 
     Input tables on the format for scalar and non-scalar elements respectively:
       tables[num_points][element][entity][derivs][ip][dof]
@@ -65,6 +69,7 @@ def flatten_psi_tables(tables, entity_type):
     Returns:
       element_map - { num_quad_points: {ufl_element: element_number} }.
       flat_tables - { unique_table_name: values[ip,dof] }.
+
     """
 
     generate_psi_name = format["psi name"]
@@ -82,20 +87,23 @@ def flatten_psi_tables(tables, entity_type):
         element_map[num_points] = {}
 
         # There's a set of tables for each element
-        for element, avg_tables in sorted_items(element_tables, key=lambda x: str(x)):
+        for element, avg_tables in sorted_items(element_tables,
+                                                key=lambda x: str(x)):
             element_map[num_points][element] = counter
 
-            # There's a set of tables for non-averaged and averaged (averaged only occurs with num_points == 1)
+            # There's a set of tables for non-averaged and averaged
+            # (averaged only occurs with num_points == 1)
             for avg, entity_tables in sorted_items(avg_tables):
 
-                # There's a set of tables for each entity number (only 1 for the cell, >1 for facets and vertices)
+                # There's a set of tables for each entity number (only
+                # 1 for the cell, >1 for facets and vertices)
                 for entity, derivs_tables in sorted_items(entity_tables):
 
                     # There's a set of tables for each derivative combination
                     for derivs, fiat_tables in sorted_items(derivs_tables):
                         # Flatten fiat_table for tensor-valued basis
-                        # This is necessary for basic (non-tensor-product)
-                        # tensor elements
+                        # This is necessary for basic
+                        # (non-tensor-product) tensor elements
 
                         if len(numpy.shape(fiat_tables)) > 3:
                             shape = fiat_tables.shape
@@ -104,26 +112,31 @@ def flatten_psi_tables(tables, entity_type):
                                                                numpy.product(value_shape),
                                                                shape[-1]))
 
-                        # Transform fiat_tables to a list of tables on the form psi_table[dof][ip] for each scalar component
+                        # Transform fiat_tables to a list of tables on
+                        # the form psi_table[dof][ip] for each scalar
+                        # component
                         if element.value_shape():
-                            # Table is on the form fiat_tables[ip][component][dof].
-                            transposed_table = numpy.transpose(fiat_tables, (1,2,0))
+                            # Table is on the form
+                            # fiat_tables[ip][component][dof].
+                            transposed_table = numpy.transpose(fiat_tables, (1, 2, 0))
                             component_tables = list(enumerate(transposed_table))
-                            #component_tables = [numpy.transpose(fiat_tables[:,i,:] for i in range(fiat_tables.shape[1]))]
                         else:
-                            # Scalar element, table is on the form fiat_tables[ip][dof].
-                            # Using () for the component because generate_psi_name expects that
-                            component_tables = [((), numpy.transpose(fiat_tables))]
-
-                        # Iterate over the innermost tables for each scalar component
+                            # Scalar element, table is on the form
+                            # fiat_tables[ip][dof].  Using () for the
+                            # component because generate_psi_name
+                            # expects that
+                            component_tables = [((),
+                                                 numpy.transpose(fiat_tables))]
+
+                        # Iterate over the innermost tables for each
+                        # scalar component
                         for component, psi_table in component_tables:
 
                             # Generate the table name.
-                            name = generate_psi_name(counter, entity_type, entity, component, derivs, avg)
+                            name = generate_psi_name(counter, entity_type,
+                                                     entity, component,
+                                                     derivs, avg)
 
-                            # Verify shape of basis (can be omitted for speed if needed).
-                            #if not (num_points is None or (len(numpy.shape(psi_table)) == 2 and numpy.shape(psi_table)[0] == num_points)):
-                            #    error("This table has the wrong shape: " + str(psi_table))
                             # Verify uniqueness of names
                             if name in flat_tables:
                                 error("Table name is not unique, something is wrong:\n  name = %s\n  table = %s\n" % (name, flat_tables))
@@ -136,14 +149,18 @@ def flatten_psi_tables(tables, entity_type):
 
     return (element_map, flat_tables)
 
+
 def unique_psi_tables(tables, eliminate_zeros):
-    """Returns a name map and a dictionary of unique tables. The function checks
-    if values in the tables are equal, if this is the case it creates a name
-    mapping. It also create additional information (depending on which parameters
-    are set) such as if the table contains all ones, or only zeros, and a list
-    on non-zero columns.
-    unique_tables - {name:values,}.
-    name_map      - {original_name:[new_name, non-zero-columns (list), is zero (bool), is ones (bool)],}."""
+    """Returns a name map and a dictionary of unique tables. The function
+    checks if values in the tables are equal, if this is the case it
+    creates a name mapping. It also create additional information
+    (depending on which parameters are set) such as if the table
+    contains all ones, or only zeros, and a list on non-zero columns.
+    unique_tables - {name:values,}.  name_map -
+    {original_name:[new_name, non-zero-columns (list), is zero (bool),
+    is ones (bool)],}.
+
+    """
 
     # Get unique tables (from old table utility).
     name_map, inverse_name_map = unique_tables(tables)
@@ -163,9 +180,8 @@ def unique_psi_tables(tables, eliminate_zeros):
                     vals[r][c] = 0
         tables[name] = vals
 
-    # Extract the column numbers that are non-zero.
-    # If optimisation option is set
-    # counter for non-zero column arrays.
+    # Extract the column numbers that are non-zero.  If optimisation
+    # option is set counter for non-zero column arrays.
     i = 0
     non_zero_columns = {}
     if eliminate_zeros:
@@ -181,12 +197,13 @@ def unique_psi_tables(tables, eliminate_zeros):
             # Use the first row as reference.
             non_zeros = list(vals[0].nonzero()[0])
 
-            # If all columns in the first row are non zero, there's no point
-            # in continuing.
+            # If all columns in the first row are non zero, there's no
+            # point in continuing.
             if len(non_zeros) == numpy.shape(vals)[1]:
                 continue
 
-            # If we only have one row (IP) we just need the nonzero columns.
+            # If we only have one row (IP) we just need the nonzero
+            # columns.
             if numpy.shape(vals)[0] == 1:
                 if list(non_zeros):
                     non_zeros.sort()
@@ -196,19 +213,22 @@ def unique_psi_tables(tables, eliminate_zeros):
                     tables[name] = vals[:, non_zeros]
                     i += 1
 
-            # Check if the remaining rows are nonzero in the same positions, else expand.
+            # Check if the remaining rows are nonzero in the same
+            # positions, else expand.
             else:
                 for j in range(1, numpy.shape(vals)[0]):
-                    # All rows must have the same non-zero columns
-                    # for the optimization to work (at this stage).
+                    # All rows must have the same non-zero columns for
+                    # the optimization to work (at this stage).
                     new_non_zeros = list(vals[j].nonzero()[0])
                     if non_zeros != new_non_zeros:
-                        non_zeros = non_zeros + [c for c in new_non_zeros if not c in non_zeros]
-                        # If this results in all columns being non-zero, continue.
+                        non_zeros = non_zeros + [c for c in new_non_zeros if c not in non_zeros]
+                        # If this results in all columns being
+                        # non-zero, continue.
                         if len(non_zeros) == numpy.shape(vals)[1]:
                             continue
 
-                # Only add nonzeros if it results in a reduction of columns.
+                # Only add nonzeros if it results in a reduction of
+                # columns.
                 if len(non_zeros) != numpy.shape(vals)[1]:
                     if list(non_zeros):
                         non_zeros.sort()
@@ -224,8 +244,9 @@ def unique_psi_tables(tables, eliminate_zeros):
     # Get names of tables with all ones.
     names_ones = get_ones(tables)
 
-    # Add non-zero column, zero and ones info to inverse_name_map
-    # (so we only need to pass around one name_map to code generating functions).
+    # Add non-zero column, zero and ones info to inverse_name_map (so
+    # we only need to pass around one name_map to code generating
+    # functions).
     for name in inverse_name_map:
         if inverse_name_map[name] in non_zero_columns:
             nzc = non_zero_columns[inverse_name_map[name]]
@@ -237,11 +258,12 @@ def unique_psi_tables(tables, eliminate_zeros):
             ones = inverse_name_map[name] in names_ones
             inverse_name_map[name] = [inverse_name_map[name], (), zero, ones]
 
-    # If we found non zero columns we might be able to reduce number of tables further.
+    # If we found non zero columns we might be able to reduce number
+    # of tables further.
     if non_zero_columns:
-        # Try reducing the tables. This is possible if some tables have become
-        # identical as a consequence of compressing the tables.
-        # This happens with e.g., gradients of linear basis
+        # Try reducing the tables. This is possible if some tables
+        # have become identical as a consequence of compressing the
+        # tables.  This happens with e.g., gradients of linear basis
         # FE0 = {-1,0,1}, nzc0 = [0,2]
         # FE1 = {-1,1,0}, nzc1 = [0,1]  -> FE0 = {-1,1}, nzc0 = [0,2], nzc1 = [0,1].
 
@@ -255,7 +277,7 @@ def unique_psi_tables(tables, eliminate_zeros):
         for name in nm:
             maps = nm[name]
             for m in maps:
-                if not name in name_map:
+                if name not in name_map:
                     name_map[name] = []
                 if m in name_map:
                     name_map[name] += name_map[m] + [m]
@@ -263,13 +285,15 @@ def unique_psi_tables(tables, eliminate_zeros):
                 else:
                     name_map[name].append(m)
 
-        # Get new names of tables with all ones (for vector constants).
+        # Get new names of tables with all ones (for vector
+        # constants).
         names = get_ones(tables)
 
-        # Because these tables now contain ones as a consequence of compression
-        # we still need to consider the non-zero columns when looking up values
-        # in coefficient arrays. The psi entries can however we neglected and we
-        # don't need to tabulate the values (if option is set).
+        # Because these tables now contain ones as a consequence of
+        # compression we still need to consider the non-zero columns
+        # when looking up values in coefficient arrays. The psi
+        # entries can however be neglected and we don't need to
+        # tabulate the values (if option is set).
         for name in names:
             if name in name_map:
                 maps = name_map[name]
@@ -282,9 +306,11 @@ def unique_psi_tables(tables, eliminate_zeros):
     for name in inverse_name_map:
         inverse_name_map[name] = tuple(inverse_name_map[name])
 
-    # Note: inverse_name_map here is called name_map in create_psi_tables and the quadraturetransformerbase class
+    # Note: inverse_name_map here is called name_map in
+    # create_psi_tables and the quadraturetransformerbase class
     return (inverse_name_map, tables)
 
+
 def unique_tables(tables):
     """Removes tables with redundant values and returns a name_map and a
     inverse_name_map. E.g.,
@@ -293,7 +319,9 @@ def unique_tables(tables):
     results in:
     tables = {a:[0,1,2], b:[0,2,3]}
     name_map = {a:[c,d]}
-    inverse_name_map = {a:a, b:b, c:a, d:a}."""
+    inverse_name_map = {a:a, b:b, c:a, d:a}.
+
+    """
 
     format_epsilon = format["epsilon"]
 
@@ -330,11 +358,12 @@ def unique_tables(tables):
 
     # Add self.
     for name in tables:
-        if not name in inverse_name_map:
+        if name not in inverse_name_map:
             inverse_name_map[name] = name
 
     return (name_map, inverse_name_map)
 
+
 def get_ones(tables):
     "Return names of tables for which all values are 1.0."
     f_epsilon = format["epsilon"]
@@ -345,6 +374,7 @@ def get_ones(tables):
             names.append(name)
     return names
 
+
 def contains_zeros(tables):
     "Checks if any tables contains only zeros."
     f_epsilon = format["epsilon"]
@@ -355,6 +385,7 @@ def contains_zeros(tables):
             names.append(name)
     return names
 
+
 def create_permutations(expr):
 
     # This is probably not used.
@@ -395,13 +426,14 @@ def create_permutations(expr):
                     key1 = [key1]
                 if not isinstance(val1, list):
                     val1 = [val1]
-                ffc_assert(tuple(key0 + key1) not in new, "This is not supposed to happen.")
+                if tuple(key0 + key1) in new:
+                    error("This is not supposed to happen.")
                 new[tuple(key0 + key1)] = val0 + val1
 
         return new
 
-    # Create permutations by calling this function recursively.
-    # This is only used for rank > 2 tensors I think.
-    if len(expr) > 2:
-        new = permutations(expr[0:2])
-        return permutations(new + expr[2:])
+    # Create permutations by calling this function recursively.  This
+    # is only used for rank > 2 tensors I think.
+    # if len(expr) > 2:
+    #     new = permutations(expr[0:2])
+    #     return permutations(new + expr[2:])
diff --git a/ffc/quadrature/reduce_operations.py b/ffc/quadrature/reduce_operations.py
index 9346267..6e8b306 100644
--- a/ffc/quadrature/reduce_operations.py
+++ b/ffc/quadrature/reduce_operations.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "Some simple functions for manipulating expressions symbolically"
 
 # Copyright (C) 2008-2010 Kristian B. Oelgaard
@@ -24,7 +25,8 @@ from ffc.log import error
 
 from collections import deque
 
-def split_expression(expression, format, operator, allow_split = False):
+
+def split_expression(expression, format, operator, allow_split=False):
     """Split the expression at the given operator, return list.
     Do not split () or [] unless told to split (). This is to enable easy count
     of double operations which can be in (), but in [] we only have integer operations."""
@@ -42,45 +44,59 @@ def split_expression(expression, format, operator, allow_split = False):
     new_prods = [prods.popleft()]
 
     while prods:
-        # Continue while we still have list of potential products
-        # p is the first string in the product
+        # Continue while we still have list of potential products. p is
+        # the first string in the product
         p = prods.popleft()
-        # If the number of "[" and "]" doesn't add up in the last entry of the
-        # new_prods list, add p and see if it helps for next iteration
+        # If the number of "[" and "]" doesn't add up in the last
+        # entry of the new_prods list, add p and see if it helps for
+        # next iteration
         if new_prods[-1].count(la) != new_prods[-1].count(ra):
             new_prods[-1] = operator.join([new_prods[-1], p])
-        # If the number of "(" and ")" doesn't add up (and we didn't allow a split)
-        # in the last entry of the new_prods list, add p and see if it helps for next iteration
+        # If the number of "(" and ")" doesn't add up (and we didn't
+        # allow a split) in the last entry of the new_prods list, add
+        # p and see if it helps for next iteration
         elif new_prods[-1].count(lg) != new_prods[-1].count(rg) and not allow_split:
             new_prods[-1] = operator.join([new_prods[-1], p])
-        # If everything was fine, we can start a new entry in the new_prods list
-        else: new_prods.append(p)
+        # If everything was fine, we can start a new entry in the
+        # new_prods list
+        else:
+            new_prods.append(p)
 
     return new_prods
 
+
 def operation_count(expression, format):
-    """This function returns the number of double operations in an expression.
-    We do split () but not [] as we only have unsigned integer operations in []."""
+    """This function returns the number of double operations in an
+    expression.  We do split () but not [] as we only have unsigned
+    integer operations in [].
+
+    """
 
-    # Note we do not subtract 1 for the additions, because there is also an
-    # assignment involved
-    adds = len(split_expression(expression, format, format["add"](["", ""]), True)) - 1
-    mults = len(split_expression(expression, format, format["multiply"](["", ""]), True)) - 1
+    # Note we do not subtract 1 for the additions, because there is
+    # also an assignment involved
+    adds = len(split_expression(expression, format, format["add"](["", ""]),
+                                True)) - 1
+    mults = len(split_expression(expression, format,
+                                 format["multiply"](["", ""]), True)) - 1
     return mults + adds
 
+
 def get_simple_variables(expression, format):
     """This function takes as argument an expression (preferably expanded):
       expression = "x*x + y*x + x*y*z"
+
     returns a list of products and a dictionary:
+
       prods = ["x*x", "y*x", "x*y*z"]
       variables = {variable: [num_occurences, [pos_in_prods]]}
-      variables = {"x":[3, [0,1,2]], "y":[2, [1,2]], "z":[1, [2]]}"""
+      variables = {"x":[3, [0,1,2]], "y":[2, [1,2]], "z":[1, [2]]}
+
+    """
 
     # Get formats
-    add           = format["add"](["", ""])
-    mult          = format["multiply"](["", ""])
-    group         = format["grouping"]("")
-    format_float  = format["floating point"]
+    add = format["add"](["", ""])
+    mult = format["multiply"](["", ""])
+    format_float = format["floating point"]
 
     prods = split_expression(expression, format, add)
     prods = [p for p in prods if p]
@@ -88,7 +104,7 @@ def get_simple_variables(expression, format):
     variables = {}
     for i, p in enumerate(prods):
         # Only extract unique variables
-        vrs = list(set( split_expression(p, format, mult) ))
+        vrs = list(set(split_expression(p, format, mult)))
         for v in vrs:
             # Try to convert variable to floats and back (so '2' == '2.0' etc.)
             try:
@@ -102,6 +118,7 @@ def get_simple_variables(expression, format):
                 variables[v] = [1, [i]]
     return (prods, variables)
 
+
 def group_vars(expr, format):
     """Group variables in an expression, such that:
     "x + y + z + 2*y + 6*z" = "x + 3*y + 7*z"
@@ -113,8 +130,8 @@ def group_vars(expr, format):
 
     # Get formats
     format_float = format["floating point"]
-    add   = format["add"](["", ""])
-    mult  = format["multiply"](["", ""])
+    add = format["add"](["", ""])
+    mult = format["multiply"](["", ""])
 
     new_prods = {}
 
@@ -128,7 +145,8 @@ def group_vars(expr, format):
         factor = 1
         new_var = []
 
-        # Try to multiply factor with variable, else variable must be multiplied by factor later
+        # Try to multiply factor with variable, else variable must be
+        # multiplied by factor later
         # If we don't have a variable, set factor to zero and break
         for v in vrs:
             if v:
@@ -141,8 +159,9 @@ def group_vars(expr, format):
                 factor = 0
                 break
 
-        # Create new variable that must be multiplied with factor. Add this
-        # variable to dictionary, if it already exists add factor to other factors
+        # Create new variable that must be multiplied with factor. Add
+        # this variable to dictionary, if it already exists add factor
+        # to other factors
         new_var.sort()
         new_var = mult.join(new_var)
         if new_var in new_prods:
@@ -169,8 +188,11 @@ def group_vars(expr, format):
 
 
 def reduction_possible(variables):
-    """Find the variable that occurs in the most products, if more variables
-    occur the same number of times and in the same products add them to list."""
+    """Find the variable that occurs in the most products, if more
+    variables occur the same number of times and in the same products
+    add them to list.
+
+    """
 
     # Find the variable that appears in the most products
     max_val = 1
@@ -181,8 +203,8 @@ def reduction_possible(variables):
             max_val = val[0]
             max_var = key
 
-    # If we found a variable that appears in products multiple times, check if
-    # other variables appear in the exact same products
+    # If we found a variable that appears in products multiple times,
+    # check if other variables appear in the exact same products
     if max_var:
         for key, val in sorted_by_key(variables):
             # Check if we have more variables in the same products
@@ -190,23 +212,27 @@ def reduction_possible(variables):
                 max_vars.append(key)
     return max_vars
 
-def is_constant(variable, format, constants = [], from_is_constant = False):
-    """Determine if a variable is constant or not.
-    The function accepts an optional list of variables (loop indices) that will
-    be regarded as constants for the given variable. If none are supplied it is
-    assumed that all array accesses will result in a non-constant variable.
+
+def is_constant(variable, format, constants=[], from_is_constant=False):
+    """Determine if a variable is constant or not.  The function accepts
+    an optional list of variables (loop indices) that will be regarded
+    as constants for the given variable. If none are supplied it is
+    assumed that all array accesses will result in a non-constant
+    variable.
 
     v = 2.0,          is constant
     v = Jinv_00*det,  is constant
     v = w[0][1],      is constant
     v = 2*w[0][1],    is constant
     v = W0[ip],       is constant if constants = ['ip'] else not
-    v = P_t0[ip][j],  is constant if constants = ['j','ip'] else not"""
+    v = P_t0[ip][j],  is constant if constants = ['j','ip'] else not
+
+    """
 
     # Get formats
-    access    = format["array access"]("")
-    add       = format["add"](["", ""])
-    mult      = format["multiply"](["", ""])
+    access = format["array access"]("")
+    add = format["add"](["", ""])
+    mult = format["multiply"](["", ""])
 
     l = access[0]
     r = access[1]
@@ -219,14 +245,13 @@ def is_constant(variable, format, constants = [], from_is_constant = False):
     variable = expand_operations(variable, format)
 
     prods = split_expression(variable, format, add)
-    new_prods = []
 
     # Loop all products and variables and check if they're constant
     for p in prods:
         vrs = split_expression(p, format, mult)
         for v in vrs:
-            # Check if each variable is constant, if just one fails the entire
-            # variable is considered not to be constant
+            # Check if each variable is constant, if just one fails
+            # the entire variable is considered not to be constant
             const_var = False
 
             # If variable is in constants, well....
@@ -240,7 +265,8 @@ def is_constant(variable, format, constants = [], from_is_constant = False):
                 const_var = True
                 continue
 
-            # If we have an array access variable, see if the index is regarded a constant
+            # If we have an array access variable, see if the index is
+            # regarded as a constant
             elif v.count(l):
 
                 # Check if access is OK ('[' is before ']')
@@ -249,17 +275,22 @@ def is_constant(variable, format, constants = [], from_is_constant = False):
                     error("Something is wrong with the array access")
 
                 # Auxiliary variables
-                index = ""; left = 0; inside = False; indices = []
+                index = ""
+                left = 0
+                inside = False
+                indices = []
 
                 # Loop all characters in variable and find indices
                 for c in v:
 
                     # If character is ']' reduce left count
-                    if c == r: left -= 1
+                    if c == r:
+                        left -= 1
 
-                    # If the '[' count has returned to zero, we have a complete index
+                    # If the '[' count has returned to zero, we have a
+                    # complete index
                     if left == 0 and inside:
-                        const_index = False # Aux. var
+                        const_index = False  # Aux. var
                         if index in constants:
                             const_index = True
 
@@ -285,7 +316,8 @@ def is_constant(variable, format, constants = [], from_is_constant = False):
                     if inside:
                         index += c
 
-                    # If character is '[' increase the count, and we're inside an access
+                    # If character is '[' increase the count, and
+                    # we're inside an access
                     if c == l:
                         inside = True
                         left += 1
@@ -304,13 +336,15 @@ def is_constant(variable, format, constants = [], from_is_constant = False):
                 except:
                     pass
 
-            # I no tests resulted in a constant variable, there is no need to continue
+            # If no tests resulted in a constant variable, there is no
+            # need to continue
             if not const_var:
                 return False
 
     # If all variables were constant return True
     return True
 
+
 def expand_operations(expression, format):
     """This function expands an expression and returns the value. E.g.,
     ((x + y))             --> x + y
@@ -320,13 +354,14 @@ def expand_operations(expression, format):
     z*((y + 3)*x + 2) + 1 --> 1 + 2*z + x*y*z + x*z*3"""
 
     # Get formats
-    add   = format["add"](["", ""])
-    mult  = format["multiply"](["", ""])
+    add = format["add"](["", ""])
+    mult = format["multiply"](["", ""])
     group = format["grouping"]("")
     l = group[0]
     r = group[1]
 
-    # Check that we have the same number of left/right parenthesis in expression
+    # Check that we have the same number of left/right parenthesis in
+    # expression
     if not expression.count(l) == expression.count(r):
         error("Number of left/right parenthesis do not match")
 
@@ -340,8 +375,7 @@ def expand_operations(expression, format):
 
     # Loop additions and get products
     for a in adds:
-        prods = split_expression(a, format, mult)
-        prods.sort()
+        prods = sorted(split_expression(a, format, mult))
         new_prods = []
 
         # FIXME: Should we use deque here?
@@ -349,10 +383,10 @@ def expand_operations(expression, format):
         for i, p in enumerate(prods):
             # If we have a group, expand inner expression
             if p[0] == l and p[-1] == r:
-                # Add remaining products to new products and multiply with all
-                # terms from expanded variable
+                # Add remaining products to new products and multiply
+                # with all terms from expanded variable
                 expanded_var = expand_operations(p[1:-1], format)
-                expanded.append( split_expression(expanded_var, format, add) )
+                expanded.append(split_expression(expanded_var, format, add))
 
             # Else, just add variable to list of new products
             else:
@@ -367,25 +401,28 @@ def expand_operations(expression, format):
             new_adds += [mult.join(new_prods + [e]) for e in expanded[0]]
         else:
             # Else, just multiply products and add to list of products
-            new_adds.append( mult.join(new_prods) )
+            new_adds.append(mult.join(new_prods))
 
     # Group variables and return
     return group_vars(add.join(new_adds), format)
 
+
 def reduce_operations(expression, format):
-    """This function reduces the number of opertions needed to compute a given
-    expression. It looks for the variable that appears the most and groups terms
-    containing this variable inside parenthesis. The function is called recursively
-    until no further reductions are possible.
+    """This function reduces the number of opertions needed to compute a
+    given expression. It looks for the variable that appears the most
+    and groups terms containing this variable inside parenthesis. The
+    function is called recursively until no further reductions are
+    possible.
 
     "x + y + x" = 2*x + y
     "x*x + 2.0*x*y + y*y" = y*y + (2.0*y + x)*x, not (x + y)*(x + y) as it should be!!
-    z*x*y + z*x*3 + 2*z + 1" = z*(x*(y + 3) + 2) + 1"""
+    z*x*y + z*x*3 + 2*z + 1" = z*(x*(y + 3) + 2) + 1
+
+    """
 
     # Get formats
-    add   = format["add"](["", ""])
-    mult  = format["multiply"](["", ""])
-    group = format["grouping"]("")
+    add = format["add"](["", ""])
+    mult = format["multiply"](["", ""])
 
     # Be sure that we have an expanded expression
     expression = expand_operations(expression, format)
@@ -406,20 +443,20 @@ def reduce_operations(expression, format):
     if max_vars:
         for p in prods:
             # Get the list of variables in current product
-            li = split_expression(p, format, mult)
-            li.sort()
+            li = sorted(split_expression(p, format, mult))
 
-            # If the list of products is the same as what we intend of moving
-            # outside the parenthesis, leave it
-            # (because x + x*x + x*y should be x + (x + y)*x NOT (1.0 + x + y)*x)
+            # If the list of products is the same as what we intend to
+            # move outside the parenthesis, leave it (because x +
+            # x*x + x*y should be x + (x + y)*x NOT (1.0 + x + y)*x)
             if li == max_vars:
                 no_mult.append(p)
                 continue
             else:
-                # Get list of all variables from max_vars that are in li
+                # Get list of all variables from max_vars that are in
+                # li
                 indices = [i for i in max_vars if i in li]
-                # If not all were present add to list of terms that shouldn't be
-                # multiplied with variables and continue
+                # If not all were present add to list of terms that
+                # shouldn't be multiplied with variables and continue
                 if indices != max_vars:
                     no_mult.append(p)
                     continue
@@ -452,29 +489,29 @@ def reduce_operations(expression, format):
     if len_new_prods > 1:
         g = format["grouping"](new_prods)
 
-    # The new expression is the sum of terms that couldn't be reduced and terms
-    # that could be reduced multiplied by the reduction e.g.,
-    # expr = z + (x + y)*x
+    # The new expression is the sum of terms that couldn't be reduced
+    # and terms that could be reduced multiplied by the reduction
+    # e.g., expr = z + (x + y)*x
     new_expression = add.join(no_mult + [mult.join([g, mult.join(max_vars)])])
 
     return new_expression
 
+
 def get_geo_terms(expression, geo_terms, offset, format):
-    """This function returns a new expression where all geometry terms have
-    been substituted with geometry declarations, these declarations are added
-    to the geo_terms dictionary. """
+    """This function returns a new expression where all geometry terms
+    have been substituted with geometry declarations, these
+    declarations are added to the geo_terms dictionary.
+
+    """
 
     # Get formats
-    add       = format["add"](["", ""])
-    mult      = format["multiply"](["", ""])
-    access    = format["array access"]("")
-    grouping  = format["grouping"]
-    group     = grouping("")
-    format_G  = format["geometry tensor"]
+    add = format["add"](["", ""])
+    mult = format["multiply"](["", ""])
+    grouping = format["grouping"]
+    group = grouping("")
+    format_G = format["geometry tensor"]
     gl = group[0]
     gr = group[1]
-    l = access[0]
-    r = access[1]
 
     # Get the number of geometry declaration, possibly offset value
     num_geo = offset + len(geo_terms)
@@ -493,7 +530,8 @@ def get_geo_terms(expression, geo_terms, offset, format):
         new_vrs = []
         for v in vrs:
 
-            # If variable is a group, get the geometry terms and update geo number
+            # If variable is a group, get the geometry terms and
+            # update geo number
             if v[0] == gl and v[-1] == gr:
                 v = get_geo_terms(v[1:-1], geo_terms, offset, format)
                 num_geo = offset + len(geo_terms)
@@ -511,7 +549,8 @@ def get_geo_terms(expression, geo_terms, offset, format):
                 geos.append(v)
 
         # Update variable list
-        vrs = new_vrs; vrs.sort()
+        vrs = new_vrs
+        vrs.sort()
 
         # Sort geo and create geometry term
         geos.sort()
@@ -523,7 +562,7 @@ def get_geo_terms(expression, geo_terms, offset, format):
                 if len(geos) > 1:
                     for g in geos:
                         vrs.remove(g)
-                    if not geo in geo_terms:
+                    if geo not in geo_terms:
                         geo_terms[geo] = format_G + str(num_geo)
                         num_geo += 1
                     vrs.append(geo_terms[geo])
@@ -538,29 +577,26 @@ def get_geo_terms(expression, geo_terms, offset, format):
             c = grouping(add.join(consts))
         else:
             c = add.join(consts)
-        if not c in geo_terms:
+        if c not in geo_terms:
             geo_terms[c] = format_G + str(num_geo)
             num_geo += 1
         consts = [geo_terms[c]]
 
     return add.join(new_prods + consts)
 
-def get_constants(expression, const_terms, format, constants = []):
-    """This function returns a new expression where all geometry terms have
-    been substituted with geometry declarations, these declarations are added
-    to the const_terms dictionary. """
+
+def get_constants(expression, const_terms, format, constants=[]):
+    """This function returns a new expression where all geometry terms
+    have been substituted with geometry declarations, these
+    declarations are added to the const_terms dictionary.
+
+    """
 
     # Get formats
-    add       = format["add"](["", ""])
-    mult      = format["multiply"](["", ""])
-    access    = format["array access"]("")
-    grouping  = format["grouping"]
-    group     = grouping("")
-    format_G  = format["geometry tensor"] + "".join(constants) #format["geometry tensor"]
-    gl = group[0]
-    gr = group[1]
-    l = access[0]
-    r = access[1]
+    add = format["add"](["", ""])
+    mult = format["multiply"](["", ""])
+    grouping = format["grouping"]
+    format_G = format["geometry tensor"] + "".join(constants)  # format["geometry tensor"]
 
     # Get the number of geometry declaration, possibly offset value
     num_geo = len(const_terms)
@@ -587,7 +623,8 @@ def get_constants(expression, const_terms, format, constants = []):
             new_vrs.append(v)
 
         # Update variable list
-        vrs = new_vrs; vrs.sort()
+        vrs = new_vrs
+        vrs.sort()
 
         # Sort geo and create geometry term
         geos.sort()
@@ -596,7 +633,7 @@ def get_constants(expression, const_terms, format, constants = []):
             if geos != vrs:
                 for g in geos:
                     vrs.remove(g)
-                if not geo in const_terms:
+                if geo not in const_terms:
                     const_terms[geo] = format_G + str(num_geo)
                     num_geo += 1
                 vrs.append(const_terms[geo])
@@ -611,24 +648,25 @@ def get_constants(expression, const_terms, format, constants = []):
             c = grouping(add.join(consts))
         else:
             c = add.join(consts)
-        if not c in const_terms:
+        if c not in const_terms:
             const_terms[c] = format_G + str(num_geo)
             num_geo += 1
         consts = [const_terms[c]]
 
     return add.join(new_prods + consts)
 
-def get_indices(variable, format, from_get_indices = False):
+
+def get_indices(variable, format, from_get_indices=False):
     """This function returns the indices of a given variable. E.g.,
     P[0][j],            returns ['j']
     P[ip][k],           returns ['ip','k']
     P[ip][nzc0[j] + 3], returns ['ip','j']
     w[0][j + 2]         , returns [j]"""
 
-    add           = format["add"](["", ""])
-    mult          = format["multiply"](["", ""])
+    add = format["add"](["", ""])
+    mult = format["multiply"](["", ""])
     format_access = format["array access"]
-    access        = format_access("")
+    access = format_access("")
 
     l = access[0]
     r = access[1]
@@ -644,17 +682,20 @@ def get_indices(variable, format, from_get_indices = False):
                 try:
                     float(m)
                 except:
-                    if not m in indices:
+                    if m not in indices:
                         indices.append(m)
     else:
-        index = ""; left = 0; inside = False;
+        index = ""
+        left = 0
+        inside = False
         # Loop all characters in variable and find indices
         for c in variable:
             # If character is ']' reduce left count
             if c == r:
                 left -= 1
 
-            # If the '[' count has returned to zero, we have a complete index
+            # If the '[' count has returned to zero, we have a
+            # complete index
             if left == 0 and inside:
                 try:
                     eval(index)
@@ -667,32 +708,30 @@ def get_indices(variable, format, from_get_indices = False):
             if inside:
                 index += c
 
-            # If character is '[' increase the count, and we're inside an access
+            # If character is '[' increase the count, and we're inside
+            # an access
             if c == l:
                 inside = True
                 left += 1
 
     return indices
 
-def get_variables(expression, variables, format, constants = []):
-    """This function returns a new expression where all geometry terms have
-    been substituted with geometry declarations, these declarations are added
-    to the const_terms dictionary. """
+
+def get_variables(expression, variables, format, constants=[]):
+    """This function returns a new expression where all geometry terms
+    have been substituted with geometry declarations, these
+    declarations are added to the const_terms dictionary.
+
+    """
 
     # Get formats
-    add           = format["add"](["", ""])
-    mult          = format["multiply"](["", ""])
+    add = format["add"](["", ""])
+    mult = format["multiply"](["", ""])
     format_access = format["array access"]
-    access        = format_access("")
-    grouping      = format["grouping"]
-    group         = grouping("")
-    format_F      = format["function value"]
-    format_ip     = format["integration points"]
+    access = format_access("")
+    format_F = format["function value"]
 
-    gl = group[0]
-    gr = group[1]
     l = access[0]
-    r = access[1]
 
     # If we don't have any access operators in expression,
     # we don't have any variables
@@ -706,7 +745,6 @@ def get_variables(expression, variables, format, constants = []):
 
     # Split the expression into products
     prods = split_expression(expression, format, add)
-    consts = []
 
     # Loop products and check if the variables are constant
     for p in prods:
@@ -717,13 +755,14 @@ def get_variables(expression, variables, format, constants = []):
         # Generate geo code for constant coefficients e.g., w[0][5]
         new_vrs = []
         for v in vrs:
-            # If we don't have any access operators, we don't have a variable
+            # If we don't have any access operators, we don't have a
+            # variable
             if v.count(l) == 0:
                 new_vrs.append(v)
                 continue
 
-            # Check if we have a variable that depends on one of the constants
-            # First check the easy way
+            # Check if we have a variable that depends on one of the
+            # constants First check the easy way
             is_var = False
             for c in constants:
                 if format_access(c) in v:
@@ -745,18 +784,18 @@ def get_variables(expression, variables, format, constants = []):
         variables_of_interest.sort()
         variables_of_interest = mult.join(variables_of_interest)
 
-        # If we have some variables, declare new variable if needed and add
-        # to list of variables
+        # If we have some variables, declare new variable if needed
+        # and add to list of variables
         if variables_of_interest:
             # If we didn't already declare this variable do so
-            if not variables_of_interest in variables:
+            if variables_of_interest not in variables:
                 variables[variables_of_interest] = format_F + str(num_var)
                 num_var += 1
 
             # Get mapped variable
             mv = variables[variables_of_interest]
             new_vrs.append(mv)
-            if not mv in used_vars:
+            if mv not in used_vars:
                 used_vars.append(mv)
 
         # Sort variables and add to list of products
diff --git a/ffc/quadrature/sumobj.py b/ffc/quadrature/sumobj.py
index 4cb38b3..76bf51d 100644
--- a/ffc/quadrature/sumobj.py
+++ b/ffc/quadrature/sumobj.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "This file implements a class to represent a sum."
 
 # Copyright (C) 2009-2010 Kristian B. Oelgaard
@@ -17,6 +18,8 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 
+import six
+
 from ufl.utils.sorting import sorted_by_key
 
 # FFC modules.
@@ -29,12 +32,13 @@ from .symbolics import create_product
 from .symbolics import create_sum
 from .symbolics import create_fraction
 from .expr import Expr
-import six
 
-#global ind
-#ind = ""
+from .floatvalue import FloatValue
+
+
 class Sum(Expr):
     __slots__ = ("vrs", "_expanded", "_reduced")
+
     def __init__(self, variables):
         """Initialise a Sum object, it derives from Expr and contains the
         additional variables:
@@ -65,15 +69,15 @@ class Sum(Expr):
                 # Skip zero terms.
                 if abs(var.val) < EPS:
                     continue
-                elif var._prec == 0: # float
+                elif var._prec == 0:  # float
                     float_val += var.val
                     continue
-                elif var._prec == 3: # sum
+                elif var._prec == 3:  # sum
                     # Loop and handle variables of nested sum.
                     for v in var.vrs:
                         if abs(v.val) < EPS:
                             continue
-                        elif v._prec == 0: # float
+                        elif v._prec == 0:  # float
                             float_val += v.val
                             continue
                         self.vrs.append(v)
@@ -97,7 +101,6 @@ class Sum(Expr):
         # Type is equal to the smallest type in both lists.
         self.t = min([v.t for v in self.vrs])
 
-
         # Sort variables, (for representation).
         self.vrs.sort()
 
@@ -141,7 +144,7 @@ class Sum(Expr):
         # NOTE: We expect expanded sub-expressions with no nested operators.
         # Create list of new products using the '*' operator
         # TODO: Is this efficient?
-        new_prods = [v*other for v in self.vrs]
+        new_prods = [v * other for v in self.vrs]
 
         # Remove zero valued terms.
         # TODO: Can this still happen?
@@ -173,13 +176,13 @@ class Sum(Expr):
         # NOTE: Expect that other is expanded i.e., x + x -> 2*x which can be handled.
         # TODO: Fix (1 + y) / (x + x*y) -> 1 / x
         # Will this be handled when reducing operations on a fraction?
-        if other._prec == 3: # sum
+        if other._prec == 3:  # sum
             return create_fraction(self, other)
 
         # NOTE: We expect expanded sub-expressions with no nested operators.
         # Create list of new products using the '*' operator.
         # TODO: Is this efficient?
-        new_fracs = [v/other for v in self.vrs]
+        new_fracs = [v / other for v in self.vrs]
 
         # Remove zero valued terms.
         # TODO: Can this still happen?
@@ -206,35 +209,36 @@ class Sum(Expr):
 
         # TODO: This function might need some optimisation.
 
-        # Sort variables into symbols, products and fractions (add floats
-        # directly to new list, will be handled later). Add fractions if
-        # possible else add to list.
+        # Sort variables into symbols, products and fractions (add
+        # floats directly to new list, will be handled later). Add
+        # fractions if possible else add to list.
         new_variables = []
         syms = []
         prods = []
-        frac_groups = {}
-        # TODO: Rather than using '+', would it be more efficient to collect
-        # the terms first?
+        # TODO: Rather than using '+', would it be more efficient to
+        # collect the terms first?
         for var in self.vrs:
             exp = var.expand()
-            # TODO: Should we also group fractions, or put this in a separate function?
-            if exp._prec in (0, 4): # float or frac
+            # TODO: Should we also group fractions, or put this in a
+            # separate function?
+            if exp._prec in (0, 4):  # float or frac
                 new_variables.append(exp)
-            elif exp._prec == 1: # sym
+            elif exp._prec == 1:  # sym
                 syms.append(exp)
-            elif exp._prec == 2: # prod
+            elif exp._prec == 2:  # prod
                 prods.append(exp)
-            elif exp._prec == 3: # sum
+            elif exp._prec == 3:  # sum
                 for v in exp.vrs:
-                    if v._prec in (0, 4): # float or frac
+                    if v._prec in (0, 4):  # float or frac
                         new_variables.append(v)
-                    elif v._prec == 1: # sym
+                    elif v._prec == 1:  # sym
                         syms.append(v)
-                    elif v._prec == 2: # prod
+                    elif v._prec == 2:  # prod
                         prods.append(v)
 
-        # Sort all variables in groups: [2*x, -7*x], [(x + y), (2*x + 4*y)] etc.
-        # First handle product in order to add symbols if possible.
+        # Sort all variables in groups: [2*x, -7*x], [(x + y), (2*x +
+        # 4*y)] etc.  First handle product in order to add symbols if
+        # possible.
         prod_groups = {}
         for v in prods:
             if v.get_vrs() in prod_groups:
@@ -256,20 +260,19 @@ class Sum(Expr):
                 sym_groups[v] = v
 
         # Loop groups and add to new variable list.
-        for k,v in sorted_by_key(sym_groups):
+        for k, v in sorted_by_key(sym_groups):
             new_variables.append(v)
-        for k,v in sorted_by_key(prod_groups):
+        for k, v in sorted_by_key(prod_groups):
             new_variables.append(v)
-#        for k,v in frac_groups.iteritems():
-#            new_variables.append(v)
-#            append(v)
 
         if len(new_variables) > 1:
-            # Return new sum (will remove multiple instances of floats during construction).
+            # Return new sum (will remove multiple instances of floats
+            # during construction).
             self._expanded = create_sum(sorted(new_variables))
             return self._expanded
         elif new_variables:
-            # If we just have one variable left, return it since it is already expanded.
+            # If we just have one variable left, return it since it is
+            # already expanded.
             self._expanded = new_variables[0]
             return self._expanded
         error("Where did the variables go?")
@@ -283,10 +286,14 @@ class Sum(Expr):
         return var
 
     def get_var_occurrences(self):
-        """Determine the number of minimum number of times all variables occurs
-        in the expression. Returns a dictionary of variables and the number of
-        times they occur. x*x + x returns {x:1}, x + y returns {}."""
-        # NOTE: This function is only used if the numerator of a Fraction is a Sum.
+        """Determine the number of minimum number of times all variables
+        occurs in the expression. Returns a dictionary of variables
+        and the number of times they occur. x*x + x returns {x:1}, x +
+        y returns {}.
+
+        """
+        # NOTE: This function is only used if the numerator of a
+        # Fraction is a Sum.
 
         # Get occurrences in first expression.
         d0 = self.vrs[0].get_var_occurrences()
@@ -295,9 +302,10 @@ class Sum(Expr):
             d = var.get_var_occurrences()
             # Delete those variables in d0 that are not in d.
             for k, v in list(d0.items()):
-                if not k in d:
+                if k not in d:
                     del d0[k]
-            # Set the number of occurrences equal to the smallest number.
+            # Set the number of occurrences equal to the smallest
+            # number.
             for k, v in sorted_by_key(d):
                 if k in d0:
                     d0[k] = min(d0[k], v)
@@ -305,7 +313,8 @@ class Sum(Expr):
 
     def ops(self):
         "Return number of operations to compute value of sum."
-        # Subtract one operation as it only takes n-1 ops to sum n members.
+        # Subtract one operation as it only takes n-1 ops to sum n
+        # members.
         op = -1
 
         # Add the number of operations from sub-expressions.
@@ -316,9 +325,6 @@ class Sum(Expr):
 
     def reduce_ops(self):
         "Reduce the number of operations needed to evaluate the sum."
-#        global ind
-#        ind += " "
-#        print "\n%sreduce_ops, start" % ind
 
         if self._reduced:
             return self._reduced
@@ -327,130 +333,108 @@ class Sum(Expr):
 
         # TODO: The entire function looks expensive, can it be optimised?
 
-        # TODO: It is not necessary to create a new Sum if we do not have more
-        # than one Fraction.
+        # TODO: It is not necessary to create a new Sum if we do not
+        # have more than one Fraction.
+
         # First group all fractions in the sum.
         new_sum = _group_fractions(self)
-        if new_sum._prec != 3: # sum
+        if new_sum._prec != 3:  # sum
             self._reduced = new_sum.reduce_ops()
             return self._reduced
-        # Loop all variables of the sum and collect the number of common
-        # variables that can be factored out.
+        # Loop all variables of the sum and collect the number of
+        # common variables that can be factored out.
         common_vars = {}
         for var in new_sum.vrs:
-            # Get dictonary of occurrences and add the variable and the number
-            # of occurrences to common dictionary.
+            # Get dictonary of occurrences and add the variable and
+            # the number of occurrences to common dictionary.
             for k, v in sorted_by_key(var.get_var_occurrences()):
-#                print
-#                print ind + "var: ", var
-#                print ind + "k: ", k
-#                print ind + "v: ", v
                 if k in common_vars:
                     common_vars[k].append((v, var))
                 else:
                     common_vars[k] = [(v, var)]
-#        print
-#        print "common vars: "
-#        for k,v in common_vars.items():
-#            print "k: ", k
-#            print "v: ", v
-#        print
-        # Determine the maximum reduction for each variable
-        # sorted as: {(x*x*y, x*y*z, 2*y):[2, [y]]}.
+
+        # Determine the maximum reduction for each variable sorted as:
+        # {(x*x*y, x*y*z, 2*y):[2, [y]]}.
         terms_reductions = {}
         for k, v in sorted_by_key(common_vars):
-#            print
-#            print ind + "k: ", k
-#            print ind + "v: ", v
-            # If the number of expressions that can be reduced is only one
-            # there is nothing to be done.
+            # If the number of expressions that can be reduced is only
+            # one there is nothing to be done.
             if len(v) > 1:
-                # TODO: Is there a better way to compute the reduction gain
-                # and the number of occurrences we should remove?
+                # TODO: Is there a better way to compute the reduction
+                # gain and the number of occurrences we should remove?
 
-                # Get the list of number of occurences of 'k' in expressions
-                # in 'v'.
+                # Get the list of number of occurences of 'k' in
+                # expressions in 'v'.
                 occurrences = [t[0] for t in v]
 
-                # Determine the favorable number of occurences and an estimate
-                # of the maximum reduction for current variable.
+                # Determine the favorable number of occurences and an
+                # estimate of the maximum reduction for current
+                # variable.
                 fav_occur = 0
                 reduc = 0
                 for i in set(occurrences):
-                    # Get number of terms that has a number of occcurences equal
-                    # to or higher than the current number.
+                    # Get number of terms that has a number of
+                    # occcurences equal to or higher than the current
+                    # number.
                     num_terms = len([o for o in occurrences if o >= i])
 
                     # An estimate of the reduction in operations is:
                     # (number_of_terms - 1) * number_occurrences.
-                    new_reduc = (num_terms-1)*i
+                    new_reduc = (num_terms - 1) * i
                     if new_reduc > reduc:
                         reduc = new_reduc
                         fav_occur = i
 
-                # Extract the terms of v where the number of occurrences is
-                # equal to or higher than the most favorable number of occurrences.
+                # Extract the terms of v where the number of
+                # occurrences is equal to or higher than the most
+                # favorable number of occurrences.
                 terms = sorted([t[1] for t in v if t[0] >= fav_occur])
 
-                # We need to reduce the expression with the favorable number of
-                # occurrences of the current variable.
-                red_vars = [k]*fav_occur
+                # We need to reduce the expression with the favorable
+                # number of occurrences of the current variable.
+                red_vars = [k] * fav_occur
 
-                # If the list of terms is already present in the dictionary,
-                # add the reduction count and the variables.
+                # If the list of terms is already present in the
+                # dictionary, add the reduction count and the
+                # variables.
                 if tuple(terms) in terms_reductions:
                     terms_reductions[tuple(terms)][0] += reduc
                     terms_reductions[tuple(terms)][1] += red_vars
                 else:
                     terms_reductions[tuple(terms)] = [reduc, red_vars]
-#        print "\nterms_reductions: "
-#        for k,v in terms_reductions.items():
-#            print "k: ", create_sum(k)
-#            print "v: ", v
-#        print "red: self: ", self
+
         if terms_reductions:
             # Invert dictionary of terms.
-            reductions_terms = dict([((v[0], tuple(v[1])), k) for k, v in six.iteritems(terms_reductions)])
-
-            # Create a sorted list of those variables that give the highest
-            # reduction.
-            sorted_reduc_var = sorted(six.iterkeys(reductions_terms), reverse=True)
-#            sorted_reduc_var = [k for k, v in six.iteritems(reductions_terms)]
-#            print
-#            print ind + "raw"
-#            for k in sorted_reduc_var:
-#                print ind, k[0], k[1]
-#            sorted_reduc_var.sort()
-#            sorted_reduc_var.sort(lambda x, y: cmp(x[0], y[0]))
-#            sorted_reduc_var.reverse()
-#            print ind + "sorted"
-#            for k in sorted_reduc_var:
-#                print ind, k[0], k[1]
-
-            # Create a new dictionary of terms that should be reduced, if some
-            # terms overlap, only pick the one which give the highest reduction to
-            # ensure that a*x*x + b*x*x + x*x*y + 2*y -> x*x*(a + b + y) + 2*y NOT
-            # x*x*(a + b) + y*(2 + x*x).
+            reductions_terms = dict([((v[0], tuple(v[1])), k) for k,
+                                     v in six.iteritems(terms_reductions)])
+
+            # Create a sorted list of those variables that give the
+            # highest reduction.
+            sorted_reduc_var = sorted(six.iterkeys(reductions_terms),
+                                      reverse=True)
+
+            # Create a new dictionary of terms that should be reduced,
+            # if some terms overlap, only pick the one which give the
+            # highest reduction to ensure that a*x*x + b*x*x + x*x*y +
+            # 2*y -> x*x*(a + b + y) + 2*y NOT x*x*(a + b) + y*(2 +
+            # x*x).
             reduction_vars = {}
             rejections = {}
             for var in sorted_reduc_var:
                 terms = reductions_terms[var]
-                if _overlap(terms, reduction_vars) or _overlap(terms, rejections):
+                if _overlap(terms, reduction_vars) or _overlap(terms,
+                                                               rejections):
                     rejections[var[1]] = terms
                 else:
                     reduction_vars[var[1]] = terms
 
-#            print "\nreduction_vars: "
-#            for k,v in reduction_vars.items():
-#                print "k: ", k
-#                print "v: ", v
-
             # Reduce each set of terms with appropriate variables.
             all_reduced_terms = []
             reduced_expressions = []
             for reduc_var, terms in sorted(six.iteritems(reduction_vars)):
 
-                # Add current terms to list of all variables that have been reduced.
+                # Add current terms to list of all variables that have
+                # been reduced.
                 all_reduced_terms += list(terms)
 
                 # Create variable that we will use to reduce the terms.
@@ -467,23 +451,27 @@ class Sum(Expr):
                 reduced_expr = None
                 if len(reduced_terms) > 1:
                     # Try to reduce the reduced terms further.
-                    reduced_expr = create_product([reduction_var, create_sum(reduced_terms).reduce_ops()])
+                    reduced_expr = create_product([reduction_var,
+                                                   create_sum(reduced_terms).reduce_ops()])
                 else:
-                    reduced_expr = create_product(reduction_var, reduced_terms[0])
+                    reduced_expr = create_product(reduction_var,
+                                                  reduced_terms[0])
 
-                # Add reduced expression to list of reduced expressions.
+                # Add reduced expression to list of reduced
+                # expressions.
                 reduced_expressions.append(reduced_expr)
 
             # Create list of terms that should not be reduced.
             dont_reduce_terms = []
             for v in new_sum.vrs:
-                if not v in all_reduced_terms:
+                if v not in all_reduced_terms:
                     dont_reduce_terms.append(v)
 
             # Create expression from terms that was not reduced.
             not_reduced_expr = None
             if dont_reduce_terms and len(dont_reduce_terms) > 1:
-                # Try to reduce the remaining terms that were not reduced at first.
+                # Try to reduce the remaining terms that were not
+                # reduced at first.
                 not_reduced_expr = create_sum(dont_reduce_terms).reduce_ops()
             elif dont_reduce_terms:
                 not_reduced_expr = dont_reduce_terms[0]
@@ -495,42 +483,26 @@ class Sum(Expr):
                 self._reduced = create_sum(reduced_expressions)
             else:
                 self._reduced = reduced_expressions[0]
-#            # NOTE: Only switch on for debugging.
-#            if not self._reduced.expand() == self.expand():
-#                print reduced_expressions[0]
-#                print reduced_expressions[0].expand()
-#                print "self: ", self
-#                print "red:  ", repr(self._reduced)
-#                print "self.exp: ", self.expand()
-#                print "red.exp:  ", self._reduced.expand()
-#                error("Reduced expression is not equal to original expression.")
+
             return self._reduced
 
-        # Return self if we don't have any variables for which we can reduce
-        # the sum.
+        # Return self if we don't have any variables for which we can
+        # reduce the sum.
         self._reduced = self
         return self._reduced
 
     def reduce_vartype(self, var_type):
         """Reduce expression with given var_type. It returns a list of tuples
-        [(found, remain)], where 'found' is an expression that only has variables
-        of type == var_type. If no variables are found, found=(). The 'remain'
-        part contains the leftover after division by 'found' such that:
-        self = Sum([f*r for f,r in self.reduce_vartype(Type)])."""
+        [(found, remain)], where 'found' is an expression that only
+        has variables of type == var_type. If no variables are found,
+        found=(). The 'remain' part contains the leftover after
+        division by 'found' such that: self = Sum([f*r for f,r in
+        self.reduce_vartype(Type)]).
+
+        """
         found = {}
-#        print "\nself: ", self
         # Loop members and reduce them by vartype.
         for v in self.vrs:
-#            print "v: ", v
-#            print "red: ", v.reduce_vartype(var_type)
-#            red = v.reduce_vartype(var_type)
-#            f, r = v.reduce_vartype(var_type)
-#            print "len red: ", len(red)
-#            print "red: ", red
-#            if len(red) == 2:
-#                f, r = red
-#            else:
-#                raise RuntimeError
             for f, r in v.reduce_vartype(var_type):
                 if f in found:
                     found[f].append(r)
@@ -542,13 +514,13 @@ class Sum(Expr):
         for f, r in sorted_by_key(found):
             if len(r) > 1:
                 # Use expand to group expressions.
-#                r = create_sum(r).expand()
                 r = create_sum(r)
             elif r:
                 r = r.pop()
             returns.append((f, r))
         return sorted(returns)
 
+
 def _overlap(l, d):
     "Check if a member in list l is in the value (list) of dictionary d."
     for m in l:
@@ -557,16 +529,17 @@ def _overlap(l, d):
                 return True
     return False
 
+
 def _group_fractions(expr):
     "Group Fractions in a Sum: 2/x + y/x -> (2 + y)/x."
-    if expr._prec != 3: # sum
+    if expr._prec != 3:  # sum
         return expr
 
     # Loop variables and group those with common denominator.
     not_frac = []
     fracs = {}
     for v in expr.vrs:
-        if v._prec == 4: # frac
+        if v._prec == 4:  # frac
             if v.denom in fracs:
                 fracs[v.denom][1].append(v.num)
                 fracs[v.denom][0] += 1
@@ -577,11 +550,12 @@ def _group_fractions(expr):
     if not fracs:
         return expr
 
-    # Loop all fractions and create new ones using an appropriate numerator.
+    # Loop all fractions and create new ones using an appropriate
+    # numerator.
     for k, v in sorted(six.iteritems(fracs)):
         if v[0] > 1:
-            # TODO: Is it possible to avoid expanding the Sum?
-            # I think we have to because x/a + 2*x/a -> 3*x/a.
+            # TODO: Is it possible to avoid expanding the Sum?  I
+            # think we have to because x/a + 2*x/a -> 3*x/a.
             not_frac.append(create_fraction(create_sum(v[1]).expand(), k))
         else:
             not_frac.append(v[2])
@@ -590,8 +564,3 @@ def _group_fractions(expr):
     if len(not_frac) > 1:
         return create_sum(not_frac)
     return not_frac[0]
-
-from .floatvalue import FloatValue
-from .symbol     import Symbol
-from .product    import Product
-from .fraction   import Fraction
diff --git a/ffc/quadrature/symbol.py b/ffc/quadrature/symbol.py
index e1d1e53..6fd4bfb 100644
--- a/ffc/quadrature/symbol.py
+++ b/ffc/quadrature/symbol.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "This file implements a class to represent a symbol."
 
 # Copyright (C) 2009-2011 Kristian B. Oelgaard
@@ -22,7 +23,6 @@
 
 # FFC modules.
 from ffc.log import error
-from ffc.cpp import format
 
 # FFC quadrature modules.
 from .symbolics import type_to_string
@@ -32,8 +32,10 @@ from .symbolics import create_sum
 from .symbolics import create_fraction
 from .expr import Expr
 
+
 class Symbol(Expr):
     __slots__ = ("v", "base_expr", "base_op", "exp", "cond")
+
     def __init__(self, variable, symbol_type, base_expr=None, base_op=0):
         """Initialise a Symbols object, it derives from Expr and contains
         the additional variables:
@@ -67,9 +69,9 @@ class Symbol(Expr):
         # Compute the representation now, such that we can use it directly
         # in the __eq__ and __ne__ methods (improves performance a bit, but
         # only when objects are cached).
-        if self.base_expr:# and self.exp is None:
-            self._repr = "Symbol('%s', %s, %s, %d)" % (self.v, type_to_string[self.t],\
-                         self.base_expr._repr, self.base_op)
+        if self.base_expr:  # and self.exp is None:
+            self._repr = "Symbol('%s', %s, %s, %d)" % (self.v, type_to_string[self.t],
+                                                       self.base_expr._repr, self.base_op)
         else:
             self._repr = "Symbol('%s', %s)" % (self.v, type_to_string[self.t])
 
@@ -90,7 +92,7 @@ class Symbol(Expr):
         # Returns x + x -> 2*x, x + 2*x -> 3*x.
         if self._repr == other._repr:
             return create_product([create_float(2), self])
-        elif other._prec == 2: # prod
+        elif other._prec == 2:  # prod
             return other.__add__(self)
         return create_sum([self, other])
 
@@ -100,7 +102,7 @@ class Symbol(Expr):
         # symbols, if other is a product, try to let product handle the addition.
         if self._repr == other._repr:
             return create_float(0)
-        elif other._prec == 2: # prod
+        elif other._prec == 2:  # prod
             if other.get_vrs() == (self,):
                 return create_product([create_float(1.0 - other.val), self]).expand()
         return create_sum([self, create_product([create_float(-1), other])])
@@ -113,11 +115,11 @@ class Symbol(Expr):
             return create_float(0)
 
         # If other is Sum or Fraction let them handle the multiply.
-        if other._prec in (3, 4): # sum or frac
+        if other._prec in (3, 4):  # sum or frac
             return other.__mul__(self)
 
         # If other is a float or symbol, create simple product.
-        if other._prec in (0, 1): # float or sym
+        if other._prec in (0, 1):  # float or sym
             return create_product([self, other])
 
         # Else add variables from product.
@@ -136,7 +138,7 @@ class Symbol(Expr):
 
         # If other is a Sum we can only return a fraction.
         # TODO: Refine this later such that x / (x + x*y) -> 1 / (1 + y)?
-        if other._prec == 3: # sum
+        if other._prec == 3:  # sum
             return create_fraction(self, other)
 
         # Handle division by FloatValue, Symbol, Product and Fraction.
@@ -145,9 +147,9 @@ class Symbol(Expr):
         denom = []
 
         # Add floatvalue, symbol and products to the list of denominators.
-        if other._prec in (0, 1): # float or sym
+        if other._prec in (0, 1):  # float or sym
             denom = [other]
-        elif other._prec == 2: # prod
+        elif other._prec == 2:  # prod
             # Need copies, so can't just do denom = other.vrs.
             denom += other.vrs
         # fraction.
@@ -166,8 +168,8 @@ class Symbol(Expr):
         for d in denom:
             # Add the inverse of a float to the numerator, remove it from
             # the denominator and continue.
-            if d._prec == 0: # float
-                num.append(create_float(1.0/other.val))
+            if d._prec == 0:  # float
+                num.append(create_float(1.0 / other.val))
                 denom.remove(d)
                 continue
 
@@ -210,7 +212,7 @@ class Symbol(Expr):
         """Determine the number of times all variables occurs in the expression.
         Returns a dictionary of variables and the number of times they occur."""
         # There is only one symbol.
-        return {self:1}
+        return {self: 1}
 
     def ops(self):
         "Returning the number of floating point operation for symbol."
@@ -219,9 +221,3 @@ class Symbol(Expr):
         if self.base_expr:
             return self.base_op + self.base_expr.ops()
         return self.base_op
-
-from .floatvalue import FloatValue
-from .product    import Product
-from .sumobj    import Sum
-from .fraction   import Fraction
-
diff --git a/ffc/quadrature/symbolics.py b/ffc/quadrature/symbolics.py
index b3e088b..9b8cd24 100644
--- a/ffc/quadrature/symbolics.py
+++ b/ffc/quadrature/symbolics.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "This file contains functions to optimise the code generated for quadrature representation."
 
 # Copyright (C) 2009-2010 Kristian B. Oelgaard
@@ -20,7 +21,7 @@
 from ufl.utils.sorting import sorted_by_key
 
 # FFC modules
-from ffc.log import debug, error
+from ffc.log import error
 from ffc.cpp import format
 
 # TODO: Use proper errors, not just RuntimeError.
@@ -28,76 +29,77 @@ from ffc.cpp import format
 
 # Some basic variables.
 BASIS = 0
-IP  = 1
+IP = 1
 GEO = 2
 CONST = 3
-type_to_string = {BASIS:"BASIS", IP:"IP",GEO:"GEO", CONST:"CONST"}
+type_to_string = {BASIS: "BASIS", IP: "IP", GEO: "GEO", CONST: "CONST"}
 
 # Functions and dictionaries for cache implementation.
 # Increases speed and should also reduce memory consumption.
 _float_cache = {}
+
+
 def create_float(val):
     if val in _float_cache:
-#        print "found %f in cache" %val
         return _float_cache[val]
     float_val = FloatValue(val)
     _float_cache[val] = float_val
     return float_val
 
 _symbol_cache = {}
+
+
 def create_symbol(variable, symbol_type, base_expr=None, base_op=0):
     key = (variable, symbol_type, base_expr, base_op)
     if key in _symbol_cache:
-#        print "found %s in cache" %variable
         return _symbol_cache[key]
     symbol = Symbol(variable, symbol_type, base_expr, base_op)
     _symbol_cache[key] = symbol
     return symbol
 
 _product_cache = {}
+
+
 def create_product(variables):
     # NOTE: If I switch on the sorted line, it might be possible to find more
     # variables in the cache, but it adds some overhead so I don't think it
     # pays off. The member variables are also sorted in the classes
     # (Product and Sum) so the list 'variables' is probably already sorted.
-#    key = tuple(sorted(variables))
     key = tuple(variables)
     if key in _product_cache:
-#        print "found %s in cache" %str(key)
-#        print "found product in cache"
         return _product_cache[key]
     product = Product(key)
     _product_cache[key] = product
     return product
 
 _sum_cache = {}
+
+
 def create_sum(variables):
-    # NOTE: If I switch on the sorted line, it might be possible to find more
-    # variables in the cache, but it adds some overhead so I don't think it
-    # pays off. The member variables are also sorted in the classes
-    # (Product and Sum) so the list 'variables' is probably already sorted.
-#    key = tuple(sorted(variables))
+    # NOTE: If I switch on the sorted line, it might be possible to
+    # find more variables in the cache, but it adds some overhead so I
+    # don't think it pays off. The member variables are also sorted in
+    # the classes (Product and Sum) so the list 'variables' is
+    # probably already sorted.
     key = tuple(variables)
     if key in _sum_cache:
-#        print "found %s in cache" %str(key)
-#        print "found sum in cache"
         return _sum_cache[key]
     s = Sum(key)
     _sum_cache[key] = s
     return s
 
 _fraction_cache = {}
+
+
 def create_fraction(num, denom):
     key = (num, denom)
     if key in _fraction_cache:
-#        print "found %s in cache" %str(key)
-#        print "found fraction in cache"
         return _fraction_cache[key]
     fraction = Fraction(num, denom)
     _fraction_cache[key] = fraction
     return fraction
 
-# NOTE: We use commented print for debug, since debug will make the code run slower.
+
 def generate_aux_constants(constant_decl, name, var_type, print_ops=False):
     "A helper tool to generate code for constant declarations."
     format_comment = format["comment"]
@@ -105,18 +107,13 @@ def generate_aux_constants(constant_decl, name, var_type, print_ops=False):
     append = code.append
     ops = 0
     for num, expr in sorted((v, k) for k, v in sorted_by_key(constant_decl)):
-#        debug("expr orig: " + str(expr))
-#        print "\nnum: ", num
-#        print "expr orig: " + repr(expr)
-#        print "expr exp: " + str(expr.expand())
-        # Expand and reduce expression (If we don't already get reduced expressions.)
+        # Expand and reduce expression (If we don't already get
+        # reduced expressions.)
         expr = expr.expand().reduce_ops()
-#        debug("expr opt:  " + str(expr))
-#        print "expr opt:  " + str(expr)
         if print_ops:
             op = expr.ops()
             ops += op
-            append(format_comment("Number of operations: %d" %op))
+            append(format_comment("Number of operations: %d" % op))
             append(var_type(name(num), str(expr)))
             append("")
         else:
@@ -125,18 +122,18 @@ def generate_aux_constants(constant_decl, name, var_type, print_ops=False):
 
     return (ops, code)
 
-# NOTE: We use commented print for debug, since debug will make the code run slower.
+
 def optimise_code(expr, ip_consts, geo_consts, trans_set):
     """Optimise a given expression with respect to, basis functions,
-    integration points variables and geometric constants.
-    The function will update the dictionaries ip_const and geo_consts with new
-    declarations and update the trans_set (used transformations)."""
+    integration points variables and geometric constants.  The
+    function will update the dictionaries ip_const and geo_consts with
+    new declarations and update the trans_set (used
+    transformations).
 
-#    print "expr: ", repr(expr)
+    """
 
-    format_G  = format["geometry constant"]
-#    format_ip = format["integration points"]
-    format_I  = format["ip constant"]
+    format_G = format["geometry constant"]
+    format_I = format["ip constant"]
     trans_set_update = trans_set.update
 
     # Return constant symbol if expanded value is zero.
@@ -147,7 +144,8 @@ def optimise_code(expr, ip_consts, geo_consts, trans_set):
     # Reduce expression with respect to basis function variable.
     basis_expressions = exp_expr.reduce_vartype(BASIS)
 
-    # If we had a product instance we'll get a tuple back so embed in list.
+    # If we had a product instance we'll get a tuple back so embed in
+    # list.
     if not isinstance(basis_expressions, list):
         basis_expressions = [basis_expressions]
 
@@ -155,36 +153,13 @@ def optimise_code(expr, ip_consts, geo_consts, trans_set):
     # Process each instance of basis functions.
     for basis, ip_expr in basis_expressions:
         # Get the basis and the ip expression.
-#        debug("\nbasis\n" + str(basis))
-#        debug("ip_epxr\n" + str(ip_expr))
-#        print "\nbasis\n" + str(basis)
-#        print "ip_epxr\n" + str(ip_expr)
-#        print "ip_epxr\n" + repr(ip_expr)
-#        print "ip_epxr\n" + repr(ip_expr.expand())
 
         # If we have no basis (like functionals) create a const.
         if not basis:
             basis = create_float(1)
-        # NOTE: Useful for debugging to check that terms where properly reduced.
-#        if Product([basis, ip_expr]).expand() != expr.expand():
-#            prod = Product([basis, ip_expr]).expand()
-#            print "prod == sum: ", isinstance(prod, Sum)
-#            print "expr == sum: ", isinstance(expr, Sum)
-
-#            print "prod.vrs: ", prod.vrs
-#            print "expr.vrs: ", expr.vrs
-#            print "expr.vrs = prod.vrs: ", expr.vrs == prod.vrs
-
-#            print "equal: ", prod == expr
-
-#            print "\nprod:    ", prod
-#            print "\nexpr:    ", expr
-#            print "\nbasis:   ", basis
-#            print "\nip_expr: ", ip_expr
-#            error("Not equal")
-
-        # If the ip expression doesn't contain any operations skip remainder.
-#        if not ip_expr:
+
+        # If the ip expression doesn't contain any operations skip
+        # remainder
         if not ip_expr or ip_expr.val == 0.0:
             basis_vals.append(basis)
             continue
@@ -199,82 +174,38 @@ def optimise_code(expr, ip_consts, geo_consts, trans_set):
         if not isinstance(ip_expressions, list):
             ip_expressions = [ip_expressions]
 
-#        # Debug code to check that reduction didn't screw up anything
-#        for ip in ip_expressions:
-#            ip_dec, geo = ip
-#            print "geo: ", geo
-#            print "ip_dec: ", ip_dec
-#        vals = []
-#        for ip in ip_expressions:
-#            ip_dec, geo = ip
-#            if ip_dec and geo:
-#                vals.append(Product([ip_dec, geo]))
-#            elif geo:
-#                vals.append(geo)
-#            elif ip_dec:
-#                vals.append(ip_dec)
-
-#        if Sum(vals).expand() != ip_expr.expand():
-##        if Sum([Product([ip, geo]) for ip, geo in ip_expressions]).expand() != ip_expr.expand():
-#            print "\nip_expr: ", repr(ip_expr)
-##            print "\nip_expr: ", str(ip_expr)
-##            print "\nip_dec: ", repr(ip_dec)
-##            print "\ngeo: ", repr(geo)
-#            for ip in ip_expressions:
-#                ip_dec, geo = ip
-#                print "geo: ", geo
-#                print "ip_dec: ", ip_dec
-#            error("Not equal")
-
         ip_vals = []
         # Loop ip expressions.
         for ip in sorted(ip_expressions):
             ip_dec, geo = ip
-#            debug("\nip_dec: " + str(ip_dec))
-#            debug("\ngeo: " + str(geo))
-#            print "\nip_dec: " + repr(ip_dec)
-#            print "\ngeo: " + repr(geo)
-#            print "exp:  ", geo.expand()
-#            print "val:  ", geo.expand().val
-#            print "repx: ", repr(geo.expand())
-            # NOTE: Useful for debugging to check that terms where properly reduced.
-#            if Product([ip_dec, geo]).expand() != ip_expr.expand():
-#                print "\nip_expr: ", repr(ip_expr)
-#                print "\nip_dec: ", repr(ip_dec)
-#                print "\ngeo: ", repr(geo)
-#                error("Not equal")
-
-            # Update transformation set with those values that might be embedded in IP terms.
-#            if ip_dec:
+
+            # Update transformation set with those values that might
+            # be embedded in IP terms.
             if ip_dec and ip_dec.val != 0.0:
                 trans_set_update([str(x) for x in ip_dec.get_unique_vars(GEO)])
 
             # Append and continue if we did not have any geo values.
-#            if not geo:
             if not geo or geo.val == 0.0:
                 if ip_dec and ip_dec.val != 0.0:
                     ip_vals.append(ip_dec)
                 continue
 
-            # Update the transformation set with the variables in the geo term.
+            # Update the transformation set with the variables in the
+            # geo term.
             trans_set_update([str(x) for x in geo.get_unique_vars(GEO)])
 
-            # Only declare auxiliary geo terms if we can save operations.
-#            geo = geo.expand().reduce_ops()
+            # Only declare auxiliary geo terms if we can save
+            # operations.
             if geo.ops() > 0:
-#                debug("geo: " + str(geo))
-#                print "geo: " + str(geo)
                 # If the geo term is not in the dictionary append it.
-#                if not geo in geo_consts:
-                if not geo in geo_consts:
+                if geo not in geo_consts:
                     geo_consts[geo] = len(geo_consts)
 
                 # Substitute geometry expression.
                 geo = create_symbol(format_G(geo_consts[geo]), GEO)
 
-            # If we did not have any ip_declarations use geo, else create a
-            # product and append to the list of ip_values.
-#            if not ip_dec:
+            # If we did not have any ip_declarations use geo, else
+            # create a product and append to the list of ip_values.
             if not ip_dec or ip_dec.val == 0.0:
                 ip_dec = geo
             else:
@@ -287,23 +218,16 @@ def optimise_code(expr, ip_consts, geo_consts, trans_set):
         elif ip_vals:
             ip_expr = ip_vals.pop()
 
-        # If we can save operations by declaring it as a constant do so, if it
-        # is not in IP dictionary, add it and use new name.
-#        ip_expr = ip_expr.expand().reduce_ops()
-#        if ip_expr.ops() > 0:
+        # If we can save operations by declaring it as a constant do
+        # so, if it is not in IP dictionary, add it and use new name.
         if ip_expr.ops() > 0 and ip_expr.val != 0.0:
-#            if not ip_expr in ip_consts:
-            if not ip_expr in ip_consts:
+            if ip_expr not in ip_consts:
                 ip_consts[ip_expr] = len(ip_consts)
 
             # Substitute ip expression.
-#            ip_expr = create_symbol(format_G + format_ip + str(ip_consts[ip_expr]), IP)
             ip_expr = create_symbol(format_I(ip_consts[ip_expr]), IP)
 
         # Multiply by basis and append to basis vals.
-#        prod = create_product([basis, ip_expr])
-#        if prod.expand().val != 0.0:
-#            basis_vals.append(prod)
         basis_vals.append(create_product([basis, ip_expr]))
 
     # Return (possible) sum of basis values.
@@ -311,11 +235,13 @@ def optimise_code(expr, ip_consts, geo_consts, trans_set):
         return create_sum(basis_vals)
     elif basis_vals:
         return basis_vals[0]
+
     # Where did the values go?
     error("Values disappeared.")
 
+
 from .floatvalue import FloatValue
-from .symbol     import Symbol
-from .product    import Product
-from .sumobj     import Sum
-from .fraction   import Fraction
+from .symbol import Symbol
+from .product import Product
+from .sumobj import Sum
+from .fraction import Fraction
diff --git a/ffc/quadrature/tabulate_basis.py b/ffc/quadrature/tabulate_basis.py
index 679e218..7c02e79 100644
--- a/ffc/quadrature/tabulate_basis.py
+++ b/ffc/quadrature/tabulate_basis.py
@@ -1,4 +1,4 @@
-"Quadrature representation class for UFL"
+# -*- coding: utf-8 -*-
 
 # Copyright (C) 2009-2014 Kristian B. Oelgaard
 #
@@ -18,24 +18,27 @@
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 #
 # Modified by Anders Logg, 2009, 2015
-# Modified by Martin Alnaes, 2013-2014
+# Modified by Martin Sandve Alnæs, 2013-2014
 
-import numpy, itertools
+"Quadrature representation class."
+
+import numpy
+import itertools
 
 # UFL modules
-import ufl
-from ufl.cell import Cell, num_cell_entities
+from ufl.cell import num_cell_entities
 from ufl.classes import ReferenceGrad, Grad, CellAvg, FacetAvg
 from ufl.algorithms import extract_unique_elements, extract_type, extract_elements
 from ufl import custom_integral_types
 
 # FFC modules
-from ffc.log import ffc_assert, info, error, warning
-from ffc.utils import product
+from ffc.log import error
+from ffc.utils import product, insert_nested_dict
 from ffc.fiatinterface import create_element
 from ffc.fiatinterface import map_facet_points, reference_cell_vertices
-from ffc.quadrature_schemes import create_quadrature
 from ffc.representationutils import create_quadrature_points_and_weights
+from ffc.representationutils import integral_type_to_entity_dim
+
 
 def _find_element_derivatives(expr, elements, element_replace_map):
     "Find the highest derivatives of given elements in expression."
@@ -43,49 +46,33 @@ def _find_element_derivatives(expr, elements, element_replace_map):
     #       derivative of an element, but it works!
 
     # Initialise dictionary of elements and the number of derivatives.
-    # (Note that elements are already mapped through the element_replace_map)
+    # (Note that elements are already mapped through the
+    # element_replace_map)
     num_derivatives = dict((e, 0) for e in elements)
 
     # Extract the derivatives from the integral.
-    derivatives = set(extract_type(expr, Grad)) | set(extract_type(expr, ReferenceGrad))
+    derivatives = set(extract_type(expr, Grad)) | set(extract_type(expr,
+                                                                   ReferenceGrad))
 
     # Loop derivatives and extract multiple derivatives.
     for d in list(derivatives):
-        # After UFL has evaluated derivatives, only one element
-        # can be found inside any single Grad expression
+        # After UFL has evaluated derivatives, only one element can be
+        # found inside any single Grad expression
         elem, = extract_elements(d.ufl_operands[0])
         elem = element_replace_map[elem]
-        # Set the number of derivatives to the highest value encountered so far.
-        num_derivatives[elem] = max(num_derivatives[elem], len(extract_type(d, Grad)), len(extract_type(d, ReferenceGrad)))
+        # Set the number of derivatives to the highest value
+        # encountered so far.
+        num_derivatives[elem] = max(num_derivatives[elem],
+                                    len(extract_type(d, Grad)),
+                                    len(extract_type(d, ReferenceGrad)))
     return num_derivatives
 
-def domain_to_entity_dim(integral_type, tdim):
-    if integral_type == "cell":
-        entity_dim = tdim
-    elif (integral_type == "exterior_facet" or integral_type == "interior_facet"):
-        entity_dim = tdim - 1
-    elif integral_type == "vertex":
-        entity_dim = 0
-    elif integral_type in custom_integral_types:
-        entity_dim = tdim
-    else:
-        error("Unknown integral_type: %s" % integral_type)
-    return entity_dim
-
-def _map_entity_points(cellname, tdim, points, entity_dim, entity):
-    # Not sure if this is useful anywhere else than in _tabulate_psi_table!
-    if entity_dim == tdim:
-        return points
-    elif entity_dim == tdim-1:
-        return map_facet_points(points, entity)
-    elif entity_dim == 0:
-        return (reference_cell_vertices(cellname)[entity],)
 
 def _tabulate_empty_psi_table(tdim, deriv_order, element):
-    "Tabulate psi table when there are no points"
+    "Tabulate psi table when there are no points (custom integrals)."
 
     # All combinations of partial derivatives up to given order
-    gdim = tdim # hack, consider passing gdim variable here
+    gdim = tdim  # hack, consider passing gdim variable here
     derivs = [d for d in itertools.product(*(gdim*[list(range(0, deriv_order + 1))]))]
     derivs = [d for d in derivs if sum(d) <= deriv_order]
 
@@ -99,54 +86,45 @@ def _tabulate_empty_psi_table(tdim, deriv_order, element):
             value_size = product(value_shape)
             table[d] = [[[] for c in range(value_size)]]
 
-    return {None: table}
+    # Let entity be 0 even for non-cells, this is for
+    # custom integrals where we don't need tables to
+    # contain multiple entitites
+    entity = 0
+    return {entity: table}
 
-def _tabulate_psi_table(integral_type, cellname, tdim, element, deriv_order, points):
-    "Tabulate psi table for different integral types."
-    # MSA: I attempted to generalize this function, could this way of
-    # handling domain types generically extend to other parts of the code?
 
+def _map_entity_points(cellname, tdim, points, entity_dim, entity):
+    # Not sure if this is useful anywhere else than in _tabulate_psi_table!
+    if entity_dim == tdim:
+        assert entity == 0
+        return points
+    elif entity_dim == tdim-1:
+        return map_facet_points(points, entity)
+    elif entity_dim == 0:
+        return (reference_cell_vertices(cellname)[entity],)
+
+
+def _tabulate_psi_table(integral_type, cellname, tdim,
+                        element, deriv_order, points):
+    "Tabulate psi table for different integral types."
     # Handle case when list of points is empty
     if points is None:
         return _tabulate_empty_psi_table(tdim, deriv_order, element)
+
     # Otherwise, call FIAT to tabulate
-    entity_dim = domain_to_entity_dim(integral_type, tdim)
+    entity_dim = integral_type_to_entity_dim(integral_type, tdim)
     num_entities = num_cell_entities[cellname][entity_dim]
     psi_table = {}
     for entity in range(num_entities):
         entity_points = _map_entity_points(cellname, tdim, points, entity_dim, entity)
-        # TODO: Use 0 as key for cell and we may be able to generalize other places:
-        key = None if integral_type == "cell" else entity
-        psi_table[key] = element.tabulate(deriv_order, entity_points)
-
+        psi_table[entity] = element.tabulate(deriv_order, entity_points)
     return psi_table
 
-def _tabulate_entities(integral_type, cellname, tdim):
-    "Tabulate psi table for different integral types."
-    # MSA: I attempted to generalize this function, could this way of
-    # handling domain types generically extend to other parts of the code?
-    entity_dim = domain_to_entity_dim(integral_type, tdim)
-    num_entities = num_cell_entities[cellname][entity_dim]
-    entities = set()
-    for entity in range(num_entities):
-        # TODO: Use 0 as key for cell and we may be able to generalize other places:
-        key = None if integral_type == "cell" else entity
-        entities.add(key)
-    return entities
-
-def insert_nested_dict(root, keys, value):
-    for k in keys[:-1]:
-        d = root.get(k)
-        if d is None:
-            d = {}
-            root[k] = d
-        root = d
-    root[keys[-1]] = value
-
-
-# MSA: This function is in serious need for some refactoring and splitting up.
-#      Or perhaps I should just add a new implementation for uflacs,
-#      but I'd rather not have two versions to maintain.
+
+# MSA: This function is in serious need for some refactoring and
+#      splitting up.  Or perhaps I should just add a new
+#      implementation for uflacs, but I'd rather not have two versions
+#      to maintain.
 def tabulate_basis(sorted_integrals, form_data, itg_data):
     "Tabulate the basisfunctions and derivatives."
 
@@ -158,40 +136,44 @@ def tabulate_basis(sorted_integrals, form_data, itg_data):
     quadrature_rules = {}
     psi_tables = {}
     integrals = {}
-    avg_elements = { "cell": [], "facet": [] }
+    avg_elements = {"cell": [], "facet": []}
 
     # Get some useful variables in short form
     integral_type = itg_data.integral_type
     cell = itg_data.domain.ufl_cell()
     cellname = cell.cellname()
     tdim = itg_data.domain.topological_dimension()
+    entity_dim = integral_type_to_entity_dim(integral_type, tdim)
+    num_entities = num_cell_entities[cellname][entity_dim]
 
     # Create canonical ordering of quadrature rules
     rules = sorted(sorted_integrals.keys())
 
     # Loop the quadrature points and tabulate the basis values.
-    for degree, scheme in rules:
+    for rule in rules:
+        scheme, degree = rule
 
         # --------- Creating quadrature rule
         # Make quadrature rule and get points and weights.
-        (points, weights) = create_quadrature_points_and_weights(integral_type, cell, degree, scheme)
+        (points, weights) = create_quadrature_points_and_weights(integral_type,
+                                                                 cell, degree,
+                                                                 scheme)
 
         # The TOTAL number of weights/points
-        len_weights = None if weights is None else len(weights)
+        num_points = None if weights is None else len(weights)
 
         # Add points and rules to dictionary
-        ffc_assert(len_weights not in quadrature_rules,
-                   "This number of points is already present in the weight table: " + repr(quadrature_rules))
-        quadrature_rules[len_weights] = (weights, points)
-
+        if num_points in quadrature_rules:
+            error("This number of points is already present in the weight table: " + repr(quadrature_rules))
+        quadrature_rules[num_points] = (weights, points)
 
         # --------- Store integral
-        # Add the integral with the number of points as a key to the return integrals.
-        integral = sorted_integrals[(degree, scheme)]
-        ffc_assert(len_weights not in integrals, \
-                   "This number of points is already present in the integrals: " + repr(integrals))
-        integrals[len_weights] = integral
-
+        # Add the integral with the number of points as a key to the
+        # return integrals.
+        integral = sorted_integrals[rule]
+        if num_points in integrals:
+            error("This number of points is already present in the integrals: " + repr(integrals))
+        integrals[num_points] = integral
 
         # --------- Analyse UFL elements in integral
 
@@ -200,17 +182,18 @@ def tabulate_basis(sorted_integrals, form_data, itg_data):
                         for e in extract_unique_elements(integral)]
 
         # Insert elements for x and J
-        domain = integral.ufl_domain() # FIXME: For all domains to be sure? Better to rewrite though.
+        domain = integral.ufl_domain()  # FIXME: For all domains to be sure? Better to rewrite though.
         x_element = domain.ufl_coordinate_element()
         if x_element not in ufl_elements:
             if integral_type in custom_integral_types:
                 # FIXME: Not yet implemented, in progress
-                #warning("Vector elements not yet supported in custom integrals so element for coordinate function x will not be generated.")
+                # warning("Vector elements not yet supported in custom integrals so element for coordinate function x will not be generated.")
                 pass
             else:
                 ufl_elements.append(x_element)
 
-        # Find all CellAvg and FacetAvg in integrals and extract elements
+        # Find all CellAvg and FacetAvg in integrals and extract
+        # elements
         for avg, AvgType in (("cell", CellAvg), ("facet", FacetAvg)):
             expressions = extract_type(integral, AvgType)
             avg_elements[avg] = [form_data.element_replace_map[e]
@@ -218,33 +201,38 @@ def tabulate_basis(sorted_integrals, form_data, itg_data):
                                  for e in extract_unique_elements(expr)]
 
         # Find the highest number of derivatives needed for each element
-        num_derivatives = _find_element_derivatives(integral.integrand(), ufl_elements,
+        num_derivatives = _find_element_derivatives(integral.integrand(),
+                                                    ufl_elements,
                                                     form_data.element_replace_map)
         # Need at least 1 for the Jacobian
-        num_derivatives[x_element] = max(num_derivatives.get(x_element,0), 1)
-
+        num_derivatives[x_element] = max(num_derivatives.get(x_element, 0), 1)
 
-        # --------- Evaluate FIAT elements in quadrature points and store in tables
+        # --------- Evaluate FIAT elements in quadrature points and
+        # --------- store in tables
 
-        # Add the number of points to the psi tables dictionary.
-        ffc_assert(len_weights not in psi_tables, \
-                   "This number of points is already present in the psi table: " + repr(psi_tables))
-        psi_tables[len_weights] = {}
+        # Add the number of points to the psi tables dictionary
+        if num_points in psi_tables:
+            error("This number of points is already present in the psi table: " + repr(psi_tables))
+        psi_tables[num_points] = {}
 
         # Loop FIAT elements and tabulate basis as usual.
         for ufl_element in ufl_elements:
             fiat_element = create_element(ufl_element)
 
-            # Tabulate table of basis functions and derivatives in points
-            psi_table = _tabulate_psi_table(integral_type, cellname, tdim, fiat_element,
-                                        num_derivatives[ufl_element], points)
-
-            # Insert table into dictionary based on UFL elements. (None=not averaged)
-            psi_tables[len_weights][ufl_element] = { None: psi_table }
+            # Tabulate table of basis functions and derivatives in
+            # points
+            psi_table = _tabulate_psi_table(integral_type, cellname, tdim,
+                                            fiat_element,
+                                            num_derivatives[ufl_element],
+                                            points)
 
+            # Insert table into dictionary based on UFL elements
+            # (None=not averaged)
+            avg = None
+            psi_tables[num_points][ufl_element] = { avg: psi_table }
 
     # Loop over elements found in CellAvg and tabulate basis averages
-    len_weights = 1
+    num_points = 1
     for avg in ("cell", "facet"):
         # Doesn't matter if it's exterior or interior
         if avg == "cell":
@@ -259,38 +247,44 @@ def tabulate_basis(sorted_integrals, form_data, itg_data):
             (points, weights) = create_quadrature_points_and_weights(avg_integral_type, cell, element.degree(), "default")
             wsum = sum(weights)
 
-            # Tabulate table of basis functions and derivatives in points
-            entity_psi_tables = _tabulate_psi_table(avg_integral_type, cellname, tdim,
-                                                fiat_element, 0, points)
+            # Tabulate table of basis functions and derivatives in
+            # points
+            entity_psi_tables = _tabulate_psi_table(avg_integral_type,
+                                                    cellname, tdim,
+                                                    fiat_element, 0, points)
             rank = len(element.value_shape())
 
-            # Hack, duplicating table with per-cell values for each facet in the case of cell_avg(f) in a facet integral
-            actual_entities = _tabulate_entities(integral_type, cellname, tdim)
-            if len(actual_entities) > len(entity_psi_tables):
+            # Hack, duplicating table with per-cell values for each
+            # facet in the case of cell_avg(f) in a facet integral
+            if num_entities > len(entity_psi_tables):
                 assert len(entity_psi_tables) == 1
                 assert avg_integral_type == "cell"
                 assert "facet" in integral_type
                 v, = sorted(entity_psi_tables.values())
-                entity_psi_tables = dict((e, v) for e in actual_entities)
+                entity_psi_tables = dict((e, v) for e in range(num_entities))
 
             for entity, deriv_table in sorted(entity_psi_tables.items()):
-                deriv, = sorted(deriv_table.keys()) # Not expecting derivatives of averages
+                deriv, = sorted(deriv_table.keys())  # Not expecting derivatives of averages
                 psi_table = deriv_table[deriv]
 
                 if rank:
                     # Compute numeric integral
                     num_dofs, num_components, num_points = psi_table.shape
-                    ffc_assert(num_points == len(weights), "Weights and table shape does not match.")
-                    avg_psi_table = numpy.asarray([[[numpy.dot(psi_table[j,k,:], weights) / wsum]
-                                                   for k in range(num_components)]
+                    if num_points != len(weights):
+                        error("Weights and table shape does not match.")
+                    avg_psi_table = numpy.asarray([[[numpy.dot(psi_table[j, k, :], weights) / wsum]
+                                                    for k in range(num_components)]
                                                    for j in range(num_dofs)])
                 else:
                     # Compute numeric integral
                     num_dofs, num_points = psi_table.shape
-                    ffc_assert(num_points == len(weights), "Weights and table shape does not match.")
-                    avg_psi_table = numpy.asarray([[numpy.dot(psi_table[j,:], weights) / wsum] for j in range(num_dofs)])
-
-                # Insert table into dictionary based on UFL elements.
-                insert_nested_dict(psi_tables, (len_weights, element, avg, entity, deriv), avg_psi_table)
+                    if num_points != len(weights):
+                        error("Weights and table shape does not match.")
+                    avg_psi_table = numpy.asarray([[numpy.dot(psi_table[j, :],
+                                                              weights) / wsum] for j in range(num_dofs)])
+
+                # Insert table into dictionary based on UFL elements
+                insert_nested_dict(psi_tables, (num_points, element, avg,
+                                                entity, deriv), avg_psi_table)
 
     return (integrals, psi_tables, quadrature_rules)
diff --git a/ffc/quadrature_schemes.py b/ffc/quadrature_schemes.py
deleted file mode 100644
index 1d227a1..0000000
--- a/ffc/quadrature_schemes.py
+++ /dev/null
@@ -1,313 +0,0 @@
-"""Quadrature schemes on cells
-
-This module generates quadrature schemes on reference cells that integrate
-exactly a polynomial of a given degree using a specified scheme. The
-UFC definition of a reference cell is used.
-
-Scheme options are:
-
-  scheme="default"
-
-  scheme="canonical" (collapsed Gauss scheme supplied by FIAT)
-
-Background on the schemes:
-
-  Keast rules for tetrahedra:
-    Keast, P. Moderate-degree tetrahedral quadrature formulas, Computer
-    Methods in Applied Mechanics and Engineering 55(3):339-348, 1986.
-    http://dx.doi.org/10.1016/0045-7825(86)90059-9
-"""
-
-# Copyright (C) 2011 Garth N. Wells
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2011-04-19
-# Last changed: 2011-04-19
-
-# NumPy
-from numpy import array, arange, float64
-
-# UFL
-import ufl
-
-# FFC modules
-from ffc.log import debug, error
-from ffc.fiatinterface import reference_cell
-from ffc.fiatinterface import create_quadrature as fiat_create_quadrature
-
-# Dictionary mapping from cellname to dimension
-from ufl.cell import cellname2dim
-
-def create_quadrature(shape, degree, scheme="default"):
-    """
-    Generate quadrature rule (points, weights) for given shape
-    that will integrate an polynomial of order 'degree' exactly.
-    """
-
-    # FIXME: KBO: Can this be handled more elegantly?
-    # Handle point case
-    if isinstance(shape, int) and shape == 0 or cellname2dim[shape] == 0:
-        return ([()], array([1.0,]))
-
-    if scheme == "default":
-        if shape == "tetrahedron":
-            return _tetrahedron_scheme(degree)
-        elif shape == "triangle":
-            return _triangle_scheme(degree)
-        else:
-            return _fiat_scheme(shape, degree)
-    elif scheme == "vertex":
-        # The vertex scheme, i.e., averaging the function value in the vertices
-        # and multiplying with the simplex volume, is only of order 1 and
-        # inferior to other generic schemes in terms of error reduction.
-        # Equation systems generated with the vertex scheme have some
-        # properties that other schemes lack, e.g., the mass matrix is
-        # a simple diagonal matrix. This may be prescribed in certain cases.
-        #
-        if degree > 1:
-            from warnings import warn
-            warn(("Explicitly selected vertex quadrature (degree 1), "
-                 +"but requested degree is %d.") % degree)
-        if shape == "tetrahedron":
-            return (array([ [0.0, 0.0, 0.0],
-                            [1.0, 0.0, 0.0],
-                            [0.0, 1.0, 0.0],
-                            [0.0, 0.0, 1.0] ]),
-                    array([1.0/24.0, 1.0/24.0, 1.0/24.0, 1.0/24.0])
-                    )
-        elif shape == "triangle":
-            return (array([ [0.0, 0.0],
-                            [1.0, 0.0],
-                            [0.0, 1.0] ]),
-                    array([1.0/6.0, 1.0/6.0, 1.0/6.0])
-                    )
-        else:
-            # Trapezoidal rule.
-            return (array([ [0.0, 0.0],
-                            [0.0, 1.0] ]),
-                    array([1.0/2.0, 1.0/2.0])
-                    )
-    elif scheme == "canonical":
-        return _fiat_scheme(shape, degree)
-    else:
-        error("Unknown quadrature scheme: %s." % scheme)
-
-def _fiat_scheme(shape, degree):
-    """Get quadrature scheme from FIAT interface"""
-
-    # Number of points per axis for exact integration
-    num_points_per_axis = (degree + 1 + 1) // 2
-
-    # Create and return FIAT quadrature rulet
-    return fiat_create_quadrature(shape, num_points_per_axis)
-
-
-def _triangle_scheme(degree):
-    """Return a quadrature scheme on a triangle of specified order. Falls
-    back on canonical rule for higher orders."""
-
-    if degree == 0 or degree == 1:
-        # Scheme from Zienkiewicz and Taylor, 1 point, degree of precision 1
-        x = array([ [1.0/3.0, 1.0/3.0] ])
-        w = array([0.5])
-    elif degree == 2:
-        # Scheme from Strang and Fix, 3 points, degree of precision 2
-        x = array([ [1.0/6.0, 1.0/6.0],
-                          [1.0/6.0, 2.0/3.0],
-                          [2.0/3.0, 1.0/6.0] ])
-        w = arange(3, dtype=float64)
-        w[:] = 1.0/6.0
-    elif degree == 3:
-        # Scheme from Strang and Fix, 6 points, degree of precision 3
-        x = array([ [0.659027622374092, 0.231933368553031],
-                    [0.659027622374092, 0.109039009072877],
-                    [0.231933368553031, 0.659027622374092],
-                    [0.231933368553031, 0.109039009072877],
-                    [0.109039009072877, 0.659027622374092],
-                    [0.109039009072877, 0.231933368553031] ])
-        w = arange(6, dtype=float64)
-        w[:] = 1.0/12.0
-    elif degree == 4:
-        # Scheme from Strang and Fix, 6 points, degree of precision 4
-        x = array([ [0.816847572980459, 0.091576213509771],
-                    [0.091576213509771, 0.816847572980459],
-                    [0.091576213509771, 0.091576213509771],
-                    [0.108103018168070, 0.445948490915965],
-                    [0.445948490915965, 0.108103018168070],
-                    [0.445948490915965, 0.445948490915965] ])
-        w = arange(6, dtype=float64)
-        w[0:3] = 0.109951743655322
-        w[3:6] = 0.223381589678011
-        w = w/2.0
-    elif degree == 5:
-        # Scheme from Strang and Fix, 7 points, degree of precision 5
-        x = array([ [0.33333333333333333, 0.33333333333333333],
-                    [0.79742698535308720, 0.10128650732345633],
-                    [0.10128650732345633, 0.79742698535308720],
-                    [0.10128650732345633, 0.10128650732345633],
-                    [0.05971587178976981, 0.47014206410511505],
-                    [0.47014206410511505, 0.05971587178976981],
-                    [0.47014206410511505, 0.47014206410511505] ])
-        w = arange(7, dtype=float64)
-        w[0]   = 0.22500000000000000
-        w[1:4] = 0.12593918054482717
-        w[4:7] = 0.13239415278850616
-        w = w/2.0
-    elif degree == 6:
-        # Scheme from Strang and Fix, 12 points, degree of precision 6
-        x = array([ [0.873821971016996, 0.063089014491502],
-                    [0.063089014491502, 0.873821971016996],
-                    [0.063089014491502, 0.063089014491502],
-                    [0.501426509658179, 0.249286745170910],
-                    [0.249286745170910, 0.501426509658179],
-                    [0.249286745170910, 0.249286745170910],
-                    [0.636502499121399, 0.310352451033785],
-                    [0.636502499121399, 0.053145049844816],
-                    [0.310352451033785, 0.636502499121399],
-                    [0.310352451033785, 0.053145049844816],
-                    [0.053145049844816, 0.636502499121399],
-                    [0.053145049844816, 0.310352451033785] ])
-        w = arange(12, dtype=float64)
-        w[0:3]  = 0.050844906370207
-        w[3:6]  = 0.116786275726379
-        w[6:12] = 0.082851075618374
-        w = w/2.0
-    else:
-        # Get canonical scheme
-        x, w = _fiat_scheme("triangle", degree)
-
-    # Return scheme
-    return x, w
-
-
-def _tetrahedron_scheme(degree):
-    """Return a quadrature scheme on a tetrahedron of specified
-    degree. Falls back on canonical rule for higher orders"""
-
-    if degree == 0 or degree == 1:
-        # Scheme from Zienkiewicz and Taylor, 1 point, degree of precision 1
-        x = array([ [1.0/4.0, 1.0/4.0, 1.0/4.0] ])
-        w = array([1.0/6.0])
-    elif degree == 2:
-        # Scheme from Zienkiewicz and Taylor, 4 points, degree of precision 2
-        a, b = 0.585410196624969, 0.138196601125011
-        x = array([ [a, b, b],
-                    [b, a, b],
-                    [b, b, a],
-                    [b, b, b] ])
-        w = arange(4, dtype=float64)
-        w[:] = 1.0/24.0
-    elif degree == 3:
-        # Scheme from Zienkiewicz and Taylor, 5 points, degree of precision 3
-        # Note: this scheme has a negative weight
-        x = array([ [0.2500000000000000, 0.2500000000000000, 0.2500000000000000],
-                    [0.5000000000000000, 0.1666666666666666, 0.1666666666666666],
-                    [0.1666666666666666, 0.5000000000000000, 0.1666666666666666],
-                    [0.1666666666666666, 0.1666666666666666, 0.5000000000000000],
-                    [0.1666666666666666, 0.1666666666666666, 0.1666666666666666] ])
-        w = arange(5, dtype=float64)
-        w[0]   = -0.8
-        w[1:5] =  0.45
-        w = w/6.0
-    elif degree == 4:
-        # Keast rule, 14 points, degree of precision 4
-        # Values taken from http://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tet/quadrature_rules_tet.html
-        # (KEAST5)
-        x = array([ [0.0000000000000000, 0.5000000000000000, 0.5000000000000000],
-                    [0.5000000000000000, 0.0000000000000000, 0.5000000000000000],
-                    [0.5000000000000000, 0.5000000000000000, 0.0000000000000000],
-                    [0.5000000000000000, 0.0000000000000000, 0.0000000000000000],
-                    [0.0000000000000000, 0.5000000000000000, 0.0000000000000000],
-                    [0.0000000000000000, 0.0000000000000000, 0.5000000000000000],
-                    [0.6984197043243866, 0.1005267652252045, 0.1005267652252045],
-                    [0.1005267652252045, 0.1005267652252045, 0.1005267652252045],
-                    [0.1005267652252045, 0.1005267652252045, 0.6984197043243866],
-                    [0.1005267652252045, 0.6984197043243866, 0.1005267652252045],
-                    [0.0568813795204234, 0.3143728734931922, 0.3143728734931922],
-                    [0.3143728734931922, 0.3143728734931922, 0.3143728734931922],
-                    [0.3143728734931922, 0.3143728734931922, 0.0568813795204234],
-                    [0.3143728734931922, 0.0568813795204234, 0.3143728734931922] ])
-        w = arange(14, dtype=float64)
-        w[0:6]   = 0.0190476190476190
-        w[6:10]  = 0.0885898247429807
-        w[10:14] = 0.1328387466855907
-        w = w/6.0
-    elif degree == 5:
-        # Keast rule, 15 points, degree of precision 5
-        # Values taken from http://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tet/quadrature_rules_tet.html
-        # (KEAST6)
-        x = array([ [0.2500000000000000, 0.2500000000000000, 0.2500000000000000],
-                    [0.0000000000000000, 0.3333333333333333, 0.3333333333333333],
-                    [0.3333333333333333, 0.3333333333333333, 0.3333333333333333],
-                    [0.3333333333333333, 0.3333333333333333, 0.0000000000000000],
-                    [0.3333333333333333, 0.0000000000000000, 0.3333333333333333],
-                    [0.7272727272727273, 0.0909090909090909, 0.0909090909090909],
-                    [0.0909090909090909, 0.0909090909090909, 0.0909090909090909],
-                    [0.0909090909090909, 0.0909090909090909, 0.7272727272727273],
-                    [0.0909090909090909, 0.7272727272727273, 0.0909090909090909],
-                    [0.4334498464263357, 0.0665501535736643, 0.0665501535736643],
-                    [0.0665501535736643, 0.4334498464263357, 0.0665501535736643],
-                    [0.0665501535736643, 0.0665501535736643, 0.4334498464263357],
-                    [0.0665501535736643, 0.4334498464263357, 0.4334498464263357],
-                    [0.4334498464263357, 0.0665501535736643, 0.4334498464263357],
-                    [0.4334498464263357, 0.4334498464263357, 0.0665501535736643] ])
-        w = arange(15, dtype=float64)
-        w[0]    = 0.1817020685825351
-        w[1:5]  = 0.0361607142857143
-        w[5:9]  = 0.0698714945161738
-        w[9:15] = 0.0656948493683187
-        w = w/6.0
-    elif degree == 6:
-        # Keast rule, 24 points, degree of precision 6
-        # Values taken from http://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tet/quadrature_rules_tet.html
-        # (KEAST7)
-        x = array([ [0.3561913862225449, 0.2146028712591517, 0.2146028712591517],
-                    [0.2146028712591517, 0.2146028712591517, 0.2146028712591517],
-                    [0.2146028712591517, 0.2146028712591517, 0.3561913862225449],
-                    [0.2146028712591517, 0.3561913862225449, 0.2146028712591517],
-                    [0.8779781243961660, 0.0406739585346113, 0.0406739585346113],
-                    [0.0406739585346113, 0.0406739585346113, 0.0406739585346113],
-                    [0.0406739585346113, 0.0406739585346113, 0.8779781243961660],
-                    [0.0406739585346113, 0.8779781243961660, 0.0406739585346113],
-                    [0.0329863295731731, 0.3223378901422757, 0.3223378901422757],
-                    [0.3223378901422757, 0.3223378901422757, 0.3223378901422757],
-                    [0.3223378901422757, 0.3223378901422757, 0.0329863295731731],
-                    [0.3223378901422757, 0.0329863295731731, 0.3223378901422757],
-                    [0.2696723314583159, 0.0636610018750175, 0.0636610018750175],
-                    [0.0636610018750175, 0.2696723314583159, 0.0636610018750175],
-                    [0.0636610018750175, 0.0636610018750175, 0.2696723314583159],
-                    [0.6030056647916491, 0.0636610018750175, 0.0636610018750175],
-                    [0.0636610018750175, 0.6030056647916491, 0.0636610018750175],
-                    [0.0636610018750175, 0.0636610018750175, 0.6030056647916491],
-                    [0.0636610018750175, 0.2696723314583159, 0.6030056647916491],
-                    [0.2696723314583159, 0.6030056647916491, 0.0636610018750175],
-                    [0.6030056647916491, 0.0636610018750175, 0.2696723314583159],
-                    [0.0636610018750175, 0.6030056647916491, 0.2696723314583159],
-                    [0.2696723314583159, 0.0636610018750175, 0.6030056647916491],
-                    [0.6030056647916491, 0.2696723314583159, 0.0636610018750175] ])
-        w = arange(24, dtype=float64)
-        w[0:4]   = 0.0399227502581679
-        w[4:8]   = 0.0100772110553207
-        w[8:12]  = 0.0553571815436544
-        w[12:24] = 0.0482142857142857
-        w = w/6.0
-    else:
-        # Get canonical scheme
-        x, w =  _fiat_scheme("tetrahedron", degree)
-
-    # Return scheme
-    return x, w
diff --git a/ffc/quadratureelement.py b/ffc/quadratureelement.py
index 44e6063..092ce2f 100644
--- a/ffc/quadratureelement.py
+++ b/ffc/quadratureelement.py
@@ -1,4 +1,6 @@
-# Copyright (C) 2007-2010 Kristian B. Oelgaard
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2007-2016 Kristian B. Oelgaard
 #
 # This file is part of FFC.
 #
@@ -16,9 +18,6 @@
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 #
 # Modified by Garth N. Wells 2006-2009
-#
-# First added:  2007-12-10
-# Last changed: 2010-01-30
 
 # Python modules.
 import numpy
@@ -33,7 +32,9 @@ from .log import error, info_red
 default_quadrature_degree = 1
 default_quadrature_scheme = "canonical"
 
+
 class QuadratureElement:
+
     """Write description of QuadratureElement"""
 
     def __init__(self, ufl_element):
@@ -85,7 +86,7 @@ class QuadratureElement:
 
     def mapping(self):
         "The mapping is not really affine, but it is easier to handle the code generation this way."
-        return ["affine"]*self.space_dimension()
+        return ["affine"] * self.space_dimension()
 
     def dual_basis(self):
         "Return list of PointEvaluations"
@@ -120,19 +121,20 @@ class QuadratureElement:
         # Return the identity matrix of size len(self._points) in a
         # suitable format for tensor and quadrature representations.
         values = numpy.eye(len(self._points))
-        return {(0,)*self._geometric_dimension: values}
+        return {(0,) * self._geometric_dimension: values}
+
 
 def _create_entity_dofs(fiat_cell, num_dofs):
     "This function is ripped from FIAT/discontinuous_lagrange.py"
     entity_dofs = {}
     top = fiat_cell.get_topology()
-    for dim in sorted( top ):
+    for dim in sorted(top):
         entity_dofs[dim] = {}
-        for entity in sorted( top[dim] ):
-            entity_dofs[dim][entity]=[]
+        for entity in sorted(top[dim]):
+            entity_dofs[dim][entity] = []
     entity_dofs[dim][0] = list(range(num_dofs))
     return entity_dofs
 
 # FFC modules to avoid circular import
 from ffc.fiatinterface import reference_cell
-from ffc.quadrature_schemes import create_quadrature
+from ffc.fiatinterface import create_quadrature
diff --git a/ffc/representation.py b/ffc/representation.py
index 34c0c35..037acf5 100644
--- a/ffc/representation.py
+++ b/ffc/representation.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 """
 Compiler stage 2: Code representation
 -------------------------------------
@@ -12,7 +13,8 @@ function "foo", one should only need to use the data stored
 in the intermediate representation under the key "foo".
 """
 
-# Copyright (C) 2009-2015 Anders Logg
+# Copyright (C) 2009-2016 Anders Logg, Martin Sandve Alnæs, Marie E. Rognes,
+# Kristian B. Oelgaard, and others
 #
 # This file is part of FFC.
 #
@@ -28,11 +30,6 @@ in the intermediate representation under the key "foo".
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# Modified by Marie E. Rognes 2010
-# Modified by Kristian B. Oelgaard 2010
-# Modified by Martin Alnaes, 2013-2015
-# Modified by Lizao Li 2015
 
 # Python modules
 from itertools import chain
@@ -43,15 +40,16 @@ import ufl
 
 # FFC modules
 from ffc.utils import compute_permutations, product
-from ffc.log import info, error, begin, end, debug_ir, ffc_assert, warning
+from ffc.log import info, error, begin, end, debug_ir, warning
 from ffc.fiatinterface import create_element, reference_cell
 from ffc.mixedelement import MixedElement
 from ffc.enrichedelement import EnrichedElement, SpaceOfReals
-from ffc.fiatinterface import DiscontinuousLagrangeTrace
+from ffc.fiatinterface import HDivTrace
 from ffc.quadratureelement import QuadratureElement
 from ffc.cpp import set_float_formatting
 from ffc.cpp import make_classname, make_integral_classname
 
+
 # List of supported integral types
 ufc_integral_types = ("cell",
                       "exterior_facet",
@@ -62,23 +60,73 @@ ufc_integral_types = ("cell",
                       "interface",
                       "overlap")
 
+
 def pick_representation(representation):
     "Return one of the specialized code generation modules from a representation string."
     if representation == "quadrature":
-        from ffc import quadrature
-        r = quadrature
+        from ffc import quadrature as r
     elif representation == "tensor":
-        from ffc import tensor
-        r = tensor
+        from ffc import tensor as r
     elif representation == "uflacs":
-        from ffc import uflacsrepr
-        r = uflacsrepr
+        from ffc import uflacs as r
     else:
         error("Unknown representation: %s" % str(representation))
     return r
 
 
-def compute_ir(analysis, prefix, parameters):
+def make_finite_element_jit_classname(ufl_element, parameters):
+    from ffc.jitcompiler import compute_jit_prefix  # FIXME circular file dependency
+    kind, prefix = compute_jit_prefix(ufl_element, parameters)
+    return make_classname(prefix, "finite_element", "main")
+
+
+def make_dofmap_jit_classname(ufl_element, parameters):
+    from ffc.jitcompiler import compute_jit_prefix  # FIXME circular file dependency
+    kind, prefix = compute_jit_prefix(ufl_element, parameters)
+    return make_classname(prefix, "dofmap", "main")
+
+
+def make_coordinate_mapping_jit_classname(ufl_mesh, parameters):
+    from ffc.jitcompiler import compute_jit_prefix  # FIXME circular file dependency
+    kind, prefix = compute_jit_prefix(ufl_mesh, parameters, kind="coordinate_mapping")
+    return make_classname(prefix, "coordinate_mapping", "main")
+
+
+def make_all_element_classnames(prefix, elements, coordinate_elements,
+                                element_numbers, parameters, jit):
+    if jit:
+        # Make unique classnames to match separately jit-compiled
+        # module
+        classnames = {
+            "finite_element": {
+                e: make_finite_element_jit_classname(e, parameters)
+                for e in elements },
+            "dofmap": {
+                e: make_dofmap_jit_classname(e, parameters)
+                for e in elements },
+            "coordinate_mapping": {
+                e: make_coordinate_mapping_jit_classname(e, parameters)
+                for e in coordinate_elements },
+            }
+    else:
+        # Make unique classnames only within this module (using a
+        # shared prefix and element numbers that are only unique
+        # within this module)
+        classnames = {
+            "finite_element": {
+                e: make_classname(prefix, "finite_element", element_numbers[e])
+                for e in elements },
+            "dofmap": {
+                e: make_classname(prefix, "dofmap", element_numbers[e])
+                for e in elements },
+            "coordinate_mapping": {
+                e: make_classname(prefix, "coordinate_mapping", element_numbers[e])
+                for e in coordinate_elements },
+            }
+    return classnames
+
+
+def compute_ir(analysis, prefix, parameters, jit=False):
     "Compute intermediate representation."
 
     begin("Compiler stage 2: Computing intermediate representation")
@@ -90,38 +138,66 @@ def compute_ir(analysis, prefix, parameters):
     # Extract data from analysis
     form_datas, elements, element_numbers, coordinate_elements = analysis
 
+    # Construct classnames for all element objects and coordinate mappings
+    classnames = make_all_element_classnames(prefix, elements,
+                                             coordinate_elements,
+                                             element_numbers,
+                                             parameters, jit)
+
+    # Skip processing elements if jitting forms
+    # NB! it's important that this happens _after_ the element numbers and classnames
+    # above have been created.
+    if jit and form_datas:
+        # While we may get multiple forms during command line action,
+        # not so during jit
+        assert len(form_datas) == 1, "Expecting only one form data instance during jit."
+        # Drop some processing
+        elements = []
+        coordinate_elements = []
+    elif jit and coordinate_elements:
+        # While we may get multiple coordinate elements during command
+        # line action, or during form jit, not so during coordinate
+        # mapping jit
+        assert len(coordinate_elements) == 1, "Expecting only one form data instance during jit."
+        # Drop some processing
+        elements = []
+    elif jit and elements:
+        # Assuming a topological sorting of the elements,
+        # only process the last (main) element from here on
+        elements = [elements[-1]]
+
     # Compute representation of elements
     info("Computing representation of %d elements" % len(elements))
-    ir_elements = [_compute_element_ir(e, prefix, element_numbers)
+    ir_elements = [_compute_element_ir(e, element_numbers, classnames, jit)
                    for e in elements]
 
     # Compute representation of dofmaps
     info("Computing representation of %d dofmaps" % len(elements))
-    ir_dofmaps = [_compute_dofmap_ir(e, prefix, element_numbers)
+    ir_dofmaps = [_compute_dofmap_ir(e, element_numbers, classnames, jit)
                   for e in elements]
 
     # Compute representation of coordinate mappings
     info("Computing representation of %d coordinate mappings" % len(coordinate_elements))
-    ir_compute_coordinate_mappings = [_compute_coordinate_mapping_ir(e, prefix, element_numbers)
-                                      for e in coordinate_elements]
+    ir_coordinate_mappings = [_compute_coordinate_mapping_ir(e, element_numbers, classnames, jit)
+                              for e in coordinate_elements]
 
     # Compute and flatten representation of integrals
     info("Computing representation of integrals")
-    irs = [_compute_integral_ir(fd, i, prefix, element_numbers, parameters)
-           for (i, fd) in enumerate(form_datas)]
-    ir_integrals = [ir for ir in chain(*irs) if not ir is None]
+    irs = [_compute_integral_ir(fd, form_id, prefix, element_numbers, classnames, parameters, jit)
+           for (form_id, fd) in enumerate(form_datas)]
+    ir_integrals = [ir for ir in chain(*irs) if ir is not None]
 
     # Compute representation of forms
     info("Computing representation of forms")
-    ir_forms = [_compute_form_ir(fd, i, prefix, element_numbers)
-                for (i, fd) in enumerate(form_datas)]
+    ir_forms = [_compute_form_ir(fd, form_id, prefix, element_numbers, classnames, parameters, jit)
+                for (form_id, fd) in enumerate(form_datas)]
 
     end()
 
-    return ir_elements, ir_dofmaps, ir_compute_coordinate_mappings, ir_integrals, ir_forms
+    return ir_elements, ir_dofmaps, ir_coordinate_mappings, ir_integrals, ir_forms
 
 
-def _compute_element_ir(ufl_element, prefix, element_numbers):
+def _compute_element_ir(ufl_element, element_numbers, classnames, jit):
     "Compute intermediate representation of element."
 
     # Create FIAT element
@@ -131,7 +207,10 @@ def _compute_element_ir(ufl_element, prefix, element_numbers):
 
     # Store id
     ir = {"id": element_numbers[ufl_element]}
-    ir["classname"] = make_classname(prefix, "finite_element", element_numbers[ufl_element])
+    ir["classname"] = classnames["finite_element"][ufl_element]
+
+    # Remember jit status
+    ir["jit"] = jit
 
     # Compute data for each function
     ir["signature"] = repr(ufl_element)
@@ -152,15 +231,15 @@ def _compute_element_ir(ufl_element, prefix, element_numbers):
     ir["tabulate_dof_coordinates"] = _tabulate_dof_coordinates(ufl_element,
                                                                fiat_element)
     ir["num_sub_elements"] = ufl_element.num_sub_elements()
-    ir["create_sub_element"] = [make_classname(prefix, "finite_element", element_numbers[e])
+    ir["create_sub_element"] = [classnames["finite_element"][e]
                                 for e in ufl_element.sub_elements()]
 
-    #debug_ir(ir, "finite_element")
+    # debug_ir(ir, "finite_element")
 
     return ir
 
 
-def _compute_dofmap_ir(ufl_element, prefix, element_numbers):
+def _compute_dofmap_ir(ufl_element, element_numbers, classnames, jit=False):
     "Compute intermediate representation of dofmap."
 
     # Create FIAT element
@@ -170,11 +249,18 @@ def _compute_dofmap_ir(ufl_element, prefix, element_numbers):
 
     # Precompute repeatedly used items
     num_dofs_per_entity = _num_dofs_per_entity(fiat_element)
+    entity_dofs = fiat_element.entity_dofs()
     facet_dofs = _tabulate_facet_dofs(fiat_element, cell)
-
+    entity_closure_dofs, num_dofs_per_entity_closure = \
+        _tabulate_entity_closure_dofs(fiat_element, cell)
+    
+    
     # Store id
     ir = {"id": element_numbers[ufl_element]}
-    ir["classname"] = make_classname(prefix, "dofmap", element_numbers[ufl_element])
+    ir["classname"] = classnames["dofmap"][ufl_element]
+
+    # Remember jit status
+    ir["jit"] = jit
 
     # Compute data for each function
     ir["signature"] = "FFC dofmap for " + repr(ufl_element)
@@ -185,11 +271,13 @@ def _compute_dofmap_ir(ufl_element, prefix, element_numbers):
     ir["num_element_dofs"] = fiat_element.space_dimension()
     ir["num_facet_dofs"] = len(facet_dofs[0])
     ir["num_entity_dofs"] = num_dofs_per_entity
+    ir["num_entity_closure_dofs"] = num_dofs_per_entity_closure
     ir["tabulate_dofs"] = _tabulate_dofs(fiat_element, cell)
     ir["tabulate_facet_dofs"] = facet_dofs
-    ir["tabulate_entity_dofs"] = (fiat_element.entity_dofs(), num_dofs_per_entity)
+    ir["tabulate_entity_dofs"] = (entity_dofs, num_dofs_per_entity)
+    ir["tabulate_entity_closure_dofs"] = (entity_closure_dofs, entity_dofs, num_dofs_per_entity)
     ir["num_sub_dofmaps"] = ufl_element.num_sub_elements()
-    ir["create_sub_dofmap"] = [make_classname(prefix, "dofmap", element_numbers[e])
+    ir["create_sub_dofmap"] = [classnames["dofmap"][e]
                                for e in ufl_element.sub_elements()]
 
     return ir
@@ -197,15 +285,18 @@ def _compute_dofmap_ir(ufl_element, prefix, element_numbers):
 
 _midpoints = {
     "interval": (0.5,),
-    "triangle": (1.0/3.0, 1.0/3.0),
+    "triangle": (1.0 / 3.0, 1.0 / 3.0),
     "tetrahedron": (0.25, 0.25, 0.25),
     "quadrilateral": (0.5, 0.5),
     "hexahedron": (0.5, 0.5, 0.5),
-    }
+}
+
+
 def cell_midpoint(cell):
     # TODO: Is this defined somewhere more central where we can get it from?
     return _midpoints[cell.cellname()]
 
+
 def _tabulate_coordinate_mapping_basis(ufl_element):
     # TODO: Move this function to a table generation module?
 
@@ -220,7 +311,7 @@ def _tabulate_coordinate_mapping_basis(ufl_element):
     tables = {}
 
     # Get points
-    origo = (0.0,)*tdim
+    origo = (0.0,) * tdim
     midpoint = cell_midpoint(cell)
 
     # Tabulate basis
@@ -228,23 +319,25 @@ def _tabulate_coordinate_mapping_basis(ufl_element):
     tm = fiat_element.tabulate(1, [midpoint])
 
     # Get basis values at cell origo
-    tables["x0"] = t0[(0,)*tdim][:,0]
+    tables["x0"] = t0[(0,) * tdim][:, 0]
 
     # Get basis values at cell midpoint
-    tables["xm"] = tm[(0,)*tdim][:,0]
+    tables["xm"] = tm[(0,) * tdim][:, 0]
 
     # Single direction derivatives, e.g. [(1,0), (0,1)] in 2d
-    derivatives = [(0,)*i + (1,) + (0,)*(tdim-1-i) for i in range(tdim)]
+    derivatives = [(0,) * i + (1,) + (0,) * (tdim - 1 - i) for i in range(tdim)]
 
     # Get basis derivative values at cell origo
-    tables["J0"] = numpy.asarray([t0[d][:,0] for d in derivatives])
+    tables["J0"] = numpy.asarray([t0[d][:, 0] for d in derivatives])
 
     # Get basis derivative values at cell midpoint
-    tables["Jm"] = numpy.asarray([tm[d][:,0] for d in derivatives])
+    tables["Jm"] = numpy.asarray([tm[d][:, 0] for d in derivatives])
 
     return tables
 
-def _compute_coordinate_mapping_ir(ufl_coordinate_element, prefix, element_numbers):
+
+def _compute_coordinate_mapping_ir(ufl_coordinate_element, element_numbers,
+                                   classnames, jit=False):
     "Compute intermediate representation of coordinate mapping."
 
     cell = ufl_coordinate_element.cell()
@@ -257,7 +350,10 @@ def _compute_coordinate_mapping_ir(ufl_coordinate_element, prefix, element_numbe
 
     # Store id
     ir = {"id": element_numbers[ufl_coordinate_element]}
-    ir["classname"] = make_classname(prefix, "coordinate_mapping", element_numbers[ufl_coordinate_element])
+    ir["classname"] = classnames["coordinate_mapping"][ufl_coordinate_element]
+
+    # Remember jit status
+    ir["jit"] = jit
 
     # Compute data for each function
     ir["signature"] = "FFC coordinate_mapping from " + repr(ufl_coordinate_element)
@@ -265,15 +361,17 @@ def _compute_coordinate_mapping_ir(ufl_coordinate_element, prefix, element_numbe
     ir["topological_dimension"] = cell.topological_dimension()
     ir["geometric_dimension"] = ufl_coordinate_element.value_size()
 
-    ir["create_coordinate_finite_element"] = make_classname(prefix, "finite_element", element_numbers[ufl_coordinate_element])
-    ir["create_coordinate_dofmap"] = make_classname(prefix, "dofmap", element_numbers[ufl_coordinate_element])
+    ir["create_coordinate_finite_element"] = \
+        classnames["finite_element"][ufl_coordinate_element]
+    ir["create_coordinate_dofmap"] = \
+        classnames["dofmap"][ufl_coordinate_element]
 
-    ir["compute_physical_coordinates"] = None # currently unused, corresponds to function name
-    ir["compute_reference_coordinates"] = None # currently unused, corresponds to function name
-    ir["compute_jacobians"] = None # currently unused, corresponds to function name
-    ir["compute_jacobian_determinants"] = None # currently unused, corresponds to function name
-    ir["compute_jacobian_inverses"] = None # currently unused, corresponds to function name
-    ir["compute_geometry"] = None # currently unused, corresponds to function name
+    ir["compute_physical_coordinates"] = None  # currently unused, corresponds to function name
+    ir["compute_reference_coordinates"] = None  # currently unused, corresponds to function name
+    ir["compute_jacobians"] = None  # currently unused, corresponds to function name
+    ir["compute_jacobian_determinants"] = None  # currently unused, corresponds to function name
+    ir["compute_jacobian_inverses"] = None  # currently unused, corresponds to function name
+    ir["compute_geometry"] = None  # currently unused, corresponds to function name
 
     # NB! The entries below breaks the pattern of using ir keywords == code keywords,
     # which I personally don't find very useful anyway (martinal).
@@ -284,7 +382,10 @@ def _compute_coordinate_mapping_ir(ufl_coordinate_element, prefix, element_numbe
     ir["num_scalar_coordinate_element_dofs"] = tables["x0"].shape[0]
 
     # Get classnames for coordinate element and its scalar subelement:
-    ir["scalar_coordinate_finite_element_classname"] = make_classname(prefix, "finite_element", element_numbers[ufl_coordinate_element.sub_elements()[0]])
+    ir["coordinate_finite_element_classname"] = \
+        classnames["finite_element"][ufl_coordinate_element]
+    ir["scalar_coordinate_finite_element_classname"] = \
+        classnames["finite_element"][ufl_coordinate_element.sub_elements()[0]]
 
     return ir
 
@@ -321,9 +422,15 @@ def _needs_mesh_entities(fiat_element):
         return [d > 0 for d in num_dofs_per_entity]
 
 
-def _compute_integral_ir(form_data, form_id, prefix, element_numbers, parameters):
+def _compute_integral_ir(form_data, form_id, prefix, element_numbers, classnames,
+                         parameters, jit):
     "Compute intermediate represention for form integrals."
 
+    # For consistency, all jit objects now have classnames with postfix "main"
+    if jit:
+        assert form_id == 0
+        form_id = "main"
+
     irs = []
 
     # Iterate over integrals
@@ -337,15 +444,26 @@ def _compute_integral_ir(form_data, form_id, prefix, element_numbers, parameters
         # Compute representation
         ir = r.compute_integral_ir(itg_data,
                                    form_data,
-                                   form_id,
+                                   form_id,     # FIXME: Can we remove this?
                                    element_numbers,
+                                   classnames,
                                    parameters)
 
         # Build classname
-        ir["classname"] = make_integral_classname(prefix, itg_data.integral_type, form_id, itg_data.subdomain_id)
+        ir["classname"] = make_integral_classname(prefix, itg_data.integral_type,
+                                                  form_id, itg_data.subdomain_id)
+
+        ir["classnames"] = classnames  # FIXME XXX: Use this everywhere needed?
 
-        # Storing prefix here for reconstruction of classnames on code generation side
-        ir["prefix"] = prefix
+        # Storing prefix here for reconstruction of classnames on code
+        # generation side
+        ir["prefix"] = prefix  # FIXME: Drop this?
+
+        # Store metadata for later reference (eg. printing as comment)
+        # NOTE: We make a commitment not to modify it!
+        ir["integrals_metadata"] = itg_data.metadata
+        ir["integral_metadata"] = [integral.metadata()
+                                   for integral in itg_data.integrals]
 
         # Append representation
         irs.append(ir)
@@ -353,53 +471,78 @@ def _compute_integral_ir(form_data, form_id, prefix, element_numbers, parameters
     return irs
 
 
-def _compute_form_ir(form_data, form_id, prefix, element_numbers):
+def _compute_form_ir(form_data, form_id, prefix, element_numbers,
+                     classnames, parameters, jit=False):
     "Compute intermediate representation of form."
 
+    # For consistency, all jit objects now have classnames with postfix "main"
+    if jit:
+        assert form_id == 0
+        form_id = "main"
+
     # Store id
     ir = {"id": form_id}
 
-    # Storing prefix here for reconstruction of classnames on code generation side
+    # Storing prefix here for reconstruction of classnames on code
+    # generation side
     ir["prefix"] = prefix
 
+    # Remember jit status
+    ir["jit"] = jit
+
     # Compute common data
     ir["classname"] = make_classname(prefix, "form", form_id)
-    #ir["members"] = None # unused
-    #ir["constructor"] = None # unused
-    #ir["destructor"] = None # unused
+
+    # ir["members"] = None # unused
+    # ir["constructor"] = None # unused
+    # ir["destructor"] = None # unused
     ir["signature"] = form_data.original_form.signature()
 
     ir["rank"] = len(form_data.original_form.arguments())
     ir["num_coefficients"] = len(form_data.reduced_coefficients)
     ir["original_coefficient_position"] = form_data.original_coefficient_positions
 
-    # FIXME: First: pass classnames instead of element numbers to prepare for change to signature based classnames,
-    #               remember to use these classnames in codegeneration.py instead of calling make_classname.
-    # FIXME: Next: change element classnames to use unique signatures instead of small integer element numbers
-
-    # TODO: Remove these two and access through coordinate_mapping instead in dolfin, when that's in place
-    ir["create_coordinate_finite_element"] = [make_classname(prefix, "finite_element", element_numbers[e])
-                                              for e in form_data.coordinate_elements]
-    ir["create_coordinate_dofmap"] = [make_classname(prefix, "dofmap", element_numbers[e])
-                                      for e in form_data.coordinate_elements]
-
-    ir["create_coordinate_mapping"] = [make_classname(prefix, "coordinate_mapping", element_numbers[e])
-                                       for e in form_data.coordinate_elements]
-    ir["create_finite_element"] = [make_classname(prefix, "finite_element", element_numbers[e])
-                                   for e in form_data.argument_elements + form_data.coefficient_elements]
-    ir["create_dofmap"] = [make_classname(prefix, "dofmap", element_numbers[e])
-                           for e in form_data.argument_elements + form_data.coefficient_elements]
-
+    # TODO: Remove create_coordinate_{finite_element,dofmap} and
+    # access through coordinate_mapping instead in dolfin, when that's
+    # in place
+    ir["create_coordinate_finite_element"] = [
+        classnames["finite_element"][e]
+        for e in form_data.coordinate_elements
+        ]
+    ir["create_coordinate_dofmap"] = [
+        classnames["dofmap"][e]
+        for e in form_data.coordinate_elements
+        ]
+    ir["create_coordinate_mapping"] = [
+        classnames["coordinate_mapping"][e]
+        for e in form_data.coordinate_elements
+        ]
+    ir["create_finite_element"] = [
+        classnames["finite_element"][e]
+        for e in form_data.argument_elements + form_data.coefficient_elements
+        ]
+    ir["create_dofmap"] = [
+        classnames["dofmap"][e]
+        for e in form_data.argument_elements + form_data.coefficient_elements
+        ]
+
+    # Create integral ids and names using form prefix
+    # (integrals are always generated as part of form so don't get
+    # their own prefix)
     for integral_type in ufc_integral_types:
-        ir["max_%s_subdomain_id" % integral_type] = form_data.max_subdomain_ids.get(integral_type, 0)
-        ir["has_%s_integrals" % integral_type] = _has_foo_integrals(integral_type, form_data)
-        ir["create_%s_integral" % integral_type] = _create_foo_integral(integral_type, form_data)
-        ir["create_default_%s_integral" % integral_type] = _create_default_foo_integral(integral_type, form_data)
+        ir["max_%s_subdomain_id" % integral_type] = \
+            form_data.max_subdomain_ids.get(integral_type, 0)
+        ir["has_%s_integrals" % integral_type] = \
+            _has_foo_integrals(prefix, form_id, integral_type, form_data)
+        ir["create_%s_integral" % integral_type] = \
+            _create_foo_integral(prefix, form_id, integral_type, form_data)
+        ir["create_default_%s_integral" % integral_type] = \
+            _create_default_foo_integral(prefix, form_id, integral_type, form_data)
 
     return ir
 
 
-#--- Computation of intermediate representation for non-trivial functions ---
+# --- Computation of intermediate representation for non-trivial functions ---
 
 def _generate_reference_offsets(fiat_element, offset=0):
     """Generate offsets: i.e value offset for each basis function
@@ -409,7 +552,8 @@ def _generate_reference_offsets(fiat_element, offset=0):
         offsets = []
         for e in fiat_element.elements():
             offsets += _generate_reference_offsets(e, offset)
-            # NB! This is the fiat element and therefore value_shape means reference_value_shape
+            # NB! This is the fiat element and therefore value_shape
+            # means reference_value_shape
             offset += product(e.value_shape())
         return offsets
 
@@ -420,7 +564,7 @@ def _generate_reference_offsets(fiat_element, offset=0):
         return offsets
 
     else:
-        return [offset]*fiat_element.space_dimension()
+        return [offset] * fiat_element.space_dimension()
 
 
 def _generate_physical_offsets(ufl_element, offset=0):
@@ -445,13 +589,13 @@ def _generate_physical_offsets(ufl_element, offset=0):
 
     elif isinstance(ufl_element, ufl.EnrichedElement):
         offsets = []
-        for e in ufl_element._elements: # TODO: Avoid private member access
+        for e in ufl_element._elements:  # TODO: Avoid private member access
             offsets += _generate_physical_offsets(e, offset)
         return offsets
 
     elif isinstance(ufl_element, ufl.FiniteElement):
         fiat_element = create_element(ufl_element)
-        return [offset]*fiat_element.space_dimension()
+        return [offset] * fiat_element.space_dimension()
 
     else:
         raise NotImplementedError("This element combination is not implemented")
@@ -475,17 +619,19 @@ def _generate_offsets(ufl_element, reference_offset=0, physical_offset=0):
 
     elif isinstance(ufl_element, ufl.EnrichedElement):
         offsets = []
-        for e in ufl_element._elements: # TODO: Avoid private member access
+        for e in ufl_element._elements:  # TODO: Avoid private member access
             offsets += _generate_offsets(e, reference_offset, physical_offset)
         return offsets
 
     elif isinstance(ufl_element, ufl.FiniteElement):
         fiat_element = create_element(ufl_element)
-        return [(reference_offset, physical_offset)]*fiat_element.space_dimension()
+        return [(reference_offset, physical_offset)] * fiat_element.space_dimension()
 
     else:
-        # TODO: Support RestrictedElement, QuadratureElement, TensorProductElement, etc.!
-        #       and replace _generate_{physical|reference}_offsets with this function.
+        # TODO: Support RestrictedElement, QuadratureElement,
+        #       TensorProductElement, etc.!  and replace
+        #       _generate_{physical|reference}_offsets with this
+        #       function.
         raise NotImplementedError("This element combination is not implemented")
 
 
@@ -527,21 +673,22 @@ def _evaluate_basis(ufl_element, fiat_element):
         if (len(e.value_shape()) > 1) and (e.num_sub_elements() != 1):
             return "Function not supported/implemented for TensorElements."
 
-    # Handle QuadratureElement, not supported because the basis is only defined
-    # at the dof coordinates where the value is 1, so not very interesting.
+    # Handle QuadratureElement, not supported because the basis is
+    # only defined at the dof coordinates where the value is 1, so not
+    # very interesting.
     for e in elements:
         if isinstance(e, QuadratureElement):
             return "Function not supported/implemented for QuadratureElement."
-        if isinstance(e, DiscontinuousLagrangeTrace):
+        if isinstance(e, HDivTrace):
             return "Function not supported for Trace elements"
 
     # Initialise data with 'global' values.
     data = {"reference_value_size": ufl_element.reference_value_size(),
             "physical_value_size": ufl_element.value_size(),
-            "cellname" : cellname,
-            "topological_dimension" : cell.topological_dimension(),
-            "geometric_dimension" : cell.geometric_dimension(),
-            "space_dimension" : fiat_element.space_dimension(),
+            "cellname": cellname,
+            "topological_dimension": cell.topological_dimension(),
+            "geometric_dimension": cell.geometric_dimension(),
+            "space_dimension": fiat_element.space_dimension(),
             "needs_oriented": needs_oriented_jacobian(fiat_element),
             "max_degree": max([e.degree() for e in elements])
             }
@@ -555,17 +702,18 @@ def _evaluate_basis(ufl_element, fiat_element):
         num_expansion_members = e.get_num_members(e.degree())
         dmats = e.dmats()
 
-        # Extracted parts of dd below that are common for the element here.
-        # These dict entries are added to each dof_data dict for each dof,
-        # because that's what the code generation implementation expects.
-        # If the code generation needs this structure to be optimized in the
-        # future, we can store this data for each subelement instead of for each dof.
+        # Extracted parts of dd below that are common for the element
+        # here.  These dict entries are added to each dof_data dict
+        # for each dof, because that's what the code generation
+        # implementation expects.  If the code generation needs this
+        # structure to be optimized in the future, we can store this
+        # data for each subelement instead of for each dof.
         subelement_data = {
-            "embedded_degree" : e.degree(),
-            "num_components" : num_components,
-            "dmats" : dmats,
+            "embedded_degree": e.degree(),
+            "num_components": num_components,
+            "dmats": dmats,
             "num_expansion_members": num_expansion_members,
-            }
+        }
         value_rank = len(e.value_shape())
 
         for i in range(e.space_dimension()):
@@ -586,11 +734,11 @@ def _evaluate_basis(ufl_element, fiat_element):
                 error("Unknown situation with num_components > 1")
 
             dof_data = {
-                "coeffs" : coefficients,
-                "mapping" : mappings[dof],
-                "physical_offset" : physical_offsets[dof],
-                "reference_offset" : reference_offsets[dof],
-                }
+                "coeffs": coefficients,
+                "mapping": mappings[dof],
+                "physical_offset": physical_offsets[dof],
+                "reference_offset": reference_offsets[dof],
+            }
             # Still storing element data in dd to avoid rewriting dependent code
             dof_data.update(subelement_data)
 
@@ -636,8 +784,8 @@ def _tabulate_dofs(element, cell):
                         for dofs in all_entity_dofs]
 
     # Check whether we need offset
-    multiple_entities =  any([sum(n > 0 for n in num_dofs) - 1
-                              for num_dofs in num_dofs_per_element])
+    multiple_entities = any([sum(n > 0 for n in num_dofs) - 1
+                             for num_dofs in num_dofs_per_element])
     need_offset = len(elements) > 1 or multiple_entities
 
     num_dofs_per_element = [e.space_dimension() for e in elements]
@@ -661,9 +809,10 @@ def _tabulate_facet_dofs(element, cell):
     num_facets = cell.num_facets()
 
     # Find out which entities are incident to each facet
-    incident = num_facets*[None]
+    incident = num_facets * [None]
     for facet in range(num_facets):
-        incident[facet] = [pair[1] for pair in incidence if incidence[pair] == True and pair[0] == (D - 1, facet)]
+        incident[facet] = [pair[1] for pair in incidence
+                           if incidence[pair] is True and pair[0] == (D - 1, facet)]
 
     # Make list of dofs
     facet_dofs = []
@@ -678,6 +827,43 @@ def _tabulate_facet_dofs(element, cell):
         facet_dofs[facet].sort()
     return facet_dofs
 
+
+def _tabulate_entity_closure_dofs(element, cell):
+    "Compute intermediate representation of tabulate_entity_closure_dofs."
+
+    # Compute incidences
+    incidence = __compute_incidence(cell.topological_dimension())
+
+    # Get topological dimension
+    D = max([pair[0][0] for pair in incidence])
+
+    entity_dofs = element.entity_dofs()
+
+    entity_closure_dofs = {}
+    for d0 in range(D + 1):
+        # Find out which entities are incident to each entity of dim d0
+        incident = {}
+        for e0 in entity_dofs[d0]:
+            incident[(d0, e0)] = [pair[1] for pair in incidence
+                                  if incidence[pair] is True and pair[0] == (d0, e0)]
+
+        # Make list of dofs
+        for e0 in entity_dofs[d0]:
+            dofs = []
+            for d1 in entity_dofs:
+                for e1 in entity_dofs[d1]:
+                    if (d1, e1) in incident[(d0, e0)]:
+                        dofs += entity_dofs[d1][e1]
+            entity_closure_dofs[(d0, e0)] = sorted(dofs)
+
+    num_entity_closure_dofs = [max(len(dofs)
+                                   for (d, e), dofs in entity_closure_dofs.items()
+                                   if d == dim)
+                               for dim in range(D + 1)]
+
+    return entity_closure_dofs, num_entity_closure_dofs
+
+
 def _interpolate_vertex_values(ufl_element, fiat_element):
     "Compute intermediate representation of interpolate_vertex_values."
 
@@ -685,8 +871,8 @@ def _interpolate_vertex_values(ufl_element, fiat_element):
     for e in all_elements(fiat_element):
         if isinstance(e, QuadratureElement):
             return "Function is not supported/implemented for QuadratureElement."
-        if isinstance(e, DiscontinuousLagrangeTrace):
-            return "Function is not implemented for DiscontinuousLagrangeTrace."
+        if isinstance(e, HDivTrace):
+            return "Function is not implemented for HDivTrace."
 
     cell = ufl_element.cell()
     cellname = cell.cellname()
@@ -699,7 +885,7 @@ def _interpolate_vertex_values(ufl_element, fiat_element):
 
     # Check whether computing the Jacobian is necessary
     mappings = fiat_element.mapping()
-    ir["needs_jacobian"] = any("piola" in m for m in mappings) or any("pullback as metric" in m for m in mappings)
+    ir["needs_jacobian"] = any("piola" in m for m in mappings)
     ir["needs_oriented"] = needs_oriented_jacobian(fiat_element)
 
     # See note in _evaluate_dofs
@@ -714,13 +900,13 @@ def _interpolate_vertex_values(ufl_element, fiat_element):
     all_fiat_elm = all_elements(fiat_element)
     ir["element_data"] = [
         {
-           # NB! value_shape of fiat element e means reference_value_shape
+            # NB! value_shape of fiat element e means reference_value_shape
            "reference_value_size": product(e.value_shape()),
 
            # FIXME: THIS IS A BUG:
-           "physical_value_size": product(e.value_shape()), # FIXME: Get from corresponding ufl element?
+           "physical_value_size": product(e.value_shape()),  # FIXME: Get from corresponding ufl element?
 
-           "basis_values": e.tabulate(0, vertices)[(0,)*tdim].transpose(),
+           "basis_values": e.tabulate(0, vertices)[(0,) * tdim].transpose(),
            "mapping": e.mapping()[0],
            "space_dim": e.space_dimension(),
         }
@@ -739,33 +925,48 @@ def _interpolate_vertex_values(ufl_element, fiat_element):
 
     return ir
 
-def _has_foo_integrals(integral_type, form_data):
+
+def _has_foo_integrals(prefix, form_id, integral_type, form_data):
     "Compute intermediate representation of has_foo_integrals."
-    v = (form_data.max_subdomain_ids.get(integral_type,0) > 0
-         or _create_default_foo_integral(integral_type, form_data) is not None)
+    v = (form_data.max_subdomain_ids.get(integral_type, 0) > 0
+         or _create_default_foo_integral(prefix, form_id, integral_type, form_data) is not None)
     return bool(v)
 
-def _create_foo_integral(integral_type, form_data):
+
+def _create_foo_integral(prefix, form_id, integral_type, form_data):
     "Compute intermediate representation of create_foo_integral."
-    return [itg_data.subdomain_id for itg_data in form_data.integral_data
-           if (itg_data.integral_type == integral_type and
-               isinstance(itg_data.subdomain_id, int))]
+    subdomain_ids = [itg_data.subdomain_id
+                     for itg_data in form_data.integral_data
+                     if (itg_data.integral_type == integral_type
+                         and isinstance(itg_data.subdomain_id, int))]
+    classnames = [make_integral_classname(prefix, integral_type, form_id, subdomain_id)
+                  for subdomain_id in subdomain_ids]
+    return subdomain_ids, classnames
+
 
-def _create_default_foo_integral(integral_type, form_data):
+def _create_default_foo_integral(prefix, form_id, integral_type, form_data):
     "Compute intermediate representation of create_default_foo_integral."
     itg_data = [itg_data for itg_data in form_data.integral_data
                 if (itg_data.integral_type == integral_type and
                     itg_data.subdomain_id == "otherwise")]
-    ffc_assert(len(itg_data) in (0,1), "Expecting at most one default integral of each type.")
-    return "otherwise" if itg_data else None
+    if len(itg_data) > 1:
+        error("Expecting at most one default integral of each type.")
+    if itg_data:
+        classname = make_integral_classname(prefix, integral_type, form_id, "otherwise")
+        return classname
+    else:
+        return None
+
 
 #--- Utility functions ---
 
+
 def all_elements(fiat_element):
     if isinstance(fiat_element, MixedElement):
         return fiat_element.elements()
     return [fiat_element]
 
+
 def _num_dofs_per_entity(fiat_element):
     """
     Compute list of integers representing the number of dofs
@@ -777,6 +978,8 @@ def _num_dofs_per_entity(fiat_element):
     return [len(entity_dofs[e][0]) for e in range(len(entity_dofs.keys()))]
 
 # These two are copied from old ffc
+
+
 def __compute_incidence(D):
     "Compute which entities are incident with which"
 
@@ -791,11 +994,8 @@ def __compute_incidence(D):
         for i0 in range(len(sub_simplices[d0])):
             for d1 in range(d0 + 1):
                 for i1 in range(len(sub_simplices[d1])):
-                    if min([v in sub_simplices[d0][i0] for v in sub_simplices[d1][i1]]) == True:
-                        incidence[((d0, i0), (d1, i1))] = True
-                    else:
-                        incidence[((d0, i0), (d1, i1))] = False
-
+                    incidence[((d0, i0), (d1, i1))] = all(v in sub_simplices[d0][i0]
+                                                          for v in sub_simplices[d1][i1])
     return incidence
 
 
@@ -823,7 +1023,7 @@ def __compute_sub_simplices(D, d):
         remove = permutations[i]
 
         # Remove vertices, keeping d + 1 vertices
-        vertices = [v for v in range(num_vertices) if not v in remove]
+        vertices = [v for v in range(num_vertices) if v not in remove]
         sub_simplices += [vertices]
 
     return sub_simplices
@@ -838,6 +1038,5 @@ def uses_integral_moments(fiat_element):
 
 def needs_oriented_jacobian(fiat_element):
     # Check whether this element needs an oriented jacobian
-    # (only contravariant piolas and pullback as metric seem to need it)
-    return ("contravariant piola" in fiat_element.mapping() or
-            "pullback as metric" in fiat_element.mapping())
+    # (only contravariant piolas seem to need it)
+    return "contravariant piola" in fiat_element.mapping()
diff --git a/ffc/representationutils.py b/ffc/representationutils.py
index 451b9c2..66569df 100644
--- a/ffc/representationutils.py
+++ b/ffc/representationutils.py
@@ -1,7 +1,8 @@
+# -*- coding: utf-8 -*-
 """This module contains utility functions for some code shared between
 quadrature and tensor representation."""
 
-# Copyright (C) 2012-2015 Marie Rognes
+# Copyright (C) 2012-2016 Marie Rognes
 #
 # This file is part of FFC.
 #
@@ -18,7 +19,7 @@ quadrature and tensor representation."""
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 #
-# Modified by Martin Alnaes 2013-2015
+# Modified by Martin Sandve Alnæs 2013-2016
 # Modified by Anders Logg 2014
 
 import numpy
@@ -27,27 +28,61 @@ from ufl.measure import integral_type_to_measure_name
 from ufl.cell import cellname2facetname
 from ufl import custom_integral_types
 
-from ffc.fiatinterface import create_element
-from ffc.cpp import make_integral_classname
 from ffc.log import error
+from ffc.cpp import make_integral_classname
+from ffc.fiatinterface import create_element
+from ffc.fiatinterface import create_quadrature
+from ffc.fiatinterface import map_facet_points
+from ffc.fiatinterface import reference_cell_vertices
 
-from ffc.quadrature_schemes import create_quadrature
 
 def create_quadrature_points_and_weights(integral_type, cell, degree, rule):
     "Create quadrature rule and return points and weights."
     if integral_type == "cell":
         (points, weights) = create_quadrature(cell.cellname(), degree, rule)
     elif integral_type == "exterior_facet" or integral_type == "interior_facet":
-        facet_cellname = cellname2facetname[cell.cellname()]
-        (points, weights) = create_quadrature(facet_cellname, degree, rule)
+        (points, weights) = create_quadrature(cellname2facetname[cell.cellname()], degree, rule)
     elif integral_type == "vertex":
-        (points, weights) = ([()], numpy.array([1.0,])) # TODO: Will be fixed
+        (points, weights) = create_quadrature("vertex", degree, rule)
     elif integral_type in custom_integral_types:
         (points, weights) = (None, None)
     else:
         error("Unknown integral type: " + str(integral_type))
     return (points, weights)
 
+
+def integral_type_to_entity_dim(integral_type, tdim):
+    "Given integral_type and domain tdim, return the tdim of the integration entity."
+    if integral_type == "cell":
+        entity_dim = tdim
+    elif (integral_type == "exterior_facet" or integral_type == "interior_facet"):
+        entity_dim = tdim - 1
+    elif integral_type == "vertex":
+        entity_dim = 0
+    elif integral_type in custom_integral_types:
+        entity_dim = tdim
+    else:
+        error("Unknown integral_type: %s" % integral_type)
+    return entity_dim
+
+
+def map_integral_points(points, integral_type, cell, entity):
+    """Map points from reference entity to its parent reference cell."""
+    tdim = cell.topological_dimension()
+    entity_dim = integral_type_to_entity_dim(integral_type, tdim)
+    if entity_dim == tdim:
+        assert points.shape[1] == tdim
+        assert entity == 0
+        return numpy.asarray(points)
+    elif entity_dim == tdim - 1:
+        assert points.shape[1] == tdim - 1
+        return numpy.asarray(map_facet_points(points, entity))
+    elif entity_dim == 0:
+        return numpy.asarray([reference_cell_vertices(cell.cellname())[entity]])
+    else:
+        error("Can't map points from entity_dim=%s" % (entity_dim,))
+
+
 def transform_component(component, offset, ufl_element):
     """
     This function accounts for the fact that if the geometrical and
@@ -69,7 +104,7 @@ def transform_component(component, offset, ufl_element):
     # ()), and if gdim != tdim)
     if gdim == tdim:
         return component, offset
-    all_mappings =  create_element(ufl_element).mapping()
+    all_mappings = create_element(ufl_element).mapping()
     special_case = (any(['piola' in m for m in all_mappings])
                     and ufl_element.num_sub_elements() > 1)
     if not special_case:
@@ -97,7 +132,7 @@ def transform_component(component, offset, ufl_element):
         if component < tot:
             break
         else:
-            tot += physical_value_dims[sub_element_number+1]
+            tot += physical_value_dims[sub_element_number + 1]
 
     # Compute the new reference offset:
     reference_offset = sum(reference_value_dims[:sub_element_number])
@@ -109,6 +144,7 @@ def transform_component(component, offset, ufl_element):
 
     return reference_component, reference_offset
 
+
 def needs_oriented_jacobian(form_data):
     # Check whether this form needs an oriented jacobian (only forms
     # involgin contravariant piola mappings seem to need it)
@@ -118,46 +154,54 @@ def needs_oriented_jacobian(form_data):
             return True
     return False
 
+
+# Mapping from recognized domain types to entity types
+_entity_types = {
+    "cell": "cell",
+    "exterior_facet": "facet",
+    "interior_facet": "facet",
+    "vertex": "vertex",
+    # "point":          "vertex", # TODO: Not sure, clarify here what 'entity_type' refers to?
+    "custom": "cell",
+    "cutcell": "cell",
+    "interface": "cell",  # "facet"  # TODO: ?
+    "overlap": "cell",
+    }
+
+
+def entity_type_from_integral_type(integral_type):
+    return _entity_types[integral_type]
+
+
 def initialize_integral_ir(representation, itg_data, form_data, form_id):
     """Initialize a representation dict with common information that is
     expected independently of which representation is chosen."""
 
-    # Mapping from recognized domain types to entity types
-    entity_type = {"cell":           "cell",
-                   "exterior_facet": "facet",
-                   "interior_facet": "facet",
-                   "vertex":         "vertex",
-                   #"point":          "vertex", # TODO: Not sure, clarify here what 'entity_type' refers to?
-                   "custom":         "cell",
-                   "cutcell":        "cell",
-                   "interface":      "cell",
-                   "overlap":        "cell",
-                   }[itg_data.integral_type]
-
-    # Extract data
+    entitytype = entity_type_from_integral_type(itg_data.integral_type)
     cell = itg_data.domain.ufl_cell()
-    cellname = cell.cellname()
+    #cellname = cell.cellname()
     tdim = cell.topological_dimension()
     assert all(tdim == itg.ufl_domain().topological_dimension() for itg in itg_data.integrals)
 
-    # Set number of cells if not set TODO: Get automatically from number of domains
+    # Set number of cells if not set  TODO: Get automatically from number of domains
     num_cells = itg_data.metadata.get("num_cells")
 
-    return {"representation":        representation,
-            "integral_type":         itg_data.integral_type,
-            "subdomain_id":          itg_data.subdomain_id,
-            "form_id":               form_id,
-            "rank":                  form_data.rank,
-            "geometric_dimension":   form_data.geometric_dimension,
+    return {"representation": representation,
+            "integral_type": itg_data.integral_type,
+            "subdomain_id": itg_data.subdomain_id,
+            "form_id": form_id,
+            "rank": form_data.rank,
+            "geometric_dimension": form_data.geometric_dimension,
             "topological_dimension": tdim,
-            "entitytype":            entity_type,
-            "num_facets":            cell.num_facets(),
-            "num_vertices":          cell.num_vertices(),
-            "needs_oriented":        needs_oriented_jacobian(form_data),
-            "num_cells":             num_cells,
-            "enabled_coefficients":  itg_data.enabled_coefficients,
+            "entitytype": entitytype,
+            "num_facets": cell.num_facets(),
+            "num_vertices": cell.num_vertices(),
+            "needs_oriented": needs_oriented_jacobian(form_data),
+            "num_cells": num_cells,
+            "enabled_coefficients": itg_data.enabled_coefficients,
             }
 
+
 def generate_enabled_coefficients(enabled_coefficients):
     # TODO: I don't know how to implement this using the format dict, this will do for now:
     initializer_list = ", ".join("true" if enabled else "false"
@@ -165,9 +209,10 @@ def generate_enabled_coefficients(enabled_coefficients):
     code = '\n'.join([
         "static const std::vector<bool> enabled({%s});" % initializer_list,
         "return enabled;",
-        ])
+    ])
     return code
 
+
 def initialize_integral_code(ir, prefix, parameters):
     "Representation independent default initialization of code dict for integral from intermediate representation."
     code = {}
@@ -179,5 +224,5 @@ def initialize_integral_code(ir, prefix, parameters):
     code["initializer_list"] = ""
     code["destructor"] = ""
     code["enabled_coefficients"] = generate_enabled_coefficients(ir["enabled_coefficients"])
-    #code["additional_includes_set"] = set() #ir["additional_includes_set"]
+    # code["additional_includes_set"] = set() #ir["additional_includes_set"]
     return code
diff --git a/ffc/restrictedelement.py b/ffc/restrictedelement.py
index 56e6abe..8415dfb 100644
--- a/ffc/restrictedelement.py
+++ b/ffc/restrictedelement.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright (C) 2010 Anders Logg
 #
 # This file is part of FFC.
@@ -21,7 +22,9 @@ import numpy
 from ufl.utils.sorting import sorted_by_key
 from ffc.log import error
 
+
 class RestrictedElement:
+
     "Create a restriction of a given FIAT element."
 
     def __init__(self, element, indices, restriction_domain):
@@ -74,6 +77,7 @@ class RestrictedElement:
     def restriction_domain(self):
         return self._restriction_domain
 
+
 def _extract_entity_dofs(element, indices):
     # FIXME: Readability counts
     entity_dofs = element.entity_dofs()
diff --git a/ffc/tensor/__init__.py b/ffc/tensor/__init__.py
index 05ad3bf..aa62fe9 100644
--- a/ffc/tensor/__init__.py
+++ b/ffc/tensor/__init__.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 from .tensorrepresentation import compute_integral_ir
 from .tensorgenerator import generate_integral_code
 from .costestimation import estimate_cost
diff --git a/ffc/tensor/costestimation.py b/ffc/tensor/costestimation.py
index 2486ca1..59faf52 100644
--- a/ffc/tensor/costestimation.py
+++ b/ffc/tensor/costestimation.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright (C) 2010 Anders Logg
 #
 # This file is part of FFC.
@@ -19,7 +20,7 @@
 # Last changed: 2014-04-15
 
 # FFC modules
-from ffc.log import debug
+from ffc.log import debug, error
 
 # FFC tensor representation modules
 from ffc.tensor.monomialextraction import extract_monomial_form
diff --git a/ffc/tensor/geometrytensor.py b/ffc/tensor/geometrytensor.py
index 0184f28..2a5eeb4 100644
--- a/ffc/tensor/geometrytensor.py
+++ b/ffc/tensor/geometrytensor.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright (C) 2004-2009 Anders Logg
 #
 # This file is part of FFC.
diff --git a/ffc/tensor/monomialextraction.py b/ffc/tensor/monomialextraction.py
index 23eb41f..d76e7a2 100644
--- a/ffc/tensor/monomialextraction.py
+++ b/ffc/tensor/monomialextraction.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "Extraction of monomial representations of UFL forms."
 
 # Copyright (C) 2008-2013 Anders Logg
@@ -17,7 +18,7 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 #
-# Modified by Martin Alnaes, 2008, 2013
+# Modified by Martin Sandve Alnæs, 2008, 2013
 # Modified by Kristian B. Oelgaard
 #
 # First added:  2008-08-01
@@ -166,12 +167,12 @@ class Monomial:
             raise MonomialException("Unable to create monomial from expression: " + str(arg))
 
     def apply_derivative(self, indices):
-        if not len(self.factors) == 1:
+        if len(self.factors) != 1:
             raise MonomialException("Expecting a single factor.")
         self.factors[0].apply_derivative(indices)
 
     def apply_tensor(self, indices):
-        if not self.index_slots is None:
+        if self.index_slots is not None:
             raise MonomialException("Expecting scalar-valued expression.")
         self.index_slots = indices
 
@@ -395,7 +396,7 @@ def _replace_indices(indices, old_indices, new_indices):
     "Handle replacement of subsets of multi indices."
 
     # Old and new indices must match
-    if not len(old_indices) == len(new_indices):
+    if len(old_indices) != len(new_indices):
         raise MonomialException("Unable to replace indices, mismatching index dimensions.")
 
     # Build index map
diff --git a/ffc/tensor/monomialintegration.py b/ffc/tensor/monomialintegration.py
index ce1d826..947eab6 100644
--- a/ffc/tensor/monomialintegration.py
+++ b/ffc/tensor/monomialintegration.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "This module implements efficient integration of monomial forms."
 
 # Copyright (C) 2004-2011 Anders Logg
@@ -32,7 +33,6 @@ import time
 from ffc.log import info, debug, error
 from ffc.fiatinterface import create_element
 from ffc.fiatinterface import map_facet_points
-from ffc.quadrature_schemes import create_quadrature
 from ffc.representationutils import create_quadrature_points_and_weights
 
 # FFC tensor representation modules
@@ -40,6 +40,7 @@ from .multiindex import build_indices
 from .monomialextraction import MonomialException
 from .monomialtransformation import MonomialIndex
 
+
 def integrate(monomial,
               integral_type,
               facet0, facet1,
@@ -55,7 +56,8 @@ def integrate(monomial,
     tic = time.time()
 
     # Initialize quadrature points and weights
-    (points, weights) = create_quadrature_points_and_weights(integral_type, cell, quadrature_degree, quadrature_rule)
+    (points, weights) = create_quadrature_points_and_weights(
+        integral_type, cell, quadrature_degree, quadrature_rule)
 
     # Initialize quadrature table for basis functions
     table = _init_table(monomial.arguments,
@@ -64,8 +66,8 @@ def integrate(monomial,
                         facet0, facet1)
 
     # Compute table Psi for each factor
-    psis = [_compute_psi(v, table, len(points)) \
-                for v in monomial.arguments]
+    psis = [_compute_psi(v, table, len(points))
+            for v in monomial.arguments]
 
     # Compute product of all Psis
     A0 = _compute_product(psis, monomial.float_value * weights)
@@ -78,6 +80,7 @@ def integrate(monomial,
 
     return A0
 
+
 def _init_table(arguments, integral_type, points, facet0, facet1):
     """Initialize table of basis functions and their derivatives at
     the given quadrature points for each element."""
@@ -109,6 +112,7 @@ def _init_table(arguments, integral_type, points, facet0, facet1):
 
     return table
 
+
 def _compute_psi(v, table, num_points):
     "Compute the table Psi for the given basis function v."
 
@@ -131,7 +135,7 @@ def _compute_psi(v, table, num_points):
     tdim = v.element.cell().topological_dimension()
 
     # Get indices and shapes for components
-    if len(v.components) ==  0:
+    if len(v.components) == 0:
         cindex = []
         cshape = []
     elif len(v.components) == 1:
@@ -153,7 +157,7 @@ def _compute_psi(v, table, num_points):
     shapes = cshape + dshape + vshape + [num_points]
 
     # Initialize tensor Psi: component, derivatives, basis function, points
-    Psi = numpy.zeros(shapes, dtype = numpy.float)
+    Psi = numpy.zeros(shapes, dtype=numpy.float)
 
     # Iterate over derivative indices
     dlists = build_indices([index.index_range for index in dindex]) or [[]]
@@ -181,8 +185,8 @@ def _compute_psi(v, table, num_points):
     # Remove fixed indices
     for i in range(num_indices[0]):
         Psi = Psi[0, ...]
-    indices = [index for index in indices \
-                   if not index.index_type == MonomialIndex.FIXED]
+    indices = [index for index in indices
+               if not index.index_type == MonomialIndex.FIXED]
 
     # Put quadrature points first
     rank = Psi.ndim
@@ -193,6 +197,7 @@ def _compute_psi(v, table, num_points):
 
     return (Psi, indices, bpart)
 
+
 def _compute_product(psis, weights):
     "Compute special product of list of Psis."
 
@@ -203,7 +208,7 @@ def _compute_product(psis, weights):
 
     # Initialize zero reference tensor (will be rearranged later)
     (shape, indices) = _compute_shape(psis)
-    A0 = numpy.zeros(shape, dtype= numpy.float)
+    A0 = numpy.zeros(shape, dtype=numpy.float)
 
     # Initialize list of internal multiindices
     bshape = _compute_internal_shape(psis)
@@ -217,7 +222,7 @@ def _compute_product(psis, weights):
             # Compute outer products of subtables for current (q, b)
             B = weights[q]
             for (Psi, index, bpart) in psis:
-                B = numpy.multiply.outer(B, Psi[ tuple([q] + [b[i] for i in bpart])])
+                B = numpy.multiply.outer(B, Psi[tuple([q] + [b[i] for i in bpart])])
 
             # Add product to reference tensor
             numpy.add(A0, B, A0)
@@ -228,20 +233,22 @@ def _compute_product(psis, weights):
 
     return A0
 
+
 def _compute_rearrangement(indices):
     """
     Compute rearrangement tuple for given list of Indices, so that the
     tuple reorders the given list of Indices with fixed, primary,
     secondary and internal Indices in rising order.
     """
-    fixed     = _find_indices(indices, MonomialIndex.FIXED)
-    internal  = _find_indices(indices, MonomialIndex.INTERNAL)
-    primary   = _find_indices(indices, MonomialIndex.PRIMARY)
+    fixed = _find_indices(indices, MonomialIndex.FIXED)
+    internal = _find_indices(indices, MonomialIndex.INTERNAL)
+    primary = _find_indices(indices, MonomialIndex.PRIMARY)
     secondary = _find_indices(indices, MonomialIndex.SECONDARY)
     assert len(fixed + internal + primary + secondary) == len(indices)
-    return (tuple(fixed + internal + primary + secondary), \
+    return (tuple(fixed + internal + primary + secondary),
             (len(fixed), len(internal), len(primary), len(secondary)))
 
+
 def _compute_shape(psis):
     "Compute shape of reference tensor from given list of tables."
     shape, indices = [], []
@@ -251,6 +258,7 @@ def _compute_shape(psis):
         indices += index[num_internal:]
     return (shape, indices)
 
+
 def _compute_internal_shape(psis):
     """
     Compute shape for internal indices from given list of tables.
@@ -259,7 +267,8 @@ def _compute_internal_shape(psis):
     """
     # First find the number of different internal indices (check maximum)
     bs = [b for (Psi, index, bpart) in psis for b in bpart]
-    if len(bs) == 0: return []
+    if len(bs) == 0:
+        return []
     bmax = max(bs)
     # Find the dimension for each internal index
     bshape = [0 for i in range(bmax + 1)]
@@ -271,12 +280,14 @@ def _compute_internal_shape(psis):
         error("Unable to compute the shape for each internal index.")
     return bshape
 
+
 def _find_indices(indices, index_type):
     "Return sorted list of positions for given index type."
     pos = [i for i in range(len(indices)) if indices[i].index_type == index_type]
     val = [indices[i].index_id for i in range(len(indices)) if indices[i].index_type == index_type]
     return [pos[i] for i in numpy.argsort(val)]
 
+
 def _multiindex_to_tuple(dindex, cell_dimension):
     """
     Compute lookup tuple from given derivative multiindex. Necessary
diff --git a/ffc/tensor/monomialtransformation.py b/ffc/tensor/monomialtransformation.py
index e2d2241..f2209ee 100644
--- a/ffc/tensor/monomialtransformation.py
+++ b/ffc/tensor/monomialtransformation.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "Transformation of monomial representations of UFL forms."
 
 # Copyright (C) 2009 Anders Logg
@@ -39,6 +40,7 @@ from ffc.representationutils import transform_component
 from ffc.tensor.monomialextraction import MonomialForm
 from ffc.tensor.monomialextraction import MonomialException
 
+
 def transform_monomial_form(monomial_form):
     "Transform monomial form to reference element."
 
@@ -60,19 +62,22 @@ def transform_monomial_form(monomial_form):
             if not isinstance(monomial, TransformedMonomial):
                 integrand.monomials[i] = TransformedMonomial(monomial)
 
+
 class MonomialIndex:
+
     """
     This class represents a monomial index. Each index has a type,
     a range and a unique id. Valid index types are listed below.
     """
 
-    FIXED     = "fixed"      # Integer index
-    PRIMARY   = "primary"    # Argument basis function index
+    FIXED = "fixed"      # Integer index
+    PRIMARY = "primary"    # Argument basis function index
     SECONDARY = "secondary"  # Index appearing both inside and outside integral
-    INTERNAL  = "internal"   # Index appearing only inside integral
-    EXTERNAL  = "external"   # Index appearing only outside integral
+    INTERNAL = "internal"   # Index appearing only inside integral
+    EXTERNAL = "external"   # Index appearing only outside integral
 
-    def __init__(self, index=None, index_type=None, index_range=None, index_id=None):
+    def __init__(self, index=None, index_type=None, index_range=None,
+                 index_id=None):
         "Create index with given type, range and id."
         if isinstance(index, MonomialIndex):
             self.index_type = index.index_type
@@ -87,7 +92,8 @@ class MonomialIndex:
         "Comparison operator."
         return self.index_id < other.index_id
 
-    def __call__(self, primary=None, secondary=None, internal=None, external=None):
+    def __call__(self, primary=None, secondary=None, internal=None,
+                 external=None):
         "Evaluate index at current index list."
 
         if self.index_type == MonomialIndex.FIXED:
@@ -136,7 +142,9 @@ class MonomialIndex:
         else:
             return "?"
 
+
 class MonomialDeterminant:
+
     "This class representes a determinant factor in a monomial."
 
     def __init__(self, power=None, restriction=None):
@@ -154,7 +162,7 @@ class MonomialDeterminant:
         if not self.restriction:
             r = ""
         else:
-            r = "(%s)" %  self.restriction
+            r = "(%s)" % self.restriction
         if self.power == 0:
             return "|det F'%s|" % r
         elif self.power == 1:
@@ -162,7 +170,9 @@ class MonomialDeterminant:
         else:
             return "|det F'%s| (det F'%s)^%s" % (r, r, str(self.power))
 
+
 class MonomialCoefficient:
+
     "This class represents a coefficient in a monomial."
 
     def __init__(self, index, number):
@@ -174,7 +184,9 @@ class MonomialCoefficient:
         "Return informal string representation (pretty-print)."
         return "c_" + str(self.index)
 
+
 class MonomialTransform:
+
     "This class represents a transform (mapping derivative) in a form."
 
     J = "J"
@@ -190,10 +202,11 @@ class MonomialTransform:
         self.restriction = restriction
         self.offset = offset
 
-        # Subtract offset for fixed indices. Note that the index subtraction
-        # creates a new index instance. This is ok here since a fixed index
-        # does not need to match any other index (being the same instance)
-        # in index summation and index extraction.
+        # Subtract offset for fixed indices. Note that the index
+        # subtraction creates a new index instance. This is ok here
+        # since a fixed index does not need to match any other index
+        # (being the same instance) in index summation and index
+        # extraction.
         if index0.index_type is MonomialIndex.FIXED:
             self.index0 = index0 - offset
         if index1.index_type is MonomialIndex.FIXED:
@@ -210,7 +223,9 @@ class MonomialTransform:
         else:
             return "dX_%s/dx_%s%s" % (str(self.index0), str(self.index1), r)
 
+
 class MonomialArgument:
+
     """
     This class represents a monomial argument, that is, a derivative of
     a scalar component of a basis function on the reference element.
@@ -243,7 +258,9 @@ class MonomialArgument:
         v = "V_" + str(self.index)
         return d0 + v + r + c + d1
 
+
 class TransformedMonomial:
+
     """
     This class represents a monomial form after transformation to the
     reference element.
@@ -326,7 +343,8 @@ class TransformedMonomial:
                     raise MonomialException("Mappings differ: " + str(mappings))
                 mapping = mappings[0]
 
-                # Get component index relative to its sub element and its sub element
+                # Get component index relative to its sub element and
+                # its sub element
                 (component_index, sub_element) = ufl_element.extract_component(component.index_range[0])
 
                 # Get offset
@@ -345,11 +363,14 @@ class TransformedMonomial:
                         "Component transform not implemented for this case. Please request this feature."
                     component, offset = transform_component(component.index_range[0], offset, ufl_element)
                     component = MonomialIndex(index_type=MonomialIndex.FIXED,
-                                              index_range=[component], index_id=None)
+                                              index_range=[component],
+                                              index_id=None)
                     components = [component, ]
 
                 # Add transforms where appropriate
-                if mapping == "contravariant piola":
+                if mapping == "affine":
+                    pass
+                elif mapping == "contravariant piola":
                     # phi(x) = (det J)^{-1} J Phi(X)
                     index0 = component
                     index1 = MonomialIndex(index_range=list(range(tdim))) + offset
@@ -370,6 +391,8 @@ class TransformedMonomial:
                                                   f.restriction, offset)
                     self.transforms.append(transform)
                     components[0] = index0
+                else:
+                    raise MonomialException("Don't know how to handle mapping='%s'." % mapping)
 
             # Extract derivatives / transforms
             derivatives = []
@@ -384,7 +407,9 @@ class TransformedMonomial:
                 else:
                     index1 = MonomialIndex(index_range=list(range(gdim)))
                 index_map[d] = index1
-                transform = MonomialTransform(index0, index1, MonomialTransform.JINV, f.restriction, 0)
+                transform = MonomialTransform(index0, index1,
+                                              MonomialTransform.JINV,
+                                              f.restriction, 0)
 
                 self.transforms.append(transform)
                 derivatives.append(index0)
@@ -393,7 +418,8 @@ class TransformedMonomial:
             restriction = f.restriction
 
             # Create basis function
-            v = MonomialArgument(ufl_element, vindex, components, derivatives, restriction)
+            v = MonomialArgument(ufl_element, vindex, components, derivatives,
+                                 restriction)
             self.arguments.append(v)
 
         # Figure out secondary and auxiliary indices
@@ -402,7 +428,7 @@ class TransformedMonomial:
         for i in internal_indices + external_indices:
 
             # Skip already visited indices
-            if not i.index_type is None:
+            if i.index_type is not None:
                 continue
 
             # Set index type and id
@@ -411,13 +437,13 @@ class TransformedMonomial:
 
             if num_internal == 1 and num_external == 1:
                 i.index_type = MonomialIndex.SECONDARY
-                i.index_id   = _next_secondary_index()
+                i.index_id = _next_secondary_index()
             elif num_internal == 2 and num_external == 0:
                 i.index_type = MonomialIndex.INTERNAL
-                i.index_id   = _next_internal_index()
+                i.index_id = _next_internal_index()
             elif num_internal == 0 and num_external == 2:
                 i.index_type = MonomialIndex.EXTERNAL
-                i.index_id   = _next_external_index()
+                i.index_id = _next_external_index()
             else:
                 raise Exception("Summation index does not appear exactly twice: %s" % str(i))
 
@@ -425,7 +451,7 @@ class TransformedMonomial:
         "Return all unique indices for monomial w.r.t. type and id (not range)."
         indices = []
         for index in self._extract_indices(index_type):
-            if not index in indices:
+            if index not in indices:
                 indices.append(index)
         return indices
 
@@ -469,12 +495,12 @@ class TransformedMonomial:
     def _extract_indices(self, index_type=None):
         "Return all indices for monomial."
         return self._extract_internal_indices(index_type) + \
-               self._extract_external_indices(index_type)
+            self._extract_external_indices(index_type)
 
     def __str__(self):
         "Return informal string representation (pretty-print)."
         factors = []
-        if not self.float_value == 1.0:
+        if self.float_value != 1.0:
             factors.append(self.float_value)
         factors += self.determinants
         factors += self.coefficients
@@ -486,24 +512,28 @@ _current_secondary_index = 0
 _current_internal_index = 0
 _current_external_index = 0
 
+
 def _next_secondary_index():
     "Return next available secondary index."
     global _current_secondary_index
     _current_secondary_index += 1
     return _current_secondary_index - 1
 
+
 def _next_internal_index():
     "Return next available internal index."
     global _current_internal_index
     _current_internal_index += 1
     return _current_internal_index - 1
 
+
 def _next_external_index():
     "Return next available external index."
     global _current_external_index
     _current_external_index += 1
     return _current_external_index - 1
 
+
 def _reset_indices():
     "Reset all index counters."
     global _current_secondary_index
diff --git a/ffc/tensor/multiindex.py b/ffc/tensor/multiindex.py
index aa12404..daa8dd5 100644
--- a/ffc/tensor/multiindex.py
+++ b/ffc/tensor/multiindex.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright (C) 2004-2009 Anders Logg
 #
 # This file is part of FFC.
@@ -30,12 +31,15 @@ import numpy
 from ffc.utils import listcopy
 from ffc.log import error
 
+
 def build_indices(dims):
     "Create a list of all index combinations."
-    if not dims: return [[]]
+    if not dims:
+        return [[]]
     ranges = listcopy(dims)
     return functools.reduce(outer_join, ranges, [[]])
 
+
 def outer_join(a, b):
     """Let a be a list of lists and b a list. We append each element
     of b to each list in a and return the resulting list of lists."""
@@ -45,6 +49,7 @@ def outer_join(a, b):
             outer += [a[i] + [b[j]]]
     return outer
 
+
 def create_multiindex(indices):
     "Create multiindex for given list of indices."
 
@@ -59,7 +64,9 @@ def create_multiindex(indices):
 
     return MultiIndex(dims)
 
+
 class MultiIndex:
+
     """
     A MultiIndex represents a list of indices and holds the following
     data:
diff --git a/ffc/tensor/referencetensor.py b/ffc/tensor/referencetensor.py
index bba12ef..f729327 100644
--- a/ffc/tensor/referencetensor.py
+++ b/ffc/tensor/referencetensor.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright (C) 2004-2009 Anders Logg
 #
 # This file is part of FFC.
@@ -29,7 +30,9 @@ from .monomialintegration import integrate
 from .monomialtransformation import MonomialIndex
 from .multiindex import create_multiindex
 
+
 class ReferenceTensor:
+
     """
     This class represents the reference tensor for a monomial term of
     a multilinear form.
@@ -53,14 +56,14 @@ class ReferenceTensor:
                             cell)
 
         # Extract indices
-        primary_indices   = monomial.extract_unique_indices(MonomialIndex.PRIMARY)
+        primary_indices = monomial.extract_unique_indices(MonomialIndex.PRIMARY)
         secondary_indices = monomial.extract_unique_indices(MonomialIndex.SECONDARY)
-        internal_indices  = monomial.extract_unique_indices(MonomialIndex.INTERNAL)
+        internal_indices = monomial.extract_unique_indices(MonomialIndex.INTERNAL)
 
         # Create multiindices
-        self.primary_multi_index   = create_multiindex(primary_indices)
+        self.primary_multi_index = create_multiindex(primary_indices)
         self.secondary_multi_index = create_multiindex(secondary_indices)
-        self.internal_multi_index  = create_multiindex(internal_indices)
+        self.internal_multi_index = create_multiindex(internal_indices)
 
         # Store monomial
         self.monomial = monomial
diff --git a/ffc/tensor/tensorgenerator.py b/ffc/tensor/tensorgenerator.py
index 77849a0..d9af8bc 100644
--- a/ffc/tensor/tensorgenerator.py
+++ b/ffc/tensor/tensorgenerator.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "Code generator for tensor representation"
 
 # Copyright (C) 2004-2013 Anders Logg
@@ -21,7 +22,7 @@
 # Modified by Marie Rognes, 2007
 # Modified by Garth N. Wells, 2009
 # Modified by Mehdi Nikbakht, 2010
-# Modified by Martin Alnaes, 2013
+# Modified by Martin Sandve Alnæs, 2013
 #
 # First added:  2004-11-03
 # Last changed: 2013-02-10
@@ -34,6 +35,7 @@ from ffc.cpp import format, remove_unused, count_ops
 from ffc.tensor.monomialtransformation import MonomialIndex
 from ffc.representationutils import initialize_integral_code
 
+
 def generate_integral_code(ir, prefix, parameters):
     "Generate code for integral from intermediate representation."
     code = initialize_integral_code(ir, prefix, parameters)
@@ -70,7 +72,7 @@ def _tabulate_tensor(ir, parameters):
         g_code = _generate_geometry_tensors(AK, j_set, g_set, tdim, gdim)
 
         # Generate code for basic geometric quantities
-        j_code  = ""
+        j_code = ""
         j_code += format["compute_jacobian"](tdim, gdim)
         j_code += "\n"
         j_code += format["compute_jacobian_inverse"](tdim, gdim)
@@ -166,12 +168,12 @@ def _generate_tensor_contraction_standard(terms, parameters, g_set):
     """
 
     # Prefetch formats to speed up code generation
-    iadd            = format["iadd"]
-    assign          = format["assign"]
-    element_tensor  = format["element tensor"]
+    iadd = format["iadd"]
+    assign = format["assign"]
+    element_tensor = format["element tensor"]
     geometry_tensor = format["geometry tensor"]
-    zero            = format["float"](0)
-    inner_product   = format["inner product"]
+    zero = format["float"](0)
+    inner_product = format["inner product"]
 
     # True if we should add to element tensor (not used)
     incremental = False
@@ -203,7 +205,8 @@ def _generate_tensor_contraction_standard(terms, parameters, g_set):
                 a0 = A0.A0[tuple(i + a)]
 
                 # Skip small values
-                if abs(a0) < epsilon: continue
+                if abs(a0) < epsilon:
+                    continue
 
                 # Compute value
                 coefficients.append(a0)
@@ -231,10 +234,10 @@ def _generate_geometry_tensors(terms, j_set, g_set, tdim, gdim):
     "Generate code for computation of geometry tensors."
 
     # Prefetch formats to speed up code generation
-    format_add             = format["addition"]
+    format_add = format["addition"]
     format_geometry_tensor = format["geometry tensor"]
-    format_scale_factor    = format["scale factor"]
-    format_declaration     = format["const float declaration"]
+    format_scale_factor = format["scale factor"]
+    format_declaration = format["const float declaration"]
 
     # Iterate over all terms
     lines = []
@@ -255,11 +258,12 @@ def _generate_geometry_tensors(terms, j_set, g_set, tdim, gdim):
         for a in secondary_indices:
 
             # Skip code generation if term is not used
-            if not format["geometry tensor"](i, a) in g_set: continue
+            if not format["geometry tensor"](i, a) in g_set:
+                continue
 
             # Compute factorized values
-            values = [_generate_entry(GK, a, offset + j, j_set, tdim, gdim) \
-                          for (j, GK) in enumerate(GKs)]
+            values = [_generate_entry(GK, a, offset + j, j_set, tdim, gdim)
+                      for (j, GK) in enumerate(GKs)]
 
             # Sum factorized values
             name = format_geometry_tensor(i, a)
@@ -278,7 +282,7 @@ def _generate_geometry_tensors(terms, j_set, g_set, tdim, gdim):
 
     # Add scale factor
     if det_used:
-        j_set.add(format_scale_factor) # meg says: If all values vanish, det is not used.
+        j_set.add(format_scale_factor)  # meg says: If all values vanish, det is not used.
 
     return "\n".join(lines)
 
@@ -288,12 +292,12 @@ def _generate_entry(GK, a, i, j_set, tdim, gdim):
 
     # Prefetch formats to speed up code generation
     grouping = format["grouping"]
-    add      = format["addition"]
+    add = format["addition"]
     multiply = format["multiply"]
 
     # Compute product of factors outside sum
     factors = _extract_factors(GK, a, None, j_set, tdim, gdim,
-                              MonomialIndex.SECONDARY)
+                               MonomialIndex.SECONDARY)
 
     # Compute sum of products of factors inside sum
     terms = [multiply(_extract_factors(GK, a, b, j_set, tdim, gdim,
@@ -316,7 +320,7 @@ def _multiply_value_by_det(value, dets, is_sum, j_set):
 
     # Cell / exterior facets:
     d = []
-    if all([det.restriction == None for det in dets]):
+    if all([det.restriction is None for det in dets]):
         total_power = sum(det.power for det in dets)
         if not total_power == 0:
             J = format["det(J)"](None)
@@ -367,9 +371,9 @@ def _extract_factors(GK, a, b, j_set, tdim, gdim, index_type):
         # Add factor
         if include_index:
             # FIXME: Dimensions of J and K are transposed, what is the right thing to fix this hack?
-            if t.transform_type == "J": #MonomialTransform.J:
+            if t.transform_type == "J":  # MonomialTransform.J:
                 dim0, dim1 = gdim, tdim
-            elif t.transform_type == "JINV": #MonomialTransform.JINV:
+            elif t.transform_type == "JINV":  # MonomialTransform.JINV:
                 dim0, dim1 = tdim, gdim
             else:
                 error("Unknown transform type, fix this hack.")
diff --git a/ffc/tensor/tensorreordering.py b/ffc/tensor/tensorreordering.py
index 7bafd7b..1fbe293 100644
--- a/ffc/tensor/tensorreordering.py
+++ b/ffc/tensor/tensorreordering.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "Reordering of entries in reference tensor for interior facets."
 
 # Copyright (C) 2006-2009 Anders Logg
@@ -27,6 +28,7 @@ import numpy
 from .monomialtransformation import MonomialIndex
 from .multiindex import MultiIndex
 
+
 def reorder_entries(terms):
     """Reorder entries to compute the reference tensor for an interior
     facet from the the reduced reference tensor."""
@@ -43,9 +45,9 @@ def reorder_entries(terms):
         for i in range(len(restrictions)):
             dim = dims[i]
             if restrictions[i] == "+":
-                position = position + [slice(0, dim/2)]
+                position = position + [slice(0, dim / 2)]
             elif restrictions[i] == "-":
-                position = position + [slice(dim/2, dim)]
+                position = position + [slice(dim / 2, dim)]
             else:
                 position = position + [slice(0, dim)]
 
@@ -62,6 +64,7 @@ def reorder_entries(terms):
         A0.secondary_multi_index = MultiIndex([list(range(adim)) for adim in adims])
         GK.secondary_multi_index = A0.secondary_multi_index
 
+
 def __compute_restrictions(term):
     """Compute restrictions corresponding to indices for given
     term. For indices at basis functions, we need to double the
@@ -98,10 +101,10 @@ def __compute_restrictions(term):
     new_idims = [i for i in idims]
     new_adims = [i for i in adims]
     for i in range(len(new_idims)):
-        if not restrictions[i] == None:
-            new_idims[i] = 2*new_idims[i]
+        if restrictions[i] is not None:
+            new_idims[i] = 2 * new_idims[i]
     for i in range(len(new_adims)):
-        if not restrictions[i + len(new_idims)] == None:
-            new_adims[i] = 2*new_adims[i]
+        if restrictions[i + len(new_idims)] is not None:
+            new_adims[i] = 2 * new_adims[i]
 
     return (restrictions, new_idims, new_adims)
diff --git a/ffc/tensor/tensorrepresentation.py b/ffc/tensor/tensorrepresentation.py
index 4f25dd3..d074383 100644
--- a/ffc/tensor/tensorrepresentation.py
+++ b/ffc/tensor/tensorrepresentation.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 """This module implements the representation of a multilinear form as
 a sum of tensor contractions.
 
@@ -25,7 +26,7 @@ might be (re-)implemented in a future version of FFC
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 #
 # Modified by Kristian B. Oelgaard, 2010.
-# Modified by Martin Alnaes, 2013
+# Modified by Martin Sandve Alnæs, 2013
 
 # FFC modules
 from ffc.log import info, error
@@ -38,10 +39,12 @@ from ffc.tensor.referencetensor import ReferenceTensor
 from ffc.tensor.geometrytensor import GeometryTensor
 from ffc.tensor.tensorreordering import reorder_entries
 
+
 def compute_integral_ir(itg_data,
                         form_data,
                         form_id,
                         element_numbers,
+                        classnames,
                         parameters):
     "Compute intermediate represention of integral."
 
@@ -65,11 +68,11 @@ def compute_integral_ir(itg_data,
 
     # Helper to simplify code below
     compute_terms = lambda i, j: _compute_terms(monomial_form,
-                                           i, j,
-                                           integral_type,
-                                           quadrature_degree,
-                                           quadrature_rule,
-                                           cell)
+                                                i, j,
+                                                integral_type,
+                                                quadrature_degree,
+                                                quadrature_rule,
+                                                cell)
 
     # Compute representation of cell tensor
     if integral_type == "cell":
@@ -96,6 +99,7 @@ def compute_integral_ir(itg_data,
 
     return ir
 
+
 def _compute_terms(monomial_form,
                    facet0, facet1,
                    integral_type,
diff --git a/ffc/ufc_include.py.in b/ffc/ufc_include.py.in
deleted file mode 100644
index d15de5b..0000000
--- a/ffc/ufc_include.py.in
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (C) 2016 Johannes Ring
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-
-def get_ufc_include():
-    """Return the include dir to ufc.h.
-
-    In this implementation, the value is computed at build time,
-    see setup.py.
-    """
-    return "@INSTALL_PREFIX/include"
diff --git a/ffc/ufc_signature.py.in b/ffc/ufc_signature.py.in
deleted file mode 100644
index 9e405be..0000000
--- a/ffc/ufc_signature.py.in
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (C) 2016 Jan Blechta
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-
-def ufc_signature():
-    """Return SHA-1 hash of the contents of ufc.h.
-
-    In this implementation, the value is computed at build time,
-    see setup.py.
-    """
-    return "@UFC_SIGNATURE"
diff --git a/uflacs/__init__.py b/ffc/uflacs/__init__.py
similarity index 76%
rename from uflacs/__init__.py
rename to ffc/uflacs/__init__.py
index fb851cb..07e1e02 100644
--- a/uflacs/__init__.py
+++ b/ffc/uflacs/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -19,6 +19,7 @@
 """This is UFLACS, the UFL Analyser and Compiler System."""
 
 __author__ = u"Martin Sandve Alnæs"
-__version__ = "2016.1.0"
-__date__ = '2016-06-23'
-__licence__ = 'LGPL v3'
+
+from ffc.uflacs.uflacsrepresentation import compute_integral_ir
+from ffc.uflacs.uflacsoptimization import optimize_integral_ir
+from ffc.uflacs.uflacsgenerator import generate_integral_code
diff --git a/uflacs/analysis/__init__.py b/ffc/uflacs/analysis/__init__.py
similarity index 94%
copy from uflacs/analysis/__init__.py
copy to ffc/uflacs/analysis/__init__.py
index 060bf68..152d70b 100644
--- a/uflacs/analysis/__init__.py
+++ b/ffc/uflacs/analysis/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
diff --git a/ffc/uflacs/analysis/balancing.py b/ffc/uflacs/analysis/balancing.py
new file mode 100644
index 0000000..6616da2
--- /dev/null
+++ b/ffc/uflacs/analysis/balancing.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
+#
+# This file is part of UFLACS.
+#
+# UFLACS is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UFLACS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UFLACS. If not, see <http://www.gnu.org/licenses/>
+
+"""Algorithms for the representation phase of the form compilation."""
+
+from ufl.classes import (ReferenceValue, ReferenceGrad, Grad,
+                         CellAvg, FacetAvg,
+                         PositiveRestricted, NegativeRestricted,
+                         Indexed)
+from ufl.corealg.multifunction import MultiFunction
+from ufl.corealg.map_dag import map_expr_dag
+
+
+modifier_precedence = [ReferenceValue, ReferenceGrad, Grad,
+                       CellAvg, FacetAvg,
+                       PositiveRestricted, NegativeRestricted,
+                       Indexed]
+
+modifier_precedence = { m._ufl_handler_name_: i for i, m in enumerate(modifier_precedence) }
+
+# TODO: Move this to ufl?
+# TODO: Add expr._ufl_modifier_precedence_ ? Add Terminal first and Operator last in the above list.
+
+
+def balance_modified_terminal(expr):
+    # NB! Assuminge e.g. grad(cell_avg(expr)) does not occur,
+    # i.e. it is simplified to 0 immediately.
+
+    if expr._ufl_is_terminal_:
+        return expr
+
+    assert expr._ufl_is_terminal_modifier_
+
+    orig = expr
+
+    # Build list of modifier layers
+    layers = [expr]
+    while not expr._ufl_is_terminal_:
+        if not expr._ufl_is_terminal_modifier_:
+            import IPython; IPython.embed()
+        assert expr._ufl_is_terminal_modifier_
+        expr = expr.ufl_operands[0]
+        layers.append(expr)
+    assert layers[-1] is expr
+    assert expr._ufl_is_terminal_
+
+    # Apply modifiers in order
+    layers = sorted(layers[:-1], key=lambda e: modifier_precedence[e._ufl_handler_name_])
+    for op in layers:
+        ops = (expr,) + op.ufl_operands[1:]
+        expr = op._ufl_expr_reconstruct_(*ops)
+
+    # Preserve id if nothing has changed
+    return orig if expr == orig else expr
+
+
+class BalanceModifiers(MultiFunction):
+
+    def expr(self, expr, *ops):
+        return expr._ufl_expr_reconstruct_(*ops)
+
+    def terminal(self, expr):
+        return expr
+
+    def _modifier(self, expr, *ops):
+        return balance_modified_terminal(expr)
+
+    reference_value = _modifier
+    reference_grad = _modifier
+    grad = _modifier
+    cell_avg = _modifier
+    facet_avg = _modifier
+    positive_restricted = _modifier
+    negative_restricted = _modifier
+
+
+def balance_modifiers(expr):
+    mf = BalanceModifiers()
+    return map_expr_dag(mf, expr)
diff --git a/uflacs/datastructures/crs.py b/ffc/uflacs/analysis/crsarray.py
similarity index 62%
rename from uflacs/datastructures/crs.py
rename to ffc/uflacs/analysis/crsarray.py
index 82d3679..af5b4e5 100644
--- a/uflacs/datastructures/crs.py
+++ b/ffc/uflacs/analysis/crsarray.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -18,20 +18,24 @@
 
 """Compressed row storage 'matrix' (actually just a non-rectangular 2d array)."""
 
-from six.moves import xrange as range
 import numpy
 
 
-class CRS(object):
+class CRSArray(object):
+    """An array of variable length dense arrays.
 
-    """A simple compressed row storage matrix.
-
-    This CRS variant doesn't have a sparsity pattern,
+    Stored efficiently with simple compressed row storage.
+    This CRS array variant doesn't have a sparsity pattern,
     as each row is simply a dense vector.
-    """
 
+    Values are stored in one flat array 'data[]',
+    and 'row_offsets[i]' contains the index to the first
+    element on row i for 0<=i<=num_rows.
+    There is no column index.
+    """
     def __init__(self, row_capacity, element_capacity, dtype):
-        self.row_offsets = numpy.zeros(row_capacity + 1, dtype=int)
+        itype = numpy.int16 if row_capacity < 2**15 else numpy.int32
+        self.row_offsets = numpy.zeros(row_capacity + 1, dtype=itype)
         self.data = numpy.zeros(element_capacity, dtype=dtype)
         self.num_rows = 0
 
@@ -57,29 +61,12 @@ class CRS(object):
         return self.num_rows
 
     def __str__(self):
-        return "[%s]" % (', '.join(str(row) for row in self),)
-
-
-def list_to_crs(elements):
-    "Construct a diagonal CRS matrix from a list of elements of the same type."
-    n = len(elements)
-    crs = CRS(n, n, type(elements[0]))
-    for element in elements:
-        crs.push_row((element,))
-    return crs
-
-
-def rows_dict_to_crs(rows, num_rows, num_elements, dtype):
-    "Construct a CRS matrix from a dict mapping row index to row elements list."
-    crs = CRS(num_rows, num_elements, dtype)
-    for i in range(num_rows):
-        crs.push_row(rows.get(i, ()))
-    return crs
-
-
-def rows_to_crs(rows, num_rows, num_elements, dtype):
-    "Construct a CRS matrix from a list of row element lists."
-    crs = CRS(num_rows, num_elements, dtype)
-    for row in rows:
-        crs.push_row(row)
-    return crs
+        return "[%s]" % ('\n'.join(str(row) for row in self),)
+
+    @classmethod
+    def from_rows(cls, rows, num_rows, num_elements, dtype):
+        "Construct a CRSArray from a list of row element lists."
+        crs = CRSArray(num_rows, num_elements, dtype)
+        for row in rows:
+            crs.push_row(row)
+        return crs
diff --git a/uflacs/analysis/expr_shapes.py b/ffc/uflacs/analysis/expr_shapes.py
similarity index 87%
rename from uflacs/analysis/expr_shapes.py
rename to ffc/uflacs/analysis/expr_shapes.py
index ca1704b..3250fe9 100644
--- a/uflacs/analysis/expr_shapes.py
+++ b/ffc/uflacs/analysis/expr_shapes.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -22,13 +22,13 @@ The total shape is the regular shape tuple plus the index shape tuple.
 The index shape tuple is the tuple of index dimensions of the free indices
 of the expression, sorted by the count of the free indices.
 
-The total shape of a tensor valued expression A and A[*indices(len(A.ufl_shape))]
-is therefore the same.
+The total shape of a tensor valued expression ``A`` and
+``A[*indices(len(A.ufl_shape))]`` is therefore the same.
 """
 
 
 def compute_index_shape(v):
-    "Compute the 'index shape' of v."
+    """Compute the 'index shape' of v."""
     return v.ufl_index_dimensions
 
 
diff --git a/uflacs/analysis/factorization.py b/ffc/uflacs/analysis/factorization.py
similarity index 69%
rename from uflacs/analysis/factorization.py
rename to ffc/uflacs/analysis/factorization.py
index c5c7b6d..22dd5ea 100644
--- a/uflacs/analysis/factorization.py
+++ b/ffc/uflacs/analysis/factorization.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -18,8 +18,12 @@
 
 """Algorithms for factorizing argument dependent monomials."""
 
+import numpy
+
 from six import itervalues, iterkeys, iteritems
 from six.moves import xrange as range
+from itertools import chain
+
 from ufl import as_ufl, conditional
 from ufl.classes import Argument
 from ufl.classes import Division
@@ -27,12 +31,12 @@ from ufl.classes import Product
 from ufl.classes import Sum
 from ufl.classes import Conditional
 from ufl.classes import Zero
+from ufl.algorithms import extract_type
 
-from ffc.log import ffc_assert, error
+from ffc.log import error
 
-from uflacs.datastructures.arrays import int_array, object_array
-from uflacs.analysis.graph_dependencies import compute_dependencies
-from uflacs.analysis.modified_terminals import analyse_modified_terminal, strip_modified_terminal
+from ffc.uflacs.analysis.graph_dependencies import compute_dependencies
+from ffc.uflacs.analysis.modified_terminals import analyse_modified_terminal, strip_modified_terminal
 
 
 def _build_arg_sets(V):
@@ -62,17 +66,7 @@ def _build_argument_indices_from_arg_sets(V, arg_sets):
     def arg_ordering_key(i):
         "Return a key for sorting argument vertex indices based on the properties of the modified terminal."
         mt = analyse_modified_terminal(arg_ordering_key.V[i])
-        arg = mt.terminal
-        assert isinstance(arg, Argument)
-        assert arg.number() >= 0
-        return (arg.number(),
-                arg.part(),
-                mt.reference_value,
-                mt.component,
-                mt.global_derivatives,
-                mt.local_derivatives,
-                mt.restriction,
-                mt.averaged)
+        return mt.argument_ordering_key()
     arg_ordering_key.V = V
     ordered_arg_indices = sorted(arg_indices, key=arg_ordering_key)
 
@@ -89,7 +83,7 @@ def build_argument_indices(V):
 def build_argument_dependencies(dependencies, arg_indices):
     "Preliminary algorithm: build list of argument vertex indices each vertex (indirectly) depends on."
     n = len(dependencies)
-    A = [[] for i in range(n)]  # TODO: Use array
+    A = numpy.empty(n, dtype=object)
     for i, deps in enumerate(dependencies):
         argdeps = []
         for j in deps:
@@ -124,18 +118,18 @@ def add_to_fv(expr, FV, e2fi):
 noargs = {}
 
 
-def handle_modified_terminal(i, v, F, FV, e2fi, arg_indices, AV, sv2av):
+def handle_modified_terminal(si, v, SV_factors, FV, e2fi, arg_indices, AV, sv2av):
     # v is a modified terminal...
-    if i in arg_indices:
+    if si in arg_indices:
         # ... a modified Argument
-        argkey = (i,)
+        argkey = (si,)
         fi = None
 
         # Adding 1 as an expression allows avoiding special representation by representing "v" as "1*v"
         one = add_to_fv(as_ufl(1.0), FV, e2fi)
         factors = {argkey: one}
 
-        assert AV[sv2av[i]] == v
+        assert AV[sv2av[si]] == v
     else:
         # ... record a non-argument modified terminal
         factors = noargs
@@ -143,10 +137,11 @@ def handle_modified_terminal(i, v, F, FV, e2fi, arg_indices, AV, sv2av):
     return fi, factors
 
 
-def handle_sum(i, v, deps, F, FV, sv2fv, e2fi):
-    ffc_assert(len(deps) == 2, "Assuming binary sum here. This can be fixed if needed.")
-    fac0 = F[deps[0]]
-    fac1 = F[deps[1]]
+def handle_sum(si, v, deps, SV_factors, FV, sv2fv, e2fi):
+    if len(deps) != 2:
+        error("Assuming binary sum here. This can be fixed if needed.")
+    fac0 = SV_factors[deps[0]]
+    fac1 = SV_factors[deps[1]]
 
     argkeys = sorted(set(iterkeys(fac0)) | set(iterkeys(fac1)))
 
@@ -155,7 +150,8 @@ def handle_sum(i, v, deps, F, FV, sv2fv, e2fi):
         fi = None
         factors = {}
         for argkey in argkeys:
-            ffc_assert(len(argkey) == keylen, "Expecting equal argument rank terms among summands.")
+            if len(argkey) != keylen:
+                error("Expecting equal argument rank terms among summands.")
 
             fi0 = fac0.get(argkey)
             fi1 = fac1.get(argkey)
@@ -176,10 +172,11 @@ def handle_sum(i, v, deps, F, FV, sv2fv, e2fi):
     return fi, factors
 
 
-def handle_product(i, v, deps, F, FV, sv2fv, e2fi):
-    ffc_assert(len(deps) == 2, "Assuming binary product here. This can be fixed if needed.")
-    fac0 = F[deps[0]]
-    fac1 = F[deps[1]]
+def handle_product(si, v, deps, SV_factors, FV, sv2fv, e2fi):
+    if len(deps) != 2:
+        error("Assuming binary product here. This can be fixed if needed.")
+    fac0 = SV_factors[deps[0]]
+    fac1 = SV_factors[deps[1]]
 
     if not fac0 and not fac1:  # non-arg * non-arg
         # Record non-argument product
@@ -222,9 +219,9 @@ def handle_product(i, v, deps, F, FV, sv2fv, e2fi):
     return fi, factors
 
 
-def handle_division(i, v, deps, F, FV, sv2fv, e2fi):
-    fac0 = F[deps[0]]
-    fac1 = F[deps[1]]
+def handle_division(si, v, deps, SV_factors, FV, sv2fv, e2fi):
+    fac0 = SV_factors[deps[0]]
+    fac1 = SV_factors[deps[1]]
     assert not fac1, "Cannot divide by arguments."
 
     if fac0:  # arg / non-arg
@@ -244,10 +241,10 @@ def handle_division(i, v, deps, F, FV, sv2fv, e2fi):
     return fi, factors
 
 
-def handle_conditional(i, v, deps, F, FV, sv2fv, e2fi):
-    fac0 = F[deps[0]]
-    fac1 = F[deps[1]]
-    fac2 = F[deps[2]]
+def handle_conditional(si, v, deps, SV_factors, FV, sv2fv, e2fi):
+    fac0 = SV_factors[deps[0]]
+    fac1 = SV_factors[deps[1]]
+    fac2 = SV_factors[deps[2]]
     assert not fac0, "Cannot have argument in condition."
 
     if not (fac1 or fac2):  # non-arg ? non-arg : non-arg
@@ -269,7 +266,7 @@ def handle_conditional(i, v, deps, F, FV, sv2fv, e2fi):
         factors = {}
 
         z = as_ufl(0.0)
-        zfi = add_to_fv(z, FV, e2fi)
+        zfi = add_to_fv(z, FV, e2fi)  # TODO: flake8 complains zfi is unused, is that ok?
 
         # In general, can decompose like this:
         #    conditional(c, sum_i fi*ui, sum_j fj*uj) -> sum_i conditional(c, fi, 0)*ui + sum_j conditional(c, 0, fj)*uj
@@ -284,9 +281,9 @@ def handle_conditional(i, v, deps, F, FV, sv2fv, e2fi):
     return fi, factors
 
 
-def handle_operator(i, v, deps, F, FV, sv2fv, e2fi):
+def handle_operator(si, v, deps, SV_factors, FV, sv2fv, e2fi):
     # Error checking
-    if any(F[deps[j]] for j in range(len(deps))):
+    if any(SV_factors[deps[j]] for j in range(len(deps))):
         error("Assuming that a {0} cannot be applied to arguments. If this is wrong please report a bug.".format(type(v)))
     # Record non-argument subexpression
     fi = add_to_fv(v, FV, e2fi)
@@ -294,7 +291,7 @@ def handle_operator(i, v, deps, F, FV, sv2fv, e2fi):
     return fi, factors
 
 
-def collect_argument_factors(SV, dependencies, arg_indices):
+def compute_argument_factorization(SV, SV_deps, SV_targets, rank):
     """Factorizes a scalar expression graph w.r.t. scalar Argument
     components.
 
@@ -320,56 +317,58 @@ def collect_argument_factors(SV, dependencies, arg_indices):
 
         This mapping represents the factorization of SV[-1] w.r.t. Arguments s.t.:
 
-          SV[-1] := sum(FV[fik] * product(AV[j] for j in aik) for aik, fik in IM.items())
+          SV[-1] := sum(FV[fik] * product(AV[ai] for ai in aik) for aik, fik in IM.items())
 
         where := means equivalence in the mathematical sense,
         of course in a different technical representation.
 
     """
     # Extract argument component subgraph
-    AV = [SV[j] for j in arg_indices]
-    av2sv = arg_indices
-    sv2av = dict((j, i) for i, j in enumerate(arg_indices))
-    assert all(AV[i] == SV[j] for i, j in enumerate(arg_indices))
-    assert all(AV[i] == SV[j] for j, i in iteritems(sv2av))
+    arg_indices = build_argument_indices(SV)
+    #A = build_argument_dependencies(SV_deps, arg_indices)
+    AV = [SV[si] for si in arg_indices]
+    #av2sv = arg_indices
+    sv2av = { si: ai for ai, si in enumerate(arg_indices) }
+    assert all(AV[ai] == SV[si] for ai, si in enumerate(arg_indices))
+    assert all(AV[ai] == SV[si] for si, ai in iteritems(sv2av))
 
     # Data structure for building non-argument factors
     FV = []
     e2fi = {}
 
     # Hack to later build dependencies for the FV entries that change K*K -> K**2
-    two = add_to_fv(as_ufl(2), FV, e2fi)
+    two = add_to_fv(as_ufl(2), FV, e2fi)  # FIXME: Might need something more robust here
 
     # Intermediate factorization for each vertex in SV on the format
-    # F[i] = None # if SV[i] does not depend on arguments
-    # F[i] = { argkey: fi } # if SV[i] does depend on arguments, where:
-    #   FV[fi] is the expression SV[i] with arguments factored out
-    #   argkey is a tuple with indices into SV for each of the argument components SV[i] depends on
-    # F[i] = { argkey1: fi1, argkey2: fi2, ... } # if SV[i] is a linear combination of multiple argkey configurations
-    F = object_array(len(SV))  # TODO: Use some CRS based format?
-    sv2fv = int_array(len(SV))
+    # SV_factors[si] = None # if SV[si] does not depend on arguments
+    # SV_factors[si] = { argkey: fi } # if SV[si] does depend on arguments, where:
+    #   FV[fi] is the expression SV[si] with arguments factored out
+    #   argkey is a tuple with indices into SV for each of the argument components SV[si] depends on
+    # SV_factors[si] = { argkey1: fi1, argkey2: fi2, ... } # if SV[si] is a linear combination of multiple argkey configurations
+    SV_factors = numpy.empty(len(SV), dtype=object)
+    sv2fv = numpy.zeros(len(SV), dtype=int)
 
     # Factorize each subexpression in order:
-    for i, v in enumerate(SV):
-        deps = dependencies[i]
+    for si, v in enumerate(SV):
+        deps = SV_deps[si]
 
-        # These handlers insert values in sv2fv and F
+        # These handlers insert values in sv2fv and SV_factors
         if not len(deps):
-            fi, factors = handle_modified_terminal(i, v, F, FV, e2fi, arg_indices, AV, sv2av)
+            fi, factors = handle_modified_terminal(si, v, SV_factors, FV, e2fi, arg_indices, AV, sv2av)
         elif isinstance(v, Sum):
-            fi, factors = handle_sum(i, v, deps, F, FV, sv2fv, e2fi)
+            fi, factors = handle_sum(si, v, deps, SV_factors, FV, sv2fv, e2fi)
         elif isinstance(v, Product):
-            fi, factors = handle_product(i, v, deps, F, FV, sv2fv, e2fi)
+            fi, factors = handle_product(si, v, deps, SV_factors, FV, sv2fv, e2fi)
         elif isinstance(v, Division):
-            fi, factors = handle_division(i, v, deps, F, FV, sv2fv, e2fi)
+            fi, factors = handle_division(si, v, deps, SV_factors, FV, sv2fv, e2fi)
         elif isinstance(v, Conditional):
-            fi, factors = handle_conditional(i, v, deps, F, FV, sv2fv, e2fi)
+            fi, factors = handle_conditional(si, v, deps, SV_factors, FV, sv2fv, e2fi)
         else:  # All other operators
-            fi, factors = handle_operator(i, v, deps, F, FV, sv2fv, e2fi)
+            fi, factors = handle_operator(si, v, deps, SV_factors, FV, sv2fv, e2fi)
 
         if fi is not None:
-            sv2fv[i] = fi
-        F[i] = factors
+            sv2fv[si] = fi
+        SV_factors[si] = factors
 
     assert not noargs, "This dict was not supposed to be filled with anything!"
 
@@ -377,18 +376,36 @@ def collect_argument_factors(SV, dependencies, arg_indices):
     # FV = FV[:len(e2fi)]
     assert len(FV) == len(e2fi)
 
-    # Get the factorization of the final value # TODO: Support simultaneous factorization of multiple integrands?
-    IM = F[-1]
-
-    # Map argkeys from indices into SV to indices into AV, and resort keys for canonical representation
-    IM = dict((tuple(sorted(sv2av[j] for j in argkey)), fi) for argkey, fi in iteritems(IM))
+    # Get the factorizations of the target values
+    IMs = []
+    for si in SV_targets:
+        if SV_factors[si] == {}:
+            if rank == 0:
+                # Functionals and expressions: store as no args * factor
+                factors = { (): sv2fv[si] }
+            else:
+                # Zero form of arity 1 or higher: make factors empty
+                factors = {}
+        else:
+            # Forms of arity 1 or higher:
+            # Map argkeys from indices into SV to indices into AV,
+            # and resort keys for canonical representation
+            factors = { tuple(sorted(sv2av[si] for si in argkey)): fi
+                        for argkey, fi in SV_factors[si].items() }
+        # Expecting all term keys to have length == rank
+        # (this assumption will eventually have to change if we
+        # implement joint bilinear+linear form factorization here)
+        assert all(len(k) == rank for k in factors)
+        IMs.append(factors)
+
+    # Recompute dependencies in FV
+    FV_deps = compute_dependencies(e2fi, FV)
 
-    # If this is a non-argument expression, point to the expression from IM (not sure if this is useful)
-    if any([not AV, not IM, not arg_indices]):
-        assert all([not AV, not IM, not arg_indices])
-        IM = {(): len(FV) - 1}
+    # Indices into FV that are needed for final result
+    FV_targets = list(chain(sorted(IM.values())
+                            for IM in IMs))
 
-    return FV, e2fi, AV, IM
+    return IMs, AV, FV, FV_deps, FV_targets
 
 
 def rebuild_scalar_graph_from_factorization(AV, FV, IM):
@@ -437,29 +454,3 @@ def rebuild_scalar_graph_from_factorization(AV, FV, IM):
     dependencies = compute_dependencies(se2i, SV)
 
     return SV, se2i, dependencies
-
-
-def compute_argument_factorization(SV, target_variables, dependencies):
-
-    # TODO: Use target_variables! Currently just assuming the last vertex is the target here...
-
-    if list(target_variables) != [len(SV) - 1]:
-        ffc_assert(not extract_type(SV[-1], Argument),
-                   "Multiple or nonscalar Argument dependent expressions not supported in factorization.")
-        AV = []
-        FV = SV
-        IM = {}
-        return AV, FV, IM, target_variables, dependencies
-
-    assert list(target_variables) == [len(SV) - 1]
-
-    arg_indices = build_argument_indices(SV)
-    #A = build_argument_dependencies(dependencies, arg_indices)
-    FV, e2fi, AV, IM = collect_argument_factors(SV, dependencies, arg_indices)
-
-    # Indices into FV that are needed for final result
-    target_variables = sorted(itervalues(IM))
-
-    dependencies = compute_dependencies(e2fi, FV)
-
-    return IM, AV, FV, target_variables, dependencies
diff --git a/uflacs/analysis/graph.py b/ffc/uflacs/analysis/graph.py
similarity index 88%
rename from uflacs/analysis/graph.py
rename to ffc/uflacs/analysis/graph.py
index 33544e0..021676b 100644
--- a/uflacs/analysis/graph.py
+++ b/ffc/uflacs/analysis/graph.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -18,8 +18,8 @@
 
 """Linearized data structure for the computational graph."""
 
-from uflacs.analysis.graph_vertices import build_graph_vertices
-from uflacs.analysis.graph_symbols import build_graph_symbols
+from ffc.uflacs.analysis.graph_vertices import build_graph_vertices
+from ffc.uflacs.analysis.graph_symbols import build_graph_symbols
 
 
 class Graph2(object):
diff --git a/uflacs/analysis/graph_dependencies.py b/ffc/uflacs/analysis/graph_dependencies.py
similarity index 79%
rename from uflacs/analysis/graph_dependencies.py
rename to ffc/uflacs/analysis/graph_dependencies.py
index c3b7259..4cc04ea 100644
--- a/uflacs/analysis/graph_dependencies.py
+++ b/ffc/uflacs/analysis/graph_dependencies.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -19,23 +19,20 @@
 """Tools for analysing dependencies within expression graphs."""
 
 import numpy
+
 from six.moves import xrange as range
-from ufl.classes import Terminal
 
-from uflacs.datastructures.types import sufficient_int_type, sufficient_uint_type
-from uflacs.datastructures.arrays import bool_array
-from uflacs.datastructures.arrays import object_array
-from uflacs.datastructures.crs import CRS, rows_to_crs
+from ffc.uflacs.analysis.crsarray import CRSArray
 
 
 def compute_dependencies(e2i, V, ignore_terminal_modifiers=True):
     # Use numpy int type sufficient to hold num_rows
     num_rows = len(V)
-    dtype = sufficient_int_type(num_rows)
+    itype = numpy.int16 if num_rows < 2**15 else numpy.int32
 
-    # Preallocate CRS matrix of sufficient capacity
+    # Preallocate CRSArray matrix of sufficient capacity
     num_nonzeros = sum(len(v.ufl_operands) for v in V)
-    dependencies = CRS(num_rows, num_nonzeros, dtype)
+    dependencies = CRSArray(num_rows, num_nonzeros, itype)
     for v in V:
         if v._ufl_is_terminal_ or (ignore_terminal_modifiers and v._ufl_is_terminal_modifier_):
             dependencies.push_row(())
@@ -49,7 +46,7 @@ def mark_active(dependencies, targets):
     """Return an array marking the recursive dependencies of targets.
 
     Input:
-    - dependencies - CRS of ints, a mapping from a symbol to the symbols of its dependencies.
+    - dependencies - CRSArray of ints, a mapping from a symbol to the symbols of its dependencies.
     - targets      - Sequence of symbols to mark the dependencies of.
 
     Output:
@@ -59,7 +56,7 @@ def mark_active(dependencies, targets):
     n = len(dependencies)
 
     # Initial state where nothing is marked as used
-    active = bool_array(n)
+    active = numpy.zeros(n, dtype=numpy.int8)
     num_used = 0
 
     # Seed with initially used symbols
@@ -79,7 +76,7 @@ def mark_image(inverse_dependencies, sources):
     """Return an array marking the set of symbols dependent on the sources.
 
     Input:
-    - dependencies - CRS of ints, a mapping from a symbol to the symbols of its dependencies.
+    - dependencies - CRSArray of ints, a mapping from a symbol to the symbols of its dependencies.
     - sources      - Sequence of symbols to mark the dependants of.
 
     Output:
@@ -89,7 +86,7 @@ def mark_image(inverse_dependencies, sources):
     n = len(inverse_dependencies)
 
     # Initial state where nothing is marked as used
-    image = bool_array(n)
+    image = numpy.zeros(n, dtype=numpy.int8)
     num_used = 0
 
     # Seed with initially used symbols
diff --git a/uflacs/analysis/graph_rebuild.py b/ffc/uflacs/analysis/graph_rebuild.py
similarity index 72%
rename from uflacs/analysis/graph_rebuild.py
rename to ffc/uflacs/analysis/graph_rebuild.py
index 3ce2a95..5361f57 100644
--- a/uflacs/analysis/graph_rebuild.py
+++ b/ffc/uflacs/analysis/graph_rebuild.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -18,8 +18,11 @@
 
 """Rebuilding UFL expressions from linearized representation of computational graph."""
 
+import numpy
+
 from six.moves import zip
 from six.moves import xrange as range
+
 from ufl import product
 from ufl.permutation import compute_indices
 
@@ -27,11 +30,9 @@ from ufl import as_vector
 from ufl.classes import MultiIndex, IndexSum, Product
 from ufl.corealg.multifunction import MultiFunction
 from ufl.utils.indexflattening import flatten_multiindex, shape_to_strides
-from ufl.utils.sorting import sorted_by_count
 
-from ffc.log import error, ffc_assert
-from uflacs.datastructures.arrays import object_array
-from uflacs.analysis.modified_terminals import is_modified_terminal
+from ffc.log import error
+from ffc.uflacs.analysis.modified_terminals import is_modified_terminal
 
 
 class ReconstructScalarSubexpressions(MultiFunction):
@@ -60,7 +61,8 @@ class ReconstructScalarSubexpressions(MultiFunction):
     variable = unexpected
 
     def scalar_nary(self, o, ops):
-        ffc_assert(o.ufl_shape == (), "Expecting scalar.")
+        if o.ufl_shape != ():
+            error("Expecting scalar.")
         sops = [op[0] for op in ops]
         return [o._ufl_expr_reconstruct_(*sops)]
 
@@ -83,36 +85,39 @@ class ReconstructScalarSubexpressions(MultiFunction):
         # A condition can be non scalar
         symbols = []
         n = len(ops[1])
-        ffc_assert(len(ops[0]) == 1, "Condition should be scalar.")
-        ffc_assert(n == len(ops[2]), "Conditional branches should have same shape.")
+        if len(ops[0]) != 1:
+            error("Condition should be scalar.")
+        if n != len(ops[2]):
+            error("Conditional branches should have same shape.")
         for i in range(len(ops[1])):
             sops = (ops[0][0], ops[1][i], ops[2][i])
             symbols.append(o._ufl_expr_reconstruct_(*sops))
         return symbols
 
     def division(self, o, ops):
-        ffc_assert(len(ops) == 2, "Expecting two operands.")
-        ffc_assert(len(ops[1]) == 1, "Expecting scalar divisor.")
+        if len(ops) != 2:
+            error("Expecting two operands.")
+        if len(ops[1]) != 1:
+            error("Expecting scalar divisor.")
         b, = ops[1]
         return [o._ufl_expr_reconstruct_(a, b) for a in ops[0]]
 
     def sum(self, o, ops):
-        ffc_assert(len(ops) == 2, "Expecting two operands.")
-        ffc_assert(len(ops[0]) == len(ops[1]), "Expecting scalar divisor.")
+        if len(ops) != 2:
+            error("Expecting two operands.")
+        if len(ops[0]) != len(ops[1]):
+            error("Expecting scalar divisor.")
         return [o._ufl_expr_reconstruct_(a, b) for a, b in zip(ops[0], ops[1])]
 
     def product(self, o, ops):
-        ffc_assert(len(ops) == 2, "Expecting two operands.")
+        if len(ops) != 2:
+            error("Expecting two operands.")
 
         # Get the simple cases out of the way
-        na = len(ops[0])
-        nb = len(ops[1])
-
-        if na == 1:  # True scalar * something
+        if len(ops[0]) == 1:  # True scalar * something
             a, = ops[0]
             return [Product(a, b) for b in ops[1]]
-
-        if nb == 1:  # Something * true scalar
+        elif len(ops[1]) == 1:  # Something * true scalar
             b, = ops[1]
             return [Product(a, b) for a in ops[0]]
 
@@ -161,7 +166,8 @@ class ReconstructScalarSubexpressions(MultiFunction):
         # flattened total component of indexsum o by removing
         # axis corresponding to summation index ii.
         ss = ops[0]  # Scalar subexpressions of summand
-        ffc_assert(len(ss) == predim * postdim * d, "Mismatching number of subexpressions.")
+        if len(ss) != predim * postdim * d:
+            error("Mismatching number of subexpressions.")
         sops = []
         for i in range(predim):
             iind = i * (postdim * d)
@@ -170,7 +176,7 @@ class ReconstructScalarSubexpressions(MultiFunction):
                 sops.append([ss[ind + j * postdim] for j in range(d)])
 
         # For each scalar output component, sum over collected subcomponents
-        # TODO: Need to split this into binary additions to work with future CRS format,
+        # TODO: Need to split this into binary additions to work with future CRSArray format,
         #       i.e. emitting more expressions than there are symbols for this node.
         results = [sum(sop) for sop in sops]
         return results
@@ -192,7 +198,7 @@ def rebuild_expression_from_graph(G):
         return as_vector(w)  # TODO: Consider shape of initial v
 
 
-def rebuild_with_scalar_subexpressions(G):
+def rebuild_with_scalar_subexpressions(G, targets=None):
     """Build a new expression2index mapping where each subexpression is scalar valued.
 
     Input:
@@ -221,11 +227,10 @@ def rebuild_with_scalar_subexpressions(G):
     reconstruct_scalar_subexpressions = ReconstructScalarSubexpressions()
 
     # Array to store the scalar subexpression in for each symbol
-    W = object_array(G.total_unique_symbols)
+    W = numpy.empty(G.total_unique_symbols, dtype=object)
 
     # Iterate over each graph node in order
     for i, v in enumerate(G.V):
-
         # Find symbols of v components
         vs = G.V_symbols[i]
 
@@ -234,36 +239,38 @@ def rebuild_with_scalar_subexpressions(G):
             continue
 
         if is_modified_terminal(v):
-
-            # ffc_assert(v.ufl_free_indices == (), "Expecting no free indices.")
-
+            # if v.ufl_free_indices:
+            #     error("Expecting no free indices.")
             sh = v.ufl_shape
-
             if sh:
-                # Store each terminal expression component (we may not actually need all of these later!)
+                # Store each terminal expression component.
+                # We may not actually need all of these later,
+                # but that will be optimized away.
+                # Note: symmetries will be dealt with in the value numbering.
                 ws = [v[c] for c in compute_indices(sh)]
-                # FIXME: How does this fit in with modified terminals with symmetries?
-
             else:
                 # Store single modified terminal expression component
-                ffc_assert(len(vs) == 1, "Expecting single symbol for scalar valued modified terminal.")
+                if len(vs) != 1:
+                    error("Expecting single symbol for scalar valued modified terminal.")
                 ws = [v]
-
+            # FIXME: Replace ws[:] with 0's if its table is empty
+            # Possible redesign: loop over modified terminals only first,
+            # then build tables for them, set W[s] = 0.0 for modified terminals with zero table,
+            # then loop over non-(modified terminal)s to reconstruct expression.
         else:
-
             # Find symbols of operands
             sops = []
             for j, vop in enumerate(v.ufl_operands):
-                if isinstance(vop, MultiIndex):  # TODO: Store MultiIndex in G.V and allocate a symbol to it for this to work
+                if isinstance(vop, MultiIndex):
+                    # TODO: Store MultiIndex in G.V and allocate a symbol to it for this to work
                     if not isinstance(v, IndexSum):
                         error("Not expecting a %s." % type(v))
-                    so = ()
+                    sops.append(())
                 else:
-                    k = G.e2i[vop]
-                    # TODO: Build edge datastructure and use this instead?
+                    # TODO: Build edge datastructure and use instead?
                     # k = G.E[i][j]
-                    so = G.V_symbols[k]
-                sops.append(so)
+                    k = G.e2i[vop]
+                    sops.append(G.V_symbols[k])
 
             # Fetch reconstructed operand expressions
             wops = [tuple(W[k] for k in so) for so in sops]
@@ -272,18 +279,32 @@ def rebuild_with_scalar_subexpressions(G):
             ws = reconstruct_scalar_subexpressions(v, wops)
 
             # Store all scalar subexpressions for v symbols
-            ffc_assert(len(vs) == len(ws), "Expecting one symbol for each expression.")
+            if len(vs) != len(ws):
+                error("Expecting one symbol for each expression.")
 
         # Store each new scalar subexpression in W at the index of its symbol
+        handled = set()
         for s, w in zip(vs, ws):
-            W[s] = w
-
-    # Find symbols of final v from input graph
-    vs = G.V_symbols[G.nv - 1]  # TODO: This is easy to extend to multiple 'final v'
-
-    # Sanity check: assert that we've handled these symbols
-    ffc_assert(all(W[s] is not None for s in vs),
-               "Expecting that all symbols in vs are handled at this point.")
+            if W[s] is None:
+                W[s] = w
+                handled.add(s)
+            else:
+                assert s in handled  # Result of symmetry!
+
+    # Find symbols of requested targets or final v from input graph
+    if targets is None:
+        targets = [G.V[-1]]
+
+    # Attempt to extend this to multiple target expressions
+    scalar_target_expressions = []
+    for target in targets:
+        ti = G.e2i[target]
+        vs = G.V_symbols[ti]
+        # Sanity check: assert that we've handled these symbols
+        if any(W[s] is None for s in vs):
+            error("Expecting that all symbols in vs are handled at this point.")
+        scalar_target_expressions.append([W[s] for s in vs])
 
     # Return the scalar expressions for each of the components
-    return [W[s] for s in vs]
+    assert len(scalar_target_expressions) == 1  # TODO: Currently expected by callers, fix those first
+    return scalar_target_expressions[0]  # ... TODO: then return list
diff --git a/uflacs/analysis/graph_ssa.py b/ffc/uflacs/analysis/graph_ssa.py
similarity index 92%
rename from uflacs/analysis/graph_ssa.py
rename to ffc/uflacs/analysis/graph_ssa.py
index 8c33c43..36a7cd2 100644
--- a/uflacs/analysis/graph_ssa.py
+++ b/ffc/uflacs/analysis/graph_ssa.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -18,15 +18,18 @@
 
 """Algorithms for working with computational graphs."""
 
+import numpy
+
 from six.moves import xrange as range
+
 from ufl.classes import (GeometricQuantity, ConstantValue,
                          Argument, Coefficient,
                          Grad, Restricted, Indexed,
                          MathFunction)
 from ufl.checks import is_cellwise_constant
 from ffc.log import error
-from uflacs.datastructures.arrays import int_array
-from uflacs.datastructures.crs import rows_to_crs
+
+from ffc.uflacs.analysis.crsarray import CRSArray
 
 
 def default_partition_seed(expr, rank):
@@ -76,7 +79,7 @@ def mark_partitions(V, active, dependencies, rank,
     Input:
     - V            - Array of expressions.
     - active       - Boolish array.
-    - dependencies - CRS with V dependencies.
+    - dependencies - CRSArray with V dependencies.
     - partition_seed - Policy for determining the partition of a terminalish.
     - partition_combiner - Policy for determinging the partition of an operator.
 
@@ -86,7 +89,7 @@ def mark_partitions(V, active, dependencies, rank,
     n = len(V)
     assert len(active) == n
     assert len(dependencies) == n
-    partitions = int_array(n)
+    partitions = numpy.zeros(n, dtype=int)
     for i, v in enumerate(V):
         deps = dependencies[i]
         if active[i]:
@@ -124,7 +127,7 @@ def build_factorized_partitions():
 def compute_dependency_count(dependencies):
     """FIXME: Test"""
     n = len(dependencies)
-    depcount = int_array(n)
+    depcount = numpy.zeros(n, dtype=int)
     for i in range(n):
         for d in dependencies[i]:
             depcount[d] += 1
@@ -139,7 +142,7 @@ def invert_dependencies(dependencies, depcount):
     for i in range(n):
         for d in dependencies[i]:
             invdeps[d] = invdeps[d] + (i,)
-    return rows_to_crs(invdeps, n, m, int)
+    return CRSArray.from_rows(invdeps, n, m, int)
 
 
 def default_cache_score_policy(vtype, ndeps, ninvdeps, partition):
@@ -171,7 +174,7 @@ def compute_cache_scores(V, active, dependencies, inverse_dependencies, partitio
     TODO: Experiment with heuristics later when we have functional code generation.
     """
     n = len(V)
-    score = int_array(n)
+    score = numpy.zeros(n, dtype=int)
     for i, v in enumerate(V):
         if active[i]:
             deps = dependencies[i]
@@ -203,9 +206,9 @@ def allocate_registers(active, partitions, targets,
     num_targets = len(targets)
 
     # Analyse scores
-    min_score = min(scores)
-    max_score = max(scores)
-    mean_score = sum(scores) // n
+    #min_score = min(scores)
+    #max_score = max(scores)
+    #mean_score = sum(scores) // n
 
     # Can allocate a number of registers up to given threshold
     num_to_allocate = max(num_targets,
@@ -237,16 +240,16 @@ def allocate_registers(active, partitions, targets,
     assert registers_used <= max(max_registers, len(targets))
 
     # Mark allocations
-    allocations = int_array(n)
+    allocations = numpy.zeros(n, dtype=int)
     allocations[:] = -1
     for r, i in enumerate(sorted(to_allocate)):
         allocations[i] = r
 
     # Possible data structures for improved register allocations
-    # register_status = int_array(max_registers)
+    # register_status = numpy.zeros(max_registers, dtype=int)
 
     # Stack/set of free registers (should wrap in stack abstraction):
-    # free_registers = int_array(max_registers)
+    # free_registers = numpy.zeros(max_registers, dtype=int)
     # num_free_registers = max_registers
     # free_registers[:] = reversed(xrange(max_registers))
 
diff --git a/uflacs/analysis/graph_symbols.py b/ffc/uflacs/analysis/graph_symbols.py
similarity index 76%
rename from uflacs/analysis/graph_symbols.py
rename to ffc/uflacs/analysis/graph_symbols.py
index 6d390c0..df504ba 100644
--- a/uflacs/analysis/graph_symbols.py
+++ b/ffc/uflacs/analysis/graph_symbols.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -18,13 +18,12 @@
 
 """Assigning symbols to computational graph nodes."""
 
+import numpy
 from ufl import product
 
-
-from uflacs.datastructures.arrays import int_array, object_array
-from uflacs.datastructures.crs import CRS, rows_to_crs
-from uflacs.analysis.valuenumbering import ValueNumberer
-from uflacs.analysis.expr_shapes import total_shape
+from ffc.uflacs.analysis.crsarray import CRSArray
+from ffc.uflacs.analysis.valuenumbering import ValueNumberer
+from ffc.uflacs.analysis.expr_shapes import total_shape
 
 
 def build_node_shapes(V):
@@ -32,29 +31,29 @@ def build_node_shapes(V):
 
     V is an array of ufl expressions, possibly nonscalar and with free indices.
 
-    Returning a CRS where row i is the total shape of V[i].
+    Returning a CRSArray where row i is the total shape of V[i].
     """
-    # Dimensions of returned CRS
+    # Dimensions of returned CRSArray
     nv = len(V)
     k = 0
 
     # Store shapes intermediately in an array of tuples
-    V_shapes = object_array(nv)
+    V_shapes = numpy.empty(nv, dtype=object)
     for i, v in enumerate(V):
         # Compute total shape of V[i]
         tsh = total_shape(v)
         V_shapes[i] = tsh
-        # Count number of elements for CRS representation
+        # Count number of elements for CRSArray representation
         k += len(tsh)
 
-    # Return a more memory efficient CRS representation
-    return rows_to_crs(V_shapes, nv, k, int)
+    # Return a more memory efficient CRSArray representation
+    return CRSArray.from_rows(V_shapes, nv, k, int)
 
 
 def build_node_sizes(V_shapes):
     "Compute all the products of a sequence of shapes."
     nv = len(V_shapes)
-    V_sizes = int_array(nv)
+    V_sizes = numpy.zeros(nv, dtype=int)
     for i, sh in enumerate(V_shapes):
         V_sizes[i] = product(sh)
     return V_sizes
@@ -64,14 +63,14 @@ def build_node_symbols(V, e2i, V_shapes, V_sizes):
     """Tabulate scalar value numbering of all nodes in a a list based representation of an expression graph.
 
     Returns:
-    V_symbols - CRS of symbols (value numbers) of each component of each node in V.
+    V_symbols - CRSArray of symbols (value numbers) of each component of each node in V.
     total_unique_symbols - The number of symbol values assigned to unique scalar components of the nodes in V.
     """
     # "Sparse" int matrix for storing variable number of entries (symbols) per row (vertex),
     # with a capasity bounded by the number of scalar subexpressions including repetitions
-    V_symbols = CRS(len(V), sum(V_sizes), int)
+    V_symbols = CRSArray(len(V), sum(V_sizes), int)
 
-    # Visit each node with value numberer algorithm, storing the result for each as a row in the V_symbols CRS
+    # Visit each node with value numberer algorithm, storing the result for each as a row in the V_symbols CRSArray
     value_numberer = ValueNumberer(e2i, V_sizes, V_symbols)
     for i, v in enumerate(V):
         V_symbols.push_row(value_numberer(v, i))
@@ -89,8 +88,8 @@ def build_graph_symbols(V, e2i, DEBUG):
     """Tabulate scalar value numbering of all nodes in a a list based representation of an expression graph.
 
     Returns:
-    V_shapes - CRS of the total shapes of nodes in V.
-    V_symbols - CRS of symbols (value numbers) of each component of each node in V.
+    V_shapes - CRSArray of the total shapes of nodes in V.
+    V_symbols - CRSArray of symbols (value numbers) of each component of each node in V.
     total_unique_symbols - The number of symbol values assigned to unique scalar components of the nodes in V.
     """
     # Compute the total shape (value shape x index dimensions) for each node
diff --git a/uflacs/analysis/graph_vertices.py b/ffc/uflacs/analysis/graph_vertices.py
similarity index 92%
rename from uflacs/analysis/graph_vertices.py
rename to ffc/uflacs/analysis/graph_vertices.py
index 209d324..23327d8 100644
--- a/uflacs/analysis/graph_vertices.py
+++ b/ffc/uflacs/analysis/graph_vertices.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -18,12 +18,13 @@
 
 """Algorithms for working with graphs."""
 
+import numpy
+
 from six import iteritems
 
-from ufl.classes import Terminal, MultiIndex, Label
+from ufl.classes import MultiIndex, Label
 
-from uflacs.datastructures.arrays import object_array
-from uflacs.analysis.modified_terminals import is_modified_terminal
+from ffc.uflacs.analysis.modified_terminals import is_modified_terminal
 
 
 def count_nodes_with_unique_post_traversal(expr, e2i=None, skip_terminal_modifiers=False):
@@ -59,7 +60,7 @@ def count_nodes_with_unique_post_traversal(expr, e2i=None, skip_terminal_modifie
 
 def build_array_from_counts(e2i):
     nv = len(e2i)
-    V = object_array(nv)
+    V = numpy.empty(nv, dtype=object)
     for e, i in iteritems(e2i):
         V[i] = e
     return V
diff --git a/ffc/uflacs/analysis/indexing.py b/ffc/uflacs/analysis/indexing.py
new file mode 100644
index 0000000..f0f6e96
--- /dev/null
+++ b/ffc/uflacs/analysis/indexing.py
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
+#
+# This file is part of UFLACS.
+#
+# UFLACS is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UFLACS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
+
+"""Algorithms for working with multiindices."""
+
+from ufl import product
+from ufl.permutation import compute_indices
+from ufl.utils.indexflattening import shape_to_strides, flatten_multiindex
+from ufl.classes import ComponentTensor, FixedIndex, Index, Indexed
+
+
+def map_indexed_arg_components(indexed):
+    """Build integer list mapping between flattened components
+    of indexed expression and its underlying tensor-valued subexpression."""
+
+    assert isinstance(indexed, Indexed)
+
+    # AKA indexed = tensor[multiindex]
+    tensor, multiindex = indexed.ufl_operands
+
+    # AKA e1 = e2[multiindex]
+    # (this renaming is historical, but kept for consistency with all the variables *1,*2 below)
+    e2 = tensor
+    e1 = indexed
+
+    # Get tensor and index shape
+    sh1 = e1.ufl_shape
+    sh2 = e2.ufl_shape
+    fi1 = e1.ufl_free_indices
+    fi2 = e2.ufl_free_indices
+    fid1 = e1.ufl_index_dimensions
+    fid2 = e2.ufl_index_dimensions
+
+    # Compute regular and total shape
+    tsh1 = sh1 + fid1
+    tsh2 = sh2 + fid2
+    # r1 = len(tsh1)
+    r2 = len(tsh2)
+    # str1 = shape_to_strides(tsh1)
+    str2 = shape_to_strides(tsh2)
+    assert not sh1
+    assert sh2  # Must have shape to be indexed in the first place
+    assert product(tsh1) <= product(tsh2)
+
+    # Build map from fi2/fid2 position (-offset nmui) to fi1/fid1 position
+    ind2_to_ind1_map = [None] * len(fi2)
+    for k, i in enumerate(fi2):
+        ind2_to_ind1_map[k] = fi1.index(i)
+
+    # Build map from fi1/fid1 position to mi position
+    nmui = len(multiindex)
+    multiindex_to_ind1_map = [None] * nmui
+    for k, i in enumerate(multiindex):
+        if isinstance(i, Index):
+            multiindex_to_ind1_map[k] = fi1.index(i.count())
+
+    # Build map from flattened e1 component to flattened e2 component
+    perm1 = compute_indices(tsh1)
+    ni1 = product(tsh1)
+
+    # Situation: e1 = e2[mi]
+    d1 = [None] * ni1
+    p2 = [None] * r2
+    assert len(sh2) == nmui
+    for k, i in enumerate(multiindex):
+        if isinstance(i, FixedIndex):
+            p2[k] = int(i)
+    for c1, p1 in enumerate(perm1):
+        for k, i in enumerate(multiindex):
+            if isinstance(i, Index):
+                p2[k] = p1[multiindex_to_ind1_map[k]]
+        for k, i in enumerate(ind2_to_ind1_map):
+            p2[nmui + k] = p1[i]
+        c2 = flatten_multiindex(p2, str2)
+        d1[c1] = c2
+
+    # Consistency checks
+    assert all(isinstance(x, int) for x in d1)
+    assert len(set(d1)) == len(d1)
+    return d1
+
+
+def map_component_tensor_arg_components(tensor):
+    """Build integer list mapping between flattened components
+    of tensor and its underlying indexed subexpression."""
+
+    assert isinstance(tensor, ComponentTensor)
+
+    # AKA tensor = as_tensor(indexed, multiindex)
+    indexed, multiindex = tensor.ufl_operands
+
+    e1 = indexed
+    e2 = tensor  # e2 = as_tensor(e1, multiindex)
+    mi = [i for i in multiindex if isinstance(i, Index)]
+
+    # Get tensor and index shapes
+    sh1 = e1.ufl_shape  # (sh)ape of e1
+    sh2 = e2.ufl_shape  # (sh)ape of e2
+    fi1 = e1.ufl_free_indices  # (f)ree (i)ndices of e1
+    fi2 = e2.ufl_free_indices  # ...
+    fid1 = e1.ufl_index_dimensions  # (f)ree (i)ndex (d)imensions of e1
+    fid2 = e2.ufl_index_dimensions  # ...
+
+    # Compute total shape (tsh) of e1 and e2
+    tsh1 = sh1 + fid1
+    tsh2 = sh2 + fid2
+    r1 = len(tsh1)  # 'total rank' of e1
+    r2 = len(tsh2)  # ...
+    str1 = shape_to_strides(tsh1)
+    assert not sh1
+    assert sh2
+    assert len(mi) == len(multiindex)
+    assert product(tsh1) == product(tsh2)
+    assert fi1
+
+    assert all(i in fi1 for i in fi2)
+
+    nmui = len(multiindex)
+    assert nmui == len(sh2)
+
+    # Build map from fi2/fid2 position (-offset nmui) to fi1/fid1 position
+    p2_to_p1_map = [None] * r2
+    for k, i in enumerate(fi2):
+        p2_to_p1_map[k + nmui] = fi1.index(i)
+
+    # Build map from fi1/fid1 position to mi position
+    for k, i in enumerate(mi):
+        p2_to_p1_map[k] = fi1.index(mi[k].count())
+
+    # Build map from flattened e1 component to flattened e2 component
+    perm2 = compute_indices(tsh2)
+    ni2 = product(tsh2)
+
+    # Situation: e2 = as_tensor(e1, mi)
+    d2 = [None] * ni2
+    p1 = [None] * r1
+    for c2, p2 in enumerate(perm2):
+        for k2, k1 in enumerate(p2_to_p1_map):
+            p1[k1] = p2[k2]
+        c1 = flatten_multiindex(p1, str1)
+        d2[c2] = c1
+
+    # Consistency checks
+    assert all(isinstance(x, int) for x in d2)
+    assert len(set(d2)) == len(d2)
+    return d2
+
diff --git a/uflacs/analysis/modified_terminals.py b/ffc/uflacs/analysis/modified_terminals.py
similarity index 54%
rename from uflacs/analysis/modified_terminals.py
rename to ffc/uflacs/analysis/modified_terminals.py
index d4fb299..ea16072 100644
--- a/uflacs/analysis/modified_terminals.py
+++ b/ffc/uflacs/analysis/modified_terminals.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -21,16 +21,17 @@
 from __future__ import print_function # used in some debugging
 
 from six.moves import zip
+
 from ufl.permutation import build_component_numbering
-from ufl.classes import (Terminal, FormArgument,
+from ufl.classes import (FormArgument, Argument,
                          Indexed, FixedIndex,
+                         SpatialCoordinate, Jacobian,
                          ReferenceValue,
                          Grad, ReferenceGrad,
                          Restricted,
                          FacetAvg, CellAvg)
 
 from ffc.log import error
-from ffc.log import ffc_assert
 
 
 class ModifiedTerminal(object):
@@ -40,48 +41,95 @@ class ModifiedTerminal(object):
     The variables of this class are:
 
         expr - The original UFL expression
-
         terminal           - the underlying Terminal object
+
         global_derivatives - tuple of ints, each meaning derivative in that global direction
         local_derivatives  - tuple of ints, each meaning derivative in that local direction
         reference_value    - bool, whether this is represented in reference frame
         averaged           - None, 'facet' or 'cell'
         restriction        - None, '+' or '-'
+
         component          - tuple of ints, the global component of the Terminal
         flat_component     - single int, flattened local component of the Terminal, considering symmetry
 
-    """
 
-    def __init__(self, expr, terminal, global_derivatives, local_derivatives, averaged,
-                 restriction, component, flat_component, reference_value):
+        Possibly other component model:
+        - global_component
+        - reference_component
+        - flat_component
+
+    """
+    def __init__(self, expr, terminal, reference_value,
+                 base_shape, base_symmetry,
+                 component, flat_component,
+                 global_derivatives, local_derivatives,
+                 averaged, restriction):
         # The original expression
         self.expr = expr
 
         # The underlying terminal expression
         self.terminal = terminal
 
-        # Components
+        # Are we seeing the terminal in physical or reference frame
         self.reference_value = reference_value
+
+        # Get the shape of the core terminal or its reference value,
+        # this is the shape that component and flat_component refers to
+        self.base_shape = base_shape
+        self.base_symmetry = base_symmetry
+
+        # Components
         self.component = component
         self.flat_component = flat_component
-        self.restriction = restriction
 
         # Derivatives
         self.global_derivatives = global_derivatives
         self.local_derivatives = local_derivatives
 
-        # Evaluation method (alternative: { None, 'facet_midpoint', 'cell_midpoint', 'facet_avg', 'cell_avg' })
+        # Evaluation method (alternatives: { None, 'facet_midpoint',
+        #  'cell_midpoint', 'facet_avg', 'cell_avg' })
         self.averaged = averaged
 
+        # Restriction to one cell or the other for interior facet integrals
+        self.restriction = restriction
+
     def as_tuple(self):
+        """Return a tuple with hashable values that uniquely identifies this modified terminal.
+
+        Some of the derived variables can be omitted here as long as
+        they are fully determined from the variables that are included here.
+        """
+        t = self.terminal  # FIXME: Terminal is not sortable...
+        rv = self.reference_value
+        #bs = self.base_shape 
+        #bsy = self.base_symmetry
+        #c = self.component
+        fc = self.flat_component
+        gd = self.global_derivatives
+        ld = self.local_derivatives
+        a = self.averaged
+        r = self.restriction
+        return (t, rv, fc, gd, ld, a, r)
+
+    def argument_ordering_key(self):
+        """Return a key for deterministic sorting of argument vertex
+        indices based on the properties of the modified terminal.
+        Used in factorization but moved here for closeness with ModifiedTerminal attributes."""
         t = self.terminal
-        c = self.component
+        assert isinstance(t, Argument)
+        n = t.number()
+        assert n >= 0
+        p = t.part()
         rv = self.reference_value
+        #bs = self.base_shape
+        #bsy = self.base_symmetry
+        #c = self.component
+        fc = self.flat_component
         gd = self.global_derivatives
         ld = self.local_derivatives
         a = self.averaged
         r = self.restriction
-        return (t, rv, c, gd, ld, a, r)
+        return (n, p, rv, fc, gd, ld, a, r)
 
     def __hash__(self):
         return hash(self.as_tuple())
@@ -89,8 +137,11 @@ class ModifiedTerminal(object):
     def __eq__(self, other):
         return isinstance(other, ModifiedTerminal) and self.as_tuple() == other.as_tuple()
 
-    def __lt__(self, other):
-        return self.as_tuple() < other.as_tuple()
+    #def __lt__(self, other):
+    #    error("Shouldn't use this?")
+    #    # FIXME: Terminal is not sortable, so the as_tuple contents
+    #    # must be changed for this to work properly
+    #    return self.as_tuple() < other.as_tuple()
 
     def __str__(self):
         s = []
@@ -112,6 +163,7 @@ def is_modified_terminal(v):
             return False
     return True
 
+
 def strip_modified_terminal(v):
     "Extract core Terminal from a modified terminal or return None."
     while not v._ufl_is_terminal_:
@@ -123,12 +175,16 @@ def strip_modified_terminal(v):
 
 
 def analyse_modified_terminal(expr):
-    """Analyse a so-called 'modified terminal' expression and return its properties in more compact form.
+    """Analyse a so-called 'modified terminal' expression.
+
+    Return its properties in more compact form as a ModifiedTerminal object.
 
-    A modified terminal expression is an object of a Terminal subtype, wrapped in terminal modifier types.
+    A modified terminal expression is an object of a Terminal subtype,
+    wrapped in terminal modifier types.
 
     The wrapper types can include 0-* Grad or ReferenceGrad objects,
-    and 0-1 ReferenceValue, 0-1 Restricted, 0-1 Indexed, and 0-1 FacetAvg or CellAvg objects.
+    and 0-1 ReferenceValue, 0-1 Restricted, 0-1 Indexed,
+    and 0-1 FacetAvg or CellAvg objects.
     """
     # Data to determine
     component = None
@@ -142,42 +198,58 @@ def analyse_modified_terminal(expr):
     t = expr
     while not t._ufl_is_terminal_:
         if isinstance(t, Indexed):
-            ffc_assert(component is None, "Got twice indexed terminal.")
+            if component is not None:
+                error("Got twice indexed terminal.")
+
             t, i = t.ufl_operands
-            ffc_assert(all(isinstance(j, FixedIndex) for j in i), "Expected only fixed indices.")
             component = [int(j) for j in i]
 
+            if not all(isinstance(j, FixedIndex) for j in i):
+                error("Expected only fixed indices.")
+
         elif isinstance(t, ReferenceValue):
-            ffc_assert(reference_value is None, "Got twice pulled back terminal!")
-            reference_value = True
+            if reference_value is not None:
+                error("Got twice pulled back terminal!")
+
             t, = t.ufl_operands
+            reference_value = True
 
         elif isinstance(t, ReferenceGrad):
-            ffc_assert(len(component), "Got local gradient of terminal without prior indexing.")
+            if not component:  # covers None or ()
+                error("Got local gradient of terminal without prior indexing.")
+
+            t, = t.ufl_operands
             local_derivatives.append(component[-1])
             component = component[:-1]
-            t, = t.ufl_operands
 
         elif isinstance(t, Grad):
-            ffc_assert(len(component), "Got gradient of terminal without prior indexing.")
+            if not component:  # covers None or ()
+                error("Got local gradient of terminal without prior indexing.")
+
+            t, = t.ufl_operands
             global_derivatives.append(component[-1])
             component = component[:-1]
-            t, = t.ufl_operands
 
         elif isinstance(t, Restricted):
-            ffc_assert(restriction is None, "Got twice restricted terminal!")
+            if restriction is not None:
+                error("Got twice restricted terminal!")
+
             restriction = t._side
             t, = t.ufl_operands
 
         elif isinstance(t, CellAvg):
-            ffc_assert(averaged is None, "Got twice averaged terminal!")
-            averaged = "cell"
+            if averaged is not None:
+                error("Got twice averaged terminal!")
+
             t, = t.ufl_operands
+            averaged = "cell"
 
         elif isinstance(t, FacetAvg):
-            ffc_assert(averaged is None, "Got twice averaged terminal!")
-            averaged = "facet"
+            if averaged is not None:
+                error("Got twice averaged terminal!")
+
             t, = t.ufl_operands
+            averaged = "facet"
 
         elif t._ufl_terminal_modifiers_:
             error("Missing handler for terminal modifier type %s, object is %s." % (type(t), repr(t)))
@@ -185,6 +257,7 @@ def analyse_modified_terminal(expr):
         else:
             error("Unexpected type %s object %s." % (type(t), repr(t)))
 
+
     # Make canonical representation of derivatives
     global_derivatives = tuple(sorted(global_derivatives))
     local_derivatives = tuple(sorted(local_derivatives))
@@ -195,8 +268,16 @@ def analyse_modified_terminal(expr):
     #    reference_value = True
 
     # Make reference_value true or false
-    if reference_value is None:
-        reference_value = False
+    reference_value = reference_value or False
+
+    # Consistency check
+    if isinstance(t, (SpatialCoordinate, Jacobian)):
+        pass
+    else:
+        if local_derivatives and not reference_value:
+            error("Local derivatives of non-local value is not legal.")
+        if global_derivatives and reference_value:
+            error("Global derivatives of local value is not legal.")
 
     # Make sure component is an integer tuple
     if component is None:
@@ -204,42 +285,34 @@ def analyse_modified_terminal(expr):
     else:
         component = tuple(component)
 
-    # Get the (reference or global) shape of the core terminal
-    if reference_value:
-        tshape = t.ufl_element().reference_value_shape()
+    # Get the shape of the core terminal or its reference value,
+    # this is the shape that component refers to
+    if isinstance(t, FormArgument):
+        element = t.ufl_element()
+        if reference_value:
+            # Ignoring symmetry, assuming already applied in conversion to reference frame
+            base_symmetry = {}
+            base_shape = element.reference_value_shape()
+        else:
+            base_symmetry = element.symmetry()
+            base_shape = t.ufl_shape
     else:
-        tshape = t.ufl_shape
+        base_symmetry = {}
+        base_shape = t.ufl_shape
 
-    # Assert that component is within the shape of the terminal
-    ffc_assert(len(component) == len(tshape),
-               "Length of component does not match rank of terminal.")
-    ffc_assert(all(c >= 0 and c < d for c, d in zip(component, tshape)),
-               "Component indices %s are outside value shape %s" % (component, tshape))
+    # Assert that component is within the shape of the (reference) terminal
+    if len(component) != len(base_shape):
+        error("Length of component does not match rank of (reference) terminal.")
+    if not all(c >= 0 and c < d for c, d in zip(component, base_shape)):
+        error("Component indices %s are outside value shape %s" % (component, base_shape))
 
     # Flatten component
-    if isinstance(t, FormArgument):
-        symmetry = t.ufl_element().symmetry()
-        if symmetry and reference_value:
-            ffc_assert(t.ufl_element().value_shape() == t.ufl_element().reference_value_shape(),
-                       "The combination of element symmetries and "
-                       "Piola mapped elements is not currently handled.")
-    else:
-        symmetry = {}
-    vi2si, si2vi = build_component_numbering(tshape, symmetry)
+    vi2si, si2vi = build_component_numbering(base_shape, base_symmetry)
     flat_component = vi2si[component]
     # num_flat_components = len(si2vi)
 
-    mt = ModifiedTerminal(expr, t, global_derivatives, local_derivatives,
-                          averaged, restriction, component, flat_component, reference_value)
-
-    if local_derivatives and not reference_value:
-        print("Local derivatives of non-local value?")
-        import IPython; IPython.embed()
-        error("Local derivatives of non-local value?")
-
-    if global_derivatives and reference_value:
-        print("Global derivatives of local value?")
-        import IPython; IPython.embed()
-        error("Global derivatives of local value?")
-
-    return mt
+    return ModifiedTerminal(expr, t, reference_value,
+                            base_shape, base_symmetry,
+                            component, flat_component,
+                            global_derivatives, local_derivatives,
+                            averaged, restriction)
diff --git a/uflacs/analysis/valuenumbering.py b/ffc/uflacs/analysis/valuenumbering.py
similarity index 70%
rename from uflacs/analysis/valuenumbering.py
rename to ffc/uflacs/analysis/valuenumbering.py
index df39d16..09fa9b7 100644
--- a/uflacs/analysis/valuenumbering.py
+++ b/ffc/uflacs/analysis/valuenumbering.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -19,13 +19,16 @@
 """Algorithms for value numbering within computational graphs."""
 
 from six.moves import xrange as range
-from ffc.log import warning
+
 from ufl import product
 from ufl.permutation import compute_indices
 from ufl.corealg.multifunction import MultiFunction
 from ufl.classes import FormArgument
-from uflacs.analysis.indexing import map_indexed_arg_components, map_component_tensor_arg_components
-from uflacs.analysis.modified_terminals import analyse_modified_terminal
+
+from ffc.log import error
+
+from ffc.uflacs.analysis.indexing import map_indexed_arg_components, map_component_tensor_arg_components
+from ffc.uflacs.analysis.modified_terminals import analyse_modified_terminal
 
 
 class ValueNumberer(MultiFunction):
@@ -65,14 +68,20 @@ class ValueNumberer(MultiFunction):
         "Create new symbols for expressions that represent new values."
         symmetry = v.ufl_element().symmetry()
 
-        if False and symmetry:
-            # FIXME: Ignoring symmetries for now, handle by creating only
-            # some new symbols and mapping the rest using the symmetry map.
-            actual_components = sorted(set(symmetry.values()))
-            m = len(actual_components)
-            actual_symbols = self.new_symbols(m)
-            symbols = mapping_of_actual_symbols_to_all_components(actual_symbols, symmetry)  # Need to implement this
+        if symmetry:
+            # Build symbols with symmetric components skipped
+            symbols = []
+            mapped_symbols = {}
+            for c in compute_indices(v.ufl_shape):
+                # Build mapped component mc with symmetries from element considered
+                mc = symmetry.get(c, c)
 
+                # Get existing symbol or create new and store with mapped component mc as key
+                s = mapped_symbols.get(mc)
+                if s is None:
+                    s = self.new_symbol()
+                    mapped_symbols[mc] = s
+                symbols.append(s)
         else:
             n = self.V_sizes[i]
             symbols = self.new_symbols(n)
@@ -96,54 +105,39 @@ class ValueNumberer(MultiFunction):
         # (3) averaging and restrictions define distinct symbols, no additional symmetries
         # (4) two or more grad/reference_grad defines distinct symbols with additional symmetries
 
-        # FIXME: Need modified version of amt(), v is probably not scalar here. This hack works for now.
+        # v is not necessary scalar here, indexing in (0,...,0) picks the first scalar component
+        # to analyse, which should be sufficient to get the base shape and derivatives
         if v.ufl_shape:
             mt = analyse_modified_terminal(v[(0,) * len(v.ufl_shape)])
         else:
             mt = analyse_modified_terminal(v)
 
-        domain = mt.terminal.ufl_domain()
-
+        # Get derivatives
         num_ld = len(mt.local_derivatives)
         num_gd = len(mt.global_derivatives)
         assert not (num_ld and num_gd)
-
-        # Get base shape without the derivative axes
-        if mt.reference_value:
-            base_shape = mt.terminal.ufl_element().reference_value_shape()
-        else:
-            base_shape = mt.terminal.ufl_shape
-        base_components = compute_indices(base_shape)
-
         if num_ld:
+            domain = mt.terminal.ufl_domain()
             tdim = domain.topological_dimension()
-            # d_components = compute_permutations(num_ld, tdim)
             d_components = compute_indices((tdim,) * num_ld)
         elif num_gd:
+            domain = mt.terminal.ufl_domain()
             gdim = domain.geometric_dimension()
-            # d_components = compute_permutations(num_gd, gdim)
             d_components = compute_indices((gdim,) * num_gd)
         else:
             d_components = [()]
 
-        if isinstance(mt.terminal, FormArgument):
-            element = mt.terminal.ufl_element()
-            symmetry = element.symmetry()
-            if symmetry and mt.reference_value:
-                ffc_assert(element.value_shape() == element.reference_value_shape(),
-                           "The combination of element symmetries and "
-                           "Piola mapped elements is not currently handled.")
-        else:
-            symmetry = {}
+        # Get base shape without the derivative axes
+        base_components = compute_indices(mt.base_shape)
 
+        # Build symbols with symmetric components and derivatives skipped
         symbols = []
         mapped_symbols = {}
         for bc in base_components:
             for dc in d_components:
-                # Build mapped component with symmetries from element and derivatives combined
-                mbc = symmetry.get(bc, bc)
+                # Build mapped component mc with symmetries from element and derivatives combined
+                mbc = mt.base_symmetry.get(bc, bc)
                 mdc = tuple(sorted(dc))
-                c = bc + dc
                 mc = mbc + mdc
 
                 # Get existing symbol or create new and store with mapped component mc as key
@@ -153,10 +147,10 @@ class ValueNumberer(MultiFunction):
                     mapped_symbols[mc] = s
                 symbols.append(s)
 
+        # Consistency check before returning symbols
         assert not v.ufl_free_indices
-        if not product(v.ufl_shape) == len(symbols):
+        if product(v.ufl_shape) != len(symbols):
             error("Internal error in value numbering.")
-
         return symbols
 
     # Handle modified terminals with element symmetries and second derivative symmetries!
@@ -197,26 +191,7 @@ class ValueNumberer(MultiFunction):
         row_symbols = [self.get_node_symbols(row) for row in v.ufl_operands]
         symbols = []
         for rowsymb in row_symbols:
-            symbols.extend(rowsymb)  # FIXME: Test that this produces the right transposition
-        return symbols
-
-    def transposed(self, AT, i):
-        A, = AT.ufl_operands
-
-        assert not A.ufl_free_indices, "Assuming no free indices in transposed (for now), report as bug if needed."  # FIXME
-        r, c = A.ufl_shape
-
-        A_symbols = self.get_node_symbols(A)
-        assert len(A_symbols) == r * c
-
-        # AT[j,i] = A[i,j]
-        # sh(A) = (r,c)
-        # sh(AT) = (c,r)
-        # AT[j*r+i] = A[i*c+j]
-        symbols = [None] * (r * c)
-        for j in range(c):
-            for i in range(r):
-                symbols[j * r + i] = A_symbols[i * c + j]
+            symbols.extend(rowsymb)
         return symbols
 
     def variable(self, v, i):
diff --git a/uflacs/backends/__init__.py b/ffc/uflacs/backends/__init__.py
similarity index 95%
rename from uflacs/backends/__init__.py
rename to ffc/uflacs/backends/__init__.py
index 19bf97a..358b953 100644
--- a/uflacs/backends/__init__.py
+++ b/ffc/uflacs/backends/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
diff --git a/uflacs/backends/ffc/__init__.py b/ffc/uflacs/backends/ffc/__init__.py
similarity index 94%
rename from uflacs/backends/ffc/__init__.py
rename to ffc/uflacs/backends/ffc/__init__.py
index b282805..6297d59 100644
--- a/uflacs/backends/ffc/__init__.py
+++ b/ffc/uflacs/backends/ffc/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
diff --git a/uflacs/backends/ffc/access.py b/ffc/uflacs/backends/ffc/access.py
similarity index 50%
rename from uflacs/backends/ffc/access.py
rename to ffc/uflacs/backends/ffc/access.py
index ed5c84d..4fa8262 100644
--- a/uflacs/backends/ffc/access.py
+++ b/ffc/uflacs/backends/ffc/access.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -16,76 +16,31 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with UFLACS. If not, see <http://www.gnu.org/licenses/>
 
-"""FFC specific access formatting."""
+"""FFC/UFC specific variable access."""
 
-import ufl
-from ufl.permutation import build_component_numbering
 from ufl.corealg.multifunction import MultiFunction
-from ufl.checks import is_cellwise_constant
-from ffc.log import error
-from ffc.log import ffc_assert
+from ufl.permutation import build_component_numbering
 
-from uflacs.backends.ffc.common import FFCBackendSymbols
-# FIXME: Move these to FFCBackendSymbols
-from uflacs.backends.ffc.common import (names,
-                                        format_entity_name,
-                                        format_mt_name)
+from ffc.log import error, warning
 
+from ffc.uflacs.backends.ffc.symbols import FFCBackendSymbols
+from ffc.uflacs.backends.ffc.common import physical_quadrature_integral_types
 
-class FFCAccessBackend(MultiFunction):
+
+class FFCBackendAccess(MultiFunction):
     """FFC specific cpp formatter class."""
 
-    def __init__(self, ir, language, parameters):
+    def __init__(self, ir, language, symbols, parameters):
         MultiFunction.__init__(self)
 
         # Store ir and parameters
         self.ir = ir
+        self.entitytype = ir["entitytype"]
+        self.integral_type = ir["integral_type"]
         self.language = language
+        self.symbols = symbols
         self.parameters = parameters
 
-        # Configure definitions behaviour
-        self.physical_coordinates_known = self.ir["integral_type"] == "quadrature"
-
-        # Need this for custom integrals
-        #classname = make_classname(prefix, "finite_element", ir["element_numbers"][ufl_element])
-
-        coefficient_numbering = self.ir["uflacs"]["coefficient_numbering"]
-        self.symbols = FFCBackendSymbols(self.language, coefficient_numbering)
-
-    def get_includes(self):
-        "Return include statements to insert at top of file."
-        includes = []
-        return includes
-
-
-    # === Access to names of quantities not among the symbolic UFL types ===
-    # FIXME: Move these out of the AccessBackend, maybe introduce a FFCBackendSymbols?
-    #        A symbols class can contain generate*names from common.* as well.
-    # FIXME: Use self.language.Symbol and/or self.language.ArrayAccess to wrap names.*:
-    def weights_array_name(self, num_points):
-        return "{0}{1}".format(names.weights, num_points)
-
-    def points_array_name(self, num_points):
-        return "{0}{1}".format(names.points, num_points)
-
-    def physical_points_array_name(self):
-        return names.points
-
-    def quadrature_loop_index(self):
-        return self.symbols.quadrature_loop_index()
-
-    def argument_loop_index(self, iarg):
-        L = self.language
-        return L.Symbol("{name}{num}".format(name=names.ia, num=iarg))
-
-    def element_tensor_name(self):
-        return names.A
-
-    def element_tensor_entry(self, indices, shape):
-        L = self.language
-        flat_index = L.flattened_indices(indices, shape)
-        return L.ArrayAccess(names.A, flat_index)
-
 
     # === Rules for all modified terminal types ===
 
@@ -102,18 +57,21 @@ class FFCAccessBackend(MultiFunction):
         L = self.language
         return L.LiteralFloat(0.0)
 
+
     def int_value(self, e, mt, tabledata, num_points):
         # We shouldn't have derivatives of constants left at this point
         assert not (mt.global_derivatives or mt.local_derivatives)
         L = self.language
         return L.LiteralInt(int(e))
 
+
     def float_value(self, e, mt, tabledata, num_points):
         # We shouldn't have derivatives of constants left at this point
         assert not (mt.global_derivatives or mt.local_derivatives)
         L = self.language
         return L.LiteralFloat(float(e))
 
+
     def argument(self, e, mt, tabledata, num_points):
         L = self.language
         # Expecting only local derivatives and values here
@@ -121,100 +79,166 @@ class FFCAccessBackend(MultiFunction):
         # assert mt.global_component is None
 
         # No need to store basis function value in its own variable, just get table value directly
+        #uname, begin, end, ttype = tabledata
         uname, begin, end = tabledata
-        uname = L.Symbol(uname)
+        table_types = self.ir["expr_irs"][num_points]["table_types"]
+        ttype = table_types[uname]
+
+        if ttype == "zeros":
+            error("Not expecting zero arguments to get this far.")
+            return L.LiteralFloat(0.0)
+        elif ttype == "ones":
+            warning("Should simplify ones arguments before getting this far.")
+            return L.LiteralFloat(1.0)
+
+        if ttype in ("uniform", "fixed"):
+            entity = 0
+        else:
+            entity = self.symbols.entity(self.entitytype, mt.restriction)
 
-        entity = format_entity_name(self.ir["entitytype"], mt.restriction)
-        entity = L.Symbol(entity)
+        if ttype in ("piecewise", "fixed"):
+            iq = 0
+        else:
+            iq = self.symbols.quadrature_loop_index(num_points)
 
-        iq = self.quadrature_loop_index()
-        idof = self.argument_loop_index(mt.terminal.number())
+        if ttype == "quadrature":
+            warning("Should simplify quadrature element arguments before getting this far.")
+            idof = iq
+        else:
+            idof = self.symbols.argument_loop_index(mt.terminal.number())
 
+        uname = L.Symbol(uname)
         return uname[entity][iq][idof - begin]
 
+
     def coefficient(self, e, mt, tabledata, num_points):
-        t = mt.terminal
-        if is_cellwise_constant(t):
-            access = self._constant_coefficient(e, mt, tabledata)
-        else:
-            access = self._varying_coefficient(e, mt, tabledata)
-        return access
-
-    def _constant_coefficient(self, e, mt, tabledata):
-        # Map component to flat index
-        vi2si, si2vi = build_component_numbering(mt.terminal.ufl_shape,
-                                                 mt.terminal.ufl_element().symmetry())
-        num_flat_components = len(si2vi)
-        ffc_assert(mt.flat_component == vi2si[mt.component], "Incompatible component flattening!")
-
-        # Offset index if on second cell in interior facet integral
-        # TODO: Get the notion that '-' is the second cell from a central definition?
-        if mt.restriction == "-":
-            idof = mt.flat_component + len(si2vi)
+        # TODO: Passing type along with tabledata would make a lot of code cleaner
+        #uname, begin, end, ttype = tabledata
+        uname, begin, end = tabledata
+        table_types = self.ir["expr_irs"][num_points]["table_types"]
+        ttype = table_types[uname]
+
+        if ttype == "zeros":
+            # FIXME: Remove at earlier stage so dependent code can also be removed
+            warning("Not expecting zero coefficients to get this far.")
+            L = self.language
+            return L.LiteralFloat(0.0)
+        elif ttype == "ones" and (end - begin) == 1:
+            # f = 1.0 * f_{begin}, just return direct reference to dof array at dof begin
+            # (if mt is restricted, begin contains cell offset)
+            idof = begin
+            return self.symbols.coefficient_dof_access(mt.terminal, idof)
+        elif ttype == "quadrature":
+            # f(x_q) = sum_i f_i * delta_iq = f_q, just return direct
+            # reference to dof array at quadrature point index + begin
+            iq = self.symbols.quadrature_loop_index(num_points)
+            idof = begin + iq
+            return self.symbols.coefficient_dof_access(mt.terminal, idof)
         else:
-            idof = mt.flat_component
+            # Return symbol, see definitions for computation 
+            return self.symbols.coefficient_value(mt)  #, num_points)
 
-        # Return direct reference to dof array
-        return self.symbols.coefficient_dof_access(mt.terminal, idof)
-
-    def _varying_coefficient(self, e, mt, tabledata):
-        # Format base coefficient (derivative) name
-        L = self.language
-        coefficient_numbering = self.ir["uflacs"]["coefficient_numbering"]
-        c = coefficient_numbering[mt.terminal] # mt.terminal.count()
-        basename = "{name}{count}".format(name=names.w, count=c)
-        return L.Symbol(format_mt_name(basename, mt))
 
     def quadrature_weight(self, e, mt, tabledata, num_points):
-        L = self.language
-        weight = self.weights_array_name(num_points)
-        weight = L.Symbol(weight)
-        iq = self.quadrature_loop_index()
+        weight = self.symbols.weights_array(num_points)
+        iq = self.symbols.quadrature_loop_index(num_points)
         return weight[iq]
 
+
     def spatial_coordinate(self, e, mt, tabledata, num_points):
-        L = self.language
-        ffc_assert(not mt.global_derivatives, "Not expecting derivatives of SpatialCoordinates.")
-        ffc_assert(not mt.local_derivatives, "Not expecting derivatives of SpatialCoordinates.")
-        #ffc_assert(not mt.restriction, "Not expecting restriction of SpatialCoordinates.")
-        ffc_assert(not mt.averaged, "Not expecting average of SpatialCoordinates.")
-
-        if self.physical_coordinates_known:
-            # In a context where the physical coordinates are available in existing variables.
-            x = self.physical_points_array_name()
-            x = L.Symbol(x)
-            iq = self.quadrature_loop_index()
+        #L = self.language
+        if mt.global_derivatives:
+            error("Not expecting global derivatives of SpatialCoordinate.")
+        if mt.averaged:
+            error("Not expecting average of SpatialCoordinates.")
+
+        if self.integral_type in physical_quadrature_integral_types:
+            # FIXME: Jacobian may need adjustment for physical_quadrature_integral_types
+            if mt.local_derivatives:
+                error("FIXME: Jacobian in custom integrals is not implemented.")
+
+            # Physical coordinates are available in given variables
+            assert num_points is None
+            x = self.symbols.points_array(num_points)
+            iq = self.symbols.quadrature_loop_index(num_points)
             gdim, = mt.terminal.ufl_shape
-            return x[iq * gdim + mt.flat_component]
+            if gdim == 1:
+                index = iq
+            else:
+                index = iq * gdim + mt.flat_component
+            return x[index]
         else:
-            # In a context where physical coordinates are computed by code generated by us.
-            return L.Symbol(format_mt_name(names.x, mt))
+            # Physical coordinates are computed by code generated in definitions
+            return self.symbols.x_component(mt)
+
 
     def cell_coordinate(self, e, mt, tabledata, num_points):
-        L = self.language
-        ffc_assert(not mt.global_derivatives, "Not expecting derivatives of CellCoordinates.")
-        ffc_assert(not mt.local_derivatives, "Not expecting derivatives of CellCoordinates.")
-        ffc_assert(not mt.averaged, "Not expecting average of CellCoordinates.")
+        #L = self.language
+        if mt.global_derivatives:
+            error("Not expecting derivatives of CellCoordinate.")
+        if mt.local_derivatives:
+            error("Not expecting derivatives of CellCoordinate.")
+        if mt.averaged:
+            error("Not expecting average of CellCoordinate.")
+
+        if self.integral_type == "cell" and not mt.restriction:
+            X = self.symbols.points_array(num_points)
+            tdim, = mt.terminal.ufl_shape
+            iq = self.symbols.quadrature_loop_index(num_points)
+            if num_points == 1:
+                index = mt.flat_component
+            elif tdim == 1:
+                index = iq
+            else:
+                index = iq * tdim + mt.flat_component
+            return X[index]
+        else:
+            # X should be computed from x or Xf symbolically instead of getting here
+            error("Expecting reference cell coordinate to be symbolically rewritten.")
 
-        assert not mt.restriction  # FIXME: Not used!
 
-        if self.physical_coordinates_known:
-            # No special variable should exist in this case.
-            error("Expecting reference coordinate to be symbolically rewritten.")
-        else:
-            X = self.points_array_name(num_points)
-            X = L.Symbol(X)
-            iq = self.quadrature_loop_index()
+    def facet_coordinate(self, e, mt, tabledata, num_points):
+        L = self.language
+        if mt.global_derivatives:
+            error("Not expecting derivatives of FacetCoordinate.")
+        if mt.local_derivatives:
+            error("Not expecting derivatives of FacetCoordinate.")
+        if mt.averaged:
+            error("Not expecting average of FacetCoordinate.")
+        if mt.restriction:
+            error("Not expecting restriction of FacetCoordinate.")
+
+        if self.integral_type in ("interior_facet", "exterior_facet"):
             tdim, = mt.terminal.ufl_shape
-            return X[iq * tdim + mt.flat_component]
+            if tdim == 0:
+                error("Vertices have no facet coordinates.")
+            elif tdim == 1:
+                # 0D vertex coordinate
+                warning("Vertex coordinate is always 0, should get rid of this in ufl geometry lowering.")
+                return L.LiteralFloat(0.0)
+            Xf = self.points_array(num_points)
+            iq = self.symbols.quadrature_loop_index(num_points)
+            assert 0 <= mt.flat_component < (tdim-1)
+            if num_points == 1:
+                index = mt.flat_component
+            elif tdim == 2:
+                index = iq
+            else:
+                index = iq * (tdim - 1) + mt.flat_component
+            return Xf[index]
+        else:
+            # Xf should be computed from X or x symbolically instead of getting here
+            error("Expecting reference facet coordinate to be symbolically rewritten.")
+
 
     def jacobian(self, e, mt, tabledata, num_points):
         L = self.language
-        ffc_assert(not mt.global_derivatives, "Not expecting derivatives of Jacobian.")
-        ffc_assert(not mt.local_derivatives, "Not expecting derivatives of Jacobian.")
-        ffc_assert(not mt.averaged, "Not expecting average of Jacobian.")
+        if mt.global_derivatives:
+            error("Not expecting global derivatives of Jacobian.")
+        if mt.averaged:
+            error("Not expecting average of Jacobian.")
+        return self.symbols.J_component(mt)
 
-        return L.Symbol(format_mt_name(names.J, mt))
 
     def reference_cell_volume(self, e, mt, tabledata, access):
         L = self.language
@@ -224,6 +248,7 @@ class FFCAccessBackend(MultiFunction):
         else:
             error("Unhandled cell types {0}.".format(cellname))
 
+
     def reference_facet_volume(self, e, mt, tabledata, access):
         L = self.language
         cellname = mt.terminal.ufl_domain().ufl_cell().cellname()
@@ -232,28 +257,31 @@ class FFCAccessBackend(MultiFunction):
         else:
             error("Unhandled cell types {0}.".format(cellname))
 
+
     def reference_normal(self, e, mt, tabledata, access):
         L = self.language
         cellname = mt.terminal.ufl_domain().ufl_cell().cellname()
         if cellname in ("interval", "triangle", "tetrahedron", "quadrilateral", "hexahedron"):
             table = L.Symbol("{0}_reference_facet_normals".format(cellname))
-            facet = L.Symbol(format_entity_name("facet", mt.restriction))
+            facet = self.symbols.entity("facet", mt.restriction)
             return table[facet][mt.component[0]]
         else:
             error("Unhandled cell types {0}.".format(cellname))
 
+
     def cell_facet_jacobian(self, e, mt, tabledata, num_points):
         L = self.language
         cellname = mt.terminal.ufl_domain().ufl_cell().cellname()
         if cellname in ("triangle", "tetrahedron", "quadrilateral", "hexahedron"):
             table = L.Symbol("{0}_reference_facet_jacobian".format(cellname))
-            facet = L.Symbol(format_entity_name("facet", mt.restriction))
+            facet = self.symbols.entity("facet", mt.restriction)
             return table[facet][mt.component[0]][mt.component[1]]
         elif cellname == "interval":
             error("The reference facet jacobian doesn't make sense for interval cell.")
         else:
             error("Unhandled cell types {0}.".format(cellname))
 
+
     def cell_edge_vectors(self, e, mt, tabledata, num_points):
         L = self.language
         cellname = mt.terminal.ufl_domain().ufl_cell().cellname()
@@ -265,25 +293,26 @@ class FFCAccessBackend(MultiFunction):
         else:
             error("Unhandled cell types {0}.".format(cellname))
 
+
     def facet_edge_vectors(self, e, mt, tabledata, num_points):
         L = self.language
         cellname = mt.terminal.ufl_domain().ufl_cell().cellname()
         if cellname in ("tetrahedron", "hexahedron"):
             table = L.Symbol("{0}_reference_edge_vectors".format(cellname))
-            facet = L.Symbol(format_entity_name("facet", mt.restriction))
+            facet = self.symbols.entity("facet", mt.restriction)
             return table[facet][mt.component[0]][mt.component[1]]
         elif cellname in ("interval", "triangle", "quadrilateral"):
             error("The reference cell facet edge vectors doesn't make sense for interval or triangle cell.")
         else:
             error("Unhandled cell types {0}.".format(cellname))
 
+
     def cell_orientation(self, e, mt, tabledata, num_points):
-        L = self.language
         # Error if not in manifold case:
-        gdim = mt.terminal.ufl_domain().geometric_dimension()
-        tdim = mt.terminal.ufl_domain().topological_dimension()
-        assert gdim > tdim
-        return L.Symbol("co")
+        domain = mt.terminal.ufl_domain()
+        assert domain.geometric_dimension() > domain.topological_dimension()
+        return self.symbols.cell_orientation_internal(mt.restriction)
+
 
     def facet_orientation(self, e, mt, tabledata, num_points):
         L = self.language
@@ -292,9 +321,10 @@ class FFCAccessBackend(MultiFunction):
             error("Unhandled cell types {0}.".format(cellname))
 
         table = L.Symbol("{0}_facet_orientations".format(cellname))
-        facet = L.Symbol(format_entity_name("facet", mt.restriction))
+        facet = self.symbols.entity("facet", mt.restriction)
         return table[facet]
 
+
     def _expect_symbolic_lowering(self, e, mt, tabledata, num_points):
         error("Expecting {0} to be replaced in symbolic preprocessing.".format(type(e)))
     facet_normal = _expect_symbolic_lowering
diff --git a/ffc/uflacs/backends/ffc/backend.py b/ffc/uflacs/backends/ffc/backend.py
new file mode 100644
index 0000000..ce61210
--- /dev/null
+++ b/ffc/uflacs/backends/ffc/backend.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
+#
+# This file is part of UFLACS.
+#
+# UFLACS is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UFLACS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UFLACS. If not, see <http://www.gnu.org/licenses/>
+
+"""Collection of FFC specific pieces for the code generation phase."""
+
+import ffc.uflacs.language.cnodes
+from ffc.uflacs.language.ufl_to_cnodes import UFL2CNodesTranslatorCpp
+
+from ffc.uflacs.backends.ffc.symbols import FFCBackendSymbols
+from ffc.uflacs.backends.ffc.access import FFCBackendAccess
+from ffc.uflacs.backends.ffc.definitions import FFCBackendDefinitions
+
+
+class FFCBackend(object):
+    "Class collecting all aspects of the FFC backend."
+    def __init__(self, ir, parameters):
+
+        # This is the seam where cnodes/C++ is chosen for the ffc backend
+        self.language = ffc.uflacs.language.cnodes
+        self.ufl_to_language = UFL2CNodesTranslatorCpp(self.language)
+
+        coefficient_numbering = ir["coefficient_numbering"]
+        self.symbols = FFCBackendSymbols(self.language, coefficient_numbering)
+        self.definitions = FFCBackendDefinitions(ir, self.language, self.symbols, parameters)
+        self.access = FFCBackendAccess(ir, self.language, self.symbols, parameters)
diff --git a/ffc/uflacs/backends/ffc/common.py b/ffc/uflacs/backends/ffc/common.py
new file mode 100644
index 0000000..3013c68
--- /dev/null
+++ b/ffc/uflacs/backends/ffc/common.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
+#
+# This file is part of UFLACS.
+#
+# UFLACS is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UFLACS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UFLACS. If not, see <http://www.gnu.org/licenses/>
+
+"""FFC/UFC specific symbol naming."""
+
+
+from ffc.log import error
+
+
+physical_quadrature_integral_types = ("custom", "cutcell", "interface", "overlap")
+
+
+# TODO: Move somewhere else
+def num_coordinate_component_dofs(coordinate_element):
+    """Get the number of dofs for a coordinate component for this degree.
+
+    This is a local hack that works for Lagrange 1-3, better
+    would be to get this passed by ffc from fiat through the ir.
+    The table data is too messy to figure out a clean design for that quickly.
+    """
+    from ufl.cell import num_cell_entities
+    degree = coordinate_element.degree()
+    cell = coordinate_element.cell()
+    tdim = cell.topological_dimension()
+    cellname = cell.cellname()
+    d = 0
+    for entity_dim in range(tdim+1):
+        # n = dofs per cell entity
+        if entity_dim == 0:
+            n = 1
+        elif entity_dim == 1:
+            n = degree - 1
+        elif entity_dim == 2:
+            n = (degree - 2)*(degree - 1) // 2
+        elif entity_dim == 3:
+            n = (degree - 3)*(degree - 2)*(degree - 1) // 6
+        else:
+            error("Entity dimension out of range")
+        # Accumulate
+        num_entities = num_cell_entities[cellname][entity_dim]
+        d += num_entities * n
+    return d
+
+
+# TODO: Get restriction postfix from somewhere central
+def ufc_restriction_offset(restriction, length):
+    if restriction == "-":
+        return length
+    else:
+        return 0
diff --git a/uflacs/backends/ffc/definitions.py b/ffc/uflacs/backends/ffc/definitions.py
similarity index 53%
rename from uflacs/backends/ffc/definitions.py
rename to ffc/uflacs/backends/ffc/definitions.py
index cab2afe..983bac5 100644
--- a/uflacs/backends/ffc/definitions.py
+++ b/ffc/uflacs/backends/ffc/definitions.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -16,129 +16,105 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with UFLACS. If not, see <http://www.gnu.org/licenses/>
 
-"""FFC specific definitions."""
-
-from six.moves import xrange as range
+"""FFC/UFC specific variable definitions."""
 
 from ufl.corealg.multifunction import MultiFunction
-from ufl.checks import is_cellwise_constant
-
-from ffc.log import error
-from ffc.log import ffc_assert
-
-from uflacs.backends.ffc.common import FFCBackendSymbols
-# FIXME: Move these to FFCBackendSymbols
-from uflacs.backends.ffc.common import format_entity_name, ufc_restriction_postfix
-
-
-from ufl.cell import num_cell_entities
-
-def num_coordinate_component_dofs(coordinate_element):
-    """Get the number of dofs for a coordinate component for this degree.
-
-    This is a local hack that works for Lagrange 1-3, better
-    would be to get this passed by ffc from fiat through the ir.
-    The table data is to messy to figure out a clean design for that quickly.
-    """
-    degree = coordinate_element.degree()
-    cell = coordinate_element.cell()
-    tdim = cell.topological_dimension()
-    cellname = cell.cellname()
-    d = 0
-    for entity_dim in range(tdim+1):
-        # n = dofs per cell entity
-        if entity_dim == 0:
-            n = 1
-        elif entity_dim == 1:
-            n = degree - 1
-        elif entity_dim == 2:
-            n = (degree - 2)*(degree - 1) // 2
-        elif entity_dim == 3:
-            n = (degree - 3)*(degree - 2)*(degree - 1) // 6
-        else:
-            error("Entity dimension out of range")
-        # Accumulate
-        num_entities = num_cell_entities[cellname][entity_dim]
-        d += num_entities * n
-    return d
 
+from ffc.log import error, warning
 
-class FFCDefinitionsBackend(MultiFunction):
-    """FFC specific code definitions."""
+from ffc.uflacs.backends.ffc.symbols import FFCBackendSymbols
+from ffc.uflacs.backends.ffc.common import physical_quadrature_integral_types
+from ffc.uflacs.backends.ffc.common import num_coordinate_component_dofs
 
-    def __init__(self, ir, language, parameters):
+
+class FFCBackendDefinitions(MultiFunction):
+    """FFC specific code definitions."""
+    def __init__(self, ir, language, symbols, parameters):
         MultiFunction.__init__(self)
 
         # Store ir and parameters
         self.ir = ir
+        self.integral_type = ir["integral_type"]
+        self.entitytype = ir["entitytype"]
         self.language = language
+        self.symbols = symbols
         self.parameters = parameters
 
-        # FIXME: Make this configurable for easy experimentation with dolfin!
+        # TODO: Make this configurable for easy experimentation with dolfin!
         # Coordinate dofs for each component are interleaved? Must match dolfin.
         self.interleaved_components = True # parameters["interleaved_coordinate_component_dofs"]
 
-        # Configure definitions behaviour
-        if self.ir["integral_type"] in ("custom", "vertex"):
-            self.physical_coordinates_known = True
-        else:
-            self.physical_coordinates_known = False
 
-        # Need this for custom integrals
-        #classname = make_classname(prefix, "finite_element", ir["element_numbers"][ufl_element])
+    # === Generate code to define variables for ufl types ===
 
-        coefficient_numbering = ir["uflacs"]["coefficient_numbering"]
-        self.symbols = FFCBackendSymbols(self.language, coefficient_numbering)
+    def expr(self, t, mt, tabledata, num_points, access):
+        error("Unhandled type {0}".format(type(t)))
 
-    def get_includes(self):
-        "Return include statements to insert at top of file."
-        includes = []
-        return includes
 
-    def initial(self):
-        "Return code inserted at beginning of kernel."
+    def quadrature_weight(self, e, mt, tabledata, num_points, access):
+        "Quadrature weights are precomputed and need no code."
         return []
 
-    def expr(self, t, mt, tabledata, access):
-        error("Unhandled type {0}".format(type(t)))
-
-    # === Generate code definitions ===
 
-    def quadrature_weight(self, e, mt, tabledata, access):
+    def constant_value(self, e, mt, tabledata, num_points, access):
+        "Constants simply use literals in the target language."
         return []
 
-    def constant_value(self, e, mt, tabledata, access):
-        return []
 
-    def argument(self, t, mt, tabledata, access):
+    def argument(self, t, mt, tabledata, num_points, access):
+        "Arguments are accessed through element tables."
         return []
 
-    def coefficient(self, t, mt, tabledata, access):
-        L = self.language
 
-        # For a constant coefficient we reference the dofs directly, so no definition needed
-        if is_cellwise_constant(mt.terminal):
-            return []
+    def coefficient(self, t, mt, tabledata, num_points, access):
+        "Return definition code for coefficients."
+        L = self.language
 
         # No need to store basis function value in its own variable,
         # just get table value directly
+        #uname, begin, end, ttype = tabledata
         uname, begin, end = tabledata
-        uname = L.Symbol(uname)
+        table_types = self.ir["expr_irs"][num_points]["table_types"]
+        ttype = table_types[uname]
+
+        #fe_classname = ir["classnames"]["finite_element"][t.ufl_element()]
 
-        # Empty loop needs to be skipped as zero tables may not be generated
         # FIXME: remove at earlier stage so dependent code can also be removed
-        if begin >= end:
-            code = [
-                L.VariableDecl("double", access, 0.0),
-                ]
-            return code
+        if ttype == "zeros":
+            warning("Not expecting zero coefficients to get this far.")
+            return []
+
+        # For a constant coefficient we reference the dofs directly, so no definition needed
+        if ttype == "ones" and (end - begin) == 1:
+            return []
+
+        # For quadrature elements we reference the dofs directly, so no definition needed
+        if ttype == "quadrature":
+            return []
+
+        assert begin < end
+
+        # Entity number
+        if ttype in ("uniform", "fixed"):
+            entity = 0
+        else:
+            entity = self.symbols.entity(self.entitytype, mt.restriction)
+
+        # This check covers "piecewise constant over points on entity"
+        if ttype in ("piecewise", "fixed"):
+            iq = 0
+        else:
+            iq = self.symbols.quadrature_loop_index(num_points)
 
-        # Get various symbols
-        entity = self.symbols.entity(self.ir["entitytype"], mt.restriction)
-        iq = self.symbols.quadrature_loop_index()
         idof = self.symbols.coefficient_dof_sum_index()
         dof_access = self.symbols.coefficient_dof_access(mt.terminal, idof)
-        table_access = uname[entity][iq][idof - begin]
+
+        if ttype == "ones":
+            # Don't think this can actually happen
+            table_access = L.LiteralFloat(1.0)
+        else:
+            uname = L.Symbol(uname)
+            table_access = uname[entity][iq][idof - begin]
 
         # Loop to accumulate linear combination of dofs and tables
         code = [
@@ -146,38 +122,79 @@ class FFCDefinitionsBackend(MultiFunction):
             L.ForRange(idof, begin, end,
                        body=[L.AssignAdd(access, dof_access * table_access)])
             ]
+
         return code
 
-    def _define_coordinate_dofs_lincomb(self, e, mt, tabledata, access):
-        "Define something (x or J) linear combination of coordinate dofs with given table data."
 
+    def _define_coordinate_dofs_lincomb(self, e, mt, tabledata, num_points, access):
+        "Define x or J as a linear combination of coordinate dofs with given table data."
         L = self.language
 
         # Get properties of domain
         domain = mt.terminal.ufl_domain()
-        tdim = domain.topological_dimension()
+        #tdim = domain.topological_dimension()
         gdim = domain.geometric_dimension()
         coordinate_element = domain.ufl_coordinate_element()
-        degree = coordinate_element.degree()
+        #degree = coordinate_element.degree()
         num_scalar_dofs = num_coordinate_component_dofs(coordinate_element)
 
         # Reference coordinates are known, no coordinate field, so we compute
         # this component as linear combination of coordinate_dofs "dofs" and table
 
+        # Find table name and dof range it corresponds to
+        #uname, begin, end, ttype = tabledata
         uname, begin, end = tabledata
-        uname = L.Symbol(uname)
-        #if not ( end - begin <= num_scalar_dofs):
-        #    import IPython; IPython.embed()
+        table_types = self.ir["expr_irs"][num_points]["table_types"]
+        ttype = table_types[uname]
+
         assert end - begin <= num_scalar_dofs
+        assert ttype != "zeros"
+        #xfe_classname = ir["classnames"]["finite_element"][coordinate_element]
+        #sfe_classname = ir["classnames"]["finite_element"][coordinate_element.sub_elements()[0]]
 
-        entity = self.symbols.entity(self.ir["entitytype"], mt.restriction)
+        # Entity number
+        if ttype in ("uniform", "fixed"):
+            entity = 0
+        else:
+            entity = self.symbols.entity(self.entitytype, mt.restriction)
 
-        if is_cellwise_constant(mt.expr):
+        # This check covers "piecewise constant over points on entity"
+        if ttype in ("piecewise", "fixed"):
             iq = 0
         else:
-            iq = self.symbols.quadrature_loop_index()
+            iq = self.symbols.quadrature_loop_index(num_points)
+
+        assert ttype != "quadrature"
+
+        # Make indexable symbol
+        uname = L.Symbol(uname)
 
-        if 0:  # FIXME: Make an option to test
+        if ttype == "zeros":
+            code = [
+                L.VariableDecl("const double", access, L.LiteralFloat(0.0))
+                ]
+        elif ttype == "ones":
+            # Not sure if this ever happens
+            # Inlined version (we know this is bounded by a small number)
+            dof_access = self.symbols.domain_dofs_access(gdim, num_scalar_dofs,
+                                                         mt.restriction,
+                                                         self.interleaved_components)
+            value = L.Sum([dof_access[idof] for idof in range(begin, end)])
+            code = [
+                L.VariableDecl("const double", access, value)
+                ]
+        elif True:
+            # Inlined version (we know this is bounded by a small number)
+            dof_access = self.symbols.domain_dofs_access(gdim, num_scalar_dofs,
+                                                         mt.restriction,
+                                                         self.interleaved_components)
+            # Inlined loop to accumulate linear combination of dofs and tables
+            value = L.Sum([dof_access[idof] * uname[entity][iq][idof - begin]
+                           for idof in range(begin, end)])
+            code = [
+                L.VariableDecl("const double", access, value)
+                ]
+        else:  # TODO: Make an option to test this version for performance
             # Generated loop version:
             coefficient_dof = self.symbols.coefficient_dof_sum_index()
             dof_access = self.symbols.domain_dof_access(coefficient_dof, mt.flat_component,
@@ -191,23 +208,11 @@ class FFCDefinitionsBackend(MultiFunction):
                 L.ForRange(coefficient_dof, begin, end,
                            body=[L.AssignAdd(access, dof_access * table_access)])
                 ]
-        else:
-            # Inlined version (we know this is bounded by a small number)
-            dof_access = self.symbols.domain_dofs_access(gdim, num_scalar_dofs,
-                                                         mt.restriction,
-                                                         self.interleaved_components)
-
-            value = L.Sum([dof_access[idof] * uname[entity][iq][idof - begin]
-                           for idof in range(begin, end)])
-
-            # Inlined loop to accumulate linear combination of dofs and tables
-            code = [
-                L.VariableDecl("const double", access, value)
-                ]
 
         return code
 
-    def spatial_coordinate(self, e, mt, tabledata, access):
+
+    def spatial_coordinate(self, e, mt, tabledata, num_points, access):
         """Return definition code for the physical spatial coordinates.
 
         If physical coordinates are given:
@@ -219,49 +224,61 @@ class FFCDefinitionsBackend(MultiFunction):
         If reference facet coordinates are given:
           x = sum_k xdof_k xphi_k(Xf)
         """
-        if self.physical_coordinates_known:
+        if self.integral_type in physical_quadrature_integral_types:
+            # FIXME: Jacobian may need adjustment for physical_quadrature_integral_types
+            if mt.local_derivatives:
+                error("FIXME: Jacobian in custom integrals is not implemented.")
             return []
         else:
-            return self._define_coordinate_dofs_lincomb(e, mt, tabledata, access)
+            return self._define_coordinate_dofs_lincomb(e, mt, tabledata, num_points, access)
+
 
-    def cell_coordinate(self, e, mt, tabledata, access):
+    def cell_coordinate(self, e, mt, tabledata, num_points, access):
         """Return definition code for the reference spatial coordinates.
 
-        If reference coordinates are given:
-          No definition needed.
+        If reference coordinates are given::
+
+            No definition needed.
+
+        If physical coordinates are given and domain is affine::
+
+            X = K*(x-x0)
 
-        If physical coordinates are given and domain is affine:
-          X = K*(x-x0)
         This is inserted symbolically.
 
-        If physical coordinates are given and domain is non- affine:
-          Not currently supported.
+        If physical coordinates are given and domain is non-affine::
+
+            Not currently supported.
+
         """
+        # Should be either direct access to points array or symbolically computed
         return []
 
-    def jacobian(self, e, mt, tabledata, access):
+
+    def jacobian(self, e, mt, tabledata, num_points, access):
         """Return definition code for the Jacobian of x(X).
 
         J = sum_k xdof_k grad_X xphi_k(X)
         """
-        if self.physical_coordinates_known:
-            return []
-        else:
-            return self._define_coordinate_dofs_lincomb(e, mt, tabledata, access)
+        # TODO: Jacobian may need adjustment for physical_quadrature_integral_types
+        return self._define_coordinate_dofs_lincomb(e, mt, tabledata, num_points, access)
+
 
-    def cell_orientation(self, e, mt, tabledata, access):
+    def cell_orientation(self, e, mt, tabledata, num_points, access):
         # Would be nicer if cell_orientation was a double variable input,
         # but this is how dolfin/ufc/ffc currently passes this information.
         # 0 means up and gives +1.0, 1 means down and gives -1.0.
         L = self.language
-        co = "cell_orientation" + ufc_restriction_postfix(mt.restriction)
-        expr = L.VerbatimExpr("(" + co + " == 1) ? -1.0: +1.0;")
+        co = self.symbols.cell_orientation_argument(mt.restriction)
+        expr = L.Conditional(L.EQ(co, L.LiteralInt(1)),
+                             L.LiteralFloat(-1.0), L.LiteralFloat(+1.0))
         code = [
             L.VariableDecl("const double", access, expr)
             ]
         return code
 
-    def _expect_table(self, e, mt, tabledata, access):
+
+    def _expect_table(self, e, mt, tabledata, num_points, access):
         "These quantities refer to constant tables defined in ufc_geometry.h."
         # TODO: Inject const static table here instead?
         return []
@@ -273,7 +290,8 @@ class FFCDefinitionsBackend(MultiFunction):
     facet_edge_vectors = _expect_table
     facet_orientation = _expect_table
 
-    def _expect_symbolic_lowering(self, e, mt, tabledata, access):
+
+    def _expect_symbolic_lowering(self, e, mt, tabledata, num_points, access):
         "These quantities are expected to be replaced in symbolic preprocessing."
         error("Expecting {0} to be replaced in symbolic preprocessing.".format(type(e)))
     facet_normal = _expect_symbolic_lowering
diff --git a/ffc/uflacs/backends/ffc/symbols.py b/ffc/uflacs/backends/ffc/symbols.py
new file mode 100644
index 0000000..d748ea2
--- /dev/null
+++ b/ffc/uflacs/backends/ffc/symbols.py
@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
+#
+# This file is part of UFLACS.
+#
+# UFLACS is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UFLACS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UFLACS. If not, see <http://www.gnu.org/licenses/>
+
+"""FFC/UFC specific symbol naming."""
+
+from ffc.log import error
+
+
+# TODO: Get restriction postfix from somewhere central
+def ufc_restriction_postfix(restriction):
+    if restriction == "+":
+        res = "_0"
+    elif restriction == "-":
+        res = "_1"
+    else:
+        res = ""
+    return res
+
+
+def format_mt_name(basename, mt):
+    "Format variable name for modified terminal."
+    access = str(basename)
+
+    # Add averaged state to name
+    if mt.averaged:
+        avg = "_a{0}".format(mt.averaged)
+        access += avg
+
+    # Format restriction
+    res = ufc_restriction_postfix(mt.restriction).replace("_", "_r")
+    access += res
+
+    # Format local derivatives
+    assert not mt.global_derivatives
+    if mt.local_derivatives:
+        der = "_d{0}".format(''.join(map(str, mt.local_derivatives)))
+        access += der
+
+    # Add flattened component to name
+    if mt.component:
+        comp = "_c{0}".format(mt.flat_component)
+        access += comp
+
+    return access
+
+
+class FFCBackendSymbols(object):
+    """FFC specific symbol definitions. Provides non-ufl symbols."""
+    def __init__(self, language, coefficient_numbering):
+        self.L = language
+        self.S = self.L.Symbol
+        self.coefficient_numbering = coefficient_numbering
+
+        # Used for padding variable names based on restriction
+        self.restriction_postfix = { r: ufc_restriction_postfix(r)
+                                     for r in ("+", "-", None) }
+
+    def element_tensor(self):
+        "Symbol for the element tensor itself."
+        return self.S("A")
+
+    def entity(self, entitytype, restriction):
+        "Entity index for lookup in element tables."
+        if entitytype == "cell":
+            # Always 0 for cells (even with restriction)
+            return self.L.LiteralInt(0)
+        elif entitytype == "facet":
+            return self.S("facet" + ufc_restriction_postfix(restriction))
+        elif entitytype == "vertex":
+            return self.S("vertex")
+        else:
+            error("Unknown entitytype {}".format(entitytype))
+
+    def cell_orientation_argument(self, restriction):
+        "Cell orientation argument in ufc. Not same as cell orientation in generated code."
+        return self.S("cell_orientation" + ufc_restriction_postfix(restriction))
+
+    def cell_orientation_internal(self, restriction):
+        "Internal value for cell orientation in generated code."
+        return self.S("co" + ufc_restriction_postfix(restriction))
+
+    def num_quadrature_points(self, num_points):
+        if num_points is None:
+            return self.S("num_quadrature_points")
+        else:
+            return self.L.LiteralInt(num_points)
+
+    def weights_array(self, num_points):
+        if num_points is None:
+            return self.S("quadrature_weights")
+        else:
+            return self.S("weights%d" % (num_points,))
+
+    def points_array(self, num_points):
+        # Note: Points array refers to points on the integration cell
+        if num_points is None:
+            return self.S("quadrature_points")
+        else:
+            return self.S("points%d" % (num_points,))
+
+    def quadrature_loop_index(self, num_points):
+        """Reusing a single index name for all quadrature loops,
+        assumed not to be nested."""
+        if num_points == 1:
+            return self.L.LiteralInt(0)
+        elif num_points is None:
+            return self.S("iq")
+        else:
+            return self.S("iq%d" % (num_points,))
+
+    def argument_loop_index(self, iarg):
+        "Loop index for argument #iarg."
+        return self.S("ia%d" % (iarg,))
+
+    def coefficient_dof_sum_index(self):
+        """Reusing a single index name for all coefficient dof*basis sums,
+        assumed to always be the innermost loop."""
+        return self.S("ic")
+
+    def x_component(self, mt):
+        "Physical coordinate component."
+        return self.S(format_mt_name("x", mt))
+
+    def J_component(self, mt):
+        "Jacobian component."
+        return self.S(format_mt_name("J", mt))
+
+    def domain_dof_access(self, dof, component, gdim, num_scalar_dofs,
+                          restriction, interleaved_components):
+        # TODO: Add domain number?
+        vc = self.S("coordinate_dofs" + ufc_restriction_postfix(restriction))
+        if interleaved_components:
+            return vc[gdim*dof + component]
+        else:
+            return vc[num_scalar_dofs*component + dof]
+
+    def domain_dofs_access(self, gdim, num_scalar_dofs, restriction,
+                           interleaved_components):
+        # TODO: Add domain number?
+        return [self.domain_dof_access(dof, component, gdim, num_scalar_dofs,
+                                       restriction, interleaved_components)
+                for component in range(gdim)
+                for dof in range(num_scalar_dofs)]
+
+    def coefficient_dof_access(self, coefficient, dof_number):
+        # TODO: Add domain number?
+        c = self.coefficient_numbering[coefficient]
+        w = self.S("w")
+        return w[c, dof_number]
+
+    def coefficient_value(self, mt):  #, num_points):
+        "Symbol for variable holding value or derivative component of coefficient."
+        c = self.coefficient_numbering[mt.terminal]
+        return self.S(format_mt_name("w%d" % (c,), mt))
+        # TODO: Should we include num_points here? Not sure if there is a need.
+        #return self.S(format_mt_name("w%d_%d" % (c, num_points), mt))
diff --git a/uflacs/backends/ufc/__init__.py b/ffc/uflacs/backends/ufc/__init__.py
similarity index 93%
rename from uflacs/backends/ufc/__init__.py
rename to ffc/uflacs/backends/ufc/__init__.py
index 0f336f0..e5f9f9c 100644
--- a/uflacs/backends/ufc/__init__.py
+++ b/ffc/uflacs/backends/ufc/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2015-2015 Martin Sandve Alnæs
+# Copyright (C) 2015-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
diff --git a/uflacs/backends/ufc/coordinate_mapping.py b/ffc/uflacs/backends/ufc/coordinate_mapping.py
similarity index 99%
rename from uflacs/backends/ufc/coordinate_mapping.py
rename to ffc/uflacs/backends/ufc/coordinate_mapping.py
index a9166c0..65422c3 100644
--- a/uflacs/backends/ufc/coordinate_mapping.py
+++ b/ffc/uflacs/backends/ufc/coordinate_mapping.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2015-2015 Martin Sandve Alnæs
+# Copyright (C) 2015-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -16,7 +16,7 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
 
-from uflacs.backends.ufc.generator import ufc_generator
+from ffc.uflacs.backends.ufc.generator import ufc_generator
 
 ### Code generation utilities:
 
diff --git a/uflacs/backends/ufc/dofmap.py b/ffc/uflacs/backends/ufc/dofmap.py
similarity index 82%
rename from uflacs/backends/ufc/dofmap.py
rename to ffc/uflacs/backends/ufc/dofmap.py
index 9f33525..29bc842 100644
--- a/uflacs/backends/ufc/dofmap.py
+++ b/ffc/uflacs/backends/ufc/dofmap.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2009-2015 Anders Logg and Martin Sandve Alnæs
+# Copyright (C) 2009-2016 Anders Logg and Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -19,8 +19,9 @@
 # Note: Most of the code in this file is a direct translation from the old implementation in FFC
 
 
-from uflacs.backends.ufc.generator import ufc_generator
-from uflacs.backends.ufc.utils import generate_return_new_switch
+from ffc.uflacs.backends.ufc.generator import ufc_generator
+from ffc.uflacs.backends.ufc.utils import generate_return_new_switch
+
 
 class ufc_dofmap(ufc_generator):
     def __init__(self):
@@ -70,6 +71,13 @@ class ufc_dofmap(ufc_generator):
         default = L.Return(L.LiteralInt(0))
         return L.Switch(d, cases, default=default)
 
+    def num_entity_closure_dofs(self, L, ir):
+        d = L.Symbol("d")
+        values = ir["num_entity_closure_dofs"]
+        cases = [(i, L.Return(L.LiteralInt(value))) for i, value in enumerate(values)]
+        default = L.Return(L.LiteralInt(0))
+        return L.Switch(d, cases, default=default)
+
     def tabulate_dofs(self, L, ir):
 
         # Input arguments
@@ -205,6 +213,41 @@ class ufc_dofmap(ufc_generator):
 
         return L.Switch(d, all_cases, autoscope=False)
 
+    def tabulate_entity_closure_dofs(self, L, ir):
+        # Extract variables from ir
+        entity_closure_dofs, entity_dofs, num_dofs_per_entity = \
+            ir["tabulate_entity_closure_dofs"]
+
+        # Output argument array
+        dofs = L.Symbol("dofs")
+
+        # Input arguments
+        d = L.Symbol("d")
+        i = L.Symbol("i")
+
+        # TODO: Removed check for (d <= tdim + 1)
+        tdim = len(num_dofs_per_entity) - 1
+
+        # Generate cases for each dimension:
+        all_cases = []
+        for dim in range(tdim + 1):
+            num_entities = len(entity_dofs[dim])
+
+            # Generate cases for each mesh entity
+            cases = []
+            for entity in range(num_entities):
+                casebody = []
+                for (j, dof) in enumerate(entity_closure_dofs[(dim, entity)]):
+                    casebody += [L.Assign(dofs[j], dof)]
+                cases.append((entity, L.StatementList(casebody)))
+
+            # Generate inner switch
+            # TODO: Removed check for (i <= num_entities-1)
+            inner_switch = L.Switch(i, cases, autoscope=False)
+            all_cases.append((dim, inner_switch))
+
+        return L.Switch(d, all_cases, autoscope=False)
+
     def num_sub_dofmaps(self, L, ir):
         value = ir["num_sub_dofmaps"]
         return L.Return(L.LiteralInt(value))
@@ -212,4 +255,4 @@ class ufc_dofmap(ufc_generator):
     def create_sub_dofmap(self, L, ir):
         i = L.Symbol("i")
         classnames = ir["create_sub_dofmap"]
-        return generate_return_new_switch(L, i, classnames)
+        return generate_return_new_switch(L, i, classnames, factory=ir["jit"])
diff --git a/uflacs/backends/ufc/evaluatebasis.py b/ffc/uflacs/backends/ufc/evaluatebasis.py
similarity index 94%
rename from uflacs/backends/ufc/evaluatebasis.py
rename to ffc/uflacs/backends/ufc/evaluatebasis.py
index 730a62b..e8611aa 100644
--- a/uflacs/backends/ufc/evaluatebasis.py
+++ b/ffc/uflacs/backends/ufc/evaluatebasis.py
@@ -1,3 +1,9 @@
+# -*- coding: utf-8 -*-
+"""Work in progress translation of FFC evaluatebasis code to uflacs CNodes format."""
+
+from six import string_types
+from ffc.log import error
+
 """
 TODO: Add these to ufc::finite_element:
 
@@ -91,7 +97,7 @@ def generate_evaluate_reference_basis(L, data):
     The FFC code has a comment "From FIAT_NEW.polynomial_set.tabulate()".
     """
     # Cutoff for feature to disable generation of this code (consider removing after benchmarking final result)
-    if isinstance(data, str):
+    if isinstance(data, string_types):
         return L.Throw("evaluate_reference_basis: %s" % data)
 
     # Get some known dimensions
@@ -629,8 +635,8 @@ def __ffc_implementation_of__generate_apply_mapping_to_computed_values(L):
             name = f_component(f_values, i + offset)
             code += [f_assign(name, value)]
 
-    elif mapping == "pullback as metric":
-        code += ["", f_comment("Using metric pullback to map values back to the physical element")]
+    elif mapping == "double covariant piola":
+        code += ["", f_comment("Using double covariant Piola transform to map values back to the physical element")]
         # Get temporary values before mapping.
         code += [f_const_float(f_tmp_ref(i), f_component(f_values, i + offset))
                  for i in range(num_components)]
@@ -653,5 +659,30 @@ def __ffc_implementation_of__generate_apply_mapping_to_computed_values(L):
             name = f_component(f_values, p + offset)
             code += [f_assign(name, value)]
 
+    elif mapping == "double contravariant piola":
+        code += ["", f_comment("Pullback of a matrix-valued function as contravariant 2-tensor mapping values back to the physical element")]
+        # Get temporary values before mapping.
+        code += [f_const_float(f_tmp_ref(i), f_component(f_values, i + offset))
+                 for i in range(num_components)]
+        # Create names for inner product.
+        tdim = data["topological_dimension"]
+        gdim = data["geometric_dimension"]
+        basis_col = [f_tmp_ref(j) for j in range(num_components)]
+        for p in range(num_components):
+            # unflatten the indices
+            i = p // tdim
+            l = p % tdim
+            # g_il = (detJ)^(-2) J_ij G_jk J_lk
+            value = f_group(f_inner(
+                [f_inner([f_trans("J", i, j, tdim, gdim, None)
+                          for j in range(tdim)],
+                         [basis_col[j * tdim + k] for j in range(tdim)])
+                 for k in range(tdim)],
+                [f_trans("J", l, k, tdim, gdim, None)
+                 for k in range(tdim)]))
+            value = f_mul([f_inv(f_detJ(None)), f_inv(f_detJ(None)), value])
+            name = f_component(f_values, p + offset)
+            code += [f_assign(name, value)]
+
     else:
         error("Unknown mapping: %s" % mapping)
diff --git a/uflacs/backends/ufc/finite_element.py b/ffc/uflacs/backends/ufc/finite_element.py
similarity index 91%
rename from uflacs/backends/ufc/finite_element.py
rename to ffc/uflacs/backends/ufc/finite_element.py
index df2400c..13cba22 100644
--- a/uflacs/backends/ufc/finite_element.py
+++ b/ffc/uflacs/backends/ufc/finite_element.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2009-2015 Anders Logg and Martin Sandve Alnæs
+# Copyright (C) 2009-2016 Anders Logg and Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -20,8 +20,9 @@
 
 
 from ufl import product
-from uflacs.backends.ufc.generator import ufc_generator
-from uflacs.backends.ufc.utils import generate_return_new_switch
+from ffc.uflacs.backends.ufc.generator import ufc_generator
+from ffc.uflacs.backends.ufc.utils import generate_return_new_switch
+
 
 def affine_weights(dim): # FIXME: This is used where we still assume an affine mesh. Get rid of all places that use it.
     "Compute coefficents for mapping from reference to physical element"
@@ -92,7 +93,7 @@ class ufc_finite_element(ufc_generator):
         return L.Switch(i, cases, default=default, autoscope=False, autobreak=False)
 
     def evaluate_reference_basis(self, L, ir): # FIXME: NEW implement!
-        from uflacs.backends.ufc.evaluatebasis import generate_evaluate_reference_basis
+        from ffc.uflacs.backends.ufc.evaluatebasis import generate_evaluate_reference_basis
         return generate_evaluate_reference_basis(ir["evaluate_reference_basis"])
 
     def evaluate_reference_basis_derivatives(self, L, ir): # FIXME: NEW implement!
@@ -125,6 +126,7 @@ class ufc_finite_element(ufc_generator):
         return "FIXME"
 
     def evaluate_dofs(self, L, ir):
+        """Generate code for evaluate_dofs."""
         # FIXME: port this, then translate into reference version
         """
         - evaluate_dof needs to be split into invert_mapping + evaluate_dof or similar?
@@ -195,7 +197,8 @@ class ufc_finite_element(ufc_generator):
         # Basis symbol
         phi = L.Symbol("phi")
 
-        # TODO: This code assumes an affine coordinate field. Ok for now in here, this function must be removed anyway.
+        # TODO: This code assumes an affine coordinate field.
+        #       Ok for now in here, this function must be removed anyway.
         # Create code for evaluating affine coordinate basis functions
         num_scalar_xdofs = tdim + 1
         cg1_basis = affine_weights(tdim)
@@ -212,7 +215,9 @@ class ufc_finite_element(ufc_generator):
             L.ForRange(ip, 0, len(points), body=
                 L.ForRange(i, 0, gdim, body=
                     L.ForRange(k, 0, num_scalar_xdofs, body=
-                        L.AssignAdd(dof_coordinates[ip][i], coordinate_dofs[gdim*k + i] * phi[ip*num_scalar_xdofs + k])))),
+                        L.AssignAdd(dof_coordinates[ip][i],
+                                    coordinate_dofs[gdim*k + i]
+                                    * phi[ip*num_scalar_xdofs + k])))),
             ]
         return L.StatementList(code)
 
@@ -223,4 +228,4 @@ class ufc_finite_element(ufc_generator):
     def create_sub_element(self, L, ir):
         i = L.Symbol("i")
         classnames = ir["create_sub_element"]
-        return generate_return_new_switch(L, i, classnames)
+        return generate_return_new_switch(L, i, classnames, factory=ir["jit"])
diff --git a/uflacs/backends/ufc/form.py b/ffc/uflacs/backends/ufc/form.py
similarity index 84%
rename from uflacs/backends/ufc/form.py
rename to ffc/uflacs/backends/ufc/form.py
index af2026b..d626d91 100644
--- a/uflacs/backends/ufc/form.py
+++ b/ffc/uflacs/backends/ufc/form.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2009-2015 Anders Logg and Martin Sandve Alnæs
+# Copyright (C) 2009-2016 Anders Logg and Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -19,8 +19,9 @@
 # Note: Most of the code in this file is a direct translation from the old implementation in FFC
 
 from ffc.cpp import make_integral_classname
-from uflacs.backends.ufc.generator import ufc_generator, integral_name_templates, ufc_integral_types
-from uflacs.backends.ufc.utils import generate_return_new_switch
+from ffc.uflacs.backends.ufc.generator import ufc_generator, integral_name_templates, ufc_integral_types
+from ffc.uflacs.backends.ufc.utils import generate_return_new, generate_return_new_switch
+
 
 def add_ufc_form_integral_methods(cls):
     """This function generates methods on the class it decorates,
@@ -51,6 +52,7 @@ def add_ufc_form_integral_methods(cls):
             setattr(cls, declname, _delegate)
     return cls
 
+
 @add_ufc_form_integral_methods
 class ufc_form(ufc_generator):
     def __init__(self):
@@ -81,7 +83,8 @@ class ufc_form(ufc_generator):
 
         position = L.Symbol("position")
 
-        # Throwing a lot into the 'typename' string here but no plans for building a full C++ type system
+        # Throwing a lot into the 'typename' string here but
+        # no plans for building a full C++ type system
         typename = "static const std::vector<std::size_t>"
         initializer_list = L.VerbatimExpr("{" + ", ".join(str(i) for i in positions) + "}")
         code = L.StatementList([
@@ -90,34 +93,36 @@ class ufc_form(ufc_generator):
             ])
         return code
 
+
     def create_coordinate_finite_element(self, L, ir):
         classnames = ir["create_coordinate_finite_element"]
         assert len(classnames) == 1
-        return L.Return(L.New(classnames[0]))
-        # TODO: Use factory functions instead, here and in all create_* functions:
-        #factoryname = make_factory_function_name(classname)
-        #return L.Return(L.Call(factoryname))
+        return generate_return_new(L, classnames[0], factory=ir["jit"])
 
     def create_coordinate_dofmap(self, L, ir):
         classnames = ir["create_coordinate_dofmap"]
         assert len(classnames) == 1
-        return L.Return(L.New(classnames[0]))
+        return generate_return_new(L, classnames[0], factory=ir["jit"])
 
     def create_coordinate_mapping(self, L, ir):
         classnames = ir["create_coordinate_mapping"]
         assert len(classnames) == 1
-        return L.Return(L.New(classnames[0]))
+        return generate_return_new(L, classnames[0], factory=ir["jit"])
 
     def create_finite_element(self, L, ir):
         i = L.Symbol("i")
         classnames = ir["create_finite_element"]
-        return generate_return_new_switch(L, i, classnames)
+        return generate_return_new_switch(L, i, classnames, factory=ir["jit"])
 
     def create_dofmap(self, L, ir):
         i = L.Symbol("i")
         classnames = ir["create_dofmap"]
-        return generate_return_new_switch(L, i, classnames)
+        return generate_return_new_switch(L, i, classnames, factory=ir["jit"])
+
 
+    # This group of functions is repeated for each
+    # foo_integral by add_ufc_form_integral_methods:
+    
     def _max_foo_subdomain_id(self, L, ir, integral_type, declname):
         "Return implementation of ufc::form::%(declname)s()."
         # e.g. max_subdomain_id = ir["max_cell_subdomain_id"]
@@ -139,7 +144,8 @@ class ufc_form(ufc_generator):
         subdomain_ids = ir[declname] # e.g. ir["create_cell_integral"]
         classnames = [make_integral_classname(prefix, integral_type, form_id, i)
                       for i in subdomain_ids]
-        return generate_return_new_switch(L, subdomain_id, classnames, subdomain_ids)
+        return generate_return_new_switch(L, subdomain_id, classnames,
+                                          subdomain_ids, factory=ir["jit"])
 
     def _create_default_foo_integral(self, L, ir, integral_type, declname):
         "Return implementation of ufc::form::%(declname)s()."
@@ -150,4 +156,4 @@ class ufc_form(ufc_generator):
             form_id = ir["id"]
             prefix = ir["prefix"]
             classname = make_integral_classname(prefix, integral_type, form_id, subdomain_id)
-            return L.Return(L.New(classname))
+            return generate_return_new(L, classname, factory=ir["jit"])
diff --git a/uflacs/backends/ufc/generator.py b/ffc/uflacs/backends/ufc/generator.py
similarity index 96%
rename from uflacs/backends/ufc/generator.py
rename to ffc/uflacs/backends/ufc/generator.py
index 955cffd..18f972f 100644
--- a/uflacs/backends/ufc/generator.py
+++ b/ffc/uflacs/backends/ufc/generator.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2015-2015 Martin Sandve Alnæs
+# Copyright (C) 2015-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -16,18 +16,18 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
 
-import inspect
+#import inspect
 import re
-from string import Formatter
+#from string import Formatter
 
-from ufl import product
+#from ufl import product
 from ffc.log import error, warning
 #from ffc.backends.ufc import *
 
 import ffc.backends.ufc
 
-from uflacs.language.format_lines import format_indented_lines
-from uflacs.backends.ufc.templates import *
+from ffc.uflacs.language.format_lines import format_indented_lines
+from ffc.uflacs.backends.ufc.templates import *
 
 #__all__ = (["ufc_form", "ufc_dofmap", "ufc_finite_element", "ufc_integral"]
 #           + ["ufc_%s_integral" % integral_type for integral_type in integral_types])
diff --git a/uflacs/backends/ufc/generators.py b/ffc/uflacs/backends/ufc/generators.py
similarity index 66%
rename from uflacs/backends/ufc/generators.py
rename to ffc/uflacs/backends/ufc/generators.py
index ef60b1d..23163c8 100644
--- a/uflacs/backends/ufc/generators.py
+++ b/ffc/uflacs/backends/ufc/generators.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2015-2015 Martin Sandve Alnæs
+# Copyright (C) 2015-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -16,8 +16,8 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
 
-from uflacs.backends.ufc.finite_element import ufc_finite_element
-from uflacs.backends.ufc.dofmap import ufc_dofmap
-from uflacs.backends.ufc.coordinate_mapping import ufc_coordinate_mapping
-from uflacs.backends.ufc.integrals import *
-from uflacs.backends.ufc.form import ufc_form
+from ffc.uflacs.backends.ufc.finite_element import ufc_finite_element
+from ffc.uflacs.backends.ufc.dofmap import ufc_dofmap
+from ffc.uflacs.backends.ufc.coordinate_mapping import ufc_coordinate_mapping
+from ffc.uflacs.backends.ufc.integrals import *
+from ffc.uflacs.backends.ufc.form import ufc_form
diff --git a/uflacs/backends/ufc/integrals.py b/ffc/uflacs/backends/ufc/integrals.py
similarity index 88%
rename from uflacs/backends/ufc/integrals.py
rename to ffc/uflacs/backends/ufc/integrals.py
index c3a7e49..903a97e 100644
--- a/uflacs/backends/ufc/integrals.py
+++ b/ffc/uflacs/backends/ufc/integrals.py
@@ -1,5 +1,6 @@
+# -*- coding: utf-8 -*-
 
-from uflacs.backends.ufc.generator import ufc_generator, ufc_integral_types
+from ffc.uflacs.backends.ufc.generator import ufc_generator, ufc_integral_types
 
 class ufc_integral(ufc_generator):
     def __init__(self, integral_type):
@@ -23,6 +24,10 @@ class ufc_integral(ufc_generator):
         code = "code generated from %s" % tt
         return code
 
+    def tabulate_tensor_comment(self, L, ir):
+        # FIXME: Copy from ffc.codegeneration._generate_tabulate_tensor_comment
+        return ""
+
 class ufc_cell_integral(ufc_integral):
     def __init__(self):
         ufc_integral.__init__(self, "cell")
diff --git a/uflacs/backends/ufc/templates.py b/ffc/uflacs/backends/ufc/templates.py
similarity index 98%
rename from uflacs/backends/ufc/templates.py
rename to ffc/uflacs/backends/ufc/templates.py
index 4c471ef..026af83 100644
--- a/uflacs/backends/ufc/templates.py
+++ b/ffc/uflacs/backends/ufc/templates.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 
 import re
 from ffc.backends.ufc import *
diff --git a/ffc/uflacs/backends/ufc/utils.py b/ffc/uflacs/backends/ufc/utils.py
new file mode 100644
index 0000000..e7d2ce7
--- /dev/null
+++ b/ffc/uflacs/backends/ufc/utils.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+
+
+# TODO: Move these to uflacs.language utils?
+
+
+def generate_return_new(L, classname, factory):
+    if factory:
+        return L.Return(L.Call("create_" + classname))
+    else:
+        return L.Return(L.New(classname))
+
+
+def generate_return_new_switch(L, i, classnames, args=None, factory=False):
+    if factory:
+        def create(classname):
+            return L.New(classname)
+    else:
+        def create(classname):
+            return L.Call("create_" + classname)
+    if classnames:
+        cases = []
+        if args is None:
+            args = list(range(len(classnames)))
+        for j, classname in zip(args, classnames):
+            if classname:
+                cases.append((j, L.Return(create(classname))))
+        code = [L.Switch(i, cases, autobreak=False, autoscope=False)]
+    else:
+        code = []
+    code.append(L.Return(L.Null()))
+    return L.StatementList(code)
diff --git a/uflacs/elementtables/__init__.py b/ffc/uflacs/elementtables/__init__.py
similarity index 94%
rename from uflacs/elementtables/__init__.py
rename to ffc/uflacs/elementtables/__init__.py
index d49dca1..63d581a 100644
--- a/uflacs/elementtables/__init__.py
+++ b/ffc/uflacs/elementtables/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
diff --git a/ffc/uflacs/elementtables/table_utils.py b/ffc/uflacs/elementtables/table_utils.py
new file mode 100644
index 0000000..9f61e23
--- /dev/null
+++ b/ffc/uflacs/elementtables/table_utils.py
@@ -0,0 +1,230 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
+#
+# This file is part of UFLACS.
+#
+# UFLACS is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UFLACS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UFLACS. If not, see <http://www.gnu.org/licenses/>
+
+"""Utilities for precomputed table manipulation."""
+
+from __future__ import print_function  # used in some debugging
+
+import numpy
+
+from ufl.permutation import build_component_numbering
+from ufl.cell import num_cell_entities
+
+from ffc.log import error
+from ffc.fiatinterface import create_element
+from ffc.representationutils import integral_type_to_entity_dim, map_integral_points
+from ffc.representationutils import create_quadrature_points_and_weights
+
+def equal_tables(a, b, eps):
+    "Compare tables to be equal within a tolerance."
+    a = numpy.asarray(a)
+    b = numpy.asarray(b)
+    if a.shape != b.shape:
+        return False
+    if len(a.shape) > 1:
+        return all(equal_tables(a[i], b[i], eps)
+                   for i in range(a.shape[0]))
+    def scalars_equal(x, y, eps):
+        return abs(x-y) < eps
+    return all(scalars_equal(a[i], b[i], eps)
+               for i in range(a.shape[0]))
+
+
+def clamp_table_small_integers(table, eps):
+    "Clamp almost 0,1,-1 values to integers. Returns new table."
+    # Get shape of table and number of columns, defined as the last axis
+    table = numpy.asarray(table)
+    for n in (-1, 0, 1):
+        table[numpy.where(abs(table - n) < eps)] = float(n)
+    return table
+
+
+def strip_table_zeros(table, eps):
+    "Strip zero columns from table. Returns column range (begin,end) and the new compact table."
+    # Get shape of table and number of columns, defined as the last axis
+    table = numpy.asarray(table)
+    sh = table.shape
+    nc = sh[-1]
+
+    # Find first nonzero column
+    begin = nc
+    for i in range(nc):
+        if numpy.linalg.norm(table[..., i]) > eps:
+            begin = i
+            break
+
+    # Find (one beyond) last nonzero column
+    end = begin
+    for i in range(nc-1, begin-1, -1):
+        if numpy.linalg.norm(table[..., i]) > eps:
+            end = i+1
+            break
+
+    # Make subtable by stripping first and last columns
+    stripped_table = table[..., begin:end]
+    return begin, end, stripped_table
+
+
+def build_unique_tables(tables, eps):
+    """Given a list or dict of tables, return a list of unique tables
+    and a dict of unique table indices for each input table key."""
+    unique = []
+    mapping = {}
+
+    if isinstance(tables, list):
+        keys = list(range(len(tables)))
+    elif isinstance(tables, dict):
+        keys = sorted(tables.keys())
+
+    for k in keys:
+        t = tables[k]
+        found = -1
+        for i, u in enumerate(unique):
+            if equal_tables(u, t, eps):
+                found = i
+                break
+        if found == -1:
+            i = len(unique)
+            unique.append(t)
+        mapping[k] = i
+
+    return unique, mapping
+
+
+def get_ffc_table_values(points,
+                         cell, integral_type,
+                         num_points, # TODO: Remove, not needed
+                         ufl_element, avg,
+                         entitytype, derivative_counts,
+                         flat_component, epsilon):
+    """Extract values from ffc element table.
+
+    Returns a 3D numpy array with axes
+    (entity number, quadrature point number, dof number)
+    """
+    deriv_order = sum(derivative_counts)
+
+    if avg in ("cell", "facet"):
+        # Redefine points to compute average tables
+
+        # Make sure this is not called with points, that doesn't make sense
+        #assert points is None
+        #assert num_points is None
+
+        # Not expecting derivatives of averages
+        assert not any(derivative_counts)
+        assert deriv_order == 0
+
+        # Doesn't matter if it's exterior or interior facet integral,
+        # just need a valid integral type to create quadrature rule
+        if avg == "cell":
+            integral_type = "cell"
+        elif avg == "facet":
+            integral_type = "exterior_facet"
+
+        # Make quadrature rule and get points and weights
+        points, weights = create_quadrature_points_and_weights(
+            integral_type, cell, ufl_element.degree(), "default")
+
+    # Tabulate table of basis functions and derivatives in points for each entity
+    fiat_element = create_element(ufl_element)
+    tdim = cell.topological_dimension()
+    entity_dim = integral_type_to_entity_dim(integral_type, tdim)
+    num_entities = num_cell_entities[cell.cellname()][entity_dim]
+    entity_tables = []
+    for entity in range(num_entities):
+        entity_points = map_integral_points(points, integral_type, cell, entity)
+        tbl = fiat_element.tabulate(deriv_order, entity_points)[derivative_counts]
+        entity_tables.append(tbl)
+
+    # Extract arrays for the right scalar component
+    component_tables = []
+    sh = ufl_element.value_shape()
+    if sh == ():
+        # Scalar valued element
+        for entity, entity_table in enumerate(entity_tables):
+            component_tables.append(entity_table)
+    elif len(sh) == 2 and ufl_element.num_sub_elements() == 0:
+        # 2-tensor-valued elements, not a tensor product
+        # mapping flat_component back to tensor component
+        (_, f2t) = build_component_numbering(sh, ufl_element.symmetry())
+        t_comp = f2t[flat_component]
+        for entity, entity_table in enumerate(entity_tables):
+            tbl = entity_table[:, t_comp[0], t_comp[1], :]
+            component_tables.append(tbl)
+    else:
+        # Vector-valued or mixed element
+        for entity, entity_table in enumerate(entity_tables):
+            tbl = entity_table[:, flat_component, :]
+            component_tables.append(tbl)
+
+    if avg in ("cell", "facet"):
+        # Compute numeric integral of each component table
+        wsum = sum(weights)
+        for entity, tbl in enumerate(component_tables):
+            num_dofs = tbl.shape[0]
+            tbl = numpy.dot(tbl, weights) / wsum
+            tbl = numpy.reshape(tbl, (num_dofs, 1))
+            component_tables[entity] = tbl
+
+    # Loop over entities and fill table blockwise (each block = points x dofs)
+    # Reorder axes as (points, dofs) instead of (dofs, points)
+    assert len(component_tables) == num_entities
+    num_dofs, num_points = component_tables[0].shape
+    shape = (num_entities, num_points, num_dofs)
+    res = numpy.zeros(shape)
+    for entity in range(num_entities):
+        res[entity, :, :] = numpy.transpose(component_tables[entity])
+    return res
+
+
+def generate_psi_table_name(num_points, element_counter, averaged,
+                            entitytype, derivative_counts, flat_component):
+    """Generate a name for the psi table of the form:
+    FE#_C#_D###[_AC|_AF|][_F|V][_Q#], where '#' will be an integer value.
+
+    FE  - is a simple counter to distinguish the various bases, it will be
+          assigned in an arbitrary fashion.
+
+    C   - is the component number if any (this does not yet take into account
+          tensor valued functions)
+
+    D   - is the number of derivatives in each spatial direction if any.
+          If the element is defined in 3D, then D012 means d^3(*)/dydz^2.
+
+    AC  - marks that the element values are averaged over the cell
+
+    AF  - marks that the element values are averaged over the facet
+
+    F   - marks that the first array dimension enumerates facets on the cell
+
+    V   - marks that the first array dimension enumerates vertices on the cell
+
+    Q   - number of quadrature points, to distinguish between tables in a mixed quadrature degree setting
+
+    """
+    name = "FE%d" % element_counter
+    if flat_component is not None:
+        name += "_C%d" % flat_component
+    if any(derivative_counts):
+        name += "_D" + "".join(str(d) for d in derivative_counts)
+    name += { None: "", "cell": "_AC", "facet": "_AF" }[averaged]
+    name += { "cell": "", "facet": "_F", "vertex": "_V" }[entitytype]
+    if num_points is not None:
+        name += "_Q%d" % num_points
+    return name
diff --git a/ffc/uflacs/elementtables/terminaltables.py b/ffc/uflacs/elementtables/terminaltables.py
new file mode 100644
index 0000000..9dd8c90
--- /dev/null
+++ b/ffc/uflacs/elementtables/terminaltables.py
@@ -0,0 +1,416 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
+#
+# This file is part of UFLACS.
+#
+# UFLACS is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UFLACS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
+
+"""Tools for precomputed tables of terminal values."""
+
+import numpy
+
+from ufl.utils.sequences import product
+from ufl.utils.derivativetuples import derivative_listing_to_counts
+from ufl.permutation import build_component_numbering
+from ufl.classes import FormArgument, GeometricQuantity, SpatialCoordinate, Jacobian
+from ufl.algorithms.analysis import unique_tuple
+
+from ffc.log import error
+
+from ffc.uflacs.elementtables.table_utils import generate_psi_table_name, get_ffc_table_values
+from ffc.uflacs.elementtables.table_utils import clamp_table_small_integers, strip_table_zeros, build_unique_tables
+
+from ffc.uflacs.backends.ffc.common import ufc_restriction_offset
+
+
class Table(object):  # TODO: Use this class for tables with metadata
    """Table with metadata.

    Valid table types:
    "zeros"
    "ones"
    "quadrature"
    "piecewise"
    "uniform"
    "fixed"
    "varying"

    FIXME: Document these. For now see table computation.
    """
    def __init__(self, name, values, tabletype):
        # values is a 3D array laid out as (num_entities, num_points, num_dofs)
        shape = values.shape
        self.name = name
        self.values = values
        self.num_entities = shape[0]
        self.num_points = shape[1]
        self.num_dofs = shape[2]
        self.tabletype = tabletype

        # Convenience flags; a "fixed" table is both piecewise and uniform
        self.piecewise = tabletype in ("piecewise", "fixed")
        self.uniform = tabletype in ("uniform", "fixed")
+
+
def get_modified_terminal_element(mt):
    """Extract (element, averaged, local_derivatives, flat_component)
    for a modified terminal, or None if it has no associated element table.

    local_derivatives is returned in "counts" format, i.e. one derivative
    count per spatial direction of the reference cell.
    """
    gd = mt.global_derivatives
    ld = mt.local_derivatives

    # Extract element from FormArguments and relevant GeometricQuantities
    if isinstance(mt.terminal, FormArgument):
        # Mixing global derivatives with reference values (or local
        # derivatives with global values) is not supported
        if gd and mt.reference_value:
            error("Global derivatives of reference values not defined.")
        elif ld and not mt.reference_value:
            error("Local derivatives of global values not defined.")
        element = mt.terminal.ufl_element()
        fc = mt.flat_component
    elif isinstance(mt.terminal, SpatialCoordinate):
        if mt.reference_value:
            error("Not expecting reference value of x.")
        if gd:
            error("Not expecting global derivatives of x.")
        # x is tabulated through the coordinate element of its domain
        element = mt.terminal.ufl_domain().ufl_coordinate_element()
        if not ld:
            fc = mt.flat_component
        else:
            # Actually the Jacobian expressed as reference_grad(x)
            fc = mt.flat_component  # x-component
            assert len(mt.component) == 1
            assert mt.component[0] == mt.flat_component
    elif isinstance(mt.terminal, Jacobian):
        if mt.reference_value:
            error("Not expecting reference value of J.")
        if gd:
            error("Not expecting global derivatives of J.")
        element = mt.terminal.ufl_domain().ufl_coordinate_element()
        # Translate component J[i,d] to x element context rgrad(x[i])[d]
        assert len(mt.component) == 2
        fc, d = mt.component  # x-component, derivative
        ld = tuple(sorted((d,) + ld))
    else:
        # Other terminal types (geometric quantities etc.) are not tabulated
        return None

    # Averaging a derivative is not meaningful here
    assert not (mt.averaged and (ld or gd))

    # Change derivatives format for table lookup
    #gdim = mt.terminal.ufl_domain().geometric_dimension()
    #global_derivatives = derivative_listing_to_counts(gd, gdim)

    # Change derivatives format for table lookup
    tdim = mt.terminal.ufl_domain().topological_dimension()
    local_derivatives = derivative_listing_to_counts(ld, tdim)

    return element, mt.averaged, local_derivatives, fc
+
+
def build_element_tables(num_points, quadrature_rules,
                         cell, integral_type, entitytype,
                         modified_terminals, epsilon):
    """Build the element tables needed for a list of modified terminals.

    Input:
      num_points - number of quadrature points, used as key into quadrature_rules
      quadrature_rules - dict mapping num_points to (points, weights)
      cell - the ufl cell of the integration domain
      integral_type - str
      entitytype - str ("cell", "facet" or "vertex")
      modified_terminals - ordered sequence of unique modified terminals
      epsilon - tolerance passed on to table value extraction

    Output:
      tables - dict(name: table)
      mt_table_names - dict(ModifiedTerminal: name)
      table_origins - dict(name: (element, avg, local_derivatives, flat_component))
    """
    mt_table_names = {}
    tables = {}
    table_origins = {}

    # Analyse which modified terminals need tables; terminals that
    # get_modified_terminal_element returns None for are skipped entirely
    analysis = {}
    for mt in modified_terminals:
        # FIXME: Use a namedtuple for res
        res = get_modified_terminal_element(mt)
        if res:
            analysis[mt] = res

    # Build element numbering using topological
    # ordering so subelements get priority
    from ffc.analysis import extract_sub_elements, sort_elements, _compute_element_numbers
    all_elements = [res[0] for res in analysis.values()]
    unique_elements = sort_elements(extract_sub_elements(all_elements))
    element_numbers = _compute_element_numbers(unique_elements)

    def add_table(res):
        # Build and register the table for this (element, avg, derivatives,
        # component) combination if not seen before; return its name.
        element, avg, local_derivatives, flat_component = res

        # Build name for this particular table
        element_number = element_numbers[element]
        name = generate_psi_table_name(
            num_points, element_number, avg,
            entitytype, local_derivatives, flat_component)

        # Extract the values of the table from ffc table format
        if name not in tables:
            tables[name] = get_ffc_table_values(
                quadrature_rules[num_points][0],
                cell, integral_type,
                num_points, element, avg,
                entitytype, local_derivatives, flat_component,
                epsilon)

            # Track table origin for custom integrals:
            table_origins[name] = res
        return name

    for mt in modified_terminals:
        res = analysis.get(mt)
        if not res:
            continue
        element, avg, local_derivatives, flat_component = res

        # Generate tables for each subelement in topological ordering,
        # using same avg and local_derivatives, for each component.
        # We want the first table to be the innermost subelement so that's
        # the one the optimized tables get the name from and so that's
        # the one the table origins point to for custom integrals.
        # This results in some superfluous tables but those will be
        # removed before code generation and it's not believed to be
        # a bottleneck.
        for subelement in sort_elements(extract_sub_elements([element])):
            for fc in range(product(subelement.reference_value_shape())):
                subres = (subelement, avg, local_derivatives, fc)
                name_ignored = add_table(subres)

        # Generate table and store table name with modified terminal
        name = add_table(res)
        mt_table_names[mt] = name

    return tables, mt_table_names, table_origins
+
+
def optimize_element_tables(tables, mt_table_names, table_origins, epsilon):
    """Optimize tables and make unique set.

    Steps taken:

      - clamp values that are very close to -1, 0, +1 to those values
      - remove dofs from beginning and end of tables where values are all zero
      - for each modified terminal, provide the dof range that a given table corresponds to

    Terminology:
      name - str, name used in input arguments here
      mt - modified terminal
      table - numpy array of float values
      stripped_table - numpy array of float values with zeroes
                       removed from each end of dofrange

    Input:
      tables - { name: table }
      mt_table_names - { mt: name }
      table_origins - { name: (element, avg, derivative_counts, flat_component) }
      epsilon - tolerance used when comparing table values with -1.0, 0.0, +1.0

    Output:
      unique_tables - { unique_name: stripped_table }
      mt_table_ranges - { mt: (unique_name, begin, end) }
      unique_table_origins - { unique_name:
                               (element, avg, derivative_counts, fc, dofrange) }
    """
    # Find and sort all unique table names mentioned in mt_table_names
    used_names = set(mt_table_names.values())
    assert None not in used_names
    used_names = sorted(used_names)

    # Drop unused tables (if any at this point)
    tables = { name: tables[name] for name in tables if name in used_names }

    # Clamp almost -1.0, 0.0, and +1.0 values first
    # (i.e. 0.999999 -> 1.0 if within epsilon distance)
    for name in used_names:
        tables[name] = clamp_table_small_integers(tables[name], epsilon)

    # Strip contiguous zero blocks at the ends of all tables
    table_ranges = {}
    for name in used_names:
        begin, end, stripped_table = strip_table_zeros(tables[name], epsilon)
        tables[name] = stripped_table
        table_ranges[name] = (begin, end)

    # Build unique table mapping
    unique_tables_list, name_to_unique_index = build_unique_tables(tables, epsilon)

    # Build mapping of constructed table names to unique names.
    # Picking first constructed name preserves some information
    # about the table origins although some names may be dropped.
    unique_names = {}
    for name in used_names:
        ui = name_to_unique_index[name]
        if ui not in unique_names:
            unique_names[ui] = name

    # Build mapping from unique table name to the table itself,
    # and track table origins for runtime recomputation in custom integrals
    unique_tables = {}
    unique_table_origins = {}
    for ui, unique_table in enumerate(unique_tables_list):
        uname = unique_names[ui]
        unique_tables[uname] = unique_table

        dofrange = table_ranges[uname]
        # Bugfix: was table_origins[name], using the stale loop variable
        # from the unique_names loop above, which assigned the origin of
        # the last used name to every unique table.
        # FIXME: Make sure the "smallest" element is chosen
        (element, avg, derivative_counts, fc) = table_origins[uname]
        unique_table_origins[uname] = (element, avg, derivative_counts, fc, dofrange)

    # Build mapping from modified terminal to compacted table and dof range
    # { mt: (unique name, table dof range begin, table dof range end) }
    mt_table_ranges = {}
    for mt, name in mt_table_names.items():
        assert name is not None
        b, e = table_ranges[name]
        ui = name_to_unique_index[name]
        unique_name = unique_names[ui]
        mt_table_ranges[mt] = (unique_name, b, e)

    return unique_tables, mt_table_ranges, unique_table_origins
+
+
def offset_restricted_table_ranges(mt_table_ranges, mt_table_names,
                                   tables, modified_terminals):
    """Shift dof ranges of restricted form arguments by their restriction offset.

    Geometry quantities are left untouched; they get padded variable
    names instead of offset dof ranges.
    """
    for mt in modified_terminals:
        if not (mt.restriction and isinstance(mt.terminal, FormArgument)):
            continue
        # offset = 0 or number of dofs before table optimization
        table = tables[mt_table_names[mt]]
        num_original_dofs = int(table.shape[-1])
        offset = ufc_restriction_offset(mt.restriction, num_original_dofs)
        unique_name, begin, end = mt_table_ranges[mt]
        mt_table_ranges[mt] = (unique_name, begin + offset, end + offset)
    return mt_table_ranges
+
+
def is_zeros_table(table, epsilon):
    "Return True if the table is empty or all values are within epsilon of 0.0."
    if table.size == 0:
        return True
    return numpy.allclose(table, numpy.zeros(table.shape), atol=epsilon)
+
+
def is_ones_table(table, epsilon):
    "Return True if all values in the table are within epsilon of 1.0."
    return numpy.allclose(table, numpy.ones(table.shape), rtol=0.0, atol=epsilon)
+
+
def is_quadrature_table(table, epsilon):
    """Return True if each entity slice of the table is the identity,
    i.e. the table maps quadrature points directly to dofs."""
    num_entities, num_points, num_dofs = table.shape
    if num_points != num_dofs:
        return False
    identity = numpy.eye(num_points)
    for entity in range(num_entities):
        if not numpy.allclose(table[entity, :, :], identity, atol=epsilon):
            return False
    return True
+
+
def is_piecewise_table(table, epsilon):
    "Return True if the table values are independent of the point index (axis 1)."
    first_point = table[:, 0, :]
    num_points = table.shape[1]
    return all(numpy.allclose(first_point, table[:, i, :], atol=epsilon)
               for i in range(1, num_points))
+
+
def is_uniform_table(table, epsilon):
    "Return True if the table values are independent of the entity index (axis 0)."
    first_entity = table[0, :, :]
    num_entities = table.shape[0]
    return all(numpy.allclose(first_entity, table[i, :, :], atol=epsilon)
               for i in range(1, num_entities))
+
+
def analyse_table_types(unique_tables, epsilon):
    """Classify each table as one of:
    "zeros", "ones", "quadrature", "fixed", "piecewise", "uniform", "varying".
    Returns { unique_name: tabletype }.
    """
    table_types = {}
    for unique_name, table in unique_tables.items():
        # Unpacking asserts the expected (entities, points, dofs) layout
        num_entities, num_points, num_dofs = table.shape
        if is_zeros_table(table, epsilon):
            # Table is empty or all values are 0.0
            tabletype = "zeros"
        elif is_ones_table(table, epsilon):
            # All values are 1.0
            tabletype = "ones"
        elif is_quadrature_table(table, epsilon):
            # Identity matrix mapping points to dofs (separately on each entity)
            tabletype = "quadrature"
        else:
            # Check invariance across points (piecewise) and
            # across entities (uniform); both at once means "fixed"
            pw = is_piecewise_table(table, epsilon)
            un = is_uniform_table(table, epsilon)
            tabletype = {
                (True, True): "fixed",        # constant over points and entities
                (True, False): "piecewise",   # constant over points per entity
                (False, True): "uniform",     # equal on all entities
                (False, False): "varying",    # varies over points and entities
            }[(pw, un)]

        table_types[unique_name] = tabletype

    return table_types
+
+
def build_optimized_tables(num_points, quadrature_rules,
                           cell, integral_type, entitytype,
                           modified_terminals, parameters):
    """Build, optimize and classify all element tables for the given terminals.

    Output:
      unique_tables - { unique_name: table }, with zeros/ones/quadrature
                      tables removed and piecewise/uniform/fixed tables
                      compressed along the point/entity axes
      mt_table_ranges - { mt: (unique_name, begin, end) }, with dof ranges
                        offset for restricted form arguments
      table_types - { unique_name: type string from analyse_table_types }
    """
    # Get tolerance for checking table values against 0.0 or 1.0
    from ffc.uflacs.language.format_value import get_float_threshold
    epsilon = get_float_threshold()
    # FIXME: Should be epsilon from ffc parameters
    #epsilon = parameters["epsilon"]

    # Build tables needed by all modified terminals
    tables, mt_table_names, table_origins = \
        build_element_tables(num_points, quadrature_rules,
            cell, integral_type, entitytype,
            modified_terminals, epsilon)

    # Optimize tables and get table name and dofrange for each modified terminal
    unique_tables, mt_table_ranges, table_origins = \
        optimize_element_tables(tables, mt_table_names, table_origins, epsilon)

    # Analyze tables for properties useful for optimization
    table_types = analyse_table_types(unique_tables, epsilon)


    # Consistency checking
    for unique_name, tabletype in table_types.items():
        if tabletype == "zeros":
            # All table ranges referring to this table should be empty
            assert all(data[1] == data[2]
                       for mt, data in mt_table_ranges.items()
                       if data is not None and data[0] == unique_name)
        if tabletype == "varying":
            # No table ranges referring to this table should be averaged
            assert all(not mt.averaged
                       for mt, data in mt_table_ranges.items()
                       if data is not None and data[0] == unique_name)


    # Add offsets to dof ranges for restricted terminals
    # (uses the original unoptimized tables for the dof counts)
    mt_table_ranges = offset_restricted_table_ranges(
        mt_table_ranges, mt_table_names, tables, modified_terminals)

    # Delete unused tables and compress piecewise constant tables
    used_names = set(tabledata[0] for tabledata in mt_table_ranges.values())
    unused_names = set(unique_tables.keys()) - used_names
    for uname in unused_names:
        del table_types[uname]
        del unique_tables[uname]
    for uname, tabletype in table_types.items():
        if tabletype in ("piecewise", "fixed"):
            # Reduce table to dimension 1 along num_points axis in generated code
            unique_tables[uname] = unique_tables[uname][:,0:1,:]
        if tabletype in ("uniform", "fixed"):
            # Reduce table to dimension 1 along num_entities axis in generated code
            unique_tables[uname] = unique_tables[uname][0:1,:,:]
        if tabletype in ("zeros", "ones", "quadrature"):
            # These table types are replaced by symbolic values in the
            # generated code, so the table data itself is not emitted
            del unique_tables[uname]

    return unique_tables, mt_table_ranges, table_types
diff --git a/uflacs/generation/__init__.py b/ffc/uflacs/generation/__init__.py
similarity index 94%
rename from uflacs/generation/__init__.py
rename to ffc/uflacs/generation/__init__.py
index 0930cc7..bea41cf 100644
--- a/uflacs/generation/__init__.py
+++ b/ffc/uflacs/generation/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
diff --git a/ffc/uflacs/generation/integralgenerator.py b/ffc/uflacs/generation/integralgenerator.py
new file mode 100644
index 0000000..a8cf922
--- /dev/null
+++ b/ffc/uflacs/generation/integralgenerator.py
@@ -0,0 +1,510 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
+#
+# This file is part of UFLACS.
+#
+# UFLACS is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UFLACS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UFLACS. If not, see <http://www.gnu.org/licenses/>
+
+"""Controlling algorithm for building the tabulate_tensor source structure from factorized representation."""
+
+from ufl import product
+from ufl.classes import ConstantValue, Condition
+
+from ffc.log import error, warning
+
+from ffc.uflacs.analysis.modified_terminals import analyse_modified_terminal, is_modified_terminal
+
+
+class IntegralGenerator(object):
+
+    def __init__(self, ir, backend):
+        # Store ir
+        self.ir = ir
+
+        # Backend specific plugin with attributes
+        # - language: for translating ufl operators to target language
+        # - symbols: for translating ufl operators to target language
+        # - definitions: for defining backend specific variables
+        # - access: for accessing backend specific variables
+        self.backend = backend
+
+        # Set of operator names code has been generated for,
+        # used in the end for selecting necessary includes
+        self._ufl_names = set()
+
+
+    def get_includes(self):
+        "Return list of include statements needed to support generated code."
+        includes = set()
+
+        includes.add("#include <cstring>")  # for using memset
+        #includes.add("#include <algorithm>")  # for using std::fill instead of memset
+
+        cmath_names = set((
+                "abs", "sign", "pow", "sqrt",
+                "exp", "ln",
+                "cos", "sin", "tan",
+                "acos", "asin", "atan", "atan_2",
+                "cosh", "sinh", "tanh",
+                "acosh", "asinh", "atanh",
+                "erf", "erfc",
+            ))
+
+        boost_math_names = set((
+            "bessel_j", "bessel_y", "bessel_i", "bessel_k",
+            ))
+
+        # Only return the necessary headers
+        if cmath_names & self._ufl_names:
+            includes.add("#include <cmath>")
+
+        if boost_math_names & self._ufl_names:
+            includes.add("#include <boost/math/special_functions.hpp>")
+
+        return sorted(includes)
+
+
    def generate(self):
        """Generate entire tabulate_tensor body.

        Assumes that the code returned from here will be wrapped in a context
        that matches a suitable version of the UFC tabulate_tensor signatures.
        """
        L = self.backend.language

        parts = []
        parts += self.generate_quadrature_tables()
        parts += self.generate_element_tables()
        parts += self.generate_tensor_reset()

        # If we have integrals with different number of quadrature points,
        # we wrap each integral in a separate scope, avoiding having to
        # think about name clashes for now. This is a bit wasteful in that
        # piecewise quantities are not shared, but at least it should work.
        expr_irs = self.ir["expr_irs"]
        all_num_points = sorted(expr_irs)

        # Reset variables, separate sets for quadrature loop
        # (vaccesses maps ufl expression -> generated access node)
        self.vaccesses = { num_points: {} for num_points in all_num_points }

        for num_points in all_num_points:
            # Piecewise (point-independent) computations first,
            # then the quadrature loop itself
            body = []
            body += self.generate_unstructured_partition(num_points, "piecewise")
            body += self.generate_dofblock_partition(num_points, "piecewise")
            body += self.generate_quadrature_loops(num_points)

            # If there are multiple quadrature rules here, just wrapping
            # in Scope to avoid thinking about scoping issues for now.
            # A better handling of multiple rules would be nice,
            # in particular sharing piecewise quantities between them.
            if len(all_num_points) > 1:
                parts.append(L.Scope(body))
            else:
                parts.extend(body)

        parts += self.generate_finishing_statements()

        return L.StatementList(parts)
+
+
    def generate_quadrature_tables(self):
        "Generate static tables of quadrature points and weights."
        L = self.backend.language

        parts = []

        # No quadrature tables for custom (given argument)
        # or point (evaluation in single vertex)
        skip = ("custom", "cutcell", "interface", "overlap", "vertex")
        if self.ir["integral_type"] in skip:
            return parts

        # Loop over quadrature rules
        qrs = self.ir["quadrature_rules"]
        for num_points in sorted(qrs):
            points, weights = qrs[num_points]
            assert num_points == len(weights)
            expr_ir = self.ir["expr_irs"][num_points]

            # Generate quadrature weights array
            # (only if the expression ir actually references the weights)
            if expr_ir["need_weights"]:
                wsym = self.backend.symbols.weights_array(num_points)
                parts += [L.ArrayDecl("static const double", wsym, num_points, weights,
                                      alignas=self.ir["alignas"])]

            # Size of quadrature points depends on context, assume this is correct:
            pdim = len(points[0])
            assert points.shape[0] == num_points
            assert pdim == points.shape[1]

            # Generate quadrature points array
            # (skipped when points have zero dimension or are unused)
            if pdim and expr_ir["need_points"]:
                # Flatten array: (TODO: avoid flattening here, it makes padding harder)
                flattened_points = points.reshape(product(points.shape))
                psym = self.backend.symbols.points_array(num_points)
                parts += [L.ArrayDecl("static const double", psym, num_points * pdim,
                                      flattened_points, alignas=self.ir["alignas"])]

        # Add leading comment if there are any tables
        parts = L.commented_code_list(parts,
            "Section for quadrature weights and points")
        return parts
+
+
    def generate_element_tables(self):
        """Generate static tables with precomputed element basis
        function values in quadrature points.

        Tables are emitted as 3D arrays indexed [entity][point][dof].
        """
        L = self.backend.language
        parts = []
        expr_irs = self.ir["expr_irs"]

        # One group of tables per quadrature rule
        for num_points in sorted(expr_irs):
            # Get all unique tables for this quadrature rule
            tables = expr_irs[num_points]["unique_tables"]
            if tables:
                tmp = "Definitions of {0} tables for {1} quadrature points"
                parts += [L.Comment(tmp.format(len(tables), num_points))]
                for name in sorted(tables):
                    # TODO: table here can actually have only 1 point,
                    # regroup or at least fix generated comment
                    table = tables[name]
                    # TODO: Not padding, consider when and if to do so
                    parts += [L.ArrayDecl("static const double", name, table.shape, table,
                                          alignas=self.ir["alignas"])]
        # Add leading comment if there are any tables
        parts = L.commented_code_list(parts, [
            "Section for precomputed element basis function values",
            "Table dimensions: num_entities, num_points, num_dofs"])
        return parts
+
+
+    def generate_tensor_reset(self):
+        "Generate statements for resetting the element tensor to zero."
+        L = self.backend.language
+
+        # TODO: Move this to language module, make CNode type
+        def memzero(ptrname, size):
+            tmp = "memset({ptrname}, 0, {size} * sizeof(*{ptrname}));"
+            code = tmp.format(ptrname=str(ptrname), size=size)
+            return L.VerbatimStatement(code)
+
+        # Compute tensor size
+        A = self.backend.symbols.element_tensor()
+        A_size = product(self.ir["tensor_shape"])
+
+        # Stitch it together
+        parts = [L.Comment("Reset element tensor")]
+        if A_size == 1:
+            parts += [L.Assign(A[0], L.LiteralFloat(0.0))]
+        else:
+            parts += [memzero(A, A_size)]
+        return parts
+
+
    def generate_quadrature_loops(self, num_points):
        """Generate all quadrature loops.

        Returns a list of statements: the varying-partition setup and
        dofblock accumulation, wrapped in a for-loop over quadrature
        points (or a plain scope when there is only one point).
        """
        L = self.backend.language
        body = []

        # Generate unstructured varying partition
        body += self.generate_unstructured_partition(num_points, "varying")
        body = L.commented_code_list(body,
            "Quadrature loop body setup (num_points={0})".format(num_points))

        body += self.generate_dofblock_partition(num_points, "varying")

        # Wrap body in loop or scope
        if not body:
            # Could happen for integral with everything zero and optimized away
            parts = []
        elif num_points == 1:
            # For now wrapping body in Scope to avoid thinking about scoping issues
            parts = L.commented_code_list(L.Scope(body), "Only 1 quadrature point, no loop")
        else:
            # Regular case: define quadrature loop
            iq = self.backend.symbols.quadrature_loop_index(num_points)
            np = self.backend.symbols.num_quadrature_points(num_points)
            parts = [L.ForRange(iq, 0, np, body=body)]

        return parts
+
+
    def generate_dofblock_partition(self, num_points, partition):
        """Generate A[...] += factor * argument_tables accumulation loops
        for each dof block contribution in the given partition
        ("piecewise" or "varying")."""
        L = self.backend.language

        # TODO: Add partial blocks (T[i0] = factor_index * arg0;)

        # TODO: Move piecewise blocks outside quadrature loop
        # (Can only do this by removing weight from factor,
        # and using that piecewise f*u*v gives that
        # sum_q weight[q]*f*u*v == f*u*v*(sum_q weight[q]) )

        # Get representation details
        expr_ir = self.ir["expr_irs"][num_points]
        V = expr_ir["V"]
        modified_arguments = expr_ir["modified_arguments"]
        block_contributions = expr_ir["block_contributions"]

        # Access nodes generated earlier by generate_partition
        vaccesses = self.vaccesses[num_points]
        A = self.backend.symbols.element_tensor()

        parts = []
        for dofblock, contributions in sorted(block_contributions[partition].items()):
            for data in contributions:
                (ma_indices, factor_index, table_ranges, unames, ttypes) = data

                # Add code in layers starting with innermost A[...] += product(factors)
                rank = len(unames)
                factors = []

                # Get factor expression; a literal 1.0 factor is dropped
                v = V[factor_index]
                if not (v._ufl_is_literal_ and float(v) == 1.0):
                    factors.append(vaccesses[v])

                # Get loop counter symbols to access A with
                A_indices = []
                for i in range(rank):
                    if ttypes[i] == "quadrature":
                        # Used to index A like A[iq*num_dofs + iq]
                        ia = self.backend.symbols.quadrature_loop_index(num_points)
                    else:
                        # Regular dof index
                        ia = self.backend.symbols.argument_loop_index(i)
                    A_indices.append(ia)

                # Add table access to factors, unless it's always 1.0
                for i in range(rank):
                    tt = ttypes[i]
                    # Zero tables would contribute nothing and should
                    # have been eliminated before reaching this point
                    assert tt not in ("zeros",)
                    if tt not in ("quadrature", "ones"):
                        ma = ma_indices[i]
                        access = self.backend.access(
                            modified_arguments[ma].terminal,
                            modified_arguments[ma],
                            table_ranges[i],
                            num_points)
                        factors.append(access)

                # Special case where all factors are 1.0 and dropped
                if factors:
                    term = L.Product(factors)
                else:
                    term = L.LiteralFloat(1.0)

                # Format flattened index expression to access A
                flat_index = L.flattened_indices(A_indices, self.ir["tensor_shape"])
                body = L.AssignAdd(A[flat_index], term)

                # Wrap accumulation in loop nest, innermost dimension first;
                # quadrature-indexed dimensions reuse the quadrature loop
                #for i in range(rank):
                for i in range(rank-1, -1, -1):
                    if ttypes[i] != "quadrature":
                        dofrange = dofblock[i]
                        body = L.ForRange(A_indices[i], dofrange[0], dofrange[1], body=body)

                # Add this block to parts
                parts.append(body)

        return parts
+
+
    def generate_partition(self, symbol, V, partition, table_ranges, num_points):
        """Generate terminal definitions and intermediate computations for
        the subexpressions of V selected by the boolean mask `partition`.

        symbol is the array symbol used to store intermediate values.
        Access nodes for each visited expression are recorded in
        self.vaccesses[num_points] for reuse by later code generation.
        """
        L = self.backend.language

        definitions = []
        intermediates = []

        vaccesses = self.vaccesses[num_points]

        # Indices of the expressions in V belonging to this partition
        partition_indices = [i for i, p in enumerate(partition) if p]

        for i in partition_indices:
            v = V[i]

            if is_modified_terminal(v):
                mt = analyse_modified_terminal(v)

                # Backend specific modified terminal translation
                vaccess = self.backend.access(mt.terminal,
                    mt, table_ranges[i], num_points)
                vdef = self.backend.definitions(mt.terminal,
                    mt, table_ranges[i], num_points, vaccess)

                # Store definitions of terminals in list
                assert isinstance(vdef, list)
                definitions.extend(vdef)
            else:
                # Get previously visited operands (TODO: use edges of V instead of ufl_operands?)
                vops = [vaccesses[op] for op in v.ufl_operands]

                # Mapping UFL operator to target language
                self._ufl_names.add(v._ufl_handler_name_)
                vexpr = self.backend.ufl_to_language(v, *vops)

                # TODO: Let optimized ir provide mapping of vertex indices to
                # variable indices, marking which subexpressions to store in variables
                # and in what order:
                #j = variable_id[i]

                # Currently instead creating a new intermediate for
                # each subexpression except boolean conditions
                if isinstance(v, Condition):
                    # Inline the conditions x < y, condition values
                    # 'x' and 'y' may still be stored in intermediates.
                    # This removes the need to handle boolean intermediate variables.
                    # With tensor-valued conditionals it may not be optimal but we
                    # let the C++ compiler take responsibility for optimizing those cases.
                    j = None
                else:
                    j = len(intermediates)

                if j is not None:
                    # Record assignment of vexpr to intermediate variable
                    vaccess = symbol[j]
                    intermediates.append(L.Assign(vaccess, vexpr))
                else:
                    # Access the inlined expression
                    vaccess = vexpr

            # Store access node for future reference
            vaccesses[v] = vaccess

        # Join terminal computation, array of intermediate expressions,
        # and intermediate computations
        parts = []
        if definitions:
            parts += definitions
        if intermediates:
            # Declare the intermediates array sized to what was actually used
            parts += [L.ArrayDecl("double", symbol, len(intermediates),
                                  alignas=self.ir["alignas"])]
            parts += intermediates
        return parts
+
+
+    def generate_unstructured_partition(self, num_points, partition):
+        L = self.backend.language
+        expr_ir = self.ir["expr_irs"][num_points]
+        if partition == "piecewise":
+            name = "sp"
+        elif partition == "varying":
+            name = "sv"
+        arraysymbol = L.Symbol("{0}{1}".format(name, num_points))
+        parts = self.generate_partition(arraysymbol,
+                                        expr_ir["V"],
+                                        expr_ir[partition],
+                                        expr_ir["table_ranges"],
+                                        num_points)
+        parts = L.commented_code_list(parts,
+            "Unstructured %s computations" % (partition,))
+        return parts
+
+
+    def generate_finishing_statements(self):
+        """Generate finishing statements.
+
+        This includes assigning to output array if there is no integration.
+        """
+        parts = []
+
+        if self.ir["integral_type"] == "expression":
+            error("Expression generation not implemented yet.")
+            # TODO: If no integration, assuming we generate an expression, and assign results here
+            # Corresponding code from compiler.py:
+            # assign_to_variables = tfmt.output_variable_names(len(final_variable_names))
+            # parts += list(format_assignments(zip(assign_to_variables, final_variable_names)))
+
+        return parts
+
+
+"""
+    # TODO: Rather take list of vertices, not markers
+    # XXX FIXME: Fix up this function and use it instead?
+    def alternative_generate_partition(self, symbol, C, MT, partition, table_ranges, num_points):
+        L = self.backend.language
+
+        definitions = []
+        intermediates = []
+
+        # XXX FIXME: create these!
+        # C = input CRSArray representation of expression DAG
+        # MT = input list/dict of modified terminals
+
+        self.ast_variables = [None]*len(C) # FIXME: Create outside
+
+        # TODO: Get this as input instead of partition?
+        partition_indices = [i for i, p in enumerate(partition) if p]
+        for i in partition_indices:
+            row = C[i] # XXX FIXME: Get this as input
+            if len(row) == 1:
+                # Modified terminal
+                t, = row
+                mt = MT[t] # XXX FIXME: Get this as input
+                tc = mt[0]
+
+
+                if isinstance(mt.terminal, ConstantValue):
+                    # Format literal value for the chosen language
+                    modified_literal_to_ast_node = []  # silence flake8
+                    # XXX FIXME: Implement this mapping:
+                    vaccess = modified_literal_to_ast_node[tc](mt)
+                    vdef = None
+                else:
+                    # Backend specific modified terminal formatting
+                    vaccess = self.backend.access(mt.terminal,
+                        mt, table_ranges[i], num_points)
+                    vdef = self.backend.definitions(mt.terminal,
+                        mt, table_ranges[i], num_points, vaccess)
+
+                # Store definitions of terminals in list
+                if vdef is not None:
+                    definitions.append(vdef)
+
+            else:
+                # Application of operator with typecode tc to operands with indices ops
+                tc = mt[0]
+                ops = mt[1:]
+
+                # Get operand AST nodes
+                opsaccess = [self.ast_variables[k] for k in ops]
+
+                # Generate expression for this operator application
+                typecode2astnode = []  # silence flake8
+                vexpr = typecode2astnode[tc](opsaccess) # XXX FIXME: Implement this mapping
+
+                store_this_in_variable = True # TODO: Don't store all subexpressions
+                if store_this_in_variable:
+                    # Record assignment of vexpr to intermediate variable
+                    j = len(intermediates)
+                    vaccess = symbol[j]
+                    intermediates.append(L.Assign(vaccess, vexpr))
+                else:
+                    # Access the inlined expression
+                    vaccess = vexpr
+
+            # Store access string, either a variable symbol or an inlined expression
+            self.ast_variables[i] = vaccess
+
+        # Join terminal computation, array of intermediate expressions,
+        # and intermediate computations
+        parts = []
+        if definitions:
+            parts += definitions
+        if intermediates:
+            parts += [L.ArrayDecl("double", symbol, len(intermediates),
+                                  alignas=self.ir["alignas"])]
+            parts += intermediates
+        return parts
+"""
diff --git a/uflacs/language/__init__.py b/ffc/uflacs/language/__init__.py
similarity index 93%
rename from uflacs/language/__init__.py
rename to ffc/uflacs/language/__init__.py
index 5ff56ec..9e21fe8 100644
--- a/uflacs/language/__init__.py
+++ b/ffc/uflacs/language/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
diff --git a/uflacs/language/cnodes.py b/ffc/uflacs/language/cnodes.py
similarity index 89%
rename from uflacs/language/cnodes.py
rename to ffc/uflacs/language/cnodes.py
index 8a88920..3dd83f7 100644
--- a/uflacs/language/cnodes.py
+++ b/ffc/uflacs/language/cnodes.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -16,13 +16,14 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
 
-from __future__ import print_function # used in some debugging
-
+from __future__ import print_function  # used in some debugging
+from six import string_types
 import numpy
 
-from uflacs.language.format_value import format_value, format_float
-from uflacs.language.format_lines import format_indented_lines, Indented
-from uflacs.language.precedence import PRECEDENCE
+from ffc.uflacs.language.format_value import format_value, format_float
+from ffc.uflacs.language.format_lines import format_indented_lines, Indented
+from ffc.uflacs.language.precedence import PRECEDENCE
+
 
 """CNode TODO:
 - Array copy statement
@@ -30,7 +31,6 @@ from uflacs.language.precedence import PRECEDENCE
 - Extend ArrayDecl and ArrayAccess with support for
   flattened but conceptually multidimensional arrays,
   maybe even with padding (FlattenedArray possibly covers what we need)
-- ArrayDecl using std::array
 - Function declaration
 - TypeDef
 - Type
@@ -52,6 +52,7 @@ def assign_loop(src, dst, ranges):
         code = ForRange(i, b, e, code)
     return code
 
+
 def accumulate_loop(src, dst, ranges):
     """Generate a nested loop over a list of ranges, adding dst to src in the innermost loop.
 
@@ -62,6 +63,7 @@ def accumulate_loop(src, dst, ranges):
         code = ForRange(i, b, e, code)
     return code
 
+
 def scale_loop(src, dst, ranges):
     """Generate a nested loop over a list of ranges, multiplying dst with src in the innermost loop.
 
@@ -150,11 +152,13 @@ class CExpr(CNode):
     def __rfloordiv__(self, other):
         return NotImplemented
 
+
 class CExprOperator(CExpr):
     """Base class for all C expression operator."""
     __slots__ = ("children",)
     sideeffect = False
 
+
 class CExprTerminal(CExpr):
     """Base class for all C expression terminals."""
     __slots__ = ()
@@ -268,7 +272,7 @@ class Symbol(CExprTerminal):
     precedence = PRECEDENCE.SYMBOL
 
     def __init__(self, name):
-        assert isinstance(name, str)
+        assert isinstance(name, string_types)
         self.name = name
 
     def ce_format(self):
@@ -282,7 +286,7 @@ class VerbatimExpr(CExprTerminal):
     precedence = PRECEDENCE.LOWEST
 
     def __init__(self, codestring):
-        assert isinstance(codestring, str)
+        assert isinstance(codestring, string_types)
         self.codestring = codestring
 
     def ce_format(self):
@@ -291,7 +295,7 @@ class VerbatimExpr(CExprTerminal):
 class New(CExpr):
     __slots__ = ("typename",)
     def __init__(self, typename):
-        assert isinstance(typename, str)
+        assert isinstance(typename, string_types)
         self.typename = typename
 
     def ce_format(self):
@@ -594,7 +598,7 @@ class FlattenedArray(object):
         elif isinstance(array, Symbol):
             self.array = array
         else:
-            assert isinstance(array, str)
+            assert isinstance(array, string_types)
             self.array = Symbol(array)
 
         # Allow expressions or literals as strides or dims and offset
@@ -645,7 +649,7 @@ class ArrayAccess(CExprOperator):
 
     def __init__(self, array, indices):
         # Typecheck array argument
-        if isinstance(array, str):
+        if isinstance(array, string_types):
             array = Symbol(array)
         if isinstance(array, Symbol):
             self.array = array
@@ -712,6 +716,7 @@ class Conditional(CExprOperator):
         # Return combined string
         return c + " ? " + t + " : " + f
 
+
 class Call(CExprOperator):
     __slots__ = ("function", "arguments")
     precedence = PRECEDENCE.CALL
@@ -720,6 +725,7 @@ class Call(CExprOperator):
     def __init__(self, function, arguments=None):
         # Note: This will wrap a str as a Symbol
         self.function = as_cexpr(function)
+
         # Accept None, single, or multple arguments; literals or CExprs
         if arguments is None:
             arguments = ()
@@ -736,6 +742,15 @@ class Call(CExprOperator):
 
 number_types = (int, float, complex, numpy.number)
 
+
+def _is_zero(values):
+    global number_types
+    if isinstance(values, number_types + (LiteralFloat, LiteralInt)):
+        return float(values) == 0.0
+    else:
+        return numpy.count_nonzero(values) == 0
+
+
 def as_cexpr(node):
     """Typechecks and wraps an object as a valid CExpr.
 
@@ -748,19 +763,21 @@ def as_cexpr(node):
         return LiteralInt(node)
     elif isinstance(node, (float, numpy.floating)):
         return LiteralFloat(node)
-    elif isinstance(node, str):
+    elif isinstance(node, string_types):
         # Treat string as a symbol
         # TODO: Using LiteralString or VerbatimExpr would be other options, is this too ambiguous?
         return Symbol(node)
     else:
         raise RuntimeError("Unexpected CExpr type %s:\n%s" % (type(node), str(node)))
 
+
 def as_symbol(symbol):
-    if isinstance(symbol, str):
+    if isinstance(symbol, string_types):
         symbol = Symbol(symbol)
     assert isinstance(symbol, Symbol)
     return symbol
 
+
 def flattened_indices(indices, shape):
     """Given a tuple of indices and a shape tuple,
     return CNode expression for flattened indexing
@@ -816,12 +833,13 @@ class VerbatimStatement(CStatement):
     "Wraps a source code string to be pasted verbatim into the source code."
     __slots__ = ("codestring",)
     def __init__(self, codestring):
-        assert isinstance(codestring, str)
+        assert isinstance(codestring, string_types)
         self.codestring = codestring
 
     def cs_format(self):
         return self.codestring
 
+
 class Statement(CStatement):
     "Make an expression into a statement."
     __slots__ = ("expr",)
@@ -831,6 +849,7 @@ class Statement(CStatement):
     def cs_format(self):
         return self.expr.ce_format() + ";"
 
+
 class StatementList(CStatement):
     "A simple sequence of statements. No new scopes are introduced."
     __slots__ = ("statements",)
@@ -846,22 +865,25 @@ class StatementList(CStatement):
 class Using(CStatement):
     __slots__ = ("name",)
     def __init__(self, name):
-        assert isinstance(name, str)
+        assert isinstance(name, string_types)
         self.name = name
 
     def cs_format(self):
         return "using " + self.name + ";"
 
+
 class Break(CStatement):
     __slots__ = ()
     def cs_format(self):
         return "break;"
 
+
 class Continue(CStatement):
     __slots__ = ()
     def cs_format(self):
         return "continue;"
 
+
 class Return(CStatement):
     __slots__ = ("value",)
     def __init__(self, value):
@@ -870,6 +892,7 @@ class Return(CStatement):
     def cs_format(self):
         return "return " + self.value.ce_format() + ";"
 
+
 class Case(CStatement):
     __slots__ = ("value",)
     def __init__(self, value):
@@ -879,39 +902,56 @@ class Case(CStatement):
     def cs_format(self):
         return "case " + self.value.ce_format() + ":"
 
+
 class Default(CStatement):
     __slots__ = ()
     def cs_format(self):
         return "default:"
 
+
 class Throw(CStatement):
     __slots__ = ("exception", "message")
     def __init__(self, exception, message):
-        assert isinstance(exception, str)
-        assert isinstance(message, str)
+        assert isinstance(exception, string_types)
+        assert isinstance(message, string_types)
         self.exception = exception
         self.message = message
 
     def cs_format(self):
-        assert '"' not in message
+        assert '"' not in self.message
         return "throw " + self.exception + '("' + self.message + '");'
 
+
 class Comment(CStatement):
     "Line comment(s) used for annotating the generated code with human readable remarks."
     __slots__ = ("comment",)
     def __init__(self, comment):
-        assert isinstance(comment, str)
+        assert isinstance(comment, string_types)
         self.comment = comment
 
     def cs_format(self):
         lines = self.comment.strip().split("\n")
         return ["// " + line.strip() for line in lines]
 
-class Pragma(CStatement): # TODO: Improve on this with a use case later
+
+def commented_code_list(code, comments):
+    "Convenience wrapper for adding comment to code list if the list is not empty."
+    if isinstance(code, CNode):
+        code = [code]
+    assert isinstance(code, list)
+    if code:
+        if not isinstance(comments, (list, tuple)):
+            comments = [comments]
+        comments = [Comment(c) for c in comments]
+        code = comments + code
+    return code
+
+
+class Pragma(CStatement):  # TODO: Improve on this with a use case later
     "Pragma comments used for compiler-specific annotations."
     __slots__ = ("comment",)
     def __init__(self, comment):
-        assert isinstance(comment, str)
+        assert isinstance(comment, string_types)
         self.comment = comment
 
     def cs_format(self):
@@ -927,7 +967,7 @@ class VariableDecl(CStatement):
     def __init__(self, typename, symbol, value=None):
 
         # No type system yet, just using strings
-        assert isinstance(typename, str)
+        assert isinstance(typename, string_types)
         self.typename = typename
 
         # Allow Symbol or just a string
@@ -943,7 +983,13 @@ class VariableDecl(CStatement):
             code += " = " + self.value.ce_format()
         return code + ";"
 
-def build_1d_initializer_list(values, formatter):
+
+def leftover(size, padlen):
+    "Return minimum integer to add to size to make it divisible by padlen."
+    return (padlen - (size % padlen)) % padlen
+
+
+def build_1d_initializer_list(values, formatter, padlen=0):
     '''Return a list containing a single line formatted like "{ 0.0, 1.0, 2.0 }"'''
     tokens = ["{ "]
     if numpy.product(values.shape) > 0:
@@ -952,16 +998,24 @@ def build_1d_initializer_list(values, formatter):
         for v in fvalues[:-1]:
             tokens.append(v)
             tokens.append(sep)
-        tokens += [fvalues[-1]]
+        tokens.append(fvalues[-1])
+        if padlen:
+            # Add padding
+            zero = formatter(values.dtype(0))
+            for i in range(leftover(len(values), padlen)):
+                tokens.append(sep)
+                tokens.append(zero)
     tokens += " }"
     return "".join(tokens)
 
-def build_initializer_lists(values, sizes, level, formatter):
+
+def build_initializer_lists(values, sizes, level, formatter, padlen=0):
     """Return a list of lines with initializer lists for a multidimensional array.
 
-    Example output:
-    { { 0.0, 0.1 },
-      { 1.0, 1.1 } }
+    Example output::
+
+        { { 0.0, 0.1 },
+          { 1.0, 1.1 } }
     """
     values = numpy.asarray(values)
     assert numpy.product(values.shape) == numpy.product(sizes)
@@ -973,10 +1027,13 @@ def build_initializer_lists(values, sizes, level, formatter):
     r = len(sizes)
     assert r > 0
     if r == 1:
-        return [build_1d_initializer_list(values, formatter)]
+        return [build_1d_initializer_list(values, formatter, padlen=padlen)]
     else:
         # Render all sublists
-        parts = [build_initializer_lists(val, sizes[1:], level+1, formatter) for val in values]
+        parts = []
+        for val in values:
+            sublist = build_initializer_lists(val, sizes[1:], level+1, formatter, padlen=padlen)
+            parts.append(sublist)
         # Add comma after last line in each part except the last one
         for part in parts[:-1]:
             part[-1] += ","
@@ -991,11 +1048,6 @@ def build_initializer_lists(values, sizes, level, formatter):
         lines[-1] += " }"
         return lines
 
-def _is_zero(values):
-    if isinstance(values, (int, float, LiteralFloat, LiteralInt)):
-        return float(values) == 0.0
-    else:
-        return numpy.count_nonzero(values) == 0
 
 class ArrayDecl(CStatement):
     """A declaration or definition of an array.
@@ -1006,9 +1058,9 @@ class ArrayDecl(CStatement):
     Otherwise use nested lists of lists to represent
     multidimensional array values to initialize to.
     """
-    __slots__ = ("typename", "symbol", "sizes", "values")
-    def __init__(self, typename, symbol, sizes, values=None):
-        assert isinstance(typename, str)
+    __slots__ = ("typename", "symbol", "sizes", "alignas", "padlen", "values")
+    def __init__(self, typename, symbol, sizes, values=None, alignas=None, padlen=0):
+        assert isinstance(typename, string_types)
         self.typename = typename
 
         self.symbol = as_symbol(symbol)
@@ -1018,7 +1070,13 @@ class ArrayDecl(CStatement):
         self.sizes = tuple(sizes)
 
         # NB! No type checking, assuming nested lists of literal values. Not applying as_cexpr.
-        self.values = values
+        if isinstance(values, (list, tuple)):
+            self.values = numpy.asarray(values)
+        else:
+            self.values = values
+
+        self.alignas = alignas
+        self.padlen = padlen
 
     def __getitem__(self, indices):
         """Allow using array declaration object as the array when indexed.
@@ -1029,25 +1087,39 @@ class ArrayDecl(CStatement):
         return ArrayAccess(self, indices)
 
     def cs_format(self):
-        # C style
-        brackets = ''.join("[%d]" % n for n in self.sizes)
+        # Pad innermost array dimension
+        sizes = list(self.sizes)
+        if self.padlen:
+            sizes[-1] += leftover(sizes[-1], self.padlen)
+
+        # Add brackets
+        brackets = ''.join("[%d]" % n for n in sizes)
+
+        # Join declaration
         decl = self.typename + " " + self.symbol.name + brackets
 
-        # C++11 style with std::array # TODO: Enable this, needs #include <array>
-        #typename = self.typename
-        #for dim in reversed(self.sizes):
-        #    typename = "std::array<%s, %s>" % (typename, dim)
-        #decl = "%s %s" % (typename, self.symbol.name)
+        # NB! C++11 style alignas prefix syntax.
+        # If trying other target languages, must use other syntax.
+        if self.alignas:
+            align = "alignas(%d)" % int(self.alignas)
+            decl = align + " " + decl
 
         if self.values is None:
             # Undefined initial values
             return decl + ";"
         elif _is_zero(self.values):
             # Zero initial values
+            # (NB! C++ style zero initialization, not sure about other target languages)
             return decl + " = {};"
         else:
             # Construct initializer lists for arbitrary multidimensional array values
-            initializer_lists = build_initializer_lists(self.values, self.sizes, 0, format_value)
+            if self.values.dtype.kind == "f":
+                formatter = format_float
+            else:
+                # Not really using other types, this can be buggy
+                formatter = format_value
+            initializer_lists = build_initializer_lists(self.values, self.sizes, 0,
+                                                        formatter, padlen=self.padlen)
             if len(initializer_lists) == 1:
                 return decl + " = " + initializer_lists[0] + ";"
             else:
@@ -1065,10 +1137,11 @@ class Scope(CStatement):
     def cs_format(self):
         return ("{", Indented(self.body.cs_format()), "}")
 
+
 class Namespace(CStatement):
     __slots__ = ("name", "body")
     def __init__(self, name, body):
-        assert isinstance(name, str)
+        assert isinstance(name, string_types)
         self.name = name
         self.body = as_cstatement(body)
 
@@ -1076,6 +1149,7 @@ class Namespace(CStatement):
         return ("namespace " + self.name,
                 "{", Indented(self.body.cs_format()), "}")
 
+
 class If(CStatement):
     __slots__ = ("condition", "body")
     def __init__(self, condition, body):
@@ -1086,6 +1160,7 @@ class If(CStatement):
         return ("if (" + self.condition.ce_format() + ")",
                 "{", Indented(self.body.cs_format()), "}")
 
+
 class ElseIf(CStatement):
     __slots__ = ("condition", "body")
     def __init__(self, condition, body):
@@ -1096,6 +1171,7 @@ class ElseIf(CStatement):
         return ("else if (" + self.condition.ce_format() + ")",
                 "{", Indented(self.body.cs_format()), "}")
 
+
 class Else(CStatement):
     __slots__ = ("body",)
     def __init__(self, body):
@@ -1105,6 +1181,7 @@ class Else(CStatement):
         return ("else",
                 "{", Indented(self.body.cs_format()), "}")
 
+
 class While(CStatement):
     __slots__ = ("condition", "body")
     def __init__(self, condition, body):
@@ -1115,6 +1192,7 @@ class While(CStatement):
         return ("while (" + self.condition.ce_format() + ")",
                 "{", Indented(self.body.cs_format()), "}")
 
+
 class Do(CStatement):
     __slots__ = ("condition", "body")
     def __init__(self, condition, body):
@@ -1125,6 +1203,7 @@ class Do(CStatement):
         return ("do", "{", Indented(self.body.cs_format()),
                 "} while (" + self.condition.ce_format() + ");")
 
+
 class For(CStatement):
     __slots__ = ("init", "check", "update", "body")
     def __init__(self, init, check, update, body):
@@ -1137,7 +1216,7 @@ class For(CStatement):
         # The C model here is a bit crude and this causes trouble
         # in the init statement/expression here:
         init = self.init.cs_format()
-        assert isinstance(init, str)
+        assert isinstance(init, string_types)
         assert init.rstrip().endswith(";")
 
         check = self.check.ce_format()
@@ -1146,6 +1225,7 @@ class For(CStatement):
         return ("for (" + init + " " + check + "; " + update + ")",
                 "{", Indented(body), "}")
 
+
 class Switch(CStatement):
     __slots__ = ("arg", "cases", "default", "autobreak", "autoscope")
     def __init__(self, arg, cases, default=None, autobreak=True, autoscope=True):
@@ -1226,7 +1306,7 @@ def as_cstatement(node):
     elif isinstance(node, list):
         # Convenience case for list of statements
         return StatementList(node)
-    elif isinstance(node, str):
+    elif isinstance(node, string_types):
         # Backdoor for flexibility in code generation to allow verbatim pasted statements
         return VerbatimStatement(node)
     else:
diff --git a/uflacs/language/format_lines.py b/ffc/uflacs/language/format_lines.py
similarity index 95%
rename from uflacs/language/format_lines.py
rename to ffc/uflacs/language/format_lines.py
index e6c5085..62335db 100644
--- a/uflacs/language/format_lines.py
+++ b/ffc/uflacs/language/format_lines.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -23,6 +23,8 @@ lists of snippets and then join them than adding the pieces
 continually, which gives O(n^2) behaviour w.r.t. AST size n.
 """
 
+from six import string_types
+
 class Indented(object):
     """Class to mark a collection of snippets for indentation.
 
@@ -50,7 +52,7 @@ def iter_indented_lines(snippets, level=0):
     """
     tabsize = 4
     indentation = ' ' * (tabsize * level)
-    if isinstance(snippets, str):
+    if isinstance(snippets, string_types):
         for line in snippets.split("\n"):
             yield indentation + line
     elif isinstance(snippets, Indented):
diff --git a/uflacs/language/format_value.py b/ffc/uflacs/language/format_value.py
similarity index 97%
rename from uflacs/language/format_value.py
rename to ffc/uflacs/language/format_value.py
index 86f3f7b..fa38ce1 100644
--- a/uflacs/language/format_value.py
+++ b/ffc/uflacs/language/format_value.py
@@ -16,6 +16,7 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
 
+from six import string_types
 import re
 import numpy
 #from ffc.log import info
@@ -101,7 +102,7 @@ def format_value(value):
         return format_float(float(value))
     elif isinstance(value, _ints):
         return str(int(value))
-    elif isinstance(value, str):
+    elif isinstance(value, string_types):
         return '"' + value + '"'
     elif hasattr(value, "ce_format"):
         return value.ce_format()
diff --git a/uflacs/language/precedence.py b/ffc/uflacs/language/precedence.py
similarity index 96%
rename from uflacs/language/precedence.py
rename to ffc/uflacs/language/precedence.py
index 5b7e60d..a6c87cc 100644
--- a/uflacs/language/precedence.py
+++ b/ffc/uflacs/language/precedence.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
diff --git a/uflacs/language/ufl_to_cnodes.py b/ffc/uflacs/language/ufl_to_cnodes.py
similarity index 77%
rename from uflacs/language/ufl_to_cnodes.py
rename to ffc/uflacs/language/ufl_to_cnodes.py
index 7973227..fbb2db3 100644
--- a/uflacs/language/ufl_to_cnodes.py
+++ b/ffc/uflacs/language/ufl_to_cnodes.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -20,20 +20,14 @@
 
 from ffc.log import error
 
-import ufl
-
 from ufl.corealg.multifunction import MultiFunction
-from ufl.corealg.map_dag import map_expr_dag
+#from ufl.corealg.map_dag import map_expr_dag
+
 
-# FIXME: Need to incorporate some functions as CNode types, to
-#        create seams for selecting between C and C++ behaviour:
 class UFL2CNodesMixin(object):
     """Rules collection mixin for a UFL to CNodes translator class."""
-
     def __init__(self, language):
         self.L = language
-        # TODO: Make this configurable and add using statements somewhere:
-        self._enable_namespaces = True
 
     # === Error handlers for missing formatting rules ===
 
@@ -55,18 +49,39 @@ class UFL2CNodesMixin(object):
     def division(self, o, a, b):
         return self.L.Div(a, b)
 
-    # === Formatting rules for cmath functions ===
+    # === Formatting rules for conditional expressions ===
 
-    def power(self, o, a, b):
-        name = "pow"
-        if self._enable_namespaces:
-            name = "std::" + name
-        return self.L.Call(name, (a, b))
+    def conditional(self, o, c, t, f):
+        return self.L.Conditional(c, t, f)
 
-    def _cmath(self, name, op):
-        if self._enable_namespaces:
-            name = "std::" + name
-        return self.L.Call(name, op)
+    def eq(self, o, a, b):
+        return self.L.EQ(a, b)
+
+    def ne(self, o, a, b):
+        return self.L.NE(a, b)
+
+    def le(self, o, a, b):
+        return self.L.LE(a, b)
+
+    def ge(self, o, a, b):
+        return self.L.GE(a, b)
+
+    def lt(self, o, a, b):
+        return self.L.LT(a, b)
+
+    def gt(self, o, a, b):
+        return self.L.GT(a, b)
+
+    def and_condition(self, o, a, b):
+        return self.L.And(a, b)
+
+    def or_condition(self, o, a, b):
+        return self.L.Or(a, b)
+
+    def not_condition(self, o, a):
+        return self.L.Not(a)
+
+    # === Formatting rules for cmath functions ===
 
     def math_function(self, o, op):
         return self._cmath(o._name, op)
@@ -104,10 +119,7 @@ class UFL2CNodesMixin(object):
         return self._cmath("tanh", op)
 
     def atan_2(self, o, y, x):
-        name = "atan2"
-        if self._enable_namespaces:
-            name = "std::" + name
-        return self.L.Call(name, (y, x))
+        return self._cmath("atan_2", (y, x))
 
     def acos(self, o, op):
         return self._cmath("acos", op)
@@ -134,30 +146,46 @@ class UFL2CNodesMixin(object):
     #    # C++11 stl has this function
     #    return self._cmath("erfc", op)
 
+
+class RulesForC(object):
+    def _cmath(self, name, op):
+        return self.L.Call(name, op)
+
+    def power(self, o, a, b):
+        return self.L.Call("pow", (a, b))
+
     def abs(self, o, op):
-        #return Call("fabs", op) # C version
-        return self._cmath("abs", op)  # C++ stl version
+        return self.L.Call("fabs", op)
 
     def min_value(self, o, a, b):
-        #name = "fmin" # C99 version
-        name = "min" # C++ stl version
-        if self._enable_namespaces:
-            name = "std::" + name
-        return self.L.Call(name, (a, b))
+        return self.L.Call("fmin", (a, b))
 
     def max_value(self, o, a, b):
-        #name = "fmax" # C99 version
-        name = "max" # C++ stl version
-        if self._enable_namespaces:
-            name = "std::" + name
-        return self.L.Call(name, (a, b))
+        return self.L.Call("fmax", (a, b))
+
+    # ignoring bessel functions
+
+
+class RulesForCpp(object):
+    def _cmath(self, name, op):
+        return self.L.Call("std::" + name, op)
+
+    def power(self, o, a, b):
+        return self.L.Call("std::pow", (a, b))
+
+    def abs(self, o, op):
+        return self.L.Call("std::abs", op)
+
+    def min_value(self, o, a, b):
+        return self.L.Call("std::min", (a, b))
+
+    def max_value(self, o, a, b):
+        return self.L.Call("std::max", (a, b))
 
     # === Formatting rules for bessel functions ===
 
     def _bessel(self, o, n, v, name):
-        if self._enable_namespaces:
-            name = "boost::math::" + name
-        return self.L.Call(name, (n, v))
+        return self.L.Call("boost::math::" + name, (n, v))
 
     def bessel_i(self, o, n, v):
         return self._bessel(o, n, v, "cyl_bessel_i")
@@ -171,39 +199,15 @@ class UFL2CNodesMixin(object):
     def bessel_y(self, o, n, v):
         return self._bessel(o, n, v, "cyl_neumann")
 
-    # === Formatting rules for conditional expressions ===
-
-    def conditional(self, o, c, t, f):
-        return self.L.Conditional(c, t, f)
-
-    def eq(self, o, a, b):
-        return self.L.EQ(a, b)
-
-    def ne(self, o, a, b):
-        return self.L.NE(a, b)
-
-    def le(self, o, a, b):
-        return self.L.LE(a, b)
-
-    def ge(self, o, a, b):
-        return self.L.GE(a, b)
-
-    def lt(self, o, a, b):
-        return self.L.LT(a, b)
-
-    def gt(self, o, a, b):
-        return self.L.GT(a, b)
 
-    def and_condition(self, o, a, b):
-        return self.L.And(a, b)
-
-    def or_condition(self, o, a, b):
-        return self.L.Or(a, b)
+class UFL2CNodesTranslatorC(MultiFunction, UFL2CNodesMixin, RulesForC):
+    """UFL to CNodes translator class."""
+    def __init__(self, language):
+        MultiFunction.__init__(self)
+        UFL2CNodesMixin.__init__(self, language)
 
-    def not_condition(self, o, a):
-        return self.L.Not(a)
 
-class UFL2CNodesTranslator(MultiFunction, UFL2CNodesMixin):
+class UFL2CNodesTranslatorCpp(MultiFunction, UFL2CNodesMixin, RulesForCpp):
     """UFL to CNodes translator class."""
     def __init__(self, language):
         MultiFunction.__init__(self)
diff --git a/uflacs/analysis/__init__.py b/ffc/uflacs/params.py
similarity index 80%
rename from uflacs/analysis/__init__.py
rename to ffc/uflacs/params.py
index 060bf68..0f56e40 100644
--- a/uflacs/analysis/__init__.py
+++ b/ffc/uflacs/params.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
@@ -16,4 +16,7 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
 
-"""Algorithms for the analysis phase of the form compilation."""
+"""Collection of exposed parameters available to tune form compiler algorithms."""
+
+def default_parameters():
+    return {}
diff --git a/uflacs/representation/__init__.py b/ffc/uflacs/representation/__init__.py
similarity index 94%
rename from uflacs/representation/__init__.py
rename to ffc/uflacs/representation/__init__.py
index 4bd5334..dc64a80 100644
--- a/uflacs/representation/__init__.py
+++ b/ffc/uflacs/representation/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
 #
 # This file is part of UFLACS.
 #
diff --git a/ffc/uflacs/representation/build_uflacs_ir.py b/ffc/uflacs/representation/build_uflacs_ir.py
new file mode 100644
index 0000000..37a7f4d
--- /dev/null
+++ b/ffc/uflacs/representation/build_uflacs_ir.py
@@ -0,0 +1,437 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
+#
+# This file is part of UFLACS.
+#
+# UFLACS is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UFLACS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
+
+"""Main algorithm for building the uflacs intermediate representation."""
+
+import numpy
+
+from ufl import product, as_ufl
+from ufl.log import error
+from ufl.checks import is_cellwise_constant
+from ufl.classes import CellCoordinate, FacetCoordinate, QuadratureWeight
+
+from ffc.uflacs.analysis.balancing import balance_modifiers
+from ffc.uflacs.analysis.modified_terminals import is_modified_terminal, analyse_modified_terminal
+from ffc.uflacs.analysis.graph import build_graph
+from ffc.uflacs.analysis.graph_vertices import build_scalar_graph_vertices
+from ffc.uflacs.analysis.graph_rebuild import rebuild_with_scalar_subexpressions
+from ffc.uflacs.analysis.graph_dependencies import compute_dependencies, mark_active, mark_image
+from ffc.uflacs.analysis.graph_ssa import compute_dependency_count, invert_dependencies
+#from ffc.uflacs.analysis.graph_ssa import default_cache_score_policy, compute_cache_scores, allocate_registers
+from ffc.uflacs.analysis.factorization import compute_argument_factorization
+from ffc.uflacs.elementtables.terminaltables import build_optimized_tables
+
+
+def build_uflacs_ir(cell, integral_type, entitytype,
+                    integrands, tensor_shape,
+                    coefficient_numbering,
+                    quadrature_rules, parameters):
+    ir = {}
+
+    # { ufl coefficient: count }
+    ir["coefficient_numbering"] = coefficient_numbering
+
+    rank = len(tensor_shape)
+    
+    # { num_points: expr_ir for one integrand }
+    ir["expr_irs"] = {}
+
+    # Build the core uflacs expression ir for each num_points/integrand
+    # TODO: Better to compute joint IR for all integrands
+    #       and deal with num_points later?
+    #       I.e. common_expr_ir = compute_common_expr_ir(integrands)
+    #       If we want to adjust quadrature rules for subterms
+    #       automatically anyway, num_points should be advisory.
+    #       For now, expecting multiple num_points to be rare.
+    for num_points in sorted(integrands.keys()):
+        expressions = [integrands[num_points]]
+
+        # TODO: Apply this transformation to integrands earlier?
+        expressions = [balance_modifiers(expr) for expr in expressions]
+
+        # Build scalar list-based graph representation
+        V, V_deps, V_targets = build_scalar_graph(expressions)
+
+
+        # Build terminal_data from V here before factorization.
+        # Then we can use it to derive table properties for all modified terminals,
+        # and then use that to rebuild the scalar graph more efficiently before
+        # argument factorization. We can build terminal_data again after factorization
+        # if that's necessary.
+        initial_terminal_indices = [i for i, v in enumerate(V)
+                                    if is_modified_terminal(v)]
+        initial_terminal_data = [analyse_modified_terminal(V[i])
+                                 for i in initial_terminal_indices]
+        unique_tables, mt_table_ranges, table_types = \
+            build_optimized_tables(num_points, quadrature_rules,
+                cell, integral_type, entitytype, initial_terminal_data, parameters)
+
+        # Build replacement map for modified terminals with zero tables
+        z = as_ufl(0.0)
+        for i, mt in zip(initial_terminal_indices, initial_terminal_data):
+            tr = mt_table_ranges.get(mt)
+            if tr is not None:
+                uname, begin, end = tr
+                ttype = table_types[uname]
+                # Any modified terminal with zero table is itself a zero value
+                if ttype == "zeros":
+                    V[i] = z
+        # Propagate expression changes
+        # (could possibly use replace() on target expressions instead)
+        for i in range(len(V)):
+            deps = [V[j] for j in V_deps[i]]
+            if deps:
+                V[i] = V[i]._ufl_expr_reconstruct_(*deps)
+
+        # Rebuild scalar target expressions and graph
+        # (this may be overkill and possible to optimize
+        # away if it turns out to be costly)
+        expressions = [V[i] for i in V_targets]
+
+        # Rebuild scalar list-based graph representation
+        SV, SV_deps, SV_targets = build_scalar_graph(expressions)
+        assert all(i < len(SV) for i in SV_targets)
+
+
+        # Compute factorization of arguments
+        (argument_factorizations, modified_arguments,
+             FV, FV_deps, FV_targets) = \
+            compute_argument_factorization(SV, SV_deps, SV_targets, rank)
+        assert len(SV_targets) == len(argument_factorizations)
+
+        # TODO: Still expecting one target variable in code generation
+        assert len(argument_factorizations) == 1
+        argument_factorization, = argument_factorizations
+
+        # Store modified arguments in analysed form
+        for i in range(len(modified_arguments)):
+            modified_arguments[i] = analyse_modified_terminal(modified_arguments[i])
+
+        # Build set of modified_terminal indices into factorized_vertices
+        modified_terminal_indices = [i for i, v in enumerate(FV)
+                                     if is_modified_terminal(v)]
+
+        # Build set of modified terminal ufl expressions
+        modified_terminals = [analyse_modified_terminal(FV[i])
+                              for i in modified_terminal_indices]
+
+        # Organize table data more, split into arguments and other terminals
+        modified_terminal_table_ranges = [mt_table_ranges.get(mt)
+                                          for mt in modified_terminals]
+        modified_argument_table_ranges = [mt_table_ranges.get(mt)
+                                          for mt in modified_arguments]
+
+
+        # Dependency analysis
+        inv_FV_deps, FV_active, FV_piecewise, FV_varying = \
+            analyse_dependencies(FV, FV_deps, FV_targets,
+                                 modified_terminal_indices,
+                                 mt_table_ranges,
+                                 table_types)
+
+        # Mark active modified arguments
+        #active_modified_arguments = numpy.zeros(len(modified_arguments), dtype=int)
+        #for ma_indices in argument_factorization:
+        #    for j in ma_indices:
+        #        active_modified_arguments[j] = 1
+
+
+        # Figure out which table names are active
+        active_table_names = set()
+        for i, tr in zip(modified_terminal_indices, modified_terminal_table_ranges):
+            if FV_active[i] and tr is not None:
+                active_table_names.add(tr[0])
+        for ma_indices in argument_factorization:
+            for j in ma_indices:
+                tr = modified_argument_table_ranges[j]
+                if tr is not None:
+                    active_table_names.add(tr[0])
+
+        # Drop tables not referenced from modified terminals
+        # and also tables of zeros and ones
+        unused_types = ("zeros", "ones", "quadrature")
+        used_table_names = set(name for name in active_table_names
+                               if name is not None
+                                  and table_types[name] not in unused_types)
+        unique_tables = { name: unique_tables[name] for name in used_table_names }
+
+
+        # Analyse active terminals to check what we'll need to generate code for
+        active_mts = [mt for i, mt in zip(modified_terminal_indices, modified_terminals)
+                      if FV_active[i]]
+
+        # Figure out if we need to access CellCoordinate to
+        # avoid generating quadrature point table otherwise
+        if integral_type == "cell":
+            need_points = any(isinstance(mt.terminal, CellCoordinate)
+                              for mt in active_mts)
+        elif integral_type in ("interior_facet", "exterior_facet"):
+            need_points = any(isinstance(mt.terminal, FacetCoordinate)
+                              for mt in active_mts)
+        else:
+            need_points = False
+
+        # Figure out if we need to access QuadratureWeight to
+        # avoid generating quadrature point table otherwise
+        need_weights = any(isinstance(mt.terminal, QuadratureWeight)
+                           for mt in active_mts)
+
+        # Loop over factorization terms
+        from collections import defaultdict
+        block_contributions = {
+            # TODO: Should not store piecewise blocks inside num_points context
+            "piecewise": defaultdict(list),
+            "varying": defaultdict(list)
+            }
+        for ma_indices, fi in sorted(argument_factorization.items()):
+            # Get a bunch of information about this term
+            rank = len(ma_indices)
+            trs = tuple(modified_argument_table_ranges[ai] for ai in ma_indices)
+            unames = tuple(tr[0] for tr in trs)
+            dofblock = tuple(tr[1:3] for tr in trs)
+            ttypes = tuple(table_types[name] for name in unames)
+            assert not any(tt == "zeros" for tt in ttypes)
+
+            piecewise_types = ("piecewise", "fixed", "ones")
+            if FV_piecewise[fi] and all(tt in piecewise_types for tt in ttypes):
+                contributions = block_contributions["piecewise"][dofblock]
+            else:
+                contributions = block_contributions["varying"][dofblock]
+
+            data = (ma_indices, fi, trs, unames, ttypes)
+            contributions.append(data)
+
+
+        # Build IR dict for the given expressions
+        expr_ir = {}
+
+        expr_ir["block_contributions"] = block_contributions
+        
+        # (array) FV-index -> UFL subexpression
+        expr_ir["V"] = FV
+
+        # (array) Flattened input expression component index -> FV-index
+        expr_ir["target_variables"] = FV_targets
+
+        ### Result of factorization:
+        # (array) MA-index -> UFL expression of modified arguments
+        expr_ir["modified_arguments"] = modified_arguments
+
+        # (dict) tuple(MA-indices) -> FV-index of monomial factor
+        expr_ir["argument_factorization"] = argument_factorization
+
+        ### Modified terminals
+        # (array) list of FV-indices to modified terminals
+        expr_ir["modified_terminal_indices"] = modified_terminal_indices
+
+        # Dependency structure of graph:
+        # (CRSArray) FV-index -> direct dependency FV-index list
+        #expr_ir["dependencies"] = FV_deps
+
+        # (CRSArray) FV-index -> direct dependee FV-index list
+        #expr_ir["inverse_dependencies"] = inv_FV_deps
+
+        # Metadata about each vertex
+        expr_ir["active"] = FV_active        # (array) FV-index -> bool
+        expr_ir["piecewise"] = FV_piecewise  # (array) FV-index -> bool
+        expr_ir["varying"] = FV_varying      # (array) FV-index -> bool
+
+        expr_ir["modified_terminal_table_ranges"] = modified_terminal_table_ranges
+        expr_ir["modified_argument_table_ranges"] = modified_argument_table_ranges
+
+        # Store table data in FV indexing, this is used in integralgenerator
+        expr_ir["table_ranges"] = numpy.empty(len(FV), dtype=object)
+        expr_ir["table_ranges"][expr_ir["modified_terminal_indices"]] = \
+            expr_ir["modified_terminal_table_ranges"]
+
+        expr_ir["need_points"] = need_points
+        expr_ir["need_weights"] = need_weights
+
+        # Store the tables and ranges
+        expr_ir["table_types"] = table_types
+        expr_ir["unique_tables"] = unique_tables
+
+
+        # TODO: Some tables are associated with num_points, some are not
+        #       (i.e. piecewise constant, averaged and x0).
+        #       It will be easier to deal with that if we can join
+        #       the expr_ir for all num_points as mentioned above.
+        ir["expr_irs"][num_points] = expr_ir
+
+    return ir
+
+
+def build_scalar_graph(expressions):
+    """Build list representation of expression graph covering the given expressions.
+
+    TODO: Renaming, refactoring and cleanup of the graph building algorithms used in here
+    """
+
+    # Build the initial coarse computational graph of the expression
+    G = build_graph(expressions)
+
+    assert len(expressions) == 1, "FIXME: Multiple expressions in graph building needs more work from this point on."
+
+    # Build more fine grained computational graph of scalar subexpressions
+    # TODO: Make it so that
+    #   expressions[k] <-> NV[nvs[k][:]],
+    #   len(nvs[k]) == value_size(expressions[k])
+    scalar_expressions = rebuild_with_scalar_subexpressions(G)
+
+    # Sanity check on number of scalar symbols/components
+    assert len(scalar_expressions) == sum(product(expr.ufl_shape) for expr in expressions)
+
+    # Build new list representation of graph where all
+    # vertices of V represent single scalar operations
+    e2i, V, V_targets = build_scalar_graph_vertices(scalar_expressions)
+
+    # Compute sparse dependency matrix
+    V_deps = compute_dependencies(e2i, V)
+
+    return V, V_deps, V_targets
+
+
+def analyse_dependencies(V, V_deps, V_targets,
+                         modified_terminal_indices,
+                         mt_table_ranges,
+                         table_types):
+    # Count the number of dependencies every subexpr has
+    V_depcount = compute_dependency_count(V_deps)
+
+    # Build the 'inverse' of the sparse dependency matrix
+    inv_deps = invert_dependencies(V_deps, V_depcount)
+
+    # Mark subexpressions of V that are actually needed for final result
+    active, num_active = mark_active(V_deps, V_targets)
+
+    # Build piecewise/varying markers for factorized_vertices
+    varying_indices = []
+    for i in modified_terminal_indices:
+
+        # TODO: Can probably avoid this re-analysis by
+        # passing other datastructures in here:
+        mt = analyse_modified_terminal(V[i])
+        tr = mt_table_ranges.get(mt)
+        if tr is not None:
+            # Check if table computations have revealed values varying over points
+            uname = tr[0]
+            ttype = table_types[uname]
+            # Note: uniform means entity-wise uniform, varying over points
+            if ttype in ("varying", "uniform", "quadrature"):
+                varying_indices.append(i)
+            else:
+                if ttype not in ("fixed", "piecewise", "ones", "zeros"):
+                    error("Invalid ttype %s" % (ttype,))
+
+        elif not is_cellwise_constant(V[i]):
+            # Keeping this check to be on the safe side,
+            # not sure which cases this will cover (if any)
+            varying_indices.append(i)
+
+    # Mark every subexpression that is computed
+    # from the spatially dependent terminals
+    varying, num_varying = mark_image(inv_deps, varying_indices)
+
+    # The rest of the subexpressions are piecewise constant (1-1=0, 1-0=1)
+    piecewise = 1 - varying
+
+    # Unmark non-active subexpressions
+    varying *= active
+    piecewise *= active
+
+    # TODO: Skip literals in both varying and piecewise
+    # nonliteral = ...
+    # varying *= nonliteral
+    # piecewise *= nonliteral
+
+    return inv_deps, active, piecewise, varying
+
+
+# TODO: Consider comments below and do it or delete them.
+
+""" Old comments:
+
+Work for later::
+
+        - Apply some suitable renumbering of vertices and corresponding arrays prior to returning
+
+        - Allocate separate registers for each partition
+          (but e.g. argument[iq][i0] may need to be accessible in other loops)
+
+        - Improve register allocation algorithm
+
+        - Take a list of expressions as input to compile several expressions in one joined graph
+          (e.g. to compile a,L,M together for nonlinear problems)
+
+"""
+
+
+""" # Old comments:
+
+    # TODO: Inspection of varying shows that factorization is
+    # needed for effective loop invariant code motion w.r.t. quadrature loop as well.
+    # Postphoning that until everything is working fine again.
+    # Core ingredients for such factorization would be:
+    # - Flatten products of products somehow
+    # - Sorting flattened product factors by loop dependency then by canonical ordering
+    # Or to keep binary products:
+    # - Rebalancing product trees ((a*c)*(b*d) -> (a*b)*(c*d)) to make piecewise quantities 'float' to the top of the list
+
+    # rank = max(len(ma_indices) for ma_indices in argument_factorization)
+    # for i,a in enumerate(modified_arguments):
+    #    iarg = a.number()
+    # ipart = a.part()
+
+    # TODO: More structured MA organization?
+    #modified_arguments[rank][block][entry] -> UFL expression of modified argument
+    #dofranges[rank][block] -> (begin, end)
+    # or
+    #modified_arguments[rank][entry] -> UFL expression of modified argument
+    #dofrange[rank][entry] -> (begin, end)
+    #argument_factorization: (dict) tuple(MA-indices (only relevant ones!)) -> V-index of monomial factor
+    # becomes
+    #argument_factorization: (dict) tuple(entry for each(!) rank) -> V-index of monomial factor ## doesn't cover intermediate f*u in f*u*v!
+"""
+
+
+"""
+def old_code_useful_for_optimization():
+
+    # Use heuristics to mark the usefulness of storing every subexpr in a variable
+    scores = compute_cache_scores(V,
+                                  active,
+                                  dependencies,
+                                  inverse_dependencies,
+                                  partitions,  # TODO: Rewrite in terms of something else, this doesn't exist anymore
+                                  cache_score_policy=default_cache_score_policy)
+
+    # Allocate variables to store subexpressions in
+    allocations = allocate_registers(active, partitions, target_variables,
+                                     scores, int(parameters["max_registers"]), int(parameters["score_threshold"]))
+    target_registers = [allocations[r] for r in target_variables]
+    num_registers = sum(1 if x >= 0 else 0 for x in allocations)
+    # TODO: If we renumber we can allocate registers separately for each partition, which is probably a good idea.
+
+    expr_oir = {}
+    expr_oir["num_registers"] = num_registers
+    expr_oir["partitions"] = partitions
+    expr_oir["allocations"] = allocations
+    expr_oir["target_registers"] = target_registers
+    return expr_oir
+"""
+
diff --git a/ffc/uflacs/tools.py b/ffc/uflacs/tools.py
new file mode 100644
index 0000000..e8f900b
--- /dev/null
+++ b/ffc/uflacs/tools.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2009-2016 Kristian B. Oelgaard and Martin Sandve Alnæs
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+from ffc.log import error
+import collections
+import numpy
+
+from ufl.sorting import sorted_expr_sum
+from ufl import custom_integral_types
+from ufl.classes import Integral
+from ffc.representationutils import create_quadrature_points_and_weights
+
+
+def collect_quadrature_rules(integrals, default_scheme, default_degree):
+    "Collect quadrature rules found in list of integrals."
+    rules = set()
+    for integral in integrals:
+        md = integral.metadata() or {}
+        scheme = md.get("quadrature_rule", default_scheme)
+        degree = md.get("quadrature_degree", default_degree)
+        rule = (scheme, degree)
+        rules.add(rule)
+    return rules
+
+
+def compute_quadrature_rules(itg_data):
+    "Compute points and weights for a set of quadrature rules."
+    # Collect which quadrature rules occur in integrals
+    default_scheme = itg_data.metadata["quadrature_rule"]
+    default_degree = itg_data.metadata["quadrature_degree"]
+    rules = collect_quadrature_rules(
+        itg_data.integrals, default_scheme, default_degree)
+
+    quadrature_rules = {}
+    quadrature_rule_sizes = {}
+    for rule in rules:
+        scheme, degree = rule
+
+        # Compute quadrature points and weights
+        (points, weights) = create_quadrature_points_and_weights(
+            itg_data.integral_type, itg_data.domain.ufl_cell(), degree, scheme)
+
+        if points is not None:
+            points = numpy.asarray(points)
+
+        if weights is None:
+            # For custom integrals, there are no points
+            num_points = None
+        else:
+            num_points = len(weights)
+
+        # Assuming all rules with the same number of points are equal
+        if num_points in quadrature_rules:
+            assert numpy.array_equal(quadrature_rules[num_points][0], points)
+            assert numpy.array_equal(quadrature_rules[num_points][1], weights)
+            error("This number of points is already present in the weight table:\n  %s" % (quadrature_rules,))
+
+        quadrature_rules[num_points] = (points, weights)
+        quadrature_rule_sizes[rule] = num_points
+    return quadrature_rules, quadrature_rule_sizes
+
+
+def accumulate_integrals(itg_data, quadrature_rule_sizes):
+    """Group and accumulate integrals according to the number
+    of quadrature points in their rules.
+    """
+    if not itg_data.integrals:
+        return {}
+
+    default_scheme = itg_data.metadata["quadrature_rule"]
+    default_degree = itg_data.metadata["quadrature_degree"]
+
+    # Group integrands by quadrature rule
+    sorted_integrands = collections.defaultdict(list)
+    for integral in itg_data.integrals:
+        md = integral.metadata() or {}
+        scheme = md.get("quadrature_rule", default_scheme)
+        degree = md.get("quadrature_degree", default_degree)
+        rule = (scheme, degree)
+        num_points = quadrature_rule_sizes[rule]
+        sorted_integrands[num_points].append(integral.integrand())
+
+    # Accumulate integrands in a canonical ordering defined by UFL
+    sorted_integrals = {
+        num_points: Integral(
+            sorted_expr_sum(integrands),
+            itg_data.integral_type,
+            itg_data.domain,
+            itg_data.subdomain_id,
+            {},
+            None)
+        for num_points, integrands in list(sorted_integrands.items())
+        }
+    return sorted_integrals
diff --git a/ffc/uflacsrepr/uflacsgenerator.py b/ffc/uflacs/uflacsgenerator.py
similarity index 58%
rename from ffc/uflacsrepr/uflacsgenerator.py
rename to ffc/uflacs/uflacsgenerator.py
index 9d38974..3813775 100644
--- a/ffc/uflacsrepr/uflacsgenerator.py
+++ b/ffc/uflacs/uflacsgenerator.py
@@ -1,4 +1,5 @@
-# Copyright (C) 2013-2014 Martin Alnaes
+# -*- coding: utf-8 -*-
+# Copyright (C) 2013-2016 Martin Sandve Alnæs
 #
 # This file is part of FFC.
 #
@@ -15,15 +16,21 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 
+"""Controlling algorithm for building the tabulate_tensor
+source structure from factorized representation."""
+
 from ffc.log import info
 from ffc.representationutils import initialize_integral_code
 
-from uflacs.backends.ffc.generation import generate_tabulate_tensor_code
+from ffc.uflacs.backends.ffc.backend import FFCBackend
+from ffc.uflacs.generation.integralgenerator import IntegralGenerator
+from ffc.uflacs.language.format_lines import format_indented_lines
+
 
 def generate_integral_code(ir, prefix, parameters):
     "Generate code for integral from intermediate representation."
 
-    info("Generating code from uflacs representation")
+    info("Generating code from ffc.uflacs representation")
 
     # Generate generic ffc code snippets
     code = initialize_integral_code(ir, prefix, parameters)
@@ -42,3 +49,30 @@ def generate_integral_code(ir, prefix, parameters):
     code["additional_includes_set"].update(uflacs_code["additional_includes_set"])
 
     return code
+
+
+def generate_tabulate_tensor_code(ir, prefix, parameters):
+
+    # Create FFC C++ backend
+    backend = FFCBackend(ir, parameters)
+
+    # Create code generator for integral body
+    ig = IntegralGenerator(ir, backend)
+
+    # Generate code ast for the tabulate_tensor body
+    parts = ig.generate()
+
+    # Format code AST as one string
+    body = format_indented_lines(parts.cs_format(), 1)
+
+    # Fetch includes
+    includes = set(ig.get_includes())
+
+    # Format uflacs specific code structures into a single
+    # string and place in dict before returning to ffc
+    code = {
+        "tabulate_tensor": body,
+        "additional_includes_set": includes,
+    }
+
+    return code
diff --git a/ffc/uflacsrepr/uflacsoptimization.py b/ffc/uflacs/uflacsoptimization.py
similarity index 93%
rename from ffc/uflacsrepr/uflacsoptimization.py
rename to ffc/uflacs/uflacsoptimization.py
index 75e9c9f..73e8990 100644
--- a/ffc/uflacsrepr/uflacsoptimization.py
+++ b/ffc/uflacs/uflacsoptimization.py
@@ -1,4 +1,5 @@
-# Copyright (C) 2013-2014 Martin Alnaes
+# -*- coding: utf-8 -*-
+# Copyright (C) 2013-2016 Martin Sandve Alnæs
 #
 # This file is part of FFC.
 #
diff --git a/ffc/uflacs/uflacsrepresentation.py b/ffc/uflacs/uflacsrepresentation.py
new file mode 100644
index 0000000..23c1252
--- /dev/null
+++ b/ffc/uflacs/uflacsrepresentation.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2013-2016 Martin Sandve Alnæs
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import numpy
+
+from ufl.algorithms import replace
+from ufl.utils.sorting import sorted_by_count
+
+from ffc.log import info, warning
+from ffc.representationutils import initialize_integral_ir
+from ffc.fiatinterface import create_element
+from ffc.uflacs.tools import compute_quadrature_rules, accumulate_integrals
+from ffc.uflacs.representation.build_uflacs_ir import build_uflacs_ir
+
+
+def compute_integral_ir(itg_data,
+                        form_data,
+                        form_id,
+                        element_numbers,
+                        classnames,
+                        parameters):
+    "Compute intermediate represention of integral."
+
+    info("Computing uflacs representation")
+
+    # Initialise representation
+    ir = initialize_integral_ir("uflacs", itg_data, form_data, form_id)
+
+    # Store element classnames
+    ir["classnames"] = classnames
+
+    # TODO: Set alignas and padlen from parameters
+    sizeof_double = 8
+    ir["alignas"] = 32
+    ir["padlen"] = ir["alignas"] // sizeof_double
+
+    # Get element space dimensions
+    unique_elements = element_numbers.keys()
+    ir["element_dimensions"] = { ufl_element: create_element(ufl_element).space_dimension()
+                                 for ufl_element in unique_elements }
+
+    # Create dimensions of primary indices, needed to reset the argument 'A'
+    # given to tabulate_tensor() by the assembler.
+    argument_dimensions = [ir["element_dimensions"][ufl_element]
+                           for ufl_element in form_data.argument_elements]
+
+    # Compute shape of element tensor
+    if ir["integral_type"] == "interior_facet":
+        ir["tensor_shape"] = [2 * dim for dim in argument_dimensions]
+    else:
+        ir["tensor_shape"] = argument_dimensions
+
+    # Compute actual points and weights
+    quadrature_rules, quadrature_rule_sizes = compute_quadrature_rules(itg_data)
+
+    # Store quadrature rules in format { num_points: (points, weights) }
+    ir["quadrature_rules"] = quadrature_rules
+
+
+    # Group and accumulate integrals on the format { num_points: integral data }
+    sorted_integrals = accumulate_integrals(itg_data, quadrature_rule_sizes)
+
+    # Build coefficient numbering for UFC interface here, to avoid
+    # renumbering in UFL and application of replace mapping
+    if True:
+        # Using the mapped coefficients, numbered by UFL
+        coefficient_numbering = {}
+        sorted_coefficients = sorted_by_count(form_data.function_replace_map.keys())
+        for i, f in enumerate(sorted_coefficients):
+            g = form_data.function_replace_map[f]
+            coefficient_numbering[g] = i
+            assert i == g.count()
+
+        # Replace coefficients so they all have proper element and domain for what's to come
+        # TODO: We can avoid the replace call when proper Expression support is in place
+        #       and element/domain assignment is removed from compute_form_data.
+        integrands = {
+            num_points: replace(sorted_integrals[num_points].integrand(), form_data.function_replace_map)
+            for num_points in sorted(sorted_integrals)
+            }
+    else:
+        pass
+        #coefficient_numbering = {}
+        #coefficient_element = {}
+        #coefficient_domain = {}
+        #sorted_coefficients = sorted_by_count(form_data.function_replace_map.keys())
+        #for i, f in enumerate(sorted_coefficients):
+        #    g = form_data.function_replace_map[f]
+        #    coefficient_numbering[f] = i
+        #    coefficient_element[f] = g.ufl_element()
+        #    coefficient_domain[f] = g.ufl_domain()
+        #integrands = {
+        #    num_points: sorted_integrals[num_points].integrand()
+        #    for num_points in sorted(sorted_integrals)
+        #    }
+        # then pass coefficient_element and coefficient_domain to the uflacs ir as well
+
+
+    # Build the more uflacs-specific intermediate representation
+    uflacs_ir = build_uflacs_ir(itg_data.domain.ufl_cell(),
+                                itg_data.integral_type,
+                                ir["entitytype"],
+                                integrands,
+                                ir["tensor_shape"],
+                                coefficient_numbering,
+                                quadrature_rules,
+                                parameters)
+    ir.update(uflacs_ir)
+
+    # Consistency check on quadrature rules
+    rules1 = sorted(ir["expr_irs"].keys())
+    rules2 = sorted(ir["quadrature_rules"].keys())
+    if rules1 != rules2:
+        warning("Found different rules in expr_irs and "
+                "quadrature_rules:\n{0}\n{1}".format(rules1, rules2))
+
+    return ir
diff --git a/ffc/uflacsrepr/__init__.py b/ffc/uflacsrepr/__init__.py
deleted file mode 100644
index 76cbbf5..0000000
--- a/ffc/uflacsrepr/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .uflacsrepresentation import compute_integral_ir
-from .uflacsoptimization import optimize_integral_ir
-from .uflacsgenerator import generate_integral_code
diff --git a/ffc/uflacsrepr/uflacsrepresentation.py b/ffc/uflacsrepr/uflacsrepresentation.py
deleted file mode 100644
index 09bb88b..0000000
--- a/ffc/uflacsrepr/uflacsrepresentation.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright (C) 2013-2014 Martin Alnaes
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-
-from ffc.log import info, error, begin, end, debug_ir, ffc_assert, warning
-
-from ffc.fiatinterface import create_element
-from ffc.representationutils import initialize_integral_ir
-from ffc.quadrature.parameters import parse_optimise_parameters
-from ffc.quadrature.tabulate_basis import tabulate_basis
-from ffc.quadrature.quadraturerepresentation import sort_integrals
-
-from uflacs.backends.ffc.representation import compute_uflacs_integral_ir
-
-def compute_integral_ir(itg_data,
-                        form_data,
-                        form_id,
-                        element_numbers,
-                        parameters):
-    "Compute intermediate represention of integral."
-
-    info("Computing uflacs representation")
-
-    # Initialise representation
-    ir = initialize_integral_ir("uflacs", itg_data, form_data, form_id)
-
-    # Sort integrals into a dict with quadrature degree and rule as key
-    sorted_integrals = sort_integrals(itg_data.integrals,
-                                      itg_data.metadata["quadrature_degree"],
-                                      itg_data.metadata["quadrature_rule"])
-
-    # TODO: Might want to create the uflacs ir first and then create the tables we need afterwards!
-    # Tabulate quadrature points and basis function values in these points
-    integrals_dict, psi_tables, quadrature_rules = \
-        tabulate_basis(sorted_integrals, form_data, itg_data)
-
-    # Store element numbers, needed for classnames
-    ir["element_numbers"] = element_numbers
-
-    # Delegate to flacs to build its intermediate representation and add to ir
-    uflacs_ir = compute_uflacs_integral_ir(psi_tables, ir["entitytype"], integrals_dict, form_data, parameters)
-
-    # Store uflacs generated part separately
-    ir["uflacs"] = uflacs_ir
-
-    # Create and save the optisation parameters # TODO: Define uflacs specific optimization parameters instead
-    #ir["optimise_parameters"] = parse_optimise_parameters(parameters)
-
-    # Save tables for quadrature weights and points
-    ir["quadrature_rules"] = quadrature_rules
-
-    # Create dimensions of primary indices, needed to reset the argument 'A'
-    # given to tabulate_tensor() by the assembler.
-    ir["prim_idims"] = [create_element(ufl_element).space_dimension()
-                        for ufl_element in form_data.argument_elements]
-
-    # Added for uflacs, not sure if this is the best way to get this:
-    ir["coeff_idims"] = [create_element(ufl_element).space_dimension()
-                         for ufl_element in form_data.coefficient_elements]
-
-    return ir
diff --git a/ffc/utils.py b/ffc/utils.py
index 4e964be..da91f76 100644
--- a/ffc/utils.py
+++ b/ffc/utils.py
@@ -1,4 +1,6 @@
-# Copyright (C) 2005-2014 Anders Logg
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2005-2016 Anders Logg
 #
 # This file is part of FFC.
 #
@@ -16,7 +18,7 @@
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 #
 # Modified by Kristian B. Oelgaard, 2009
-# Modified by Martin Alnaes 2014
+# Modified by Martin Sandve Alnæs 2014
 
 # Python modules.
 import operator
@@ -28,16 +30,19 @@ from .log import error
 
 from ufl.utils.sequences import product
 
+
 def all_equal(sequence):
     "Check that all items in list are equal."
     return sequence[:-1] == sequence[1:]
 
+
 def pick_first(sequence):
     "Check that all values are equal and return the value."
     if not all_equal(sequence):
         error("Values differ: " + str(sequence))
     return sequence[0]
 
+
 def listcopy(sequence):
     """Create a copy of the list, calling the copy constructor on each
     object in the list (problems when using copy.deepcopy)."""
@@ -46,18 +51,29 @@ def listcopy(sequence):
     else:
         return [object.__class__(object) for object in sequence]
 
-def compute_permutations(k, n, skip = []):
-   """Compute all permutations of k elements from (0, n) in rising order.
-   Any elements that are contained in the list skip are not included."""
-   if k == 1:
-       return [(i,) for i in range(n) if not i in skip]
-   pp = compute_permutations(k - 1, n, skip)
-   permutations = []
-   for i in range(n):
-       if i in skip:
-           continue
-       for p in pp:
-           if i < p[0]:
-               permutations += [(i, ) + p]
-   return permutations
 
+def compute_permutations(k, n, skip=[]):
+    """Compute all permutations of k elements from (0, n) in rising order.
+    Any elements that are contained in the list skip are not included."""
+    if k == 1:
+        return [(i,) for i in range(n) if i not in skip]
+    pp = compute_permutations(k - 1, n, skip)
+    permutations = []
+    for i in range(n):
+        if i in skip:
+            continue
+        for p in pp:
+            if i < p[0]:
+                permutations += [(i, ) + p]
+    return permutations
+
+
+def insert_nested_dict(root, keys, value):
+    "Set root[keys[0]][...][keys[-1]] = value, creating subdicts on the way if missing."
+    for k in keys[:-1]:
+        d = root.get(k)
+        if d is None:
+            d = {}
+            root[k] = d
+        root = d
+    root[keys[-1]] = value
diff --git a/ffc/wrappers.py b/ffc/wrappers.py
index 6d7ea4e..471e1b2 100644
--- a/ffc/wrappers.py
+++ b/ffc/wrappers.py
@@ -1,4 +1,6 @@
-# Copyright (C) 2010-2014 Anders Logg
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2010-2016 Anders Logg
 #
 # This file is part of FFC.
 #
@@ -29,6 +31,7 @@ __all__ = ["generate_wrapper_code"]
 
 # FIXME: More clean-ups needed here.
 
+
 def generate_wrapper_code(analysis, prefix, object_names, parameters):
     "Generate code for additional wrappers."
 
@@ -39,6 +42,7 @@ def generate_wrapper_code(analysis, prefix, object_names, parameters):
     # Return dolfin wrapper
     return _generate_dolfin_wrapper(analysis, prefix, object_names, parameters)
 
+
 def _generate_dolfin_wrapper(analysis, prefix, object_names, parameters):
 
     begin("Compiler stage 4.1: Generating additional wrapper code")
@@ -56,6 +60,7 @@ def _generate_dolfin_wrapper(analysis, prefix, object_names, parameters):
 
     return code
 
+
 def _encapsulate(prefix, object_names, analysis, parameters):
 
     # Extract data from analysis
@@ -73,8 +78,8 @@ def _encapsulate(prefix, object_names, analysis, parameters):
     # Special case: with error control
     elif parameters["error_control"] and num_form_datas == 11:
         capsules = [_encapsule_form(prefix, object_names, form_data, i, element_map)
-                    for (i, form_data) in enumerate(form_datas[:num_form_datas-1])]
-        capsules += [_encapsule_form(prefix, object_names, form_datas[-1], num_form_datas-1,
+                    for (i, form_data) in enumerate(form_datas[:num_form_datas - 1])]
+        capsules += [_encapsule_form(prefix, object_names, form_datas[-1], num_form_datas - 1,
                                      element_map, "GoalFunctional")]
     # Otherwise: generate standard capsules for each form
     else:
@@ -105,8 +110,9 @@ def _encapsule_form(prefix, object_names, form_data, i, element_map, superclassn
 
     return form_names
 
+
 def _encapsule_element(prefix, elements):
-    element_number = len(elements) - 1 # eh? this doesn't make any sense
+    element_number = len(elements) - 1  # eh? this doesn't make any sense
     args = ("0",
             [make_classname(prefix, "finite_element", element_number)],
             [make_classname(prefix, "dofmap", element_number)])
diff --git a/release.conf b/release.conf
deleted file mode 100644
index ff340fb..0000000
--- a/release.conf
+++ /dev/null
@@ -1,35 +0,0 @@
-# Configuration file for fenics-release
-
-PACKAGE="ffc"
-BRANCH="master"
-FILES="ChangeLog \
-       README.rst \
-       setup.py \
-       ffc/__init__.py \
-       ufc/ufc.h \
-       uflacs/__init__.py \
-       ffc/backends/ufc/__init__.py \
-       ffc/backends/ufc/dofmap.py \
-       ffc/backends/ufc/finite_element.py \
-       ffc/backends/ufc/form.py \
-       ffc/backends/ufc/function.py \
-       ffc/backends/ufc/integrals.py"
-
-pre-release()
-{
-    # Update regression tests
-    echo "Installing FFC locally"
-    python setup.py install --prefix=$PWD/local
-    PYVER=$(python -c 'import sys; print(".".join(map(str, sys.version_info[:2])))')
-    export PYTHONPATH=$PWD/local/lib/python$PYVER/site-packages:$PYTHONPATH
-    export PATH=$PWD/local/bin:$PATH
-    export PKG_CONFIG_PATH=$PWD/local/lib/pkgconfig:$PKG_CONFIG_PATH
-    echo "Running regression tests"
-    cd test/regression
-    python test.py --skip-run
-    less error.log
-    echo "Only version numbers should differ, press return to continue"
-    read
-    ./scripts/upload
-    cd -
-}
diff --git a/requirements.txt b/requirements.txt
index 8fc779f..4301eb6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,3 @@
 -e git+https://bitbucket.org/fenics-project/fiat.git#egg=fiat
--e git+https://bitbucket.org/fenics-project/instant.git#egg=instant
+-e git+https://bitbucket.org/fenics-project/dijitso.git#egg=dijitso
 -e git+https://bitbucket.org/fenics-project/ufl.git#egg=ufl
diff --git a/scripts/ffc b/scripts/ffc
deleted file mode 100755
index a7647da..0000000
--- a/scripts/ffc
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env python
-
-# This script is the command-line interface to FFC. It parses
-# command-line arguments and wraps the given form file code in a
-# Python module which is then executed.
-
-# Copyright (C) 2004-2014 Anders Logg
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# Modified by Johan Jansson, 2005.
-# Modified by Ola Skavhaug, 2006.
-# Modified by Dag Lindbo, 2008.
-# Modified by Kristian B. Oelgaard 2010.
-
-import sys
-from ffc.main import main
-
-if __name__ == "__main__":
-    sys.exit(main(sys.argv[1:]))
diff --git a/scripts/makedist b/scripts/makedist
deleted file mode 100755
index 3c319d8..0000000
--- a/scripts/makedist
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2004-2008 Anders Logg
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# This script creates a new release of FFC
-
-# Make sure FFC is installed (so we may run unit tests)
-#echo '--- Uninstalling FFC'
-#sudo python setup.py install
-
-# Make sure we have the current version
-echo '--- Synchronizing repository'
-sleep 1
-hg commit
-hg pull ssh://ffc@fenics.org/hg/ffc
-hg merge
-hg commit
-hg update
-hg push ssh://ffc@fenics.org/hg/ffc
-
-# Update version numbers
-echo '--- Update version number in ChangeLog'
-sleep 1
-emacs -nw ChangeLog
-echo '--- Update version number in constants.py'
-sleep 1
-emacs -nw ffc/common/constants.py
-echo '--- Update version number in setup.py'
-sleep 1
-emacs -nw setup.py
-
-# Install latest version
-echo "Running commands for installing FFC locally on my machine. Sorry about that."
-echo "We need to figure out a better way to organize the makedist script. /Anders"
-fenics-install
-fenics-dev
-
-# Get the version number
-VERSION=`grep 'FFC_VERSION' ffc/common/constants.py | cut -d'"' -f2`
-echo "--- Version number is $VERSION"
-
-# Run tests
-echo '--- Running tests'
-cd test
-python test.py
-echo '--- Only version numbers should differ, press return to continue'
-read
-cd regression
-./update-references
-cd ../..
-
-# Run benchmark problem
-echo '--- Running benchmark problem'
-cd bench
-echo "FFC version $VERSION" >> bench.log
-date >> bench.log
-echo "" >> bench.log
-./bench >> bench.log
-cd ../
-
-# Tag repository
-hg tag $VERSION
-
-# Commit changes to hg
-echo '--- Pushing changes to parent repository'
-sleep 1
-hg commit
-hg push ssh://ffc@fenics.org/hg/ffc
-
-# Create archive
-hg archive -t tgz ffc-$VERSION.tar.gz
-
-# Copy files to web page
-echo '--- Copying files to web server'
-scp ffc-$VERSION.tar.gz fenics at fenics.org:www.fenics.org/pub/software/ffc/v0.7
-scp ChangeLog fenics at fenics.org:www.fenics.org/pub/software/ffc/
-scp TODO fenics at fenics.org:www.fenics.org/pub/software/ffc/
-
-# Notify ffc-dev of the new version
-echo '--- Notifying mailing list'
-SUBJECT="Version "$VERSION" of FFC released"
-cat ChangeLog | mail -s "$SUBJECT" ffc-dev at fenics.org
-
-# Edit web pages
-echo '--- Edit web pages'
-ssh -t fenics at fenics.org '/home/fenics/local/bin/news'
-firefox http://www.fenics.org/wiki/Download
-
-# Notify pypi
-python setup.py register
diff --git a/scripts/makedoc b/scripts/makedoc
deleted file mode 100755
index 2e357b3..0000000
--- a/scripts/makedoc
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (C) 2011 Marie E. Rognes
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2011-11-16
-# Last changed: 2011-11-16
-
-#
-# This is a utility script for generating .rst and .html
-# documentation for FFC.
-#
-# Run from the top level FFC directory:
-#
-#    ./scripts/makedoc
-#
-
-echo ""
-echo "--- Generating FFC html documentation"
-echo ""
-
-SPHINX_DIR=./doc/sphinx
-SPHINX_SCRIPT_DIR=$SPHINX_DIR/scripts
-SPHINX_SOURCE_DIR=$SPHINX_DIR/source
-
-# Generate .rst files
-$SPHINX_SCRIPT_DIR/generate_modules.py ffc --dest-dir=$SPHINX_SOURCE_DIR --suffix=rst --force
-
-echo ""
-echo "--- reSTructured text files generated in doc/sphinx/source/"
-echo ""
-
-# Generate index (and add some labels)
-VERSION=`grep '__version__' ffc/__init__.py | cut -d'"' -f2`
-$SPHINX_SCRIPT_DIR/generate_index.py $SPHINX_SOURCE_DIR $VERSION
-
-# Run sphinx make html
-cd $SPHINX_DIR
-make clean
-make html
-
-echo ""
-echo "--- HTML files generated in $SPHINX_DIR/build/html"
-echo ""
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..0a9d60e
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,15 @@
+[tool:pytest]
+norecursedirs=libs doc test/uflacs/crosslanguage/generated
+[flake8]
+max-line-length = 100
+exclude = .git,__pycache__,docs/source/conf.py,build,dist,libs
+ignore =
+    # TODO: Enable these after fixing one by one, these may hide bugs
+    F403,F405,F812,F999,F401,F841,F821,
+    # TODO: Some of these may also be important
+    E501,E203,E265,E701,E702,E703,E302,E402,E122,
+    E222,E303,E129,E251,E225,E221,E731,E272,
+    E131,E115,E127,E261,E128,E202,E231,E201,E266,
+    E301,E262,E401,E124,E227,E126,E121,E704,E502,E241,E123,E226,
+    # TODO: Some of these may also be important
+    W503,W391,W291,W293
diff --git a/setup.py b/setup.py
index d145b3d..18dd289 100755
--- a/setup.py
+++ b/setup.py
@@ -1,16 +1,13 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+from __future__ import print_function
 
-import os, sys, platform, re, subprocess, string, tempfile, shutil, hashlib
+import os
+import sys
+import re
+import subprocess
+import string
 
-try:
-    from setuptools import setup
-    from setuptools.command.install import install
-except ImportError:
-    from distutils.core import setup
-    from distutils.command.install import install
-
-from distutils import sysconfig
-from distutils.ccompiler import new_compiler
+from setuptools import setup, find_packages
 
 if sys.version_info < (2, 7):
     print("Python 2.7 or higher required, please upgrade.")
@@ -21,11 +18,16 @@ VERSION = re.findall('__version__ = "(.*)"',
 
 URL = "https://bitbucket.org/fenics-project/ffc/"
 
-SCRIPTS = [os.path.join("scripts", "ffc")]
+if sys.version_info[0] == 2:
+    ENTRY_POINTS = {'console_scripts': ['ffc = ffc.__main__:main',
+                                        'ffc-2 = ffc.__main__:main']}
+else:
+    ENTRY_POINTS = {'console_scripts': ['ffc = ffc.__main__:main',
+                                        'ffc-3 = ffc.__main__:main']}
 
 AUTHORS = """\
 Anders Logg, Kristian Oelgaard, Marie Rognes, Garth N. Wells,
-Martin Sandve Alnaes, Hans Petter Langtangen, Kent-Andre Mardal,
+Martin Sandve Alnæs, Hans Petter Langtangen, Kent-Andre Mardal,
 Ola Skavhaug, et al.
 """
 
@@ -33,41 +35,47 @@ CLASSIFIERS = """\
 Development Status :: 5 - Production/Stable
 Intended Audience :: Developers
 Intended Audience :: Science/Research
-License :: OSI Approved :: GNU General Public License v2 (GPLv2)
-License :: Public Domain
-Operating System :: MacOS :: MacOS X
-Operating System :: Microsoft :: Windows
+License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)
 Operating System :: POSIX
 Operating System :: POSIX :: Linux
-Programming Language :: C++
+Operating System :: MacOS :: MacOS X
+Operating System :: Microsoft :: Windows
 Programming Language :: Python
+Programming Language :: Python :: 2
+Programming Language :: Python :: 2.7
+Programming Language :: Python :: 3
+Programming Language :: Python :: 3.4
+Programming Language :: Python :: 3.5
+Programming Language :: Python :: 3.6
 Topic :: Scientific/Engineering :: Mathematics
-Topic :: Software Development :: Libraries
+Topic :: Software Development :: Libraries :: Python Modules
+Topic :: Software Development :: Code Generators
 """
 
+
 def tarball():
     if "dev" in VERSION:
         return None
     return URL + "downloads/ffc-%s.tar.gz" % VERSION
 
+
 def get_installation_prefix():
     "Get installation prefix"
     prefix = sys.prefix
     for arg in sys.argv[1:]:
         if "--user" in arg:
             import site
-            prefix = site.USER_BASE
-        elif arg in ("--prefix", "--home", "--root", "--install-base"):
-            prefix = sys.argv[sys.argv.index(arg)+1]
-        elif "--prefix=" in arg or "--home=" in arg or \
-          "--root=" in arg or "--install-base=" in arg:
+            prefix = site.getuserbase()
+            break
+        elif arg in ("--prefix", "--home", "--install-base"):
+            prefix = sys.argv[sys.argv.index(arg) + 1]
+            break
+        elif "--prefix=" in arg or "--home=" in arg or "--install-base=" in arg:
             prefix = arg.split("=")[1]
+            break
+
     return os.path.abspath(os.path.expanduser(prefix))
 
-def get_ufc_signature():
-    """Compute SHA-1 hash of ufc.h"""
-    with open(os.path.join('ufc', 'ufc.h'), 'rb') as f:
-        return hashlib.sha1(f.read()).hexdigest()
 
 def get_git_commit_hash():
     """Return git commit hash of currently checked out revision
@@ -84,19 +92,6 @@ def get_git_commit_hash():
     else:
         return hash.strip()
 
-def create_windows_batch_files(scripts):
-    """Create Windows batch files, to get around problem that we
-    cannot run Python scripts in the prompt without the .py
-    extension."""
-    batch_files = []
-    for script in scripts:
-        batch_file = script + ".bat"
-        f = open(batch_file, "w")
-        f.write("python \"%%~dp0\%s\" %%*\n" % os.path.split(script)[1])
-        f.close()
-        batch_files.append(batch_file)
-    scripts.extend(batch_files)
-    return scripts
 
 def write_config_file(infile, outfile, variables={}):
     "Write config file based on template"
@@ -104,261 +99,58 @@ def write_config_file(infile, outfile, variables={}):
         delimiter = "@"
     s = AtTemplate(open(infile, "r").read())
     s = s.substitute(**variables)
-    a = open(outfile, "w")
-    try:
+    with open(outfile, "w") as a:
         a.write(s)
-    finally:
-        a.close()
-
-def find_library(package_name, lib_names):
-    "Return the full path to the library (empty string if not found)"
-    search_dirs = [
-        "%s%slib" % (os.environ.get("%s_DIR" % package_name.upper(), ""), os.path.sep),
-        "%s" % sysconfig.get_config_vars().get("LIBDIR", ""),
-        "/usr/lib/%s" % sysconfig.get_config_vars().get("MULTIARCH", ""),
-        "/usr/local/lib",
-        "/opt/local/lib",
-        "/usr/lib",
-        "/usr/lib64",
-        ]
-    lib = None
-    cc = new_compiler()
-    for name in lib_names:
-        lib = cc.find_library_file(search_dirs, name)
-        if lib is not None:
-            break
-    return lib or ""
-
-def find_python_library():
-    "Return the full path to the Python library (empty string if not found)"
-    pyver = sysconfig.get_python_version()
-    libpython_names = [
-        "python%s" % pyver.replace(".", ""),
-        "python%smu" % pyver,
-        "python%sm" % pyver,
-        "python%su" % pyver,
-        "python%s" % pyver,
-        ]
-    return find_library("python", libpython_names)
 
-def find_boost_math_library():
-    "Return the full path to the Boost math library (empty string if not found)"
-    return find_library("boost", ["boost_math_tr1", "boost_math_tr1-mt"])
 
-def find_include_dir(package_name, header_file):
-    "Return the path to the given header file (empty string if not found)"
-    search_dirs = [
-        "%s%sinclude" % (os.environ.get("%s_DIR" % package_name.upper(), ""), os.path.sep),
-        "/usr/local/include",
-        "/opt/local/include",
-        "/usr/include",
-        ]
-    for inc_dir in search_dirs:
-        if os.path.isfile(os.path.join(inc_dir, header_file)):
-            return inc_dir
-    return ""
-
-def find_boost_include_dir():
-    "Return the path to the Boost include dir (empty string if not found)"
-    return find_include_dir("boost", os.path.join("boost", "version.hpp"))
-
-def generate_git_hash_file():
+def generate_git_hash_file(GIT_COMMIT_HASH):
     "Generate module with git hash"
-
-    # Get git commit hash
-    GIT_COMMIT_HASH = get_git_commit_hash()
-
-    # Generate git_commit_hash.py
     write_config_file(os.path.join("ffc", "git_commit_hash.py.in"),
                       os.path.join("ffc", "git_commit_hash.py"),
                       variables=dict(GIT_COMMIT_HASH=GIT_COMMIT_HASH))
 
-def generate_ufc_signature_file():
-    "Generate module with UFC signature"
-
-    UFC_SIGNATURE = get_ufc_signature()
-
-    # Generate ufc_signature.py
-    write_config_file(os.path.join("ffc", "ufc_signature.py.in"),
-                      os.path.join("ffc", "ufc_signature.py"),
-                      variables=dict(UFC_SIGNATURE=UFC_SIGNATURE))
-
-def generate_ufc_config_files():
-    "Generate and install UFC configuration files"
-
-    # Get variables
-    INSTALL_PREFIX = get_installation_prefix()
-    PYTHON_LIBRARY = os.environ.get("PYTHON_LIBRARY", find_python_library())
-    MAJOR, MINOR, MICRO = VERSION.split(".")
-    UFC_SIGNATURE = get_ufc_signature()
-
-    # Check that compiler supports C++11 features
-    cc = new_compiler()
-    CXX = os.environ.get("CXX")
-    if CXX:
-        cc.set_executables(compiler_so=CXX, compiler=CXX, compiler_cxx=CXX)
-    CXX_FLAGS = os.environ.get("CXXFLAGS", "")
-    if has_cxx_flag(cc, "-std=c++11"):
-        CXX_FLAGS += " -std=c++11"
-    elif has_cxx_flag(cc, "-std=c++0x"):
-        CXX_FLAGS += " -std=c++0x"
-
-    # Generate UFCConfig.cmake
-    write_config_file(os.path.join("cmake", "templates", "UFCConfig.cmake.in"),
-                      os.path.join("cmake", "templates", "UFCConfig.cmake"),
-                      variables=dict(INSTALL_PREFIX=INSTALL_PREFIX,
-                                     CXX_FLAGS=CXX_FLAGS.strip(),
-                                     PYTHON_INCLUDE_DIR=sysconfig.get_python_inc(),
-                                     PYTHON_LIBRARY=PYTHON_LIBRARY,
-                                     PYTHON_EXECUTABLE=sys.executable,
-                                     FULLVERSION=VERSION,
-                                     UFC_SIGNATURE=UFC_SIGNATURE,
-                                     BOOST_INCLUDE_DIR=find_boost_include_dir(),
-                                     BOOST_MATH_LIBRARY=find_boost_math_library()))
-
-    # Generate UFCConfigVersion.cmake
-    write_config_file(os.path.join("cmake", "templates", \
-                                   "UFCConfigVersion.cmake.in"),
-                      os.path.join("cmake", "templates", \
-                                   "UFCConfigVersion.cmake"),
-                      variables=dict(FULLVERSION=VERSION,
-                                     MAJOR=MAJOR, MINOR=MINOR, MICRO=MICRO))
-
-    # Generate UseUFC.cmake
-    write_config_file(os.path.join("cmake", "templates", "UseUFC.cmake.in"),
-                      os.path.join("cmake", "templates", "UseUFC.cmake"))
-
-    # FIXME: Generation of pkgconfig file may no longer be needed, so
-    # FIXME: we may consider removing this.
-
-    # Generate ufc-1.pc
-    write_config_file(os.path.join("cmake", "templates", "ufc-1.pc.in"),
-                      os.path.join("cmake", "templates", "ufc-1.pc"),
-                      variables=dict(FULLVERSION=VERSION,
-                                     INSTALL_PREFIX=INSTALL_PREFIX,
-                                     CXX_FLAGS=CXX_FLAGS))
-
-def has_cxx_flag(cc, flag):
-    "Return True if compiler supports given flag"
-    tmpdir = tempfile.mkdtemp(prefix="ffc-build-")
-    devnull = oldstderr = None
-    try:
-        try:
-            fname = os.path.join(tmpdir, "flagname.cpp")
-            f = open(fname, "w")
-            f.write("int main() { return 0;}")
-            f.close()
-            # Redirect stderr to /dev/null to hide any error messages
-            # from the compiler.
-            devnull = open(os.devnull, 'w')
-            oldstderr = os.dup(sys.stderr.fileno())
-            os.dup2(devnull.fileno(), sys.stderr.fileno())
-            cc.compile([fname], output_dir=tmpdir, extra_preargs=[flag])
-        except:
-            return False
-        return True
-    finally:
-        if oldstderr is not None:
-            os.dup2(oldstderr, sys.stderr.fileno())
-        if devnull is not None:
-            devnull.close()
-        shutil.rmtree(tmpdir)
 
 def run_install():
     "Run installation"
 
-    # Check if we're building inside a 'Read the Docs' container
-    on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
+    # Get common variables
+    #INSTALL_PREFIX = get_installation_prefix()
+    GIT_COMMIT_HASH = get_git_commit_hash()
 
-    # Create batch files for Windows if necessary
-    scripts = SCRIPTS
-    if platform.system() == "Windows" or "bdist_wininst" in sys.argv:
-        scripts = create_windows_batch_files(scripts)
+    # Scripts list
+    entry_points = ENTRY_POINTS
 
     # Generate module with git hash from template
-    generate_git_hash_file()
-
-    # Generate module with UFC signature from template
-    generate_ufc_signature_file()
-
-    # Generate config files
-    generate_ufc_config_files()
-
-    class my_install(install):
-        def run(self):
-            if not self.dry_run:
-                # Generate ufc_include.py
-                write_config_file(os.path.join("ffc", "ufc_include.py.in"),
-                                  os.path.join("ffc", "ufc_include.py"),
-                                  variables=dict(INSTALL_PREFIX=get_installation_prefix()))
-
-            # distutils uses old-style classes, so no super()
-            install.run(self)
+    generate_git_hash_file(GIT_COMMIT_HASH)
 
     # FFC data files
     data_files = [(os.path.join("share", "man", "man1"),
                   [os.path.join("doc", "man", "man1", "ffc.1.gz")])]
 
-    # Add UFC data files (need to use complete path because setuptools
-    # installs into the Python package directory, not --prefix). This
-    # can be fixed when Swig, etc are removed from FFC).
-    INSTALL_PREFIX = get_installation_prefix()
-    data_files_ufc = [(os.path.join(INSTALL_PREFIX, "include"),
-                           [os.path.join("ufc", "ufc.h"),
-                            os.path.join("ufc", "ufc_geometry.h")]),
-                          (os.path.join(INSTALL_PREFIX, "share", "ufc"),
-                           [os.path.join("cmake", "templates", \
-                                         "UFCConfig.cmake"),
-                            os.path.join("cmake", "templates", \
-                                         "UFCConfigVersion.cmake"),
-                            os.path.join("cmake", "templates", \
-                                         "UseUFC.cmake")]),
-                          (os.path.join(INSTALL_PREFIX, "lib", "pkgconfig"),
-                           [os.path.join("cmake", "templates", "ufc-1.pc")])]
-
-    data_files = data_files + data_files_ufc
-
     # Call distutils to perform installation
-    setup(name             = "FFC",
-          description      = "The FEniCS Form Compiler",
-          version          = VERSION,
-          author           = AUTHORS,
-          classifiers      = [_f for _f in CLASSIFIERS.split('\n') if _f],
-          license          = "LGPL version 3 or later",
-          author_email     = "fenics-dev at googlegroups.com",
-          maintainer_email = "fenics-dev at googlegroups.com",
-          url              = URL,
-          download_url     = tarball(),
-          platforms        = ["Windows", "Linux", "Solaris", "Mac OS-X",
-                              "Unix"],
-          packages         = ["ffc",
-                              "ffc.quadrature",
-                              "ffc.tensor",
-                              "ffc.uflacsrepr",
-                              "ffc.errorcontrol",
-                              "ffc.backends",
-                              "ffc.backends.dolfin",
-                              "ffc.backends.ufc",
-                              "uflacs",
-                              "uflacs.analysis",
-                              "uflacs.backends",
-                              "uflacs.backends.ffc",
-                              "uflacs.backends.ufc",
-                              "uflacs.datastructures",
-                              "uflacs.elementtables",
-                              "uflacs.generation",
-                              "uflacs.language",
-                              "uflacs.representation",
-                              "ufc"],
-          package_dir      = {"ffc": "ffc",
-                              "uflacs": "uflacs",
-                              "ufc": "ufc"},
-          scripts          = scripts,
-          cmdclass         = {'install': my_install},
-          data_files       = data_files,
-          install_requires = ["numpy", "six", "fiat==2016.1.0",
-                              "ufl==2016.1.0", "instant==2016.1.0"],
-          zip_safe = False)
+    setup(name="FFC",
+          description="The FEniCS Form Compiler",
+          version=VERSION,
+          author=AUTHORS,
+          classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
+          license="LGPL version 3 or later",
+          author_email="fenics-dev at googlegroups.com",
+          maintainer_email="fenics-dev at googlegroups.com",
+          url=URL,
+          download_url=tarball(),
+          platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
+          packages=find_packages("."),
+          package_dir={"ffc": "ffc"},
+          package_data={"ffc" : [os.path.join('backends', 'ufc', '*.h')]},
+          #scripts=scripts,  # Using entry_points instead
+          entry_points=entry_points,
+          data_files=data_files,
+          install_requires=["numpy",
+                            "six",
+                            "fiat==%s" % VERSION,
+                            "ufl==%s" % VERSION,
+                            "dijitso==%s" % VERSION],
+          zip_safe=False)
 
 if __name__ == "__main__":
     run_install()
diff --git a/test/evaluate_basis/cppcode.py b/test/evaluate_basis/cppcode.py
deleted file mode 100644
index d5c4510..0000000
--- a/test/evaluate_basis/cppcode.py
+++ /dev/null
@@ -1,76 +0,0 @@
-"This module provides simple C++ code for verification of UFC code."
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-18
-# Last changed: 2010-01-18
-
-# Common code for all integral types
-evaluate_basis_code = """\
-#include <iostream>
-#include <ufc.h>
-#include "test.h"
-
-int main()
-{
-  // Create element
-  %(element)s element;
-
-  // Size of dof_values
-  // FIXME: This will not work for TensorElements
-  int N = element.value_dimension(0);
-
-  // Create values
-  double* dof_values = new double[N];
-  for (int i = 0; i < N; i++)
-    dof_values[i] = 0.0;
-
-  // Create cell and fill with some arbitrary data
-  double cell_coordinates[8][3] = {{0.90, 0.34, 0.45},
-                                   {0.56, 0.76, 0.83},
-                                   {0.98, 0.78, 0.19},
-                                   {0.12, 0.56, 0.66},
-                                   {0.96, 0.78, 0.63},
-                                   {0.11, 0.35, 0.49},
-                                   {0.51, 0.88, 0.65},
-                                   {0.98, 0.45, 0.01}};
-  ufc::cell cell;
-  cell.coordinates = new double * [8];
-  for (int i = 0; i < 8; i++)
-  {
-    cell.coordinates[i] = new double[3];
-    for (int j = 0; j < 3; j++)
-      cell.coordinates[i][j] = cell_coordinates[i][j];
-  }
-
-  // Random coordinates where we want to evaluate the basis functions
-  double coordinates[3] = {0.32, 0.51, 0.05};
-
-  // Loop element space dimension and call evaluate_basis.
-  for (unsigned int i = 0; i < element.space_dimension(); i++)
-  {
-    element.evaluate_basis(i, dof_values, coordinates, cell);
-    // Print values
-    for (int j = 0; j < N; j++)
-      std::cout << dof_values[j] << " ";
-  }
-  std::cout << std::endl;
-  return 0;
-}
-"""
-
diff --git a/test/evaluate_basis/test.py b/test/evaluate_basis/test.py
deleted file mode 100644
index ebdd691..0000000
--- a/test/evaluate_basis/test.py
+++ /dev/null
@@ -1,260 +0,0 @@
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-18
-# Last changed: 2010-01-18
-
-from __future__ import print_function
-from cppcode import evaluate_basis_code
-from ufl import FiniteElement, MixedElement
-from instant.output import get_status_output
-
-import sys, os, pickle, numpy, shutil
-
-# Elements, supported by FFC and FIAT, and their supported shape and orders
-single_elements = [ {"family": "Lagrange",\
-                      "shapes": ["interval", "triangle", "tetrahedron"],\
-                      "orders": [1, 2, 3, 4]},\
-                    {"family": "Discontinuous Lagrange",\
-                      "shapes": ["interval", "triangle", "tetrahedron"],\
-                      "orders": [0, 1, 2, 3, 4]},\
-                    {"family": "Crouzeix-Raviart",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [1]},\
-                    {"family": "Raviart-Thomas",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [1, 2, 3]},\
-                    {"family": "Discontinuous Raviart-Thomas",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [1, 2, 3]},\
-                    {"family": "Brezzi-Douglas-Marini",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [1, 2, 3]},\
-                    {"family": "Brezzi-Douglas-Fortin-Marini",\
-                      "shapes": ["triangle"],\
-                      "orders": [2]},\
-                    {"family": "Nedelec 1st kind H(curl)",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [1, 2, 3]},\
-                    {"family": "Nedelec 2nd kind H(curl)",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [1, 2, 3]}]
-
-# Create some mixed elements
-dg0_tri = FiniteElement("DG", "triangle", 0)
-dg1_tri = FiniteElement("DG", "triangle", 1)
-cg1_tri = FiniteElement("CG", "triangle", 1)
-cr1_tri = FiniteElement("CR", "triangle", 1)
-rt1_tri = FiniteElement("RT", "triangle", 1)
-drt2_tri = FiniteElement("DRT", "triangle", 2)
-bdm1_tri = FiniteElement("BDM", "triangle", 1)
-ned1_tri = FiniteElement("N1curl", "triangle", 1)
-
-dg0_tet = FiniteElement("DG", "tetrahedron", 0)
-dg1_tet = FiniteElement("DG", "tetrahedron", 1)
-cg1_tet = FiniteElement("CG", "tetrahedron", 1)
-cr1_tet = FiniteElement("CR", "tetrahedron", 1)
-rt1_tet = FiniteElement("RT", "tetrahedron", 1)
-drt2_tet = FiniteElement("DRT", "tetrahedron", 2)
-bdm1_tet = FiniteElement("BDM", "tetrahedron", 1)
-ned1_tet = FiniteElement("N1curl", "tetrahedron", 1)
-
-mixed_elements = [MixedElement([dg0_tri]*4), MixedElement([cg1_tri]*3), MixedElement([bdm1_tri]*2),\
-                  MixedElement([dg1_tri, cg1_tri, cr1_tri, rt1_tri, bdm1_tri, ned1_tri]),\
-                  MixedElement([MixedElement([rt1_tri, cr1_tri]), cg1_tri, ned1_tri]),\
-                  MixedElement([ned1_tri, dg1_tri, MixedElement([rt1_tri, cr1_tri])]),\
-                  MixedElement([drt2_tri, cg1_tri]),\
-                  MixedElement([dg0_tet]*4), MixedElement([cg1_tet]*3), MixedElement([bdm1_tet]*2),\
-                  MixedElement([dg1_tet, cg1_tet, cr1_tet, rt1_tet, bdm1_tet, ned1_tet]),\
-                  MixedElement([MixedElement([rt1_tet, cr1_tet]), cg1_tet, ned1_tet]),\
-                  MixedElement([ned1_tet, dg1_tet, MixedElement([rt1_tet, cr1_tet])]),\
-                  MixedElement([drt2_tet, cg1_tet])]
-
-ffc_failed = []
-gcc_failed = []
-run_failed = []
-
-def check_results(values, reference):
-    "Check results and print summary."
-
-    missing_refs = []
-    diffs = []
-    num_ok = 0
-    print("")
-    for element, vals in list(values.items()):
-        print("\nResults for %s:" % element)
-
-        if vals is None:
-            print("Error")
-            continue
-
-        # Get reference values
-        if not element in reference:
-            missing_refs.append(element)
-            print("Missing reference")
-            continue
-        refs = reference[element]
-        tol = 1e-12
-
-        e = max(abs(vals - refs))
-        if e < tol:
-            num_ok += 1
-            print("OK: (diff = %g)" % e)
-        else:
-            print("*** (diff = %g)" % e)
-            diffs.append(element)
-
-    if ffc_failed == gcc_failed == run_failed == missing_refs == diffs:
-        print("\nAll %d elements verified OK" % len(reference))
-        return 0
-    if len(ffc_failed) > 0:
-        print("\n*** FFC compilation failed for the following elements:\n" + "\n".join(ffc_failed))
-    if len(gcc_failed) > 0:
-        print("\n*** g++ compilation failed for the following elements:\n" + "\n".join(gcc_failed))
-    if len(run_failed) > 0:
-        print("\n*** Evaluation failed (seg. fault?) for the following elements:\n" + "\n".join(run_failed))
-    if len(missing_refs) > 0:
-        print("\n*** No reference values were found for the following elements:\n" + "\n".join(missing_refs))
-    if len(diffs) > 0:
-        print("\n*** Difference in values were found for the following elements:\n" + "\n".join(diffs))
-    return 1
-
-def compile_element(ufl_element):
-    "Create UFL form file with a single element in it and compile it with FFC"
-    f = open("test.ufl", "w")
-    if isinstance(ufl_element, (FiniteElement, MixedElement)):
-        f.write("element = " + repr(ufl_element))
-    f.close()
-    error, out = get_status_output("ffc test.ufl")
-    if error:
-        ffc_failed.append(repr(ufl_element))
-    return error
-
-def get_element_name(ufl_element):
-    "Extract relevant element name from header file."
-    f = open("test.h")
-    lines = f.readlines()
-    f.close()
-
-    signature = repr(ufl_element)
-    name = None
-    for e, l in enumerate(lines):
-        if "class" in l and "finite_element" in l:
-            name = l
-        if signature in l:
-            break
-    if name is None:
-        raise RuntimeError("No finite element class found")
-    return name.split()[1][:-1]
-
-def compute_values(ufl_element):
-    "Compute values of basis functions for given element."
-
-    # Get relevant element name
-    element_name = get_element_name(ufl_element)
-
-    # Create g++ code
-    options = {"element": element_name}
-    code = evaluate_basis_code % options
-    f = open("evaluate_basis.cpp", "w")
-    f.write(code)
-    f.close()
-
-    # Get UFC flags
-    ufc_cflags = get_status_output("pkg-config --cflags ufc-1")[1].strip()
-
-    # Compile g++ code
-    c = "g++ %s -Wall -Werror -o evaluate_basis evaluate_basis.cpp" % ufc_cflags
-    error, output = get_status_output(c)
-    if error:
-        gcc_failed.append(repr(ufl_element))
-        return None
-
-    # Run compiled code and get values
-    error, output = get_status_output(".%sevaluate_basis" % os.path.sep)
-    if error:
-        run_failed.append(repr(ufl_element))
-        return None
-    values = [float(value) for value in output.split(" ") if len(value) > 0]
-    return numpy.array(values)
-
-def print_refs():
-    if os.path.isfile("reference.pickle"):
-        reference = pickle.load(open("reference.pickle", "r"))
-        for elem, vals in list(reference.items()):
-            print()
-            print(elem)
-            print(vals)
-    else:
-        raise RuntimeError("No references to print")
-
-def main(args):
-    "Call evaluate basis for a range of different elements."
-
-    if "refs" in args:
-        print_refs()
-        return 0
-
-    # Change to temporary folder and copy form files
-    if not os.path.isdir("tmp"):
-        os.mkdir("tmp")
-    os.chdir("tmp")
-
-    values = {}
-    # Evaluate basis for single elements
-    print("\nComputing evaluate_basis for single elements")
-    for element in single_elements:
-        for shape in element["shapes"]:
-            for order in element["orders"]:
-                ufl_element = FiniteElement(element["family"], shape, order)
-                print("Compiling element: ", str(ufl_element))
-                error = compile_element(ufl_element)
-                if error:
-                    continue
-                print("Computing values")
-                values[repr(ufl_element)] = compute_values(ufl_element)
-
-    # Evaluate basis for mixed elements
-    print("\nComputing evaluate_basis for mixed elements")
-    for ufl_element in mixed_elements:
-        print("Compiling element: ", str(ufl_element))
-        error = compile_element(ufl_element)
-        if error:
-            continue
-        print("Computing values")
-        values[repr(ufl_element)] = compute_values(ufl_element)
-
-    # Load or update reference values
-    os.chdir(os.pardir)
-    if os.path.isfile("reference.pickle"):
-        reference = pickle.load(open("reference.pickle", "r"))
-    else:
-        print("Unable to find reference values, storing current values.")
-        pickle.dump(values, open("reference.pickle", "w"))
-        return 0
-
-    # Check results
-    error = check_results(values, reference)
-
-    if not error:
-        # Remove temporary directory
-        shutil.rmtree("tmp")
-
-    return error
-
-if __name__ == "__main__":
-    sys.exit(main(sys.argv[1:]))
diff --git a/test/evaluate_basis_derivatives/cppcode.py b/test/evaluate_basis_derivatives/cppcode.py
deleted file mode 100644
index 4deb380..0000000
--- a/test/evaluate_basis_derivatives/cppcode.py
+++ /dev/null
@@ -1,76 +0,0 @@
-"This module provides simple C++ code for verification of UFC code."
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-18
-# Last changed: 2010-01-18
-
-# Common code for all integral types
-evaluate_basis_derivatives_code = """\
-#include <iostream>
-#include <ufc.h>
-#include "test.h"
-
-int main()
-{
-  // Create element
-  %(element)s element;
-
-  // Size of dof_values
-  // FIXME: This will not work for TensorElements
-  int N = element.value_dimension(0)*%(num_derivatives)d;
-
-  // Create values
-  double* dof_values = new double[N];
-  for (int i = 0; i < N; i++)
-    dof_values[i] = 0.0;
-
-  // Create cell and fill with some arbitrary data
-  double cell_coordinates[8][3] = {{0.90, 0.34, 0.45},
-                                   {0.56, 0.76, 0.83},
-                                   {0.98, 0.78, 0.19},
-                                   {0.12, 0.56, 0.66},
-                                   {0.96, 0.78, 0.63},
-                                   {0.11, 0.35, 0.49},
-                                   {0.51, 0.88, 0.65},
-                                   {0.98, 0.45, 0.01}};
-  ufc::cell cell;
-  cell.coordinates = new double * [8];
-  for (int i = 0; i < 8; i++)
-  {
-    cell.coordinates[i] = new double[3];
-    for (int j = 0; j < 3; j++)
-      cell.coordinates[i][j] = cell_coordinates[i][j];
-  }
-
-  // Random coordinates where we want to evaluate the basis functions
-  double coordinates[3] = {0.32, 0.51, 0.05};
-
-  // Loop element space dimension and call evaluate_basis.
-  for (unsigned int i = 0; i < element.space_dimension(); i++)
-  {
-    element.evaluate_basis_derivatives(i, %(derivative_order)d, dof_values, coordinates, cell);
-    // Print values
-    for (int j = 0; j < N; j++)
-      std::cout << dof_values[j] << " ";
-  }
-  std::cout << std::endl;
-  return 0;
-}
-"""
-
diff --git a/test/evaluate_basis_derivatives/test.py b/test/evaluate_basis_derivatives/test.py
deleted file mode 100644
index ecd80e2..0000000
--- a/test/evaluate_basis_derivatives/test.py
+++ /dev/null
@@ -1,269 +0,0 @@
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-18
-# Last changed: 2010-01-18
-
-from __future__ import print_function
-from cppcode import evaluate_basis_derivatives_code
-from ufl import FiniteElement, MixedElement
-from instant.output import get_status_output
-
-import sys, os, pickle, numpy, shutil
-
-# Elements, supported by FFC and FIAT, and their supported shape and orders
-single_elements = [ {"family": "Lagrange",\
-                      "shapes": ["interval", "triangle", "tetrahedron"],\
-                      "orders": [1, 2, 3, 4]},\
-                    {"family": "Discontinuous Lagrange",\
-                      "shapes": ["interval", "triangle", "tetrahedron"],\
-                      "orders": [0, 1, 2, 3, 4]},\
-                    {"family": "Crouzeix-Raviart",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [1]},\
-                    {"family": "Raviart-Thomas",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [1, 2, 3]},\
-                    {"family": "Discontinuous Raviart-Thomas",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [1, 2, 3]},\
-                    {"family": "Brezzi-Douglas-Marini",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [1, 2, 3]},\
-                    {"family": "Brezzi-Douglas-Fortin-Marini",\
-                      "shapes": ["triangle"],\
-                      "orders": [2]},\
-                    {"family": "Nedelec 1st kind H(curl)",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [1, 2, 3]},\
-                    {"family": "Nedelec 2nd kind H(curl)",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [1, 2, 3]}]
-
-# Create some mixed elements
-dg0_tri = FiniteElement("DG", "triangle", 0)
-dg1_tri = FiniteElement("DG", "triangle", 1)
-cg1_tri = FiniteElement("CG", "triangle", 1)
-cr1_tri = FiniteElement("CR", "triangle", 1)
-rt1_tri = FiniteElement("RT", "triangle", 1)
-drt2_tri = FiniteElement("DRT", "triangle", 2)
-bdm1_tri = FiniteElement("BDM", "triangle", 1)
-ned1_tri = FiniteElement("N1curl", "triangle", 1)
-
-dg0_tet = FiniteElement("DG", "tetrahedron", 0)
-dg1_tet = FiniteElement("DG", "tetrahedron", 1)
-cg1_tet = FiniteElement("CG", "tetrahedron", 1)
-cr1_tet = FiniteElement("CR", "tetrahedron", 1)
-rt1_tet = FiniteElement("RT", "tetrahedron", 1)
-drt2_tet = FiniteElement("DRT", "tetrahedron", 2)
-bdm1_tet = FiniteElement("BDM", "tetrahedron", 1)
-ned1_tet = FiniteElement("N1curl", "tetrahedron", 1)
-
-mixed_elements = [MixedElement([dg0_tri]*4), MixedElement([cg1_tri]*3), MixedElement([bdm1_tri]*2),\
-                  MixedElement([dg1_tri, cg1_tri, cr1_tri, rt1_tri, bdm1_tri, ned1_tri]),\
-                  MixedElement([MixedElement([rt1_tri, cr1_tri]), cg1_tri, ned1_tri]),\
-                  MixedElement([ned1_tri, dg1_tri, MixedElement([rt1_tri, cr1_tri])]),\
-                  MixedElement([drt2_tri, cg1_tri]),\
-                  MixedElement([dg0_tet]*4), MixedElement([cg1_tet]*3), MixedElement([bdm1_tet]*2),\
-                  MixedElement([dg1_tet, cg1_tet, cr1_tet, rt1_tet, bdm1_tet, ned1_tet]),\
-                  MixedElement([MixedElement([rt1_tet, cr1_tet]), cg1_tet, ned1_tet]),\
-                  MixedElement([ned1_tet, dg1_tet, MixedElement([rt1_tet, cr1_tet])]),\
-                  MixedElement([drt2_tet, cg1_tet])]
-
-ffc_failed = []
-gcc_failed = []
-run_failed = []
-
-def check_results(values, reference):
-    "Check results and print summary."
-
-    missing_refs = []
-    diffs = []
-    num_ok = 0
-    print("")
-    for element, deriv_orders in list(values.items()):
-        print("\nResults for %s:" % element)
-
-        for deriv_order, vals in list(deriv_orders.items()):
-            if vals is None:
-                print("Error")
-                continue
-
-            # Get reference values
-            if not element in reference or not deriv_order in reference[element]:
-                missing_refs.append(element + "order %d" % deriv_order)
-                print("Missing reference")
-                continue
-
-            refs = reference[element][deriv_order]
-            tol = 1e-12
-
-            e = max(abs(vals - refs))
-            if e < tol:
-                num_ok += 1
-                print("Derivative order: %d, OK: (diff = %g)" % (deriv_order, e))
-            else:
-                print("*** (Derivative order: %d, diff = %g)" % (deriv_order, e))
-                diffs.append(element + "order %d" % deriv_order)
-
-    if ffc_failed == gcc_failed == run_failed == missing_refs == diffs:
-        print("\nAll %d elements verified OK" % len(reference))
-        return 0
-    if len(ffc_failed) > 0:
-        print("\n*** FFC compilation failed for the following elements:\n" + "\n".join(ffc_failed))
-    if len(gcc_failed) > 0:
-        print("\n*** g++ compilation failed for the following elements:\n" + "\n".join(gcc_failed))
-    if len(run_failed) > 0:
-        print("\n*** Evaluation failed (seg. fault?) for the following elements:\n" + "\n".join(run_failed))
-    if len(missing_refs) > 0:
-        print("\n*** No reference values were found for the following elements:\n" + "\n".join(missing_refs))
-    if len(diffs) > 0:
-        print("\n*** Difference in values were found for the following elements:\n" + "\n".join(diffs))
-    return 1
-
-def compile_element(ufl_element):
-    "Create UFL form file with a single element in it and compile it with FFC"
-    f = open("test.ufl", "w")
-    if isinstance(ufl_element, (FiniteElement, MixedElement)):
-        f.write("element = " + repr(ufl_element))
-    f.close()
-    error, out = get_status_output("ffc test.ufl")
-    if error:
-        ffc_failed.append(repr(ufl_element))
-    return error
-
-def get_element_name(ufl_element):
-    "Extract relevant element name from header file."
-    f = open("test.h")
-    lines = f.readlines()
-    f.close()
-
-    signature = repr(ufl_element)
-    name = None
-    for e, l in enumerate(lines):
-        if "class" in l and "finite_element" in l:
-            name = l
-        if signature in l:
-            break
-    if name is None:
-        raise RuntimeError("No finite element class found")
-    return name.split()[1][:-1]
-
-def compute_values(ufl_element, deriv_order):
-    "Compute values of basis functions for given element."
-
-    # Get relevant element name
-    element_name = get_element_name(ufl_element)
-
-    # Create g++ code
-    cell = ufl_element.cell()
-    num_derivs = cell.topological_dimension()**deriv_order
-    options = {"element": element_name, "derivative_order":deriv_order, "num_derivatives":num_derivs}
-    code = evaluate_basis_derivatives_code % options
-    f = open("evaluate_basis_derivatives.cpp", "w")
-    f.write(code)
-    f.close()
-
-    # Get UFC flags
-    ufc_cflags = get_status_output("pkg-config --cflags ufc-1")[1].strip()
-
-    # Compile g++ code
-    c = "g++ %s -Wall -Werror -o evaluate_basis_derivatives evaluate_basis_derivatives.cpp" % ufc_cflags
-    error, output = get_status_output(c)
-    if error:
-        gcc_failed.append(repr(ufl_element))
-        return None
-
-    # Run compiled code and get values
-    error, output = get_status_output(".%sevaluate_basis_derivatives" % os.path.sep)
-    if error:
-        run_failed.append(repr(ufl_element))
-        return None
-    values = [float(value) for value in output.split(" ") if len(value) > 0]
-    return numpy.array(values)
-
-def print_refs():
-    if os.path.isfile("reference.pickle"):
-        reference = pickle.load(open("reference.pickle", "r"))
-        for elem, derivs in list(reference.items()):
-            for deriv_order, vals in list(derivs.items()):
-                print()
-                print(elem)
-                print(deriv_order)
-                print(vals)
-    else:
-        raise RuntimeError("No references to print")
-
-def main(args):
-    "Call evaluate basis derivatives for a range of different elements."
-
-    if "refs" in args:
-        print_refs()
-        return 0
-
-    # Change to temporary folder and copy form files
-    if not os.path.isdir("tmp"):
-        os.mkdir("tmp")
-    os.chdir("tmp")
-
-    values = {}
-    # Evaluate basis for single elements
-    print("\nComputing evaluate_basis_derivatives for single elements")
-    for element in single_elements:
-        for shape in element["shapes"]:
-            for order in element["orders"]:
-                ufl_element = FiniteElement(element["family"], shape, order)
-                print("Compiling element: ", str(ufl_element))
-                error = compile_element(ufl_element)
-                if error:
-                    continue
-                print("Computing values")
-                values[repr(ufl_element)] = {}
-                for deriv_order in range(1,4):
-                    values[repr(ufl_element)][deriv_order] = compute_values(ufl_element, deriv_order)
-
-    # Evaluate basis for single elements
-    print("\nComputing evaluate_basis_derivatives for mixed elements")
-    for ufl_element in mixed_elements:
-        print("Compiling element: ", str(ufl_element))
-        error = compile_element(ufl_element)
-        if error:
-            continue
-        print("Computing values")
-        values[repr(ufl_element)] = {}
-        for deriv_order in range(1,4):
-            values[repr(ufl_element)][deriv_order] = compute_values(ufl_element, deriv_order)
-
-    # Load or update reference values
-    os.chdir(os.pardir)
-    if os.path.isfile("reference.pickle"):
-        reference = pickle.load(open("reference.pickle", "r"))
-    else:
-        print("Unable to find reference values, storing current values.")
-        pickle.dump(values, open("reference.pickle", "w"))
-        return 0
-
-    # Check results
-    error = check_results(values, reference)
-
-    if not error:
-        # Remove temporary directory
-        shutil.rmtree("tmp")
-    return error
-
-if __name__ == "__main__":
-    sys.exit(main(sys.argv[1:]))
diff --git a/test/regression/elements.py b/test/regression/elements.py
index cc79d5d..c99ce30 100644
--- a/test/regression/elements.py
+++ b/test/regression/elements.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 interval_2D = "Cell('interval', geometric_dimension=2)"
 interval_3D = "Cell('interval', geometric_dimension=3)"
 triangle_3D = "Cell('triangle', geometric_dimension=3)"
@@ -6,7 +7,6 @@ elements = ["FiniteElement('N1curl', triangle, 2)",
             "MixedElement([FiniteElement('Lagrange', triangle, 3), \
                            VectorElement('Lagrange', triangle, 3)['facet']])",
             "VectorElement('R', triangle, 0, 3)",
-
             "VectorElement('DG', %s, 1)" % interval_2D,
             "VectorElement('DG', %s, 1)" % interval_3D,
             "VectorElement('DG', %s, 1)" % triangle_3D,
@@ -25,6 +25,8 @@ elements = ["FiniteElement('N1curl', triangle, 2)",
                            FiniteElement('DG', %s, 1)])" % (triangle_3D,
                                                             triangle_3D,
                                                             triangle_3D,
-                                                            triangle_3D)
-
+                                                            triangle_3D),
+            "FiniteElement('Regge', triangle, 2)",
+            "MixedElement([FiniteElement('HHJ', triangle, 2), \
+                           FiniteElement('CG', triangle, 3)])",
             ]
diff --git a/test/regression/ffc-reference-data-id b/test/regression/ffc-reference-data-id
index e3659f4..891310b 100644
--- a/test/regression/ffc-reference-data-id
+++ b/test/regression/ffc-reference-data-id
@@ -1 +1 @@
-9ded8c94288f3037595f1c44181caafd84ad1cdf
+830e8f7a0bf7daee4acd819ff854d8d608a69194
diff --git a/test/regression/recdiff.py b/test/regression/recdiff.py
old mode 100755
new mode 100644
index e9dd60f..545c4b2
--- a/test/regression/recdiff.py
+++ b/test/regression/recdiff.py
@@ -1,7 +1,8 @@
-#!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+
 class DiffMarkerType:
+
     def __init__(self, name):
         self.name = name
 
@@ -16,14 +17,15 @@ DiffEqual = DiffMarkerType("<equal>")
 
 _default_recdiff_tolerance = 1e-6
 
+
 def recdiff_dict(data1, data2, tolerance=_default_recdiff_tolerance):
     keys1 = set(data1.keys())
     keys2 = set(data2.keys())
     keys = keys1.intersection(keys2)
     diff = {}
-    for k in keys1-keys:
+    for k in keys1 - keys:
         diff[k] = (data1[k], DiffMissing)
-    for k in keys2-keys:
+    for k in keys2 - keys:
         diff[k] = (DiffMissing, data2[k])
     for k in keys:
         d1 = data1[k]
@@ -33,8 +35,9 @@ def recdiff_dict(data1, data2, tolerance=_default_recdiff_tolerance):
             diff[k] = d
     return diff or DiffEqual
 
+
 def recdiff(data1, data2, tolerance=_default_recdiff_tolerance):
-    if isinstance(data1, (float,int)) and isinstance(data2, (float,int)):
+    if isinstance(data1, (float, int)) and isinstance(data2, (float, int)):
         # This approach allows numbers formatted as ints and floats interchangably as long as the values are equal
         delta = abs(data1 - data2)
         avg = (abs(data1) + abs(data2)) / 2.0
@@ -48,53 +51,58 @@ def recdiff(data1, data2, tolerance=_default_recdiff_tolerance):
             same = delta < tolerance
 
         return DiffEqual if same else (data1, data2)
-    elif type(data1) != type(data2):
+    elif not isinstance(data1, type(data2)):
         return (data1, data2)
     elif isinstance(data1, dict):
         return recdiff_dict(data1, data2, tolerance)
     elif isinstance(data1, list):
-        diff = [recdiff(d1, d2, tolerance) for (d1,d2) in zip(data1, data2)]
+        diff = [recdiff(d1, d2, tolerance) for (d1, d2) in zip(data1, data2)]
         return DiffEqual if all(d is DiffEqual for d in diff) else diff
     else:
         return DiffEqual if data1 == data2 else (data1, data2)
 
+
 def _print(line):
     print(line)
 
+
 def print_recdiff(diff, indent=0, printer=_print, prekey=""):
 
     if isinstance(diff, dict):
         for k in sorted(diff.keys()):
-             key = str(k)
-             if prekey: key = ".".join((prekey, key))
-             printer("%s%s: " % ("  "*indent, key))
-             print_recdiff(diff[k], indent+1, printer, key)
+            key = str(k)
+            if prekey:
+                key = ".".join((prekey, key))
+            printer("%s%s: " % ("  " * indent, key))
+            print_recdiff(diff[k], indent + 1, printer, key)
 
     elif isinstance(diff, list):
         # Limiting this to lists of scalar values!
         for i, d in enumerate(diff):
             if isinstance(d, tuple):
                 data1, data2 = d
-                printer("%s%d: %s != %s" % ("  "*indent, i, data1, data2))
+                printer("%s%d: %s != %s" % ("  " * indent, i, data1, data2))
 
     elif isinstance(diff, tuple):
         assert len(diff) == 2
         data1, data2 = diff
         data1 = str(data1)
         data2 = str(data2)
-        if len(data1) + len(data2) + 2*indent + 4 > 70:
-            printer("%s%s" % ("  "*indent, data1))
-            printer("%s!=" % ("  "*indent))
-            printer("%s%s" % ("  "*indent, data2))
+        if len(data1) + len(data2) + 2 * indent + 4 > 70:
+            printer("%s%s" % ("  " * indent, data1))
+            printer("%s!=" % ("  " * indent))
+            printer("%s%s" % ("  " * indent, data2))
         else:
-            printer("%s%s != %s" % ("  "*indent, data1, data2))
+            printer("%s%s != %s" % ("  " * indent, data1, data2))
 
 
 # ---------- Unittest code
 import unittest
-#from recdiff import recdiff, print_recdiff, DiffEqual, DiffMissing
+# from recdiff import recdiff, print_recdiff, DiffEqual, DiffMissing
+
 
 class RecDiffTestCase(unittest.TestCase):
+
     def assertEqual(self, a, b):
         if not (a == b):
             print(a)
@@ -105,45 +113,45 @@ class RecDiffTestCase(unittest.TestCase):
         self.assertEqual(diff, DiffEqual)
 
     def test_recdiff_equal_items(self):
-        self.assertDiffEqual(recdiff(1,1))
-        self.assertDiffEqual(recdiff(0,0))
-        self.assertDiffEqual(recdiff(0,1e-15))
-        self.assertDiffEqual(recdiff(1.1,1.1+1e-7,tolerance=1e-6))
-        self.assertDiffEqual(recdiff(1.1,1.1-1e-7,tolerance=1e-6))
+        self.assertDiffEqual(recdiff(1, 1))
+        self.assertDiffEqual(recdiff(0, 0))
+        self.assertDiffEqual(recdiff(0, 1e-15))
+        self.assertDiffEqual(recdiff(1.1, 1.1 + 1e-7, tolerance=1e-6))
+        self.assertDiffEqual(recdiff(1.1, 1.1 - 1e-7, tolerance=1e-6))
         self.assertDiffEqual(recdiff("foo", "foo"))
 
     def test_recdiff_not_equal_items(self):
-        self.assertEqual(recdiff(1,2), (1,2))
-        self.assertEqual(recdiff(0,0.0001), (0,0.0001))
-        self.assertEqual(recdiff(0,1e-13), (0,1e-13))
-        self.assertEqual(recdiff(1.1,1.2+1e-7,tolerance=1e-6), (1.1,1.2+1e-7))
-        self.assertEqual(recdiff(1.1,1.2-1e-7,tolerance=1e-6), (1.1,1.2-1e-7))
+        self.assertEqual(recdiff(1, 2), (1, 2))
+        self.assertEqual(recdiff(0, 0.0001), (0, 0.0001))
+        self.assertEqual(recdiff(0, 1e-13), (0, 1e-13))
+        self.assertEqual(recdiff(1.1, 1.2 + 1e-7, tolerance=1e-6), (1.1, 1.2 + 1e-7))
+        self.assertEqual(recdiff(1.1, 1.2 - 1e-7, tolerance=1e-6), (1.1, 1.2 - 1e-7))
         self.assertEqual(recdiff("foo", "bar"), ("foo", "bar"))
 
     def test_recdiff_equal_list(self):
-        self.assertDiffEqual(recdiff([1,2], [1,2]))
+        self.assertDiffEqual(recdiff([1, 2], [1, 2]))
 
     def test_recdiff_not_equal_list(self):
-        self.assertEqual(recdiff([1,2], [1,3]), [DiffEqual, (2,3)])
+        self.assertEqual(recdiff([1, 2], [1, 3]), [DiffEqual, (2, 3)])
 
     def test_recdiff_equal_dict(self):
-        self.assertDiffEqual(recdiff({1:2}, {1:2}))
+        self.assertDiffEqual(recdiff({1: 2}, {1: 2}))
 
     def test_recdiff_not_equal_dict(self):
-        self.assertEqual(recdiff({1:2,2:3}, {1:3,3:4}), {1:(2,3), 2:(3,DiffMissing), 3:(DiffMissing,4)})
+        self.assertEqual(recdiff({1: 2, 2: 3}, {1: 3, 3: 4}), {1: (2, 3), 2: (3, DiffMissing), 3: (DiffMissing, 4)})
 
     def test_recdiff_equal_dict_hierarchy(self):
-        self.assertDiffEqual(recdiff({1:{2:{3:4,5:6}}}, {1:{2:{3:4,5:6}}}))
+        self.assertDiffEqual(recdiff({1: {2: {3: 4, 5: 6}}}, {1: {2: {3: 4, 5: 6}}}))
 
     def test_recdiff_not_equal_dict_hierarchy(self):
-        self.assertEqual(recdiff({1:{2:{3:4,5:6}}}, {1:{2:{3:4,5:7}}}), {1:{2:{5:(6,7)}}})
+        self.assertEqual(recdiff({1: {2: {3: 4, 5: 6}}}, {1: {2: {3: 4, 5: 7}}}), {1: {2: {5: (6, 7)}}})
 
     def test_example(self):
         form1 = {
             "num_coefficients": 2,
             "num_arguments": 2,
             "has_default_cell_integral": 1,
-            "cell_integrals": { 0: { "tabulate_tensor_input1": ["data"] } },
+            "cell_integrals": {0: {"tabulate_tensor_input1": ["data"]}},
         }
 
         form2 = eval("""{
@@ -154,14 +162,15 @@ class RecDiffTestCase(unittest.TestCase):
         }""")
 
         actual_diff = recdiff(form1, form2)
-        if 0: print_recdiff(actual_diff)
+        if 0:
+            print_recdiff(actual_diff)
 
         expected_diff = {
             #"num_coefficients": DiffEqual,
-            "num_arguments": (2,DiffMissing),
-            "rank": (DiffMissing,2),
-            "has_default_cell_integral": (1,0),
-            "cell_integrals": { 0: { "tabulate_tensor_input1": [("data", "data2")] } },
+            "num_arguments": (2, DiffMissing),
+            "rank": (DiffMissing, 2),
+            "has_default_cell_integral": (1, 0),
+            "cell_integrals": {0: {"tabulate_tensor_input1": [("data", "data2")]}},
         }
         self.assertEqual(actual_diff, expected_diff)
 
@@ -173,10 +182,11 @@ def main(a, b, tolerance=_default_recdiff_tolerance):
     d = recdiff(a, b, float(tolerance))
     print_recdiff(d)
 
+
 if __name__ == "__main__":
     import sys
     args = sys.argv[1:]
-    if not args: # Hack to be able to use this as a script, TODO: do something nicer
+    if not args:  # Hack to be able to use this as a script, TODO: do something nicer
         print("No arguments, running tests.")
         unittest.main()
     else:
diff --git a/test/regression/test.py b/test/regression/test.py
old mode 100755
new mode 100644
index dfc2f47..68d3182
--- a/test/regression/test.py
+++ b/test/regression/test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 -*-
 """This script compiles and verifies the output for all form files
 found in the 'demo' directory. The verification is performed in two
 steps. First, the generated code is compared with stored references.
@@ -28,7 +28,7 @@ option --bench.
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 #
-# Modified by Martin Alnaes, 2013-2015
+# Modified by Martin Sandve Alnæs, 2013-2016
 # Modified by Johannes Ring, 2013
 # Modified by Kristian B. Oelgaard, 2013
 # Modified by Garth N. Wells, 2014
@@ -36,12 +36,23 @@ option --bench.
 # FIXME: Need to add many more test cases. Quite a few DOLFIN forms
 # failed after the FFC tests passed.
 
-import os, sys, shutil, difflib, sysconfig
+import os
+import sys
+import shutil
+import difflib
+import sysconfig
+import subprocess
+import time
+import logging
 from numpy import array, shape, abs, max, isnan
+import ffc
 from ffc.log import begin, end, info, info_red, info_green, info_blue
+from ffc.log import ffc_logger, ERROR, WARNING
+from ufl.log import ufl_logger
+from ufl.utils.py23 import as_native_str
+from ffc import get_ufc_cxx_flags
+from ffc.backends.ufc import get_include_path as get_ufc_include
 from ufctest import generate_test_code
-from instant.output import get_status_output
-import time
 
 # Parameters TODO: Can make this a cmdline argument, and start
 # crashing programs in debugger automatically?
@@ -51,60 +62,88 @@ demo_directory = "../../../../demo"
 bench_directory = "../../../../bench"
 
 # Global log file
-logfile = None
+logfile = "error.log"
+
+# Remove old log file
+if os.path.isfile(logfile):
+    os.remove(logfile)
+
+class GEFilter(object):
+    """Filter messages whose level is greater than or equal to the given level"""
+    def __init__(self, level):
+        self.__level = level
+
+    def filter(self, record):
+        return record.levelno >= self.__level
+
+class LTFilter(object):
+    """Filter messages that are less than given log level"""
+    def __init__(self, level):
+        self.__level = level
+
+    def filter(self, record):
+        return record.levelno < self.__level
+
+# Filter out error messages from std output
+splitlevel = ERROR
+ffc_logger.get_handler().addFilter(LTFilter(splitlevel))
+ufl_logger.get_handler().addFilter(LTFilter(splitlevel))
+
+# Filter out error messages to log file
+file_handler = logging.FileHandler(logfile)
+file_handler.addFilter(GEFilter(splitlevel))
+ffc_logger.get_logger().addHandler(file_handler)
+ufl_logger.get_logger().addHandler(file_handler)
 
 # Extended quadrature tests (optimisations)
 ext_quad = [
-"-r quadrature -O -feliminate_zeros",
-"-r quadrature -O -fsimplify_expressions",
-"-r quadrature -O -fprecompute_ip_const",
-"-r quadrature -O -fprecompute_basis_const",
-"-r quadrature -O -fprecompute_ip_const -feliminate_zeros",
-"-r quadrature -O -fprecompute_basis_const -feliminate_zeros",
+    "-r quadrature -O -feliminate_zeros",
+    "-r quadrature -O -fsimplify_expressions",
+    "-r quadrature -O -fprecompute_ip_const",
+    "-r quadrature -O -fprecompute_basis_const",
+    "-r quadrature -O -fprecompute_ip_const -feliminate_zeros",
+    "-r quadrature -O -fprecompute_basis_const -feliminate_zeros",
 ]
 
 # Extended uflacs tests (to be extended with optimisation parameters
 # later)
 ext_uflacs = [
-"-r uflacs",
+    "-r uflacs",
 ]
 
 known_uflacs_failures = set([
     "CustomIntegral.ufl",
     "CustomMixedIntegral.ufl",
     "CustomVectorIntegral.ufl",
-    ])
+])
 
 _command_timings = []
+
+
 def run_command(command):
     "Run command and collect errors in log file."
     global _command_timings
-    global logfile
 
     t1 = time.time()
-    (status, output) = get_status_output(command)
-    t2 = time.time()
-    _command_timings.append((command, t2-t1))
-
-    if status == 0:
+    try:
+        output = as_native_str(subprocess.check_output(command, shell=True))
+        t2 = time.time()
+        _command_timings.append((command, t2 - t1))
         verbose = False  # FIXME: Set from --verbose
         if verbose:
             print(output)
         return True
-    else:
-        if logfile is None:
-            logfile = open("../../error.log", "w")
-        logfile.write(output + "\n")
-        print(output)
+    except subprocess.CalledProcessError as e:
+        t2 = time.time()
+        _command_timings.append((command, t2 - t1))
+        log_error(e.output)
+        print(e.output)
         return False
 
 
 def log_error(message):
     "Log error message."
-    global logfile
-    if logfile is None:
-        logfile = open("../../error.log", "w")
-    logfile.write(message + "\n")
+    ffc_logger.get_logger().error(message)
 
 
 def clean_output(output_directory):
@@ -137,21 +176,23 @@ def generate_test_cases(bench, only_forms, skip_forms):
     info_green("Found %d form files" % len(form_files))
 
     # Generate form files for forms
-    info("Generating form files for extra forms: Not implemented")
+    #info("Generating form files for extra forms: Not implemented")
 
     # Generate form files for elements
-    if not bench:
+    if not (bench or only_forms):
         from elements import elements
         info("Generating form files for extra elements (%d elements)"
              % len(elements))
         for (i, element) in enumerate(elements):
-            open("X_Element%d.ufl" % i, "w").write("element = %s" % element)
+            with open("X_Element%d.ufl" % i, "w") as f:
+                f.write("element = %s" % element)
 
     end()
 
 
 def generate_code(args, only_forms, skip_forms):
     "Generate code for all test cases."
+    global _command_timings
 
     # Get a list of all files
     form_files = [f for f in os.listdir(".")
@@ -164,32 +205,44 @@ def generate_code(args, only_forms, skip_forms):
 
     # TODO: Parse additional options from .ufl file? I.e. grep for
     # some sort of tag like '#ffc: <flags>'.
-    special = { "AdaptivePoisson.ufl": "-e", }
+    special = {"AdaptivePoisson.ufl": "-e", }
 
     # Iterate over all files
     for f in form_files:
-        options = special.get(f, "")
+        options = [special.get(f, "")]
+        options.extend(args)
+        options.extend(["-f", "precision=8", "-fconvert_exceptions_to_warnings"])
+        options.append(f)
+        options = list(filter(None, options))
 
-        cmd = ("ffc %s %s -f precision=8 -fconvert_exceptions_to_warnings %s"
-        % (options, " ".join(args), f))
+        cmd = sys.executable + " -m ffc " + " ".join(options)
 
         # Generate code
-        ok = run_command(cmd)
+        t1 = time.time()
+        try:
+            ok = ffc.main(options)
+        except Exception as e:
+            log_error(e)
+            ok = 1
+            raise
+        finally:
+            t2 = time.time()
+            _command_timings.append((cmd, t2 - t1))
 
         # Check status
-        if ok:
+        if ok == 0:
             info_green("%s OK" % f)
         else:
             info_red("%s failed" % f)
 
     end()
 
+
 def validate_code(reference_dir):
     "Validate generated code against references."
 
     # Get a list of all files
-    header_files = [f for f in os.listdir(".") if f.endswith(".h")]
-    header_files.sort()
+    header_files = sorted([f for f in os.listdir(".") if f.endswith(".h")])
 
     begin("Validating generated code (%d header files found)"
           % len(header_files))
@@ -213,14 +266,18 @@ def validate_code(reference_dir):
             info_green("%s OK" % f)
         else:
             info_red("%s differs" % f)
-            diff = "\n".join([line for line in difflib.unified_diff(reference_code.split("\n"), generated_code.split("\n"))])
+            difflines = difflib.unified_diff(
+                reference_code.split("\n"),
+                generated_code.split("\n"))
+            diff = "\n".join(difflines)
             s = ("Code differs for %s, diff follows (reference first, generated second)"
                  % os.path.join(*reference_file.split(os.path.sep)[-3:]))
-            log_error("\n" + s + "\n" + len(s)*"-")
+            log_error("\n" + s + "\n" + len(s) * "-")
             log_error(diff)
 
     end()
 
+
 def find_boost_cflags():
     # Get Boost dir (code copied from ufc/src/utils/python/ufc_utils/build.py)
     # Set a default directory for the boost installation
@@ -266,18 +323,18 @@ set the environment variable BOOST_DIR.
 Forms using bessel functions will fail to build.
 """)
     return boost_cflags, boost_linkflags
-    
+
+
 def build_programs(bench, permissive):
     "Build test programs for all test cases."
 
     # Get a list of all files
-    header_files = [f for f in os.listdir(".") if f.endswith(".h")]
-    header_files.sort()
+    header_files = sorted([f for f in os.listdir(".") if f.endswith(".h")])
 
     begin("Building test programs (%d header files found)" % len(header_files))
 
     # Get UFC flags
-    ufc_cflags = get_status_output("pkg-config --cflags ufc-1")[1].strip()
+    ufc_cflags = "-I" + get_ufc_include() + " " + " ".join(get_ufc_cxx_flags())
     boost_cflags, boost_linkflags = find_boost_cflags()
     ufc_cflags += boost_cflags
     linker_options = boost_linkflags
@@ -289,9 +346,9 @@ def build_programs(bench, permissive):
     if bench:
         info("Benchmarking activated")
         # Takes too long to build with -O2
-        #compiler_options += " -O2"
+        # compiler_options += " -O2"
         compiler_options += " -O3"
-        #compiler_options += " -O3 -fno-math-errno -march=native"
+        # compiler_options += " -O3 -fno-math-errno -march=native"
     if debug:
         info("Debugging activated")
         compiler_options += " -g -O0"
@@ -325,8 +382,7 @@ def run_programs(bench):
     bench = 'b' if bench else ''
 
     # Get a list of all files
-    test_programs = [f for f in os.listdir(".") if f.endswith(".bin")]
-    test_programs.sort()
+    test_programs = sorted([f for f in os.listdir(".") if f.endswith(".bin")])
 
     begin("Running generated programs (%d programs found)" % len(test_programs))
 
@@ -407,21 +463,21 @@ def main(args):
     "Run all regression tests."
 
     # Check command-line arguments TODO: Use argparse
-    use_auto       = "--skip-auto" not in args
-    use_uflacs     = "--skip-uflacs" not in args
-    use_quad       = "--skip-quad" not in args
-    use_ext_quad   = "--ext-quad" in args
+    use_auto = "--skip-auto" not in args
+    use_uflacs = "--skip-uflacs" not in args
+    use_quad = "--skip-quad" not in args
+    use_ext_quad = "--ext-quad" in args
 
-    skip_download  = "--skip-download" in args
-    skip_run       = "--skip-run" in args
+    skip_download = "--skip-download" in args
+    skip_run = "--skip-run" in args
     skip_code_diff = "--skip-code-diff" in args
-    skip_validate  = "--skip-validate" in args
-    bench          = "--bench" in args
+    skip_validate = "--skip-validate" in args
+    bench = "--bench" in args
 
-    permissive     = "--permissive" in args
-    tolerant       = "--tolerant" in args
-    print_timing   = "--print-timing" in args
-    show_help      = "--help" in args
+    permissive = "--permissive" in args
+    tolerant = "--tolerant" in args
+    print_timing = "--print-timing" in args
+    show_help = "--help" in args
 
     flags = (
         "--skip-auto",
@@ -437,13 +493,13 @@ def main(args):
         "--tolerant",
         "--print-timing",
         "--help",
-        )
-    args = [arg for arg in args if not arg in flags]
+    )
+    args = [arg for arg in args if arg not in flags]
 
     if show_help:
         info("Valid arguments:\n" + "\n".join(flags))
         return 0
-    
+
     if bench or not skip_validate:
         skip_run = False
     if bench:
@@ -458,12 +514,14 @@ def main(args):
     if skip_download:
         info_blue("Skipping reference data download")
     else:
-        failure, output = get_status_output("./scripts/download")
-        print(output)
-        if failure:
-            info_red("Download reference data failed")
-        else:
+        try:
+            cmd = "./scripts/download"
+            output = as_native_str(subprocess.check_output(cmd, shell=True))
+            print(output)
             info_green("Download reference data ok")
+        except subprocess.CalledProcessError as e:
+            print(e.output)
+            info_red("Download reference data failed")
 
     if tolerant:
         global output_tolerance
@@ -485,8 +543,6 @@ def main(args):
     if use_ext_quad:
         test_cases += ext_quad
 
-    _permissive = permissive
-
     test_case_timings = {}
 
     for argument in test_cases:
@@ -506,17 +562,11 @@ def main(args):
         else:
             skip_forms = set()
 
-        # uflacs needs permissive, a few variables are generated but not used
-        if "uflacs" in argument:
-            permissive = True
-        else:
-            permissive = _permissive
-
         # Generate test cases
         generate_test_cases(bench, only_forms, skip_forms)
 
         # Generate code
-        generate_code(args + [argument], only_forms, skip_forms)
+        generate_code(args + argument.split(), only_forms, skip_forms)
 
         # Location of reference directories
         reference_directory = os.path.abspath("../../ffc-reference-data/")
@@ -553,22 +603,25 @@ def main(args):
         end()
         test_case_timings[argument] = time.time() - test_case_timings[argument]
 
+    # Go back up
+    os.chdir(os.path.pardir)
+
     # Print results
     if print_timing:
         info_green("Timing of all commands executed:")
         timings = '\n'.join("%10.2e s  %s" % (t, name) for (name, t)
                             in _command_timings)
-        info(timings)
+        info_blue(timings)
 
     for argument in test_cases:
-        info("Total time for %s: %d s" % (argument, test_case_timings[argument]))
+        info_blue("Total time for %s: %.1f s" % (argument, test_case_timings[argument]))
 
-    if logfile is None:
+    if not os.path.isfile(logfile) or os.stat(logfile).st_size == 0:
         info_green("Regression tests OK")
         return 0
     else:
         info_red("Regression tests failed")
-        info("Error messages stored in error.log")
+        info_red("Error messages stored in %s" % logfile)
         return 1
 
 
diff --git a/test/regression/ufctest.py b/test/regression/ufctest.py
index 1ef4427..34dee90 100644
--- a/test/regression/ufctest.py
+++ b/test/regression/ufctest.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright (C) 2010-2013 Anders Logg, Kristian B. Oelgaard and Marie E. Rognes
 #
 # This file is part of FFC.
@@ -15,7 +16,7 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 #
-# Modified by Martin Alnaes, 2013-2015
+# Modified by Martin Sandve Alnæs, 2013-2016
 
 _test_code = """\
 #include "../../ufctest.h"
@@ -37,6 +38,7 @@ int main(int argc, char * argv[])
 }}
 """
 
+
 def generate_test_code(header_file):
     "Generate test code for given header file."
 
diff --git a/test/test.py b/test/test.py
index 775e905..726b9c2 100644
--- a/test/test.py
+++ b/test/test.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 """Run all tests, including unit tests and regression tests"""
 
 # Copyright (C) 2007 Anders Logg
@@ -36,8 +37,8 @@ for test in tests:
     print("Running tests: %s" % test)
     print("----------------------------------------------------------------------")
     os.chdir(os.path.join(pwd, test))
-    #failure = os.system("python test.py | tee -a %s" % logfile)
-    failure = os.system("python test.py")
+    #failure = os.system(sys.executable + " test.py | tee -a %s" % logfile)
+    failure = os.system(sys.executable + " test.py")
     if failure:
         print("Test FAILED")
         failed.append(test)
diff --git a/test/uflacs/README b/test/uflacs/README
deleted file mode 100644
index 4835e91..0000000
--- a/test/uflacs/README
+++ /dev/null
@@ -1,21 +0,0 @@
-Test structure:
-
-  unit/          - unit tests of internal components of uflacs
-  crosslanguage/ - unit tests which produce C++ tests of generated code which is then executed by gtest
-  system/        - tests that use external software with uflacs, in particular integration with dolfin
-
-Build gtest:
-
-  cd ../libs/gtest-1.7.0/   # This is a copy of the Google Test Framework.
-  mkdir lib
-  cd lib
-  cmake ..
-  make
-
-Running examples:
-
-  cd test/
-  py.test
-  py.test unit
-  py.test system
-  py.test crosslanguage
diff --git a/test/uflacs/README.md b/test/uflacs/README.md
new file mode 100644
index 0000000..1a8b062
--- /dev/null
+++ b/test/uflacs/README.md
@@ -0,0 +1,24 @@
+# Test structure for UFLACS
+
+- unit/
+
+    unit tests of internal components of UFLACS
+
+- crosslanguage/
+
+    unit tests which produce C++ tests of generated code which is then
+    executed by Google Test
+
+- system/
+
+    tests that use external software with uflacs, in particular
+    integration with DOLFIN
+
+
+## Running examples
+
+    cd test/
+    py.test
+    py.test unit
+    py.test system
+    py.test crosslanguage
diff --git a/test/uflacs/crosslanguage/Makefile b/test/uflacs/crosslanguage/Makefile
index fd89d6d..a5d7830 100644
--- a/test/uflacs/crosslanguage/Makefile
+++ b/test/uflacs/crosslanguage/Makefile
@@ -1,12 +1,12 @@
 
 #
-# Build C++ tests generated by python tests, configured to get access to gtest and ufc.
+# Build C++ tests generated by python tests, configured to get access
+# to gtest and ufc.
 #
 
 # These are the commands we assume
-CXX=g++ # TODO: Allow overriding
-RM=rm
-PYTEST=py.test
+CXX?=c++
+RM?=rm
 
 # Local filenames
 TESTBINARY=run_gtest
@@ -15,17 +15,16 @@ TESTSUPPORTDIR=cppsupport
 TESTMAINSRC=${TESTSRCDIR}/main.cpp
 
 # Configure ufc
-UFCINCDIR=`pkg-config ufc-1 --cflags`
-#UFCINCDIR=-I${UFC_INCLUDE_DIR}
+UFC_INCLUDE_DIR?=../../../ffc/backends/ufc
+UFCINCDIR=-I${UFC_INCLUDE_DIR}
 
 # Configure gtest
 GTEST_DIR?=../../../libs/gtest-1.7.0
 GTESTINCDIR=-I${GTEST_DIR}/include
 GTESTLIBS=-L${GTEST_DIR}/lib -lgtest -lpthread
 
-# Flags set for easy debugging # TODO: Allow overriding
-CXXFLAGS=-g -O0
-
+# Flags set for easy debugging
+CXXFLAGS?=-g -O0 -std=c++11 -Wall
 
 .PHONY: clean
 
@@ -36,18 +35,14 @@ default: ${TESTBINARY}
 run: ${TESTBINARY}
 	./${TESTBINARY}
 
-# Also depends on ${TESTSRCDIR}/*.h but this rule seems to be sufficient
-# because the test binary is touched every time the python tests run
+# Also depends on ${TESTSRCDIR}/*.h but this rule seems to be
+# sufficient because the test binary is touched every time the python
+# tests run
 ${TESTBINARY}: ${TESTMAINSRC} ${TESTSUPPORTDIR}/*.h
 	${CXX} ${CXXFLAGS} -o ${TESTBINARY} \
 		${TESTMAINSRC} -I${TESTSRCDIR} -I${TESTSUPPORTDIR} \
 		${UFCINCDIR} ${GTESTINCDIR} ${GTESTLIBS}
 
-# Enable this to run py.test to regenerate main.cpp each time python tests are changed.
-# Currently disabled because I think py.test should call make and not the other way around.
-#${TESTMAINSRC}: test_*.py
-#	py.test
-
 clean:
 	${RM} -f `find -name \*.pyc`
 	${RM} -f `find -name \*.py~`
diff --git a/test/uflacs/crosslanguage/conftest.py b/test/uflacs/crosslanguage/conftest.py
index b6fc5b5..bcc6f95 100644
--- a/test/uflacs/crosslanguage/conftest.py
+++ b/test/uflacs/crosslanguage/conftest.py
@@ -1,13 +1,17 @@
+# -*- coding: utf-8 -*-
+"""This file contains code for setting up C++ unit tests with gtest
+from within Python tests using py.test.
+
 """
-This file contains code for setting up C++ unit tests with gtest from within Python tests using py.test.
-"""
+from __future__ import print_function
 
 import pytest
 import os
 import inspect
 from collections import defaultdict
+import subprocess
 
-from instant.output import get_status_output
+from ffc.backends.ufc import get_include_path
 
 
 # TODO: For a generic framework, this needs to change somewhat:
@@ -18,7 +22,6 @@ _supportcode = '''
 //#include "debugging.h"
 '''
 
-
 _gtest_runner_template = """
 #include <gtest/gtest.h>
 
@@ -45,6 +48,7 @@ TEST ({suite}, {case})
 }}
 """
 
+
 def find_parent_test_function():
     """Return (filename, lineno, functionname) of the
     first function named "test_*" found on the stack."""
@@ -62,19 +66,18 @@ def find_parent_test_function():
     filename = info[0]
     lineno = info[1]
     function = info[2]
-    #context = info[3]
-    #contextindex = info[4]
 
     assert len(info) == 5
     assert function.startswith("test_")
 
     return filename, lineno, function
 
+
 class GTestContext:
     _all = []
 
     def __init__(self, config):
-        self._basedir = os.path.split(__file__)[0]
+        self._basedir = os.path.dirname(__file__)
         self._gendir = os.path.join(self._basedir, "generated")
         self._binary_filename = os.path.join(self._basedir, "run_gtest")
         self._gtest_log = os.path.join(self._basedir, "gtest.log")
@@ -82,6 +85,7 @@ class GTestContext:
         self._dirlist = []
         GTestContext._all.append(self)
 
+
     def info(self, msg):
         pre = "In gtest generation:"
         if '\n' in msg:
@@ -90,18 +94,13 @@ class GTestContext:
         else:
             print(pre, msg)
 
-    def pushdir(self):
-        self._dirlist.append(os.path.abspath(os.curdir))
-        os.chdir(self._basedir)
-
-    def popdir(self):
-        os.chdir(self._dirlist.pop())
 
     def add(self, body):
         # Look through stack to find frame with test_... function name
         filename, lineno, function = find_parent_test_function()
 
-        # Using function testcase class name as suite and function name as case
+        # Using function testcase class name as suite and function
+        # name as case
         basename = os.path.basename(filename)
         suite = basename.replace(".py", "")
         case = function
@@ -121,6 +120,7 @@ class GTestContext:
                                       body=body)
         self._code[hfilename].append(code)
 
+
     def write(self):
         # Make sure we have the directory for generated code
         if not os.path.isdir(self._gendir):
@@ -141,43 +141,98 @@ class GTestContext:
         testincludes = '\n'.join('#include "{0}"'.format(h) for h in self._test_header_names)
 
         # Write test runner code to file
-        runner_code = _gtest_runner_template.format(supportcode=_supportcode, testincludes=testincludes)
+        runner_code = _gtest_runner_template.format(supportcode=_supportcode,
+                                                    testincludes=testincludes)
 
         self._main_filename = os.path.join(self._gendir, "main.cpp")
         with open(self._main_filename, "w") as f:
             f.write(runner_code)
 
+
+    def build_gtest(self):
+        "Build gtest library"
+
+        # Source and build directories
+        gtest_dir = "../../../libs/gtest-1.7.0"
+        gtest_dir = os.path.abspath(os.path.join(self._basedir, gtest_dir))
+        build_dir = os.path.join(gtest_dir, "lib")
+
+        # Check if GTest source can be found
+        if os.path.isdir(gtest_dir):
+            # Make build directory, if required
+            if not os.path.isdir(build_dir):
+                os.mkdir(build_dir)
+
+            # Configure gtest using cmake
+            error = subprocess.call("cmake ..", cwd=build_dir, shell=True)
+            if error:
+                raise RuntimeError("Could not call CMake successfully to build gtest")
+
+            # Build gtest library
+            error = subprocess.call("make", cwd=build_dir, shell=True)
+            if error:
+                raise RuntimeError("Could not call make successfully to build gtest")
+
+        else:
+            raise RuntimeError("Cannot find gtest source")
+
+
     def build(self):
-        s, o = get_status_output("make")
-        if s:
+        # Prepare command
+        UFC_INCLUDE_DIR = get_include_path()
+        if not os.path.exists(os.path.join(UFC_INCLUDE_DIR, "ufc.h")):
+            # NOTE(review): removed leftover debug hook (import IPython; IPython.embed())
+            raise RuntimeError("Cannot find ufc.h in provided include path: %s" % (UFC_INCLUDE_DIR,))
+        UFC_CXX_FLAGS = " ".join(["-g", "-O0", "-std=c++11", "-Wall"])
+        cmd = ['make', 'UFC_INCLUDE_DIR="%s"' % UFC_INCLUDE_DIR, 'CXXFLAGS="%s"' % UFC_CXX_FLAGS]
+        self.info("Running command: " + " ".join(cmd))
+
+        # Execute
+        try:
+            out = subprocess.check_output(" ".join(cmd),
+                                          cwd=self._basedir, shell=True,
+                                          universal_newlines=True)
+            self.info(out)
+            self.info("Building ok.")
+        except subprocess.CalledProcessError as e:
             self.info("Building '{0}' FAILED (code {1}, headers: {2})".format(self._binary_filename,
-                                                                              s, self._test_header_names))
+                                                                              e.returncode, self._test_header_names))
             self.info("Build output:")
-            self.info(o)
-        else:
-            self.info("Building ok.")
+            self.info(e.output)
+            pytest.fail()
+
 
     def run(self):
-        s, o = get_status_output(self._binary_filename)
-        if s:
-            self.info("Gtest running FAILED with code {0}!".format(s))
-        else:
+        try:
+            out = subprocess.check_output(self._binary_filename,
+                                          cwd=self._basedir, shell=True,
+                                          universal_newlines=True)
             self.info("Gtest running ok!")
-        with open(self._gtest_log, "w") as f:
-            f.write(o)
-        self.info(o)
+            with open(self._gtest_log, "w") as f:
+                f.write(out)
+            self.info(out)
+        except subprocess.CalledProcessError as e:
+            self.info("Gtest running FAILED with code {0}!".format(e.returncode))
+            with open(self._gtest_log, "w") as f:
+                f.write(e.output)
+            self.info(e.output)
+            pytest.fail()
+
 
     def finalize(self):
-        # Write generated test code to files, build and run, all from within a stable basedir
-        self.pushdir()
-        try:
-            self.write()
-            self.build()
-            self.run()
-        finally:
-            self.popdir()
+        # Build gtest library itself
+        self.build_gtest()
+
+        # Write collected test code to files
+        self.write()
+
+        # Build test code
+        self.build()
+
+        # Run compiled tests
+        self.run()
+
 
-#@pytest.fixture("module")
 @pytest.fixture("session")
 def gtest():
     "create initial files for gtest generation"
@@ -185,6 +240,7 @@ def gtest():
     gtc = GTestContext(config)
     return gtc
 
+
 def gtest_sessionfinish(session):
     session.trace("finalizing gtest contexts")
     while GTestContext._all:
@@ -192,5 +248,6 @@ def gtest_sessionfinish(session):
         gtc.finalize()
     session.trace("done finalizing gtest contexts")
 
+
 def pytest_sessionfinish(session):
     gtest_sessionfinish(session)
diff --git a/test/uflacs/crosslanguage/cppsupport/mock_cells.h b/test/uflacs/crosslanguage/cppsupport/mock_cells.h
index aec8ab9..80f398d 100644
--- a/test/uflacs/crosslanguage/cppsupport/mock_cells.h
+++ b/test/uflacs/crosslanguage/cppsupport/mock_cells.h
@@ -12,7 +12,8 @@
 //{
     struct mock_cell
     {
-        // These coordinates are all that generated code should care about, the rest is to support the tests
+        // These coordinates are all that generated code should care
+        // about, the rest is to support the tests
         double coordinate_dofs[8*3]; // Worst case hexahedron: 8 vertices in 3d
 
         // Dimensions needed for generic cell transformations
@@ -27,13 +28,17 @@
         }
 
         // Utility initialization function
-        void init_dimensions(size_t geometric_dimension, size_t topological_dimension, size_t num_vertices)
+        void init_dimensions(size_t geometric_dimension,
+                             size_t topological_dimension, size_t num_vertices)
         {
             this->geometric_dimension = geometric_dimension;
             this->topological_dimension = topological_dimension;
             this->num_vertices = num_vertices;
-            for (int i=0; i<sizeof(coordinate_dofs)/sizeof(coordinate_dofs[0]); ++i)
-                coordinate_dofs[i] = 0.0;
+            int n = int(sizeof(coordinate_dofs) / sizeof(coordinate_dofs[0]));
+            for (int i=0; i<n; ++i)
+            {
+              coordinate_dofs[i] = 0.0;
+            }
         }
 
         void fill_reference_interval(size_t geometric_dimension)
@@ -144,21 +149,19 @@
             for (size_t i=0; i<num_vertices; ++i)
             {
                 for (size_t j=0; j<geometric_dimension; ++j)
-                {
                     coordinate_dofs[i*geometric_dimension + j] *= factor;
-                }
             }
         }
 
-        // Scale cell coordinates differently in each geometric dimension
+        // Scale cell coordinates differently in each geometric
+        // dimension
         void scale(const double * factors)
         {
             for (size_t i=0; i<num_vertices; ++i)
             {
                 for (size_t j=0; j<geometric_dimension; ++j)
-                {
                     coordinate_dofs[i*geometric_dimension + j] *= factors[j];
-                }
+
             }
         }
 
@@ -169,26 +172,27 @@
             double t[3] = { x, 0.0, 0.0 };
             translate(t);
         }
+
         void translate(double x, double y)
         {
             assert(geometric_dimension == 2);
             double t[3] = { x, y, 0.0 };
             translate(t);
         }
+
         void translate(double x, double y, double z)
         {
             assert(geometric_dimension == 3);
             double t[3] = { x, y, z };
             translate(t);
         }
+
         void translate(const double * x)
         {
             for (size_t i=0; i<num_vertices; ++i)
             {
                 for (size_t j=0; j<geometric_dimension; ++j)
-                {
                     coordinate_dofs[i*geometric_dimension + j] += x[j];
-                }
             }
         }
 
@@ -207,9 +211,7 @@
                     }
                 }
                 for (size_t j=0; j<geometric_dimension; ++j)
-                {
                     coordinate_dofs[i*geometric_dimension + j] = result[j];
-                }
             }
         }
     };
diff --git a/test/uflacs/crosslanguage/test_element_combinations.py b/test/uflacs/crosslanguage/test_element_combinations.py
index 09e83bc..f0b7356 100755
--- a/test/uflacs/crosslanguage/test_element_combinations.py
+++ b/test/uflacs/crosslanguage/test_element_combinations.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env py.test
+# -*- coding: utf-8 -*-
 
 class MockBasicElementCodeGenerator:
     pass
diff --git a/test/uflacs/crosslanguage/test_gtest_framework.py b/test/uflacs/crosslanguage/test_gtest_framework.py
index 50afe07..ad5881c 100755
--- a/test/uflacs/crosslanguage/test_gtest_framework.py
+++ b/test/uflacs/crosslanguage/test_gtest_framework.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env py.test
+# -*- coding: utf-8 -*-
 
 def test_example_showing_how_to_test_generated_code_with_gtest(gtest):
     "This is an example test explaining the py.test/gtest integration framework."
diff --git a/test/uflacs/crosslanguage/test_mock_cells.py b/test/uflacs/crosslanguage/test_mock_cells.py
index a67b984..ec3b49f 100755
--- a/test/uflacs/crosslanguage/test_mock_cells.py
+++ b/test/uflacs/crosslanguage/test_mock_cells.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env py.test
+# -*- coding: utf-8 -*-
 
 """
 These tests check that the mock implementations
diff --git a/test/uflacs/crosslanguage/test_ufc_integral_types.py b/test/uflacs/crosslanguage/test_ufc_integral_types.py
index 09d02b5..7c556e8 100755
--- a/test/uflacs/crosslanguage/test_ufc_integral_types.py
+++ b/test/uflacs/crosslanguage/test_ufc_integral_types.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env py.test
+# -*- coding: utf-8 -*-
 
 """
 Tests generating code for the different ufc integral types,
@@ -7,7 +8,7 @@ reflected in the code.
 """
 
 from ufl import *
-#from uflacs.backends.? import ?
+#from ffc.uflacs.backends.? import ?
 
 def test_cell_integral_body(gtest):
     """
diff --git a/test/uflacs/crosslanguage/xtest_tabulate_tensor_body.py b/test/uflacs/crosslanguage/xtest_tabulate_tensor_body.py
index 79c7a16..4116c72 100755
--- a/test/uflacs/crosslanguage/xtest_tabulate_tensor_body.py
+++ b/test/uflacs/crosslanguage/xtest_tabulate_tensor_body.py
@@ -1,11 +1,12 @@
 #!/usr/bin/env py.test
+# -*- coding: utf-8 -*-
 
 from six.moves import xrange as range
 import ufl
 from ufl import *
 #from ufl import product
 
-from uflacs.backends.toy.toy_compiler import compile_expression
+from ffc.uflacs.backends.toy.toy_compiler import compile_expression
 
 def compile_expression0(expr):
     code = ""
@@ -222,7 +223,7 @@ def test_tabulate_tensor_interval_facet(gtest):
 
     gtest.add(pre + code + post)
 
-def test_tabulate_tensor_interval_facet(gtest):
+def test_tabulate_tensor_interval_interior_facet(gtest):
     """Test code generation of body of the ufc function:
 
     void tabulate_tensor(
diff --git a/test/uflacs/crosslanguage/xtest_ufl_expression_compilation.py b/test/uflacs/crosslanguage/xtest_ufl_expression_compilation.py
index 783d9dd..f87a13b 100755
--- a/test/uflacs/crosslanguage/xtest_ufl_expression_compilation.py
+++ b/test/uflacs/crosslanguage/xtest_ufl_expression_compilation.py
@@ -1,9 +1,10 @@
 #!/usr/bin/env py.test
+# -*- coding: utf-8 -*-
 
 import ufl
 from ufl import product
 
-from uflacs.backends.toy.toy_compiler import compile_expression
+from ffc.uflacs.backends.toy.toy_compiler import compile_expression
 
 """
 Unit tests of generated geometry snippet code.
diff --git a/test/uflacs/system/xtest_dolfin_expression_compilation.py b/test/uflacs/system/xtest_dolfin_expression_compilation.py
index 550f741..3453a03 100644
--- a/test/uflacs/system/xtest_dolfin_expression_compilation.py
+++ b/test/uflacs/system/xtest_dolfin_expression_compilation.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 -*-
 """
 Tests of dolfin Expression formatting.
 """
@@ -12,12 +12,9 @@ import numpy
 
 import hashlib
 
-#import uflacs, uflacs.codeutils
-#from uflacs.codeutils.expr_formatter import ExprFormatter
-from uflacs.params import default_parameters
-
-from uflacs.backends.dolfin.expression import format_dolfin_expression
-from uflacs.backends.dolfin.dolfin_compiler import compile_dolfin_expression_body
+from ffc.uflacs.params import default_parameters
+from ffc.uflacs.backends.dolfin.expression import format_dolfin_expression
+from ffc.uflacs.backends.dolfin.dolfin_compiler import compile_dolfin_expression_body
 
 
 # FIXME: Make tests with dolfin optional
@@ -256,7 +253,7 @@ def check_dolfin_expression_compilation(uexpr, expected_lines, expected_values,
     dexpr = dolfin.Expression(cppcode=code)
 
     # Connect compiled dolfin::Expression object with dolfin::Function instances
-    for name, value in iteritems(members):
+    for name, value in members.items():
         setattr(dexpr, name, value)
 
     # Evaluate and assert compiled value!
diff --git a/test/uflacs/unit/test_cnodes.py b/test/uflacs/unit/test_cnodes.py
index 689d6d6..33943a9 100644
--- a/test/uflacs/unit/test_cnodes.py
+++ b/test/uflacs/unit/test_cnodes.py
@@ -1,11 +1,11 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 -*-
 """
 Tests of CNode formatting.
 """
 
 from __future__ import print_function
 
-from uflacs.language.cnodes import *
+from ffc.uflacs.language.cnodes import *
 
 
 def test_cnode_expression_precedence():
@@ -24,6 +24,7 @@ def test_cnode_expression_precedence():
 
     assert str(Mul(Add(1, 2), Add(3, 4))) == "(1 + 2) * (3 + 4)"
 
+
 def test_cnode_expressions():
     A = Symbol("A")
     B = Symbol("B")
@@ -32,28 +33,28 @@ def test_cnode_expressions():
     assert str(LiteralInt(123)) == "123"
     assert str(LiteralFloat(0.0)) == "0.0"
     assert str(LiteralFloat(1.0)) == "1.0"
-    assert str(LiteralFloat(12.3)) == "12.3" #1.23e+01"
+    assert str(LiteralFloat(12.3)) == "12.3"  # 1.23e+01"
 
     # Variables
     # TODO: VariableAccess
 
     # Arrays
     assert str(ArrayAccess("A", (1,))) == "A[1]"
-    assert str(ArrayAccess(A, (1,2))) == "A[1][2]"
-    assert str(A[1,2,3]) == "A[1][2][3]"
+    assert str(ArrayAccess(A, (1, 2))) == "A[1][2]"
+    assert str(A[1, 2, 3]) == "A[1][2][3]"
     assert str(ArrayAccess(ArrayDecl("double", "A", (2,)), 1)) == "A[1]"
-    assert str(ArrayDecl("double", A, (2,3))[1,2]) == "A[1][2]"
+    assert str(ArrayDecl("double", A, (2, 3))[1, 2]) == "A[1][2]"
 
     # FlattenedArray
     n = Symbol("n")
     decl = ArrayDecl("double", A, (4,))
     assert str(FlattenedArray(decl, strides=(2,), offset=3)[0]) == "A[3 + 2 * 0]"
     assert str(FlattenedArray(decl, strides=(2,))[0]) == "A[2 * 0]"
-    decl = ArrayDecl("double", A, (2,3,4))
-    flattened = FlattenedArray(decl, strides=(7,8*n,n-1))
-    assert str(flattened[0,n,n*7]) == "A[7 * 0 + 8 * n * n + (n - 1) * (n * 7)]"
-    assert str(flattened[0,n][n*7]) == "A[7 * 0 + 8 * n * n + (n - 1) * (n * 7)]"
-    assert str(flattened[0][n][n*7]) == "A[7 * 0 + 8 * n * n + (n - 1) * (n * 7)]"
+    decl = ArrayDecl("double", A, (2, 3, 4))
+    flattened = FlattenedArray(decl, strides=(7, 8 * n, n - 1))
+    assert str(flattened[0, n, n * 7]) == "A[7 * 0 + 8 * n * n + (n - 1) * (n * 7)]"
+    assert str(flattened[0, n][n * 7]) == "A[7 * 0 + 8 * n * n + (n - 1) * (n * 7)]"
+    assert str(flattened[0][n][n * 7]) == "A[7 * 0 + 8 * n * n + (n - 1) * (n * 7)]"
 
     # Unary operators
     assert str(Pos(1)) == "+1"
@@ -96,13 +97,14 @@ def test_cnode_expressions():
     assert str(Conditional(1, 2, 3)) == "1 ? 2 : 3"
 
     # N-ary "operators" simplify code generation
-    assert str(Sum([1,2,3,4])) == "1 + 2 + 3 + 4"
-    assert str(Product([1,2,3,4])) == "1 * 2 * 3 * 4"
-    assert str(Product([Sum([1,2,3]), Sub(4,5)])) == "(1 + 2 + 3) * (4 - 5)"
+    assert str(Sum([1, 2, 3, 4])) == "1 + 2 + 3 + 4"
+    assert str(Product([1, 2, 3, 4])) == "1 * 2 * 3 * 4"
+    assert str(Product([Sum([1, 2, 3]), Sub(4, 5)])) == "(1 + 2 + 3) * (4 - 5)"
 
     # Custom expression
     assert str(Mul(VerbatimExpr("1 + std::foo(3)"), Add(5, 6))) == "(1 + std::foo(3)) * (5 + 6)"
 
+
 def test_cnode_assignments():
     x = Symbol("x")
     y = Symbol("y")
@@ -120,45 +122,51 @@ def test_cnode_assignments():
     assert str(AssignBitXor(x, y)) == "x ^= y"
     assert str(AssignBitOr(x, y)) == "x |= y"
 
+
 def test_cnode_variable_declarations():
     assert str(VariableDecl("foo", "x")) == "foo x;"
     assert str(VariableDecl("int", "n", 1)) == "int n = 1;"
     assert str(VariableDecl("double", "x", Mul("y", 3.0))) == "double x = y * 3.0;"
 
+
 def test_1d_initializer_list():
-    fmt = lambda v,s: format_indented_lines(build_initializer_lists(v, s, 0, str))
+    fmt = lambda v, s: format_indented_lines(build_initializer_lists(v, s, 0, str))
     assert fmt([], (0,)) == "{  }"
     assert fmt([1], (1,)) == "{ 1 }"
-    assert fmt([1,2], (2,)) == "{ 1, 2 }"
-    assert fmt([1,2,3], (3,)) == "{ 1, 2, 3 }"
+    assert fmt([1, 2], (2,)) == "{ 1, 2 }"
+    assert fmt([1, 2, 3], (3,)) == "{ 1, 2, 3 }"
+
 
 def test_nd_initializer_list_oneitem():
-    fmt = lambda v,s: format_indented_lines(build_initializer_lists(v, s, 0, str))
+    fmt = lambda v, s: format_indented_lines(build_initializer_lists(v, s, 0, str))
     assert fmt([1], (1,)) == "{ 1 }"
-    assert fmt([[1]], (1,1)) == "{ { 1 } }"
-    assert fmt([[[1]]], (1,1,1)) == "{ { { 1 } } }"
-    assert fmt([[[[1]]]], (1,1,1,1)) == "{ { { { 1 } } } }"
+    assert fmt([[1]], (1, 1)) == "{ { 1 } }"
+    assert fmt([[[1]]], (1, 1, 1)) == "{ { { 1 } } }"
+    assert fmt([[[[1]]]], (1, 1, 1, 1)) == "{ { { { 1 } } } }"
+
 
 def test_nd_initializer_list_twoitems():
-    fmt = lambda v,s: format_indented_lines(build_initializer_lists(v, s, 0, str))
-    assert fmt([1,2], (2,)) == "{ 1, 2 }"
-    assert fmt([[1,2]], (1,2)) == "{ { 1, 2 } }"
-    assert fmt([[[1,2]]], (1,1,2)) == "{ { { 1, 2 } } }"
-    assert fmt([[[[1,2]]]], (1,1,1,2)) == "{ { { { 1, 2 } } } }"
+    fmt = lambda v, s: format_indented_lines(build_initializer_lists(v, s, 0, str))
+    assert fmt([1, 2], (2,)) == "{ 1, 2 }"
+    assert fmt([[1, 2]], (1, 2)) == "{ { 1, 2 } }"
+    assert fmt([[[1, 2]]], (1, 1, 2)) == "{ { { 1, 2 } } }"
+    assert fmt([[[[1, 2]]]], (1, 1, 1, 2)) == "{ { { { 1, 2 } } } }"
     # transpose it:
-    assert fmt([[1], [2]], (2,1)) == "{ { 1 },\n  { 2 } }"
-    assert fmt([[[1], [2]]], (1,2,1)) == "{ { { 1 },\n    { 2 } } }"
+    assert fmt([[1], [2]], (2, 1)) == "{ { 1 },\n  { 2 } }"
+    assert fmt([[[1], [2]]], (1, 2, 1)) == "{ { { 1 },\n    { 2 } } }"
+
 
 def test_nd_initializer_list_twobytwoitems():
-    fmt = lambda v,s: format_indented_lines(build_initializer_lists(v, s, 0, str))
-    assert fmt([[1,2],[3,4]], (2,2)) == "{ { 1, 2 },\n  { 3, 4 } }"
-    assert fmt([[[1,2],[3,4]]], (1,2,2)) == "{ { { 1, 2 },\n    { 3, 4 } } }"
-    assert fmt([[[[1,2],[3,4]]]], (1,1,2,2)) == "{ { { { 1, 2 },\n      { 3, 4 } } } }"
+    fmt = lambda v, s: format_indented_lines(build_initializer_lists(v, s, 0, str))
+    assert fmt([[1, 2], [3, 4]], (2, 2)) == "{ { 1, 2 },\n  { 3, 4 } }"
+    assert fmt([[[1, 2], [3, 4]]], (1, 2, 2)) == "{ { { 1, 2 },\n    { 3, 4 } } }"
+    assert fmt([[[[1, 2], [3, 4]]]], (1, 1, 2, 2)) == "{ { { { 1, 2 },\n      { 3, 4 } } } }"
+
 
 def test_2d_initializer_list():
-    assert format_indented_lines(build_initializer_lists([[1,2,3], [4,5,6]], (2,3), 0, str)) == "{ { 1, 2, 3 },\n  { 4, 5, 6 } }"
+    assert format_indented_lines(build_initializer_lists([[1, 2, 3], [4, 5, 6]], (2, 3), 0, str)) == "{ { 1, 2, 3 },\n  { 4, 5, 6 } }"
 
-    values = [ [[1], [2]],  [[3], [4]],  [[5], [6]] ]
+    values = [[[1], [2]], [[3], [4]], [[5], [6]]]
     reference = """\
 { { { 1 },
     { 2 } },
@@ -166,21 +174,22 @@ def test_2d_initializer_list():
     { 4 } },
   { { 5 },
     { 6 } } }"""
-    assert format_indented_lines(build_initializer_lists(values, (3,2,1), 0, str)) == reference
+    assert format_indented_lines(build_initializer_lists(values, (3, 2, 1), 0, str)) == reference
+
 
 def test_2d_numpy_initializer_list():
     import numpy
-    values = [[1,2,3], [4,5,6]]
+    values = [[1, 2, 3], [4, 5, 6]]
     array = numpy.asarray(values)
-    sh = (2,3)
+    sh = (2, 3)
     assert array.shape == sh
-    fmt = lambda v,s: format_indented_lines(build_initializer_lists(v, s, 0, str))
+    fmt = lambda v, s: format_indented_lines(build_initializer_lists(v, s, 0, str))
     assert fmt(values, sh) == "{ { 1, 2, 3 },\n  { 4, 5, 6 } }"
     assert fmt(array, sh) == "{ { 1, 2, 3 },\n  { 4, 5, 6 } }"
 
-    values = [ [[1], [2]],  [[3], [4]],  [[5], [6]] ]
+    values = [[[1], [2]], [[3], [4]], [[5], [6]]]
     array = numpy.asarray(values)
-    sh = (3,2,1)
+    sh = (3, 2, 1)
     assert sh == array.shape
     reference = """\
 { { { 1 },
@@ -192,36 +201,40 @@ def test_2d_numpy_initializer_list():
     assert fmt(values, sh) == reference
     assert fmt(array, sh) == reference
 
+
 def test_cnode_array_declarations():
     assert str(ArrayDecl("double", "x", 3)) == "double x[3];"
     assert str(ArrayDecl("double", "x", (3,))) == "double x[3];"
-    assert str(ArrayDecl("double", "x", (3,4))) == "double x[3][4];"
+    assert str(ArrayDecl("double", "x", (3, 4))) == "double x[3][4];"
 
-    assert str(ArrayDecl("double", "x", 3, [1.,2.,3.])) == "double x[3] = { 1.0, 2.0, 3.0 };"
-    assert str(ArrayDecl("double", "x", (3,), [1.,2.,3.])) == "double x[3] = { 1.0, 2.0, 3.0 };"
+    assert str(ArrayDecl("double", "x", 3, [1., 2., 3.])) == "double x[3] = { 1.0, 2.0, 3.0 };"
+    assert str(ArrayDecl("double", "x", (3,), [1., 2., 3.])) == "double x[3] = { 1.0, 2.0, 3.0 };"
     reference = """\
 {
     double x[2][3] =
         { { 1.0, 2.0, 3.0 },
           { 4.0, 5.0, 6.0 } };
 }"""
-    assert str(Scope(ArrayDecl("double", "x", (2,3), [[1.,2.,3.], [4.,5.,6.]]))) == reference
+    assert str(Scope(ArrayDecl("double", "x", (2, 3), [[1., 2., 3.], [4., 5., 6.]]))) == reference
+
 
 def test_cnode_comments():
     assert str(Comment("hello world")) == "// hello world"
     assert str(Comment("  hello\n world  ")) == "// hello\n// world"
     assert format_indented_lines(Indented(Comment("  hello\n world  ").cs_format())) == "    // hello\n    // world"
 
+
 def test_cnode_statements():
     assert str(Break()) == "break;"
     assert str(Continue()) == "continue;"
-    assert str(Return(Add(1,2))) == "return 1 + 2;"
+    assert str(Return(Add(1, 2))) == "return 1 + 2;"
     assert str(Case("x")) == "case x:"
     assert str(Default()) == "default:"
 
     code = "for (std::vector<int>::iterator it = v.begin(); it != v.end(); ++it)\n{    /* foobar */\n}"
     assert str(VerbatimStatement(code)) == code
 
+
 def test_cnode_loop_statements():
     body = [Assign("x", 3), AssignAdd("x", 5)]
     body_fmt = "{\n    x = 3;\n    x += 5;\n}"
@@ -240,19 +253,21 @@ def test_cnode_loop_statements():
     assert str(While(LT(AssignAdd("x", 4.0), 17.0), AssignAdd("A", "y"))) == "while ((x += 4.0) < 17.0)\n{\n    A += y;\n}"
     assert str(ForRange("i", 3, 7, AssignAdd("A", "i"))) == "for (int i = 3; i < 7; ++i)\n{\n    A += i;\n}"
 
+
 def test_cnode_loop_helpers():
     i = Symbol("i")
     j = Symbol("j")
     A = Symbol("A")
     B = Symbol("B")
     C = Symbol("C")
-    src = A[i + 4*j]
+    src = A[i + 4 * j]
     dst = 2.0 * B[j] * C[i]
     ranges = [(i, 0, 2), (j, 1, 3)]
     assert str(assign_loop(src, dst, ranges)) == "for (int i = 0; i < 2; ++i)\n{\n    for (int j = 1; j < 3; ++j)\n    {\n        A[i + 4 * j] = 2.0 * B[j] * C[i];\n    }\n}"
     assert str(scale_loop(src, dst, ranges)) == "for (int i = 0; i < 2; ++i)\n{\n    for (int j = 1; j < 3; ++j)\n    {\n        A[i + 4 * j] *= 2.0 * B[j] * C[i];\n    }\n}"
     assert str(accumulate_loop(src, dst, ranges)) == "for (int i = 0; i < 2; ++i)\n{\n    for (int j = 1; j < 3; ++j)\n    {\n        A[i + 4 * j] += 2.0 * B[j] * C[i];\n    }\n}"
 
+
 def test_cnode_switch_statements():
     assert str(Switch("x", [])) == "switch (x)\n{\n}"
     assert str(Switch("x", [], default=Assign("i", 3))) == "switch (x)\n{\ndefault:\n    {\n        i = 3;\n    }\n}"
@@ -275,7 +290,7 @@ default:
     }
 }"""
     cnode_switch = str(Switch("x",
-                              [(1, Assign("y",3)), (2, Assign("y",4)),],
+                              [(1, Assign("y", 3)), (2, Assign("y", 4)), ],
                               default=Assign("y", 5)))
     assert cnode_switch == reference_switch
 
@@ -289,13 +304,14 @@ default:
     y = 5;
 }"""
     cnode_switch = str(Switch("x",
-                              [(1, Assign("y",3)), (2, Assign("y",4)),],
+                              [(1, Assign("y", 3)), (2, Assign("y", 4)), ],
                               default=Assign("y", 5),
                               autobreak=False, autoscope=False))
     assert cnode_switch == reference_switch
 
+
 def test_conceptual_tabulate_tensor():
-    A = ArrayDecl("double", "A", (4,6), values=0.0)
+    A = ArrayDecl("double", "A", (4, 6), values=0.0)
     code = StatementList([
         A,
         ForRange("q", 0, 2, [
@@ -305,8 +321,8 @@ def test_conceptual_tabulate_tensor():
                     AssignAdd(ArrayAccess(A, ("i", "j")),
                               Mul(ArrayAccess("FE0", ("q", "i")),
                                   ArrayAccess("FE1", ("q", "j"))))
-                    ])
                 ])
             ])
         ])
+    ])
     print(str(code))
diff --git a/test/uflacs/unit/test_cpp_compiler.py b/test/uflacs/unit/test_cpp_compiler.py
index 3b1b3d8..1bd99d6 100644
--- a/test/uflacs/unit/test_cpp_compiler.py
+++ b/test/uflacs/unit/test_cpp_compiler.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 -*-
 """
 Tests of generic C++ compilation code.
 """
@@ -7,21 +7,26 @@ from __future__ import print_function
 
 # FIXME: These are all disabled, don't remember why, turn them on again!
 
+import numpy
+
 from six.moves import zip
-import uflacs
-#from uflacs.backends.toy.toy_compiler import compile_form
 
 import ufl
 from ufl import as_ufl
 from ufl import *
-from uflacs.datastructures.arrays import object_array, int_array
+
+import ffc.uflacs
+# from ffc.uflacs.backends.toy.toy_compiler import compile_form
+
 
 def format_variable(i):
     return "s[%d]" % i
 
+
 def format_assignment(v, e):
     return "%s = %s;" % (v, e)
 
+
 def format_code_lines(expressions, expression_dependencies, want_to_cache,
                       format_expression, format_variable, format_assignment):
     """FIXME: Test this.
@@ -37,31 +42,32 @@ def format_code_lines(expressions, expression_dependencies, want_to_cache,
     def par(c):
         return "(%s)" % (c,)
     n = len(expressions)
-    code_ref = object_array(n)
-    precedence = int_array(n)
+    code_ref = numpy.empty(n, dtype=object)
+    precedence = numpy.zeros(n, dtype=int)
     code_lines = []
     j = 0
     for i, e in enumerate(expressions):
-        p = 0 # TODO: precedence of e
+        p = 0  # TODO: precedence of e
         deps = expression_dependencies[i]
         code_ops = [code_ref[d] for d in deps]
         code_ops = [par(c) if precedence[d] > p else c for c, d in zip(code_ops, deps)]
         code_e = format_expression(e, code_ops)
         if want_to_cache[i]:
             varname = format_variable(j)
-            j += 1 # TODO: allocate free variable instead of just adding a new one
+            j += 1  # TODO: allocate free variable instead of just adding a new one
             assignment = format_assignment(varname, code_e)
             code_lines.append(assignment)
             code_ref[i] = varname
-            precedence[i] = 9999 # TODO: highest # no () needed around variable
+            precedence[i] = 9999  # TODO: highest # no () needed around variable
         else:
             code_ref[i] = code_e
             precedence[i] = p
         # TODO: deallocate variables somehow when they have been last used
-    final_expressions = [] # TODO: Insert code expressions for given output expression indices here
+    final_expressions = []  # TODO: Insert code expressions for given output expression indices here
     return code_lines, final_expressions
 
-#class CppExpressionCompilerTest(UflTestCase):
+# class CppExpressionCompilerTest(UflTestCase):
+
 
 def xtest_literal_zero_compilation():
 
@@ -75,6 +81,7 @@ def xtest_literal_zero_compilation():
     assert lines == expected_lines
     assert finals == expected_finals
 
+
 def xtest_literal_int_compilation():
 
     uexpr = as_ufl(2)
@@ -87,6 +94,7 @@ def xtest_literal_int_compilation():
     assert lines == expected_lines
     assert finals == expected_finals
 
+
 def xtest_literal_float_compilation():
 
     uexpr = as_ufl(2.56)
@@ -99,6 +107,7 @@ def xtest_literal_float_compilation():
     assert lines == expected_lines
     assert finals == expected_finals
 
+
 def xtest_geometry_h_compilation():
     h = ufl.Circumradius(ufl.triangle)
 
@@ -112,6 +121,7 @@ def xtest_geometry_h_compilation():
     assert lines == expected_lines
     assert finals == expected_finals
 
+
 def xtest_terminal_sum_compilation():
     h = ufl.Circumradius(ufl.triangle)
 
@@ -126,104 +136,116 @@ def xtest_terminal_sum_compilation():
     assert finals == expected_finals
 
 
-#class CppCompilerTest(UflTestCase):
+# class CppCompilerTest(UflTestCase):
 import pytest
 
+
 @pytest.fixture
 def u():
     return Coefficient(FiniteElement("U", triangle, 1))
+
+
 @pytest.fixture
 def v():
     return Coefficient(VectorElement("U", triangle, 1))
+
+
 @pytest.fixture
 def w():
     return Coefficient(TensorElement("U", triangle, 1))
 
+
 def test_fixtures(u, v, w):
     "Just checking that fixtures work!"
     assert u == Coefficient(FiniteElement("U", triangle, 1), count=u.count())
     assert v == Coefficient(VectorElement("U", triangle, 1), count=v.count())
     assert w == Coefficient(TensorElement("U", triangle, 1), count=w.count())
 
+
 def xtest_cpp2_compile_scalar_literals():
-    M = as_ufl(0)*dx
+    M = as_ufl(0) * dx
     code = compile_form(M, 'unittest')
     print('\n', code)
     expected = 'TODO'
     assert code == expected
 
-    M = as_ufl(3)*dx
+    M = as_ufl(3) * dx
     code = compile_form(M, 'unittest')
     print('\n', code)
     expected = 'TODO'
     assert code == expected
 
-    M = as_ufl(1.03)*dx
+    M = as_ufl(1.03) * dx
     code = compile_form(M, 'unittest')
     print('\n', code)
     expected = 'TODO'
     assert code == expected
 
+
 def xtest_cpp2_compile_geometry():
-    M = CellVolume(triangle)*dx
+    M = CellVolume(triangle) * dx
     code = compile_form(M, 'unittest')
     print('\n', code)
     expected = 'TODO'
     assert code == expected
 
-    M = SpatialCoordinate(triangle)[0]*dx
+    M = SpatialCoordinate(triangle)[0] * dx
     code = compile_form(M, 'unittest')
     print('\n', code)
     expected = 'TODO'
     assert code == expected
 
+
 def xtest_cpp2_compile_coefficients(u, v, w):
-    M = u*dx
+    M = u * dx
     code = compile_form(M, 'unittest')
     print('\n', code)
     expected = 'TODO'
     assert code == expected
 
-    M = v[0]*dx
+    M = v[0] * dx
     code = compile_form(M, 'unittest')
     print('\n', code)
     expected = 'TODO'
     assert code == expected
 
-    M = w[1, 0]*dx
+    M = w[1, 0] * dx
     code = compile_form(M, 'unittest')
     print('\n', code)
     expected = 'TODO'
     assert code == expected
 
+
 def xtest_cpp2_compile_sums(u, v, w):
-    M = (2 + u)*dx
+    M = (2 + u) * dx
     code = compile_form(M, 'unittest')
     print('\n', code)
     expected = 'TODO'
     assert code == expected
 
-    M = (v[1] + w[1, 1] + 3 + u)*dx
+    M = (v[1] + w[1, 1] + 3 + u) * dx
     code = compile_form(M, 'unittest')
     print('\n', code)
     expected = 'TODO'
     assert code == expected
 
+
 def xtest_cpp2_compile_products(u, v, w):
-    M = (2*u)*dx
+    M = (2 * u) * dx
     code = compile_form(M, 'unittest')
     print('\n', code)
     expected = 'TODO'
     assert code == expected
 
-    M = (v[1] * w[1, 1] * 3 * u)*dx
+    M = (v[1] * w[1, 1] * 3 * u) * dx
     code = compile_form(M, 'unittest')
     print('\n', code)
     expected = 'TODO'
     assert code == expected
 
+
 def xtest_cpp_compilation():
-    M = u**2/2*dx
+    M = u**2 / 2 * dx
     code = compile_form(M, 'unittest')
     print('\n', code)
     expected = 'TODO'
diff --git a/test/uflacs/unit/test_crs.py b/test/uflacs/unit/test_crs.py
index 9a0ca16..2f22937 100644
--- a/test/uflacs/unit/test_crs.py
+++ b/test/uflacs/unit/test_crs.py
@@ -1,14 +1,15 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 -*-
 """
-Tests of CRS data structure.
+Tests of CRSArray data structure.
 """
 
 from six.moves import xrange
-from uflacs.datastructures.crs import CRS
+from ffc.uflacs.analysis.crsarray import CRSArray
+
 
 def test_crs_can_have_zero_element_rows():
     rcap, ecap = 3, 1
-    A = CRS(rcap, ecap, int)
+    A = CRSArray(rcap, ecap, int)
     for i in range(rcap):
         row = []
         A.push_row(row)
@@ -17,9 +18,10 @@ def test_crs_can_have_zero_element_rows():
         row = []
         assert list(A[i]) == row
 
+
 def test_crs_can_have_one_element_rows():
     rcap, ecap = 3, 3
-    A = CRS(rcap, ecap, int)
+    A = CRSArray(rcap, ecap, int)
     for i in range(rcap):
         row = [i]
         A.push_row(row)
@@ -28,15 +30,16 @@ def test_crs_can_have_one_element_rows():
         row = [i]
         assert list(A[i]) == row
 
+
 def test_crs_can_have_n_element_rows():
     rcap, ecap = 5, 25
-    A = CRS(rcap, ecap, int)
+    A = CRSArray(rcap, ecap, int)
     k = 0
     for i in range(rcap):
-        row = [i+2, i+1] + [i]*i
+        row = [i + 2, i + 1] + [i] * i
         k += len(row)
         A.push_row(row)
     assert A.num_elements == k
     for i in range(rcap):
-        row = [i+2, i+1] + [i]*i
+        row = [i + 2, i + 1] + [i] * i
         assert list(A[i]) == row
diff --git a/test/uflacs/unit/test_factorization.py b/test/uflacs/unit/test_factorization.py
index 4d184f4..64af1d2 100644
--- a/test/uflacs/unit/test_factorization.py
+++ b/test/uflacs/unit/test_factorization.py
@@ -1,23 +1,23 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 -*-
 """
 Tests of algorithm for factorization of integrand w.r.t. Argument terms.
 """
 
 from six.moves import xrange as range
 from ufl import *
-from uflacs.analysis.factorization import compute_argument_factorization
+from ffc.uflacs.analysis.factorization import compute_argument_factorization
 
 # TODO: Restructure these tests using py.test fixtures and parameterization?
 
-def compare_compute_argument_factorization(SV, dependencies, expected_AV, expected_FV, expected_IM):
-    target_variables = [len(SV)-1]
 
-    argument_factorization, modified_arguments, V, target_variables, dependencies = \
-        compute_argument_factorization(SV, target_variables, dependencies)
+def compare_compute_argument_factorization(SV, SV_deps, expected_AV, expected_FV, expected_IM):
+    SV_targets = [len(SV) - 1]
+    rank = max(len(k) for k in expected_IM.keys())
+    argument_factorizations, modified_arguments, FV, FV_deps, FV_targets = \
+        compute_argument_factorization(SV, SV_deps, SV_targets, rank)
+    argument_factorization, = argument_factorizations
 
-    # TODO: Rename in tests
     AV = modified_arguments
-    FV = V
     IM = argument_factorization
 
     assert AV == expected_AV
@@ -27,6 +27,7 @@ def compare_compute_argument_factorization(SV, dependencies, expected_AV, expect
     assert FV == expected_FV
     assert IM == expected_IM
 
+
 def test_compute_argument_factorization():
     V = FiniteElement("CG", triangle, 1)
     u = TrialFunction(V)
@@ -42,71 +43,71 @@ def test_compute_argument_factorization():
 
     # Test basic non-argument terminal
     SV = [f]
-    dependencies = [()]
+    SV_deps = [()]
     AV = []
     FV = FVpre + [f]
-    IM = { (): 0 + offset }
-    compare_compute_argument_factorization(SV, dependencies, AV, FV, IM)
+    IM = {(): 0 + offset}
+    compare_compute_argument_factorization(SV, SV_deps, AV, FV, IM)
 
     # Test basic non-argument sum
-    SV = [f, g, f+g]
-    dependencies = [(), (), (0, 1)]
+    SV = [f, g, f + g]
+    SV_deps = [(), (), (0, 1)]
     AV = []
-    FV = FVpre + [f, g, f+g]
-    IM = { (): 2 + offset }
-    compare_compute_argument_factorization(SV, dependencies, AV, FV, IM)
+    FV = FVpre + [f, g, f + g]
+    IM = {(): 2 + offset}
+    compare_compute_argument_factorization(SV, SV_deps, AV, FV, IM)
 
     # Test basic non-argument product
-    SV = [f, g, f*g]
-    dependencies = [(), (), (0, 1)]
+    SV = [f, g, f * g]
+    SV_deps = [(), (), (0, 1)]
     AV = []
-    FV = FVpre + [f, g, f*g]
-    IM = { (): 2 + offset }
-    compare_compute_argument_factorization(SV, dependencies, AV, FV, IM)
+    FV = FVpre + [f, g, f * g]
+    IM = {(): 2 + offset}
+    compare_compute_argument_factorization(SV, SV_deps, AV, FV, IM)
 
     # Test basic single-argument-only expression
     SV = [v]
-    dependencies = [()]
+    SV_deps = [()]
     AV = [v]
     FV = FVpre + [one]
-    IM = { (0,): 1 } # v == AV[0] * FV[1]
-    compare_compute_argument_factorization(SV, dependencies, AV, FV, IM)
+    IM = {(0,): 1}  # v == AV[0] * FV[1]
+    compare_compute_argument_factorization(SV, SV_deps, AV, FV, IM)
 
     # Test basic coefficient-argument product
-    SV = [f, v, f*v]
-    dependencies = [(), (), (0, 1)]
+    SV = [f, v, f * v]
+    SV_deps = [(), (), (0, 1)]
     AV = [v]
-    FV = FVpre + [f, one] # TODO: Why is one at the end here?
-    IM = { (0,): offset } # f*v == AV[0] * FV[1]
-    compare_compute_argument_factorization(SV, dependencies, AV, FV, IM)
+    FV = FVpre + [f, one]  # TODO: Why is one at the end here?
+    IM = {(0,): offset}  # f*v == AV[0] * FV[1]
+    compare_compute_argument_factorization(SV, SV_deps, AV, FV, IM)
 
     # Test basic argument product
-    SV = [u, v, u*v]
-    dependencies = [(), (), (0, 1)]
-    AV = [v, u] # Test function < trial function
+    SV = [u, v, u * v]
+    SV_deps = [(), (), (0, 1)]
+    AV = [v, u]  # Test function < trial function
     FV = FVpre + [one]
-    IM = { (0, 1): 1 } # v*u == (AV[0] * AV[1]) * FV[1]
-    compare_compute_argument_factorization(SV, dependencies, AV, FV, IM)
+    IM = {(0, 1): 1}  # v*u == (AV[0] * AV[1]) * FV[1]
+    compare_compute_argument_factorization(SV, SV_deps, AV, FV, IM)
 
     # Test coefficient-argument products
-    SV = [u, f, v, (f*v), u*(f*v)]
-    dependencies = [(), (), (), (1, 2), (0, 3)]
+    SV = [u, f, v, (f * v), u * (f * v)]
+    SV_deps = [(), (), (), (1, 2), (0, 3)]
     AV = [v, u]
     FV = FVpre + [one, f]
-    IM = { (0, 1): 1 + offset } # f*(u*v) == (AV[0] * AV[1]) * FV[2]
-    compare_compute_argument_factorization(SV, dependencies, AV, FV, IM)
+    IM = {(0, 1): 1 + offset}  # f*(u*v) == (AV[0] * AV[1]) * FV[2]
+    compare_compute_argument_factorization(SV, SV_deps, AV, FV, IM)
 
     # Test more complex situation
-    SV = [u, u.dx(0), v, #0..2
-          a, b, c, d, e, #3..7
-          a*u, b*u.dx(0), # 8..9
-          c*v, d*v, # 10..11
-          a*u + b*u.dx(0), # 12
-          c*v + d*v, # 13
-          e*(a*u + b*u.dx(0)), # 14
-          (e*(a*u + b*u.dx(0))) * (c*v + d*v), # 15
+    SV = [u, u.dx(0), v,  # 0..2
+          a, b, c, d, e,  # 3..7
+          a * u, b * u.dx(0),  # 8..9
+          c * v, d * v,  # 10..11
+          a * u + b * u.dx(0),  # 12
+          c * v + d * v,  # 13
+          e * (a * u + b * u.dx(0)),  # 14
+          (e * (a * u + b * u.dx(0))) * (c * v + d * v),  # 15
           ]
-    dependencies = [(), (), (),
+    SV_deps = [(), (), (),
                     (), (), (), (), (),
                     (0, 3), (1, 4),
                     (2, 5), (2, 6),
@@ -116,13 +117,13 @@ def test_compute_argument_factorization():
                     (13, 14),
                     ]
     AV = [v, u, u.dx(0)]
-    FV = FVpre + [one] + [a, b, c, d, e, # 0..5
-          c+d, # 6, introduced by SV[13]
-          e*a, # 7, introduced by SV[14]
-          e*b, # 8, introduced by SV[14]
-          (e*a)*(c+d), # 9
-          (e*b)*(c+d), # 10
-          ]
-    IM = { (0, 1): 9 + offset,  # (a*e)*(c+d)*(u*v) == (AV[0] * AV[2]) * FV[13]
-           (0, 2): 10 + offset } # (b*e)*(c+d)*(u.dx(0)*v) == (AV[1] * AV[2]) * FV[12]
-    compare_compute_argument_factorization(SV, dependencies, AV, FV, IM)
+    FV = FVpre + [one] + [a, b, c, d, e,  # 0..5
+                          c + d,  # 6, introduced by SV[13]
+                          e * a,  # 7, introduced by SV[14]
+                          e * b,  # 8, introduced by SV[14]
+                          (e * a) * (c + d),  # 9
+                          (e * b) * (c + d),  # 10
+                          ]
+    IM = {(0, 1): 9 + offset,  # (a*e)*(c+d)*(u*v) == (AV[0] * AV[2]) * FV[13]
+          (0, 2): 10 + offset}  # (b*e)*(c+d)*(u.dx(0)*v) == (AV[1] * AV[2]) * FV[12]
+    compare_compute_argument_factorization(SV, SV_deps, AV, FV, IM)
diff --git a/test/uflacs/unit/test_format_code_structure.py b/test/uflacs/unit/test_format_code_structure.py
index 4171400..c063e5d 100644
--- a/test/uflacs/unit/test_format_code_structure.py
+++ b/test/uflacs/unit/test_format_code_structure.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 -*-
 """
 Tests of generic code formatting utilities,
 which focus on the overall structure of the code,
@@ -8,7 +8,8 @@ Some of this is C++ specific, some is more generic.
 """
 
 import pytest
-from uflacs.language.cnodes import *
+from ffc.uflacs.language.cnodes import *
+
 
 def test_format_basics():
     # Reproduce a string
@@ -38,6 +39,7 @@ def test_format_basics():
     code = format_indented_lines(("hello", "world"), 2)
     assert code == "        hello\n        world"
 
+
 def test_format_indented_lines():
     # Strings and lists can be put in Indented containers
     assert format_indented_lines(Indented("hei")) == "    hei"
@@ -46,6 +48,7 @@ def test_format_indented_lines():
     code = format_indented_lines(["{", Indented("fee\nfie\nfoe"), "}"])
     assert code == "{\n    fee\n    fie\n    foe\n}"
 
+
 def xtest_format_blocks():
     # A Scope is an indented body with brackets before and after
     code = format_code(Scope("fee\nfie\nfoe"))
@@ -61,6 +64,7 @@ def xtest_format_blocks():
     code = format_code(["iq = 0;", "do", (Scope("foo(iq);"), " while (iq < nq);")])
     assert code == "iq = 0;\ndo\n{\n    foo(iq);\n} while (iq < nq);"
 
+
 def xtest_format_class():
     # Making a class declaration
     assert format_code(Class('Car')) == 'class Car\n{\n};'
@@ -73,6 +77,7 @@ def xtest_format_class():
     code = format_code(Class('Car', private_body='void eval()\n{\n}'))
     assert code == 'class Car\n{\nprivate:\n    void eval()\n    {\n    }\n};'
 
+
 def xtest_format_template_argument_list():
     def t(args, mlcode, slcode):
         code = format_code(TemplateArgumentList(args, False))
@@ -82,6 +87,7 @@ def xtest_format_template_argument_list():
     t(('A',), '<\n    A\n>', '<A>')
     t(('A', 'B'), '<\n    A,\n    B\n>', '<A, B>')
 
+
 def xtest_format_templated_type():
     code = format_code(Type('Foo'))
     assert code == 'Foo'
@@ -90,11 +96,13 @@ def xtest_format_templated_type():
     code = format_code(Type('Foo', ('int', Type('Bar', ('123', 'float')))))
     assert code == 'Foo<int, Bar<123, float> >'
 
+
 def xtest_format_typedef():
     assert format_code(TypeDef('int', 'myint')) == 'typedef int myint;'
     code = format_code(TypeDef(Type('Foo', ('int', Type('Bar', ('123', 'float')))), 'Thing'))
     assert code == 'typedef Foo<int, Bar<123, float> > Thing;'
 
+
 def xtest_format_template_class():
     expected = "template<typename T, typename R>\nclass MyClass\n{\npublic:\n    void hello(int world) {}\n};"
 
@@ -106,17 +114,20 @@ def xtest_format_template_class():
                  template_arguments=('typename T', 'typename R'))
     assert format_code(code) == expected
 
+
 def test_format_variable_decl():
     code = VariableDecl("double", "foo")
     expected = "double foo;"
     assert str(code) == expected
 
+
 def test_literal_cexpr_value_conversion():
-    assert bool(LiteralBool(True)) == True
-    assert bool(LiteralBool(False)) == False
+    assert bool(LiteralBool(True)) is True
+    assert bool(LiteralBool(False)) is False
     assert int(LiteralInt(2)) == 2
     assert float(LiteralFloat(2.0)) == 2.0
-    #assert complex(LiteralFloat(2.0+4.0j)) == 2.0+4.0j
+    # assert complex(LiteralFloat(2.0+4.0j)) == 2.0+4.0j
+
 
 def test_format_array_decl():
     expected = "double foo[3];"
@@ -159,6 +170,7 @@ def test_format_array_decl():
     with pytest.raises(ValueError):
         ArrayAccess(decl, (3, -1))
 
+
 def test_format_array_def():
     expected = "double foo[3] = { 1.0, 2.0, 3.0 };"
     code = ArrayDecl("double", "foo", 3, [1.0, 2.0, 3.0])
@@ -176,9 +188,10 @@ def test_format_array_def():
       { { 1.0, 2.0, 3.0 },
         { 6.0, 5.0, 4.0 } } };"""
     code = ArrayDecl("double", "foo", (2, 2, 3), [[[1.0, 2.0, 3.0], [6.0, 5.0, 4.0]],
-                                                [[1.0, 2.0, 3.0], [6.0, 5.0, 4.0]]])
+                                                  [[1.0, 2.0, 3.0], [6.0, 5.0, 4.0]]])
     assert str(code) == expected
 
+
 def test_format_array_def_zero():
     expected = "double foo[3] = {};"
     code = ArrayDecl("double", "foo", 3, [0.0, 0.0, 0.0])
@@ -190,6 +203,7 @@ def test_format_array_def_zero():
     code = ArrayDecl("double", "foo", (2, 3), [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
     assert str(code) == expected
 
+
 def xtest_class_with_arrays():
     adecl = ArrayDecl('double', 'phi', (3, 5))
     aacc = ArrayAccess(adecl, (2, 1))
@@ -207,6 +221,7 @@ def xtest_class_with_arrays():
     actual = str(classcode)
     assert actual == expected
 
+
 def test_class_array_access():
     vname = 'phi'
     shape = (3, 4)
@@ -217,6 +232,7 @@ def test_class_array_access():
     assert dcode == 'double phi[3][4];'
     assert acode == 'phi[i0][2]'
 
+
 def test_while_loop():
     code = While("--k < 3", [])
     actual = str(code)
@@ -228,6 +244,7 @@ def test_while_loop():
     expected = "while (--k < 3)\n{\n    ting;\n    tang;\n}"
     assert actual == expected
 
+
 def test_for_loop():
     code = For("int i = 0;", "i < 3", "++i", [])
     actual = str(code)
@@ -239,6 +256,7 @@ def test_for_loop():
     expected = "for (int i = 0; i < 3; ++i)\n{\n    ting;\n    tang;\n}"
     assert actual == expected
 
+
 def test_for_range():
     code = ForRange("i", 0, 3, [])
     actual = str(code)
diff --git a/test/uflacs/unit/test_graph_algorithm.py b/test/uflacs/unit/test_graph_algorithm.py
index 7edb6b2..b512c54 100644
--- a/test/uflacs/unit/test_graph_algorithm.py
+++ b/test/uflacs/unit/test_graph_algorithm.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 -*-
 """
 Tests of graph representation of expressions.
 """
@@ -10,13 +10,13 @@ from ufl import *
 from ufl import product
 from ufl.permutation import compute_indices
 
-from uflacs.analysis.graph import build_graph
-from uflacs.analysis.graph_rebuild import rebuild_expression_from_graph
-#from uflacs.analysis.graph_rebuild import rebuild_scalar_e2i
-#from uflacs.analysis.graph_dependencies import (compute_dependencies,
+from ffc.uflacs.analysis.graph import build_graph
+from ffc.uflacs.analysis.graph_rebuild import rebuild_expression_from_graph
+# from ffc.uflacs.analysis.graph_rebuild import rebuild_scalar_e2i
+# from ffc.uflacs.analysis.graph_dependencies import (compute_dependencies,
 #                                                mark_active,
 #                                                mark_image)
-#from uflacs.analysis.graph_ssa import (mark_partitions,
+# from ffc.uflacs.analysis.graph_ssa import (mark_partitions,
 #                                       compute_dependency_count,
 #                                       invert_dependencies,
 #                                       default_cache_score_policy,
@@ -25,6 +25,7 @@ from uflacs.analysis.graph_rebuild import rebuild_expression_from_graph
 
 from operator import eq as equal
 
+
 def test_graph_algorithm_allocates_correct_number_of_symbols():
     U = FiniteElement("CG", triangle, 1)
     V = VectorElement("CG", triangle, 1)
@@ -44,44 +45,44 @@ def test_graph_algorithm_allocates_correct_number_of_symbols():
     assert G.V_symbols.num_elements == 3
     assert G.total_unique_symbols == 3
 
-    expr = u**2/2
+    expr = u**2 / 2
     G = build_graph([expr], DEBUG=0)
     assert G.V_symbols.num_elements == 4
     assert G.total_unique_symbols == 4
 
-    expr = dot(v, v)/2
+    expr = dot(v, v) / 2
     G = build_graph([expr], DEBUG=0)
     assert G.V_symbols.num_elements == 5
     assert G.total_unique_symbols == 5
 
     # Testing Indexed
-    expr = v[i]*v[i]
+    expr = v[i] * v[i]
     G = build_graph([expr], DEBUG=0)
-    assert G.V_symbols.num_elements == 2+2+2+1
-    assert G.total_unique_symbols == 2+2+1
+    assert G.V_symbols.num_elements == 2 + 2 + 2 + 1
+    assert G.total_unique_symbols == 2 + 2 + 1
 
     # Reusing symbols for indexed with different ordering
     # Note that two index sums are created, giving 2+1 symbols
-    expr = w[i, j]*w[j, i]
+    expr = w[i, j] * w[j, i]
     G = build_graph([expr], DEBUG=0)
-    assert G.V_symbols.num_elements == 4+4+4+4+2+1
-    assert G.total_unique_symbols == 4+4+2+1
+    assert G.V_symbols.num_elements == 4 + 4 + 4 + 4 + 2 + 1
+    assert G.total_unique_symbols == 4 + 4 + 2 + 1
 
     # Testing ComponentTensor
-    expr = dot(as_vector(2*v[i], i), v)
+    expr = dot(as_vector(2 * v[i], i), v)
     G = build_graph([expr], DEBUG=0)
-    assert G.V_symbols.num_elements == 2+1 + 2+2+2 + 1
-    assert G.total_unique_symbols == 2+1 + 2+1
+    assert G.V_symbols.num_elements == 2 + 1 + 2 + 2 + 2 + 1
+    assert G.total_unique_symbols == 2 + 1 + 2 + 1
 
-    expr = dot(v+2*v, v)
+    expr = dot(v + 2 * v, v)
     G = build_graph([expr], DEBUG=0)
-    assert G.V_symbols.num_elements == 2+1 + 2+2+2+2 + 1
-    assert G.total_unique_symbols == 2+1 + 2+2 + 1
+    assert G.V_symbols.num_elements == 2 + 1 + 2 + 2 + 2 + 2 + 1
+    assert G.total_unique_symbols == 2 + 1 + 2 + 2 + 1
 
-    expr = outer(v, v)[i, j]*outer(v, v)[j, i]
+    expr = outer(v, v)[i, j] * outer(v, v)[j, i]
     G = build_graph([expr], DEBUG=0)
-    assert G.V_symbols.num_elements == 21 # 2+4+4+4 + 4+2+1
-    assert G.total_unique_symbols == 13 # 2+4+4 + 2+1
+    assert G.V_symbols.num_elements == 21  # 2+4+4+4 + 4+2+1
+    assert G.total_unique_symbols == 13  # 2+4+4 + 2+1
 
     # Testing tensor/scalar
     expr = as_ufl(2)
@@ -96,23 +97,24 @@ def test_graph_algorithm_allocates_correct_number_of_symbols():
 
     expr = outer(v, v)
     G = build_graph([expr], DEBUG=0)
-    assert G.V_symbols.num_elements == 2+4
-    assert G.total_unique_symbols == 2+4
+    assert G.V_symbols.num_elements == 2 + 4
+    assert G.total_unique_symbols == 2 + 4
 
-    expr = as_tensor(v[i]*v[j], (i, j))
+    expr = as_tensor(v[i] * v[j], (i, j))
     G = build_graph([expr], DEBUG=0)
-    assert G.V_symbols.num_elements == 2+2+2+4+4
-    assert G.total_unique_symbols == 2+4
+    assert G.V_symbols.num_elements == 2 + 2 + 2 + 4 + 4
+    assert G.total_unique_symbols == 2 + 4
 
-    expr = as_tensor(v[i]*v[j]/2, (i, j))
+    expr = as_tensor(v[i] * v[j] / 2, (i, j))
     G = build_graph([expr], DEBUG=0)
-    assert G.V_symbols.num_elements == 2+2+2+4+4+4+1
-    assert G.total_unique_symbols == 2+1+4+4
+    assert G.V_symbols.num_elements == 2 + 2 + 2 + 4 + 4 + 4 + 1
+    assert G.total_unique_symbols == 2 + 1 + 4 + 4
 
-    expr = outer(v, v)/2 # converted to the tensor notation above
+    expr = outer(v, v) / 2  # converted to the tensor notation above
     G = build_graph([expr], DEBUG=0)
-    assert G.V_symbols.num_elements == 2+2+2+4+4+4+1
-    assert G.total_unique_symbols == 2+1+4+4
+    assert G.V_symbols.num_elements == 2 + 2 + 2 + 4 + 4 + 4 + 1
+    assert G.total_unique_symbols == 2 + 1 + 4 + 4
+
 
 def test_rebuild_expression_from_graph_basic_scalar_expressions():
     U = FiniteElement("CG", triangle, 1)
@@ -135,11 +137,12 @@ def test_rebuild_expression_from_graph_basic_scalar_expressions():
     assert v1 == v2
 
     # ... Simple operators are reproduced
-    for v1 in [2+u, u+u, u*u, u*2, u**2, u**u, u/u, sin(u)]:
+    for v1 in [2 + u, u + u, u * u, u * 2, u**2, u**u, u / u, sin(u)]:
         G = build_graph([v1])
         v2 = rebuild_expression_from_graph(G)
         assert v1 == v2
 
+
 def test_rebuild_expression_from_graph_on_products_with_indices():
     U = FiniteElement("CG", triangle, 1)
     V = VectorElement("CG", triangle, 1)
@@ -150,24 +153,24 @@ def test_rebuild_expression_from_graph_on_products_with_indices():
     i, j, k, l = indices(4)
 
     # Test fixed index
-    fixed = [u*v[0], v[1]*v[0], w[0, 1]*w[0, 0]]
+    fixed = [u * v[0], v[1] * v[0], w[0, 1] * w[0, 0]]
     for v1 in fixed:
         G = build_graph([v1])
         v2 = rebuild_expression_from_graph(G)
         assert v1 == v2
 
     # Test simple repeated index
-    v1 = v[i]*v[i]
+    v1 = v[i] * v[i]
     G = build_graph([v1])
     v2 = rebuild_expression_from_graph(G)
-    ve = v[0]*v[0] + v[1]*v[1]
+    ve = v[0] * v[0] + v[1] * v[1]
     assert ve == v2
 
     # Test double repeated index
-    v1 = w[i, j]*w[j, i]
+    v1 = w[i, j] * w[j, i]
     G = build_graph([v1])
     v2 = rebuild_expression_from_graph(G)
-    ve = (w[1, 1]*w[1, 1] + w[1, 0]*w[0, 1]) + (w[0, 1]*w[1, 0] + w[0, 0]*w[0, 0])
+    ve = (w[1, 1] * w[1, 1] + w[1, 0] * w[0, 1]) + (w[0, 1] * w[1, 0] + w[0, 0] * w[0, 0])
     if 0:
         print()
         print(v1)
@@ -177,13 +180,14 @@ def test_rebuild_expression_from_graph_on_products_with_indices():
     assert ve == v2
 
     # Test mix of repeated and non-repeated index
-    v1 = (w[i, j]*w[j, 0] + v[i])*v[i]
+    v1 = (w[i, j] * w[j, 0] + v[i]) * v[i]
     G = build_graph([v1])
     v2 = rebuild_expression_from_graph(G)
-    ve = ( (w[0, 0]*w[0, 0] + w[0, 1]*w[1, 0] + v[0])*v[0]
-          +(w[1, 0]*w[0, 0] + w[1, 1]*w[1, 0] + v[1])*v[1])
+    ve = ((w[0, 0] * w[0, 0] + w[0, 1] * w[1, 0] + v[0]) * v[0]
+          + (w[1, 0] * w[0, 0] + w[1, 1] * w[1, 0] + v[1]) * v[1])
     assert ve == v2
 
+
 def test_rebuild_expression_from_graph_basic_tensor_expressions():
     U = FiniteElement("CG", triangle, 1)
     V = VectorElement("CG", triangle, 1)
@@ -207,92 +211,94 @@ def test_rebuild_expression_from_graph_basic_tensor_expressions():
     assert as_vector((v1[0, 0], v1[0, 1], v1[1, 0], v1[1, 1])) == v2
 
     # Vector sum
-    v1 = v+v
+    v1 = v + v
     G = build_graph([v1])
     v2 = rebuild_expression_from_graph(G)
-    ve = as_vector((v[0]+v[0], v[1]+v[1]))
+    ve = as_vector((v[0] + v[0], v[1] + v[1]))
     assert ve == v2
 
-    v1 = v+vb
+    v1 = v + vb
     G = build_graph([v1])
     v2 = rebuild_expression_from_graph(G)
-    ve = as_vector((v[0]+vb[0], v[1]+vb[1]))
+    ve = as_vector((v[0] + vb[0], v[1] + vb[1]))
     assert ve == v2
 
     # Tensor sum
-    v1 = w+w
+    v1 = w + w
     G = build_graph([v1])
     v2 = rebuild_expression_from_graph(G)
-    ve = as_vector((w[0, 0]+w[0, 0], w[0, 1]+w[0, 1], w[1, 0]+w[1, 0], w[1, 1]+w[1, 1]))
+    ve = as_vector((w[0, 0] + w[0, 0], w[0, 1] + w[0, 1], w[1, 0] + w[1, 0], w[1, 1] + w[1, 1]))
     assert ve == v2
 
-    v1 = w+wb
+    v1 = w + wb
     G = build_graph([v1])
     v2 = rebuild_expression_from_graph(G)
-    ve = as_vector((w[0, 0]+wb[0, 0], w[0, 1]+wb[0, 1], w[1, 0]+wb[1, 0], w[1, 1]+wb[1, 1]))
+    ve = as_vector((w[0, 0] + wb[0, 0], w[0, 1] + wb[0, 1], w[1, 0] + wb[1, 0], w[1, 1] + wb[1, 1]))
     assert ve == v2
 
     # Scalar-vector product
-    v1 = u*v
+    v1 = u * v
     G = build_graph([v1])
     v2 = rebuild_expression_from_graph(G)
-    ve = as_vector((u*v[0], u*v[1]))
+    ve = as_vector((u * v[0], u * v[1]))
     assert ve == v2
 
     # Scalar-tensor product
-    v1 = u*w
+    v1 = u * w
     G = build_graph([v1])
     v2 = rebuild_expression_from_graph(G)
-    ve = as_vector((u*w[0, 0], u*w[0, 1], u*w[1, 0], u*w[1, 1]))
+    ve = as_vector((u * w[0, 0], u * w[0, 1], u * w[1, 0], u * w[1, 1]))
     assert ve == v2
 
     # Vector-vector index based inner product
-    v1 = v[i]*v[i]
+    v1 = v[i] * v[i]
     G = build_graph([v1])
     v2 = rebuild_expression_from_graph(G)
-    ve = v[0]*v[0] + v[1]*v[1]
+    ve = v[0] * v[0] + v[1] * v[1]
     assert ve == v2
 
-    v1 = v[i]*vb[i]
+    v1 = v[i] * vb[i]
     G = build_graph([v1])
     v2 = rebuild_expression_from_graph(G)
-    ve = v[0]*vb[0] + v[1]*vb[1]
+    ve = v[0] * vb[0] + v[1] * vb[1]
     assert ve == v2
 
     # Tensor-tensor index based transposed inner product
-    v1 = w[i, j]*w[j, i]
+    v1 = w[i, j] * w[j, i]
     G = build_graph([v1])
     v2 = rebuild_expression_from_graph(G)
-    ve = (w[0, 0]*w[0, 0] + w[0, 1]*w[1, 0]) \
-       + (w[1, 0]*w[0, 1] + w[1, 1]*w[1, 1])
+    ve = (w[0, 0] * w[0, 0] + w[0, 1] * w[1, 0]) \
+        + (w[1, 0] * w[0, 1] + w[1, 1] * w[1, 1])
     assert ve == v2
 
-    v1 = w[i, j]*wb[j, i]
+    v1 = w[i, j] * wb[j, i]
     G = build_graph([v1])
     v2 = rebuild_expression_from_graph(G)
-    ve = (w[0, 0]*wb[0, 0] + w[1, 0]*wb[0, 1]) \
-       + (w[0, 1]*wb[1, 0] + w[1, 1]*wb[1, 1])
+    ve = (w[0, 0] * wb[0, 0] + w[1, 0] * wb[0, 1]) \
+        + (w[0, 1] * wb[1, 0] + w[1, 1] * wb[1, 1])
     assert ve == v2
 
     # Vector/scalar division
-    v1 = v/u
+    v1 = v / u
     G = build_graph([v1])
     v2 = rebuild_expression_from_graph(G)
-    ve = as_vector((v[0]/u, v[1]/u))
+    ve = as_vector((v[0] / u, v[1] / u))
     assert ve == v2
 
     # Tensor/scalar division
-    v1 = w/u
+    v1 = w / u
     G = build_graph([v1])
     v2 = rebuild_expression_from_graph(G)
-    ve = as_vector((w[0, 0]/u, w[0, 1]/u, w[1, 0]/u, w[1, 1]/u))
+    ve = as_vector((w[0, 0] / u, w[0, 1] / u, w[1, 0] / u, w[1, 1] / u))
     assert ve == v2
 
     # FIXME: Write more tests to discover bugs in ReconstructScalarSubexpressions.element_wise*
 
-    #assert False
+    # assert False
 
 # Compounds not implemented, not expecting to do this anytime soon
+
+
 def xtest_rebuild_expression_from_graph_on_compounds():
     U = FiniteElement("CG", triangle, 1)
     V = VectorElement("CG", triangle, 1)
@@ -309,15 +315,16 @@ def xtest_rebuild_expression_from_graph_on_compounds():
     G = build_graph([v1])
     v2 = rebuild_expression_from_graph(G)
 
-    v1 = outer(v, v)[i, j]*outer(v, v)[j, i]
+    v1 = outer(v, v)[i, j] * outer(v, v)[j, i]
     G = build_graph([v1])
     v2 = rebuild_expression_from_graph(G)
-    #print v1
-    #print v2
+    # print v1
+    # print v2
     # FIXME: Assert something
 
+
 def test_flattening_of_tensor_valued_expression_symbols():
-    #from uflacs.analysis.graph import foo
+    # from ffc.uflacs.analysis.graph import foo
     def flatten_expression_symbols(v, vsyms, opsyms):
         sh = v.ufl_shape
         if sh == ():
@@ -331,7 +338,7 @@ def test_flattening_of_tensor_valued_expression_symbols():
             res = []
             if isinstance(v, ufl.classes.Sum):
                 for i in range(len(vsyms)):
-                    u = None # sum of component i for syms in opsyms
+                    u = None  # sum of component i for syms in opsyms
                     res += (u, vsyms[i], tuple(syms[i] for syms in opsyms))
         return res
 
diff --git a/test/uflacs/unit/test_snippets.py b/test/uflacs/unit/test_snippets.py
index d16f5d4..6859310 100644
--- a/test/uflacs/unit/test_snippets.py
+++ b/test/uflacs/unit/test_snippets.py
@@ -1,6 +1,8 @@
+# -*- coding: utf-8 -*-
+
+from ffc.uflacs.language.format_value import format_float, set_float_precision, reset_float_precision
+from ffc.uflacs.language.format_lines import iter_indented_lines, Indented, format_indented_lines
 
-from uflacs.language.format_value import format_float, set_float_precision, reset_float_precision
-from uflacs.language.format_lines import iter_indented_lines, Indented, format_indented_lines
 
 def test_format_float():
     reset_float_precision()
@@ -17,10 +19,11 @@ def test_format_float():
     set_float_precision(15, 1e-15)
     assert format_float(0.0) == "0.0"
     assert format_float(1.0) == "1.0"
-    assert format_float(12.) == "12.0" # 1.2e+01
+    assert format_float(12.) == "12.0"  # 1.2e+01
 
     reset_float_precision()
 
+
 def test_iter_indented_lines():
     assert list(iter_indented_lines("single line")) == ["single line"]
     assert list(iter_indented_lines(["word"])) == ["word"]
@@ -32,6 +35,7 @@ def test_iter_indented_lines():
     assert list(iter_indented_lines([Indented("line one"), ["line two"]])) == ["    line one", "line two"]
     assert list(iter_indented_lines([[Indented("line one"), "line two"]])) == ["    line one", "line two"]
 
+
 def test_format_indented_lines_example():
     forheader = "for (int i=begin; i!=end; ++i)"
     code = ["{",
diff --git a/test/uflacs/unit/test_ssa_manipulations.py b/test/uflacs/unit/test_ssa_manipulations.py
index 9e8d594..e33d23c 100644
--- a/test/uflacs/unit/test_ssa_manipulations.py
+++ b/test/uflacs/unit/test_ssa_manipulations.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 -*-
 """
 Tests of manipulations of the ssa form of expressions.
 """
@@ -10,28 +10,29 @@ from ufl import *
 from ufl import product
 from ufl.permutation import compute_indices
 
-from uflacs.analysis.graph import build_graph
+from ffc.uflacs.analysis.graph import build_graph
 
 # Tests need this but it has been removed. Rewrite tests!
-#from uflacs.analysis.graph_rebuild import rebuild_scalar_e2i
+# from ffc.uflacs.analysis.graph_rebuild import rebuild_scalar_e2i
 
-#from uflacs.analysis.graph_rebuild import rebuild_expression_from_graph
+# from ffc.uflacs.analysis.graph_rebuild import rebuild_expression_from_graph
 
-#from uflacs.analysis.indexing import (map_indexed_arg_components,
+# from ffc.uflacs.analysis.indexing import (map_indexed_arg_components,
 #                                        map_indexed_arg_components2,
 #                                        map_component_tensor_arg_components)
-#from uflacs.analysis.graph_symbols import (map_list_tensor_symbols,
+# from ffc.uflacs.analysis.graph_symbols import (map_list_tensor_symbols,
 #                                             map_transposed_symbols, get_node_symbols)
-#from uflacs.analysis.graph_dependencies import (compute_dependencies,
+# from ffc.uflacs.analysis.graph_dependencies import (compute_dependencies,
 #                                                mark_active,
 #                                                mark_image)
-#from uflacs.analysis.graph_ssa import (mark_partitions,
+# from ffc.uflacs.analysis.graph_ssa import (mark_partitions,
 #                                       compute_dependency_count,
 #                                       invert_dependencies,
 #                                       default_cache_score_policy,
 #                                       compute_cache_scores,
 #                                       allocate_registers)
 
+
 def xtest_dependency_construction():
     cell = triangle
     d = cell.geometric_dimension()
@@ -56,8 +57,8 @@ def xtest_dependency_construction():
                    v[0],
                    v[1],
                    w[0, 1],
-                   w[0, 0]+w[1, 1],
-                   (2*v+w[1,:])[i]*v[i],
+                   w[0, 0] + w[1, 1],
+                   (2 * v + w[1, :])[i] * v[i],
                    ]
 
     for expr in expressions:
@@ -70,7 +71,7 @@ def xtest_dependency_construction():
         dependencies = compute_dependencies(e2i, V)
 
         max_symbol = len(V)
-        targets = (max_symbol-1,)
+        targets = (max_symbol - 1,)
         active, num_active = mark_active(max_symbol, dependencies, targets)
 
         partitions = mark_partitions(V, active, dependencies, {})
diff --git a/test/uflacs/unit/test_table_utils.py b/test/uflacs/unit/test_table_utils.py
index e3c11ce..4746791 100644
--- a/test/uflacs/unit/test_table_utils.py
+++ b/test/uflacs/unit/test_table_utils.py
@@ -1,13 +1,14 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 -*-
 """
 Tests of table manipulation utilities.
 """
 
 from __future__ import print_function
 
+from ufl import triangle
 from six import itervalues, iteritems
 from six.moves import xrange as range
-from uflacs.elementtables.table_utils import equal_tables, strip_table_zeros, build_unique_tables, get_ffc_table_values
+from ffc.uflacs.elementtables.table_utils import equal_tables, strip_table_zeros, build_unique_tables, get_ffc_table_values
 
 import numpy as np
 default_tolerance = 1e-14
@@ -126,9 +127,9 @@ def test_unique_tables_some_equal():
         np.zeros((2,)),
         np.ones((2,)),
         np.zeros((3,)),
-        #np.ones((2,)),
+        # np.ones((2,)),
         np.ones((2,))*2,
-        #np.ones((2,))*2,
+        # np.ones((2,))*2,
         ]
     expected_mapping = dict((i, v) for i, v in enumerate([0, 1, 2, 1, 3, 3]))
     assert mapping == expected_mapping
@@ -175,56 +176,69 @@ def test_unique_tables_string_keys():
         np.zeros((2,)),
         np.ones((2,)),
         np.zeros((3,)),
-        #np.ones((2,)),
+        # np.ones((2,)),
         np.ones((2,))*2,
-        #np.ones((2,))*2,
+        # np.ones((2,))*2,
         ]
-    expected_mapping = { 'a':0, 'b':1, 'c':2, 'd':1, 'e':3, 'f':3 }
+    expected_mapping = { 'a': 0, 'b': 1, 'c': 2, 'd': 1, 'e': 3, 'f': 3 }
     assert mapping == expected_mapping
     assert len(set(itervalues(mapping))) == len(unique)
     for i, t in iteritems(tables):
         assert equal_tables(t, unique[mapping[i]], default_tolerance)
 
-def test_get_ffc_table_values_scalar_cell():
+
+def xtest_get_ffc_table_values_scalar_cell():
+    cell = triangle
+    integral_type = "cell"
     entitytype = "cell"
     class MockElement:
         def value_shape(self): return ()
     element = MockElement()
     component = ()
 
+    avg = None
+    entity = 0
     for num_points in (1, 3):
         for num_dofs in (1, 5):
             arr = np.ones((num_dofs, num_points))
-            for derivatives in [(), (0,)]:
+            for derivatives in [(0, 0), (1, 1)]:
                 # Mocking table built by ffc
                 ffc_tables = {
                     num_points: {
                         element: {
-                            None: { # avg
-                                None: { # entityid
+                            avg: { # avg
+                                entity: { # entityid
                                     derivatives: arr
                                 }
                             }
                         }
                     }
                 }
-                table = get_ffc_table_values(ffc_tables, entitytype, num_points, element, component, derivatives, default_tolerance)
+                table = get_ffc_table_values(ffc_tables,
+                    cell, integral_type,
+                    num_points, element, avg,
+                    entitytype, derivatives, component, 
+                    default_tolerance)
                 assert equal_tables(table[0, ...], np.transpose(arr), default_tolerance)
 
-def test_get_ffc_table_values_vector_facet():
+
+def xtest_get_ffc_table_values_vector_facet():
+    cell = triangle
+    integral_type = "exterior_facet"
     entitytype = "facet"
-    num_facets = 3
+    num_entities = 3
     class MockElement:
         def value_shape(self): return (2,)
     element = MockElement()
     num_components = 2
 
+    avg = None
     for num_points in (1, 5):
         for num_dofs in (4, 7):
             # Make ones array of the right shape (all dimensions differ to detect algorithm bugs better)
             arr1 = np.ones((num_dofs, num_components, num_points))
             arrays = []
-            for i in range(num_facets):
+            for i in range(num_entities):
                 arr = (i+1.0)*arr1 # Make first digit the facet number (1,2,3)
                 for j in range(num_components):
                     arr[:, j,:] += 0.1*(j+1.0) # Make first decimal the component number (1,2)
@@ -239,18 +253,20 @@ def test_get_ffc_table_values_vector_facet():
                 ffc_tables = {
                     num_points: {
                         element: {
-                            None: { # avg
-                                # entityid:
-                                0: { derivatives: arrays[0] },
-                                1: { derivatives: arrays[1] },
-                                2: { derivatives: arrays[2] },
+                            avg: {
+                                entity: { derivatives: arrays[entity] }
+                                for entity in range(num_entities)
                             }
                         }
                     }
                 }
                 # Tables use flattened component, so we can loop over them as integers:
                 for component in range(num_components):
-                    table = get_ffc_table_values(ffc_tables, entitytype, num_points, element, component, derivatives, default_tolerance)
-                    for i in range(num_facets):
-                        #print table[i,...]
+                    table = get_ffc_table_values(ffc_tables,
+                        cell, integral_type,
+                        num_points, element, avg,
+                        entitytype, derivatives, component,
+                        default_tolerance)
+                    for i in range(num_entities):
+                        # print table[i,...]
                         assert equal_tables(table[i, ...], np.transpose(arrays[i][:, component,:]), default_tolerance)
diff --git a/test/uflacs/unit/test_ufc_backend.py b/test/uflacs/unit/test_ufc_backend.py
index 102700d..dc4da82 100644
--- a/test/uflacs/unit/test_ufc_backend.py
+++ b/test/uflacs/unit/test_ufc_backend.py
@@ -1,9 +1,10 @@
+# -*- coding: utf-8 -*-
 
 import numpy
 
-from uflacs.backends.ufc.generators import *
+from ffc.uflacs.backends.ufc.generators import *
 
-import uflacs.language.cnodes as L
+import ffc.uflacs.language.cnodes as L
 
 
 # TODO: Make this a feature of dijitso: dijitso show-function modulehash functionname
@@ -13,7 +14,7 @@ def extract_function(name, code):
     begin = None
     body = None
     for i in range(n):
-        if (name+"(") in lines[i]:
+        if (name + "(") in lines[i]:
             for j in range(i, n):
                 if lines[j] == "{":
                     begin = i
@@ -25,7 +26,7 @@ def extract_function(name, code):
     end = n
     for i in range(body, n):
         if lines[i] == "}":
-            end = i+1
+            end = i + 1
             break
     sublines = lines[begin:end]
     return '\n'.join(sublines)
@@ -40,7 +41,8 @@ def basic_class_properties(classname):
         "destructor": "",
         "members": "",
         "preamble": "",
-        }
+        "jit": False,
+    }
     return ir
 
 
@@ -53,7 +55,7 @@ def mock_form_ir():
         "rank": 2,
         "num_coefficients": 3,
         "original_coefficient_position": [0, 2],
-        })
+    })
 
     ir.update({
         "create_coordinate_finite_element": ["mock_coordinate_finite_element_classname"],
@@ -61,69 +63,86 @@ def mock_form_ir():
         "create_coordinate_mapping": ["mock_coordinate_mapping_classname"],
         "create_finite_element": ["mock_finite_element_classname_%d" % (i,) for i in range(ir["num_coefficients"])],
         "create_dofmap": ["mock_dofmap_classname_%d" % (i,) for i in range(ir["num_coefficients"])],
-        })
+    })
 
     # These are the method names in ufc::form that are specialized for each integral type
     template = "max_%s_subdomain_id"
     for i, integral_type in enumerate(ufc_integral_types):
         key = template % integral_type
-        ir[key] = i # just faking some integers
+        ir[key] = i  # just faking some integers
 
     template = "has_%s_integrals"
     for i, integral_type in enumerate(ufc_integral_types):
         key = template % integral_type
-        ir[key] = (i % 2 == 0) # faking some bools
+        ir[key] = (i % 2 == 0)  # faking some bools
 
     template = "create_%s_integral"
     for i, integral_type in enumerate(ufc_integral_types):
         key = template % integral_type
-        ir[key] = [key.replace("create_", "") + str(j) for j in range(i)] # faking list of classnames
+        ir[key] = [key.replace("create_", "") + str(j) for j in range(i)]  # faking list of classnames
 
     template = "create_default_%s_integral"
     for i, integral_type in enumerate(ufc_integral_types):
         key = template % integral_type
-        ir[key] = key.replace("create_", "") # faking classname
+        ir[key] = key.replace("create_", "")  # faking classname
 
     return ir
 
+
 def mock_dofmap_ir():
     ir = basic_class_properties("mock_dofmap_classname")
+    num_dofs_per_entity = [1, 1, 1]
+    entity_dofs = [[(0,), (1,), (2,)],
+                   [(3,), (4,), (5,)],
+                   [(6,)]]
+    entity_closure_dofs = {
+        (0, 0): [0],
+        (0, 1): [1],
+        (0, 2): [2],
+        (1, 0): [0, 1],
+        (1, 1): [0, 2],
+        (1, 2): [1, 2],
+        (2, 0): [0, 1, 2],
+        }
     ir.update({
         "signature": "mock element signature",
         "geometric_dimension": 3,
         "topological_dimension": 2,
-        "global_dimension": ([3,2,1], 4),
-        "tabulate_dofs": ([[[(0,),(1,),(2,)],[(3,),(4,),(5,)],[(6,)]], None], [7, 1], True, [False, True]),
-        "tabulate_facet_dofs": [[0,1,2], [1,2,3], [0,2,3]],
-        "tabulate_entity_dofs": ([[(0,),(1,),(2,)],[(3,),(4,),(5,)],[(6,)]], [1,1,1]),
+        "global_dimension": ([3, 2, 1], 4),
+        "tabulate_dofs": ([[[(0,), (1,), (2,)], [(3,), (4,), (5,)], [(6,)]], None], [7, 1], True, [False, True]),
+        "tabulate_facet_dofs": [[0, 1, 2], [1, 2, 3], [0, 2, 3]],
+        "tabulate_entity_dofs": (entity_dofs, num_dofs_per_entity),
+        "tabulate_entity_closure_dofs": (entity_closure_dofs, entity_dofs, num_dofs_per_entity),
         "needs_mesh_entities": [True, False, True],
         "num_element_dofs": 7,
-        "num_entity_dofs": [3,0,1],
+        "num_entity_dofs": num_dofs_per_entity,
+        "num_entity_closure_dofs": [3, 6, 10],
         "num_facet_dofs": 7,
         "num_sub_dofmaps": 3,
         "create_sub_dofmap": ["mock_dofmap_classname_sub_%d" % (i,) for i in range(3)],
-        })
+    })
     return ir
 
+
 def mock_evaluate_basis_ir():
     dofs_data = [
         {
-        "embedded_degree": 5,
-        "num_components": 2,
-        "num_expansion_members": 7,
-        "coeffs": [list(range(20, 27)), list(range(30,37))],
-        "reference_offset": 3,
-        "physical_offset": 5,
+            "embedded_degree": 5,
+            "num_components": 2,
+            "num_expansion_members": 7,
+            "coeffs": [list(range(20, 27)), list(range(30, 37))],
+            "reference_offset": 3,
+            "physical_offset": 5,
         },
         {
-        "embedded_degree": 1,
-        "num_components": 2,
-        "num_expansion_members": 7,
-        "coeffs": [list(range(7)), list(range(10,17))],
-        "reference_offset": 3,
-        "physical_offset": 5,
+            "embedded_degree": 1,
+            "num_components": 2,
+            "num_expansion_members": 7,
+            "coeffs": [list(range(7)), list(range(10, 17))],
+            "reference_offset": 3,
+            "physical_offset": 5,
         }
-        ]
+    ]
     data = {
         "cellname": "triangle",
         "geometric_dimension": 3,
@@ -131,16 +150,18 @@ def mock_evaluate_basis_ir():
         "reference_value_size": 3,
         "physical_value_size": 2,
         "dofs_data": dofs_data,
-        }
+    }
     return data
 
+
 def test_mock_evaluate_basis():
-    from uflacs.backends.ufc.evaluatebasis import generate_evaluate_reference_basis
-    import uflacs.language.cnodes as L
+    from ffc.uflacs.backends.ufc.evaluatebasis import generate_evaluate_reference_basis
+    import ffc.uflacs.language.cnodes as L
     data = mock_evaluate_basis_ir()
     code = generate_evaluate_reference_basis(L, data)
     print(code)
 
+
 def mock_finite_element_ir():
     ir = basic_class_properties("mock_finite_element_classname")
     ebir = mock_evaluate_basis_ir()
@@ -151,10 +172,10 @@ def mock_finite_element_ir():
         "topological_dimension": 2,
         "degree": 2,
         "family": "Lagrange",
-        "value_dimension": (3,3),
-        "reference_value_dimension": (2,2),
+        "value_dimension": (3, 3),
+        "reference_value_dimension": (2, 2),
         "space_dimension": 6,
-        "tabulate_dof_coordinates": { "gdim": 3, "tdim": 2, "points": [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0)] },
+        "tabulate_dof_coordinates": {"gdim": 3, "tdim": 2, "points": [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0)]},
         "evaluate_basis": ebir,
         "evaluate_basis_derivatives": ebir,
         "evaluate_basis_all": ebir,
@@ -164,9 +185,10 @@ def mock_finite_element_ir():
         "interpolate_vertex_values": "fixme",
         "num_sub_elements": 3,
         "create_sub_element": ["mock_finite_element_classname_sub_%d" % (i,) for i in range(3)],
-        })
+    })
     return ir
 
+
 def mock_integral_ir():
     ir = basic_class_properties("mock_integral_classname")
     ir.update({
@@ -174,9 +196,10 @@ def mock_integral_ir():
         "enabled_coefficients": [True, False, True],
         "tabulate_tensor": "    mock_body_of_tabulate_tensor();",
         "num_cells": 1,
-        })
+    })
     return ir
 
+
 def mock_coordinate_mapping_ir():
     ir = basic_class_properties("mock_coordinate_mapping_classname")
     ir.update({
@@ -191,39 +214,42 @@ def mock_coordinate_mapping_ir():
         "num_scalar_coordinate_element_dofs": 5,
         "tables": {"x0": numpy.ones((5,)),
                    "xm": numpy.ones((5,)),
-                   "J0": numpy.ones((2,5)),
-                   "Jm": numpy.ones((2,5)),}
-        })
+                   "J0": numpy.ones((2, 5)),
+                   "Jm": numpy.ones((2, 5)), }
+    })
     return ir
 
 
-
-
 def compile_mock_coordinate_mapping():
     ir = mock_coordinate_mapping_ir()
     gen = ufc_coordinate_mapping()
     return gen.generate(L, ir)
 
+
 def compile_mock_form():
     ir = mock_form_ir()
     gen = ufc_form()
     return gen.generate(L, ir)
 
+
 def compile_mock_dofmap():
     ir = mock_dofmap_ir()
     gen = ufc_dofmap()
     return gen.generate(L, ir)
 
+
 def compile_mock_finite_element():
     ir = mock_finite_element_ir()
     gen = ufc_finite_element()
     return gen.generate(L, ir)
 
+
 def compile_mock_integral(integral_type):
     ir = mock_integral_ir()
     gen = eval("ufc_%s_integral" % integral_type)()
     return gen.generate(L, ir)
 
+
 def compile_mock_all():
     mocks = [compile_mock_integral(integral_type) for integral_type in ufc_integral_types]
     mocks += [compile_mock_form(), compile_mock_dofmap(), compile_mock_finite_element()]
@@ -235,32 +261,38 @@ def test_mock_coordinate_mapping():
     print(h)
     print(cpp)
 
+
 def test_mock_form():
     h, cpp = compile_mock_form()
     print(h)
     print(cpp)
 
+
 def test_mock_dofmap():
     h, cpp = compile_mock_dofmap()
     print(h)
     print(cpp)
 
+
 def test_mock_finite_element():
     h, cpp = compile_mock_finite_element()
     print(h)
     print(cpp)
 
+
 def test_mock_integral():
     for integral_type in ufc_integral_types:
         h, cpp = compile_mock_integral(integral_type)
         print(h)
         print(cpp)
 
+
 def test_foo_integral_properties():
     ir = mock_form_ir()
     assert "cell_integral" in ufc_form.create_cell_integral.__doc__
     assert "return" in str(ufc_form().create_cell_integral(L, ir))
 
+
 def test_mock_extract_function():
     h, cpp = compile_mock_coordinate_mapping()
     name = "compute_reference_coordinates"
@@ -269,11 +301,12 @@ def test_mock_extract_function():
     print(extract_function(name, cpp))
     print("/// end")
 
+
 def test_debug_by_printing_extracted_function():
     h, cpp = compile_mock_coordinate_mapping()
-    #name = "compute_reference_coordinates"
-    #name = "compute_physical_coordinates"
-    #name = "compute_jacobians"
+    # name = "compute_reference_coordinates"
+    # name = "compute_physical_coordinates"
+    # name = "compute_jacobians"
     name = "compute_jacobian_inverses"
     print("/// Extracted", name, ":")
     print("/// begin")
diff --git a/test/uflacs/unit/test_ufl_to_cnodes.py b/test/uflacs/unit/test_ufl_to_cnodes.py
index 9b6e8f4..e91edf2 100644
--- a/test/uflacs/unit/test_ufl_to_cnodes.py
+++ b/test/uflacs/unit/test_ufl_to_cnodes.py
@@ -1,15 +1,17 @@
+# -*- coding: utf-8 -*-
 
 import ufl
 from ufl import *
 from ufl import as_ufl
 
-import uflacs.language
-from uflacs.language.ufl_to_cnodes import UFL2CNodesTranslator
+import ffc.uflacs.language
+from ffc.uflacs.language.ufl_to_cnodes import UFL2CNodesTranslatorCpp, UFL2CNodesTranslatorC
+
 
 def test_ufl_to_cnodes():
 
-    L = uflacs.language.cnodes
-    translate = UFL2CNodesTranslator(L)
+    L = ffc.uflacs.language.cnodes
+    translate = UFL2CNodesTranslatorCpp(L)
 
     f = ufl.CellVolume(ufl.triangle)
     g = ufl.CellVolume(ufl.triangle)
@@ -20,11 +22,11 @@ def test_ufl_to_cnodes():
     z = L.Symbol("z")
 
     examples = [
-        (f+g, (x,y), "x + y"),
-        #(f-g, (x,y), "x - y"), # - is not currently a UFL operator
-        (f*g, (x,y), "x * y"),
-        (f/g, (x,y), "x / y"),
-        (f**g, (x,y), "std::pow(x, y)"),
+        (f + g, (x, y), "x + y"),
+        # (f-g, (x,y), "x - y"), # - is not currently a UFL operator
+        (f * g, (x, y), "x * y"),
+        (f / g, (x, y), "x / y"),
+        (f**g, (x, y), "std::pow(x, y)"),
         (exp(f), (x,), "std::exp(x)"),
         (ln(f), (x,), "std::log(x)"),
         (abs(f), (x,), "std::abs(x)"),
@@ -41,22 +43,38 @@ def test_ufl_to_cnodes():
         (tanh(f), (x,), "std::tanh(x)"),
         (erf(f), (x,), "std::erf(x)"),
         #(erfc(f), (x,), "std::erfc(x)"),
-        (min_value(f,g), (x,y), "std::min(x, y)"),
-        (max_value(f,g), (x,y), "std::max(x, y)"),
-        (bessel_I(1, g), (x,y), "boost::math::cyl_bessel_i(x, y)"),
-        (bessel_J(1, g), (x,y), "boost::math::cyl_bessel_j(x, y)"),
-        (bessel_K(1, g), (x,y), "boost::math::cyl_bessel_k(x, y)"),
-        (bessel_Y(1, g), (x,y), "boost::math::cyl_neumann(x, y)"),
-        (f < g, (x,y), "x < y"),
-        (f > g, (x,y), "x > y"),
-        (f <= g, (x,y), "x <= y"),
-        (f >= g, (x,y), "x >= y"),
-        (eq(f, g), (x,y), "x == y"),
-        (ne(f, g), (x,y), "x != y"),
-        (And(f<g, f>g), (x,y), "x && y"),
-        (Or(f<g, f>g), (x,y), "x || y"),
-        (Not(f<g), (x,), "!x"),
-        (conditional(f<g,g,h), (x,y,z), "x ? y : z"),
+        (min_value(f, g), (x, y), "std::min(x, y)"),
+        (max_value(f, g), (x, y), "std::max(x, y)"),
+        (bessel_I(1, g), (x, y), "boost::math::cyl_bessel_i(x, y)"),
+        (bessel_J(1, g), (x, y), "boost::math::cyl_bessel_j(x, y)"),
+        (bessel_K(1, g), (x, y), "boost::math::cyl_bessel_k(x, y)"),
+        (bessel_Y(1, g), (x, y), "boost::math::cyl_neumann(x, y)"),
+        (f < g, (x, y), "x < y"),
+        (f > g, (x, y), "x > y"),
+        (f <= g, (x, y), "x <= y"),
+        (f >= g, (x, y), "x >= y"),
+        (eq(f, g), (x, y), "x == y"),
+        (ne(f, g), (x, y), "x != y"),
+        (And(f < g, f > g), (x, y), "x && y"),
+        (Or(f < g, f > g), (x, y), "x || y"),
+        (Not(f < g), (x,), "!x"),
+        (conditional(f < g, g, h), (x, y, z), "x ? y : z"),
+    ]
+    for expr, args, code in examples:
+        # Warning: This test is subtle: translate will look at the type of expr and
+        #  ignore its operands, i.e. not translating the full tree but only one level.
+        assert str(translate(expr, *args)) == code
+
+
+    # C specific translation:
+    translate = UFL2CNodesTranslatorC(L)
+    examples = [
+        (sin(f), (x,), "sin(x)"),
+        (f**g, (x, y), "pow(x, y)"),
+        (exp(f), (x,), "exp(x)"),
+        (abs(f), (x,), "fabs(x)"),
+        (min_value(f, g), (x, y), "fmin(x, y)"),
+        (max_value(f, g), (x, y), "fmax(x, y)"),
         ]
     for expr, args, code in examples:
         # Warning: This test is subtle: translate will look at the type of expr and
diff --git a/test/uflacs/unit/test_valuenumbering.py b/test/uflacs/unit/test_valuenumbering.py
new file mode 100644
index 0000000..91d84fe
--- /dev/null
+++ b/test/uflacs/unit/test_valuenumbering.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+
+import pytest
+from ufl import *
+from ffc.uflacs.analysis.indexing import map_indexed_arg_components
+from ffc.uflacs.analysis.indexing import map_component_tensor_arg_components
+
+def test_map_index_arg_components():
+    x = SpatialCoordinate(triangle)
+    i, j, k = indices(3)
+
+    # Tensors of rank 1, 2, 3
+    A1 = x
+    A2 = outer(x, x)
+    A3 = outer(A2, x)
+
+    indexed = A1[i]
+    assert map_indexed_arg_components(indexed) == [0, 1]
+
+    # Rank 2
+    indexed = A2[i, j]
+    assert map_indexed_arg_components(indexed) == [0, 1, 2, 3]
+    indexed = A2[j, i]
+    assert map_indexed_arg_components(indexed) == [0, 2, 1, 3]
+
+    # Rank 3
+    indexed = A3[i, j, k]
+    assert map_indexed_arg_components(indexed) == [0, 1, 2, 3,
+                                                   4, 5, 6, 7]
+    indexed = A3[j, i, k]
+    assert map_indexed_arg_components(indexed) == [0, 1, 4, 5,
+                                                   2, 3, 6, 7]
+    indexed = A3[i, k, j]
+    assert map_indexed_arg_components(indexed) == [0, 2, 1, 3,
+                                                   4, 6, 5, 7]
+    indexed = A3[j, k, i]
+    assert map_indexed_arg_components(indexed) == [0, 2, 4, 6,
+                                                   1, 3, 5, 7]
+    indexed = A3[k, i, j]
+    assert map_indexed_arg_components(indexed) == [0, 4, 1, 5,
+                                                   2, 6, 3, 7]
+    indexed = A3[k, j, i]
+    assert map_indexed_arg_components(indexed) == [0, 4, 2, 6,
+                                                   1, 5, 3, 7]
+    # Explanation:
+    assert [ii*4+jj*2+kk  # strides are determined by relative position of i,j,k
+            for kk in range(2)  # loop order matches indexing order
+            for jj in range(2)  # loop range matches index dimensions
+            for ii in range(2)] == [0, 4, 2, 6, 1, 5, 3, 7]
+
+
+def test_map_component_tensor_arg_components():
+    x = SpatialCoordinate(triangle)
+    i, j, k = indices(3)
+
+    # Tensors of rank 1, 2, 3
+    A1 = x
+    A2 = outer(x, x)
+    A3 = outer(A2, x)
+    Aij = A2[i,j]
+
+    # Rank 1
+    assert map_component_tensor_arg_components((x[i]*2.0)^(i,)) == [0,1]
+
+    # Rank 2
+    assert map_component_tensor_arg_components((A2[i,j]*2.0)^(i,j)) == [0,1,2,3]
+    assert map_component_tensor_arg_components((A2[i,j]*2.0)^(j,i)) == [0,2,1,3]
+    assert map_component_tensor_arg_components(A2[i,j]^(j,i)) == [0,2,1,3]
+
+    # Rank 3
+    assert map_component_tensor_arg_components((A3[i,j,k]*2.0)^(i,j,k)) == [0,1,2,3,4,5,6,7]
+    assert map_component_tensor_arg_components((A3[i,j,k]*2.0)^(i,k,j)) == [0,2,1,3,4,6,5,7]
+    assert map_component_tensor_arg_components((A3[i,j,k]*2.0)^(k,i,j)) == [0,2,4,6,1,3,5,7]
+
+    # Explanation:
+    assert [ii*4+jj*2+kk  # strides are determined by relative order of i,j,k in indexed expr
+            for kk in range(2)  # loop order matches un-indexing order in component tensor
+            for ii in range(2)  # loop range matches index dimensions
+            for jj in range(2)] == [0,2,4,6,1,3,5,7]
diff --git a/test/uflacs/unit/xtest_latex_formatting.py b/test/uflacs/unit/xtest_latex_formatting.py
index 6c0c6bb..7210665 100644
--- a/test/uflacs/unit/xtest_latex_formatting.py
+++ b/test/uflacs/unit/xtest_latex_formatting.py
@@ -1,21 +1,22 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 -*-
 """
 Tests of LaTeX formatting rules.
 """
 
 from ffc.log import ffc_assert, info, warning, error
-from uflacs.codeutils.expr_formatter import ExprFormatter
-from uflacs.codeutils.latex_expr_formatting_rules import LatexFormatter
+from ffc.uflacs.codeutils.expr_formatter import ExprFormatter
+from ffc.uflacs.codeutils.latex_expr_formatting_rules import LatexFormatter
 
 import ufl
 from ufl.algorithms import preprocess_expression, expand_indices
 
+
 def expr2latex(expr, variables=None):
     "This is a test specific function for formatting ufl to LaTeX."
 
-    # Preprocessing expression before applying formatting.
-    # In a compiler, one should probably assume that these
-    # have been applied and use ExprFormatter directly.
+    # Preprocessing expression before applying formatting.  In a
+    # compiler, one should probably assume that these have been
+    # applied and use ExprFormatter directly.
     expr_data = preprocess_expression(expr)
     expr = expand_indices(expr_data.preprocessed_expr)
 
@@ -23,16 +24,19 @@ def expr2latex(expr, variables=None):
     # formatting rules for generic LaTeX formatting
     latex_formatter = LatexFormatter()
 
-    # This final formatter implements a generic framework handling indices etc etc.
+    # This final formatter implements a generic framework handling
+    # indices etc etc.
     variables = variables or {}
     expr_formatter = ExprFormatter(latex_formatter, variables)
     code = expr_formatter.visit(expr)
     return code
 
+
 def assertLatexEqual(self, expr, code, variables=None):
     r = expr2latex(expr, variables)
     self.assertEqual(code, r)
 
+
 def test_latex_formatting_of_literals():
     # Test literals
     assert expr2latex(ufl.as_ufl(2)) == "2"
@@ -47,6 +51,7 @@ def test_latex_formatting_of_literals():
     assert expr2latex(ufl.PermutationSymbol(3)[2, 1, 3]) == "-1"
     assert expr2latex(ufl.PermutationSymbol(3)[1, 1, 3]) == "0"
 
+
 def test_latex_formatting_of_geometry():
     # Test geometry quantities
     x = ufl.SpatialCoordinate(ufl.interval)[0]
@@ -62,6 +67,7 @@ def test_latex_formatting_of_geometry():
     Kr = ufl.Circumradius(ufl.triangle)
     assert expr2latex(Kr) == r"K_{\text{rad}}"
 
+
 def test_latex_formatting_of_form_arguments():
     # Test form arguments (faked for testing!)
     U = ufl.FiniteElement("CG", ufl.triangle, 1)
@@ -74,27 +80,29 @@ def test_latex_formatting_of_form_arguments():
     assert expr2latex(f) == r"\overset{0}{w}"
 
     f = ufl.Coefficient(V, count=1)
-    assert expr2latex(f[0]) == r"\overset{1}{w}_{0}" # NOT renumbered to 0...
+    assert expr2latex(f[0]) == r"\overset{1}{w}_{0}"  # NOT renumbered to 0...
     v = ufl.Argument(V, number=3)
-    assert expr2latex(v[1]) == r"\overset{3}{v}_{1}" # NOT renumbered to 0...
+    assert expr2latex(v[1]) == r"\overset{3}{v}_{1}"  # NOT renumbered to 0...
 
     f = ufl.Coefficient(W, count=2)
-    assert expr2latex(f[1, 0]) == r"\overset{2}{w}_{1 0}" # NOT renumbered to 0...
+    assert expr2latex(f[1, 0]) == r"\overset{2}{w}_{1 0}"  # NOT renumbered to 0...
     v = ufl.Argument(W, number=3)
-    assert expr2latex(v[0, 1]) == r"\overset{3}{v}_{0 1}" # NOT renumbered to 0...
+    assert expr2latex(v[0, 1]) == r"\overset{3}{v}_{0 1}"  # NOT renumbered to 0...
 
     # TODO: Test mixed functions
     # TODO: Test tensor functions with symmetries
 
+
 def test_latex_formatting_of_arithmetic():
     x = ufl.SpatialCoordinate(ufl.triangle)[0]
     assert expr2latex(x + 3) == "3 + x_0"
     assert expr2latex(x * 2) == "2 x_0"
     assert expr2latex(x / 2) == r"\frac{x_0}{2}"
-    assert expr2latex(x*x) == r"{x_0}^{2}" # TODO: Will gcc optimize this to x*x for us?
+    assert expr2latex(x * x) == r"{x_0}^{2}"  # TODO: Will gcc optimize this to x*x for us?
     assert expr2latex(x**3) == r"{x_0}^{3}"
     # TODO: Test all basic operators
 
+
 def test_latex_formatting_of_cmath():
     x = ufl.SpatialCoordinate(ufl.triangle)[0]
     assert expr2latex(ufl.exp(x)) == r"e^{x_0}"
@@ -108,6 +116,7 @@ def test_latex_formatting_of_cmath():
     assert expr2latex(ufl.acos(x)) == r"\arccos(x_0)"
     assert expr2latex(ufl.atan(x)) == r"\arctan(x_0)"
 
+
 def test_latex_formatting_of_derivatives():
     xx = ufl.SpatialCoordinate(ufl.triangle)
     x = xx[0]
@@ -127,14 +136,16 @@ def test_latex_formatting_of_derivatives():
     # TODO: Test more derivatives
     # TODO: Test variable derivatives using diff
 
+
 def xtest_latex_formatting_of_conditionals():
     # Test conditional expressions
     assert expr2latex(ufl.conditional(ufl.lt(x, 2), y, 3)) == "x_0 < 2 ? x_1: 3"
-    assert expr2latex(ufl.conditional(ufl.gt(x, 2), 4+y, 3)) == "x_0 > 2 ? 4 + x_1: 3"
+    assert expr2latex(ufl.conditional(ufl.gt(x, 2), 4 + y, 3)) == "x_0 > 2 ? 4 + x_1: 3"
     assert expr2latex(ufl.conditional(ufl.And(ufl.le(x, 2), ufl.ge(y, 4)), 7, 8)) == "x_0 <= 2 && x_1 >= 4 ? 7: 8"
     assert expr2latex(ufl.conditional(ufl.Or(ufl.eq(x, 2), ufl.ne(y, 4)), 7, 8)) == "x_0 == 2 || x_1 != 4 ? 7: 8"
     # TODO: Some tests of nested conditionals with correct precedences?
 
+
 def test_latex_formatting_precedence_handling():
     x, y = ufl.SpatialCoordinate(ufl.triangle)
     # Test precedence handling with sums
@@ -161,8 +172,8 @@ def test_latex_formatting_precedence_handling():
 
     # Test precedence handling with highest level types
     assert expr2latex(ufl.sin(x)) == r"\sin(x_0)"
-    assert expr2latex(ufl.cos(x+2)) == r"\cos(2 + x_0)"
-    assert expr2latex(ufl.tan(x/2)) == r"\tan(\frac{x_0}{2})"
+    assert expr2latex(ufl.cos(x + 2)) == r"\cos(2 + x_0)"
+    assert expr2latex(ufl.tan(x / 2)) == r"\tan(\frac{x_0}{2})"
     assert expr2latex(ufl.acos(x + 3 * y)) == r"\arccos(x_0 + 3 x_1)"
     assert expr2latex(ufl.asin(ufl.atan(x**4))) == r"\arcsin(\arctan({x_0}^{4}))"
     assert expr2latex(ufl.sin(y) + ufl.tan(x)) == r"\sin(x_1) + \tan(x_0)"
@@ -171,10 +182,13 @@ def test_latex_formatting_precedence_handling():
     assert expr2latex(3 * (2 + x)) == "3 (2 + x_0)"
     assert expr2latex((2 * x) + (3 * y)) == "2 x_0 + 3 x_1"
     assert expr2latex(2 * (x + 3) * y) == "x_1 (2 (3 + x_0))"
+
+
 def _fixme():
-    assert expr2latex(2 * (x + 3)**4 * y) == "x_1 (2 {(3 + x_0)}^{4)" # FIXME: Precedence handling fails here
+    assert expr2latex(2 * (x + 3)**4 * y) == "x_1 (2 {(3 + x_0)}^{4)"  # FIXME: Precedence handling fails here
     # TODO: More tests covering all types and more combinations!
 
+
 def test_latex_formatting_of_variables():
     x, y = ufl.SpatialCoordinate(ufl.triangle)
     # Test user-provided C variables for subexpressions
diff --git a/test/uflacs/unit/xtest_ufl_shapes_and_indexing.py b/test/uflacs/unit/xtest_ufl_shapes_and_indexing.py
index dcb501e..a088edd 100644
--- a/test/uflacs/unit/xtest_ufl_shapes_and_indexing.py
+++ b/test/uflacs/unit/xtest_ufl_shapes_and_indexing.py
@@ -1,20 +1,23 @@
-#!/usr/bin/env python
-"""
-Tests of utilities for dealing with ufl indexing and components vs flattened index spaces.
+# -*- coding: utf-8 -*-
+"""Tests of utilities for dealing with ufl indexing and components vs
+flattened index spaces.
+
 """
 
 from ufl import *
 from ufl import product
 from ufl.permutation import compute_indices
 
-from uflacs.analysis.indexing import (map_indexed_arg_components,
-                                        map_component_tensor_arg_components)
-from uflacs.analysis.graph_symbols import (map_list_tensor_symbols,
-                                             map_transposed_symbols, get_node_symbols)
-from uflacs.analysis.graph import build_graph
+from ffc.uflacs.analysis.indexing import (map_indexed_arg_components,
+                                      map_component_tensor_arg_components)
+from ffc.uflacs.analysis.graph_symbols import (map_list_tensor_symbols,
+                                           map_transposed_symbols,
+                                           get_node_symbols)
+from ffc.uflacs.analysis.graph import build_graph
 
 from operator import eq as equal
 
+
 def test_map_indexed_arg_components():
     W = TensorElement("CG", triangle, 1)
     A = Coefficient(W)
@@ -28,12 +31,14 @@ def test_map_indexed_arg_components():
     d = map_indexed_arg_components(A[j, i])
     assert equal(d, [0, 2, 1, 3])
 
+
 def test_map_indexed_arg_components2():
 
-    # This was the previous return type, copied here to preserve the test without having to rewrite
+    # This was the previous return type, copied here to preserve the
+    # test without having to rewrite
     def map_indexed_arg_components2(Aii):
         c1, c2 = map_indexed_to_arg_components(Aii)
-        d = [None]*len(c1)
+        d = [None] * len(c1)
         for k in range(len(c1)):
             d[c1[k]] = k
         return d
@@ -50,34 +55,39 @@ def test_map_indexed_arg_components2():
     d = map_indexed_arg_components2(A[j, i])
     assert equal(d, [0, 2, 1, 3])
 
+
 def test_map_componenttensor_arg_components():
     W = TensorElement("CG", triangle, 1)
     A = Coefficient(W)
     i, j = indices(2)
 
     # Ordered indices:
-    d = map_component_tensor_arg_components(as_tensor(2*A[i, j], (i, j)))
+    d = map_component_tensor_arg_components(as_tensor(2 * A[i, j], (i, j)))
     assert equal(d, [0, 1, 2, 3])
 
     # Swapped ordering of indices:
-    d = map_component_tensor_arg_components(as_tensor(2*A[i, j], (j, i)))
+    d = map_component_tensor_arg_components(as_tensor(2 * A[i, j], (j, i)))
     assert equal(d, [0, 2, 1, 3])
 
+
 def test_map_list_tensor_symbols():
     U = FiniteElement("CG", triangle, 1)
     u = Coefficient(U)
-    A = as_tensor(((u+1, u+2, u+3), (u**2+1, u**2+2, u**2+3)))
-    # Would be nicer to refactor build_graph a bit so we could call map_list_tensor_symbols directly...
+    A = as_tensor(((u + 1, u + 2, u + 3), (u**2 + 1, u**2 + 2, u**2 + 3)))
+    # Would be nicer to refactor build_graph a bit so we could call
+    # map_list_tensor_symbols directly...
     G = build_graph([A], DEBUG=False)
     s1 = list(get_node_symbols(A, G.e2i, G.V_symbols))
-    s2 = [get_node_symbols(e, G.e2i, G.V_symbols)[0] for e in (u+1, u+2, u+3, u**2+1, u**2+2, u**2+3)]
+    s2 = [get_node_symbols(e, G.e2i, G.V_symbols)[0] for e in (u + 1, u + 2, u + 3, u**2 + 1, u**2 + 2, u**2 + 3)]
     assert s1 == s2
 
+
 def test_map_transposed_symbols():
     W = TensorElement("CG", triangle, 1)
     w = Coefficient(W)
     A = w.T
-    # Would be nicer to refactor build_graph a bit so we could call map_transposed_symbols directly...
+    # Would be nicer to refactor build_graph a bit so we could call
+    # map_transposed_symbols directly...
     G = build_graph([A], DEBUG=False)
     s1 = list(get_node_symbols(A, G.e2i, G.V_symbols))
     s2 = list(get_node_symbols(w, G.e2i, G.V_symbols))
@@ -87,7 +97,8 @@ def test_map_transposed_symbols():
     W = TensorElement("CG", tetrahedron, 1)
     w = Coefficient(W)
     A = w.T
-    # Would be nicer to refactor build_graph a bit so we could call map_transposed_symbols directly...
+    # Would be nicer to refactor build_graph a bit so we could call
+    # map_transposed_symbols directly...
     G = build_graph([A], DEBUG=False)
     s1 = list(get_node_symbols(A, G.e2i, G.V_symbols))
     s2 = list(get_node_symbols(w, G.e2i, G.V_symbols))
diff --git a/test/uflacs/unit/xtest_ufl_to_cpp_formatting.py b/test/uflacs/unit/xtest_ufl_to_cpp_formatting.py
index a331aa0..12f04d4 100644
--- a/test/uflacs/unit/xtest_ufl_to_cpp_formatting.py
+++ b/test/uflacs/unit/xtest_ufl_to_cpp_formatting.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 -*-
 """
 Tests of ufl to C++ expression formatting rules.
 
@@ -11,9 +11,9 @@ import ufl
 from ufl.constantvalue import as_ufl
 from ufl.algorithms import preprocess_expression, expand_indices
 
-import uflacs
-from uflacs.codeutils.cpp_expr_formatting_rules import CppFormattingRules
-from uflacs.codeutils.expr_formatter import ExprFormatter
+from ffc.uflacs.codeutils.cpp_expr_formatting_rules import CppFormattingRules
+from ffc.uflacs.codeutils.expr_formatter import ExprFormatter
+
 
 def expr2cpp(expr, variables=None):
     "This is a test specific function for formatting ufl to C++."
@@ -39,6 +39,7 @@ def expr2cpp(expr, variables=None):
     code = expr_formatter.visit(expr)
     return code
 
+
 def test_cpp_formatting_of_literals():
     # Test literals
     assert expr2cpp(ufl.as_ufl(2)) == "2"
@@ -53,6 +54,7 @@ def test_cpp_formatting_of_literals():
     assert expr2cpp(ufl.PermutationSymbol(3)[2, 1, 3]) == "-1"
     assert expr2cpp(ufl.PermutationSymbol(3)[1, 1, 3]) == "0"
 
+
 def test_cpp_formatting_of_geometry():
     # Test geometry quantities (faked for testing!)
     x = ufl.SpatialCoordinate(ufl.interval)[0]
@@ -68,6 +70,7 @@ def test_cpp_formatting_of_geometry():
     Kr = ufl.Circumradius(ufl.triangle)
     assert expr2cpp(Kr) == "circumradius"
 
+
 def test_cpp_formatting_of_form_arguments():
     # Test form arguments (faked for testing!)
     V = ufl.FiniteElement("CG", ufl.triangle, 1)
@@ -78,29 +81,31 @@ def test_cpp_formatting_of_form_arguments():
 
     V = ufl.VectorElement("CG", ufl.triangle, 1)
     f = ufl.Coefficient(V, count=1)
-    assert expr2cpp(f[0]) == "w0[0]" # Renumbered to 0...
+    assert expr2cpp(f[0]) == "w0[0]"  # Renumbered to 0...
     v = ufl.Argument(V, number=3)
-    assert expr2cpp(v[1]) == "v3[1]" # NOT renumbered to 0...
+    assert expr2cpp(v[1]) == "v3[1]"  # NOT renumbered to 0...
 
     V = ufl.TensorElement("CG", ufl.triangle, 1)
     f = ufl.Coefficient(V, count=2)
-    assert expr2cpp(f[1, 0]) == "w0[1][0]" # Renumbered to 0...
+    assert expr2cpp(f[1, 0]) == "w0[1][0]"  # Renumbered to 0...
     v = ufl.Argument(V, number=3)
-    assert expr2cpp(v[0, 1]) == "v3[0][1]" # NOT renumbered to 0...
+    assert expr2cpp(v[0, 1]) == "v3[0][1]"  # NOT renumbered to 0...
 
     # TODO: Test mixed functions
     # TODO: Test tensor functions with symmetries
 
+
 def test_cpp_formatting_of_arithmetic():
     x, y = ufl.SpatialCoordinate(ufl.triangle)
     # Test basic arithmetic operators
     assert expr2cpp(x + 3) == "3 + x[0]"
     assert expr2cpp(x * 2) == "2 * x[0]"
     assert expr2cpp(x / 2) == "x[0] / 2"
-    assert expr2cpp(x*x) == "pow(x[0], 2)" # TODO: Will gcc optimize this to x*x for us?
+    assert expr2cpp(x * x) == "pow(x[0], 2)"  # TODO: Will gcc optimize this to x*x for us?
     assert expr2cpp(x**3) == "pow(x[0], 3)"
     # TODO: Test all basic operators
 
+
 def test_cpp_formatting_of_cmath():
     x, y = ufl.SpatialCoordinate(ufl.triangle)
     # Test cmath functions
@@ -115,6 +120,7 @@ def test_cpp_formatting_of_cmath():
     assert expr2cpp(ufl.acos(x)) == "acos(x[0])"
     assert expr2cpp(ufl.atan(x)) == "atan(x[0])"
 
+
 def test_cpp_formatting_of_derivatives():
     xx = ufl.SpatialCoordinate(ufl.triangle)
     x, y = xx
@@ -130,16 +136,17 @@ def test_cpp_formatting_of_derivatives():
     f = ufl.Coefficient(V, count=0)
     assert expr2cpp(f.dx(0)) == "d1_w0[0]"
     v = ufl.Argument(V, number=3)
-    assert expr2cpp(v.dx(1)) == "d1_v3[1]" # NOT renumbered to 0...
+    assert expr2cpp(v.dx(1)) == "d1_v3[1]"  # NOT renumbered to 0...
     # TODO: Test more derivatives
     # TODO: Test variable derivatives using diff
 
+
 def test_cpp_formatting_of_conditionals():
     x, y = ufl.SpatialCoordinate(ufl.triangle)
     # Test conditional expressions
     assert expr2cpp(ufl.conditional(ufl.lt(x, 2), y, 3)) \
         == "x[0] < 2 ? x[1]: 3"
-    assert expr2cpp(ufl.conditional(ufl.gt(x, 2), 4+y, 3)) \
+    assert expr2cpp(ufl.conditional(ufl.gt(x, 2), 4 + y, 3)) \
         == "x[0] > 2 ? 4 + x[1]: 3"
     assert expr2cpp(ufl.conditional(ufl.And(ufl.le(x, 2), ufl.ge(y, 4)), 7, 8)) \
         == "x[0] <= 2 && x[1] >= 4 ? 7: 8"
@@ -147,6 +154,7 @@ def test_cpp_formatting_of_conditionals():
         == "x[0] == 2 || x[1] != 4 ? 7: 8"
     # TODO: Some tests of nested conditionals with correct precedences?
 
+
 def test_cpp_formatting_precedence_handling():
     x, y = ufl.SpatialCoordinate(ufl.triangle)
     # Test precedence handling with sums
@@ -173,8 +181,8 @@ def test_cpp_formatting_precedence_handling():
 
     # Test precedence handling with highest level types
     assert expr2cpp(ufl.sin(x)) == "sin(x[0])"
-    assert expr2cpp(ufl.cos(x+2)) == "cos(2 + x[0])"
-    assert expr2cpp(ufl.tan(x/2)) == "tan(x[0] / 2)"
+    assert expr2cpp(ufl.cos(x + 2)) == "cos(2 + x[0])"
+    assert expr2cpp(ufl.tan(x / 2)) == "tan(x[0] / 2)"
     assert expr2cpp(ufl.acos(x + 3 * y)) == "acos(x[0] + 3 * x[1])"
     assert expr2cpp(ufl.asin(ufl.atan(x**4))) == "asin(atan(pow(x[0], 4)))"
     assert expr2cpp(ufl.sin(y) + ufl.tan(x)) == "sin(x[1]) + tan(x[0])"
@@ -186,6 +194,7 @@ def test_cpp_formatting_precedence_handling():
     assert expr2cpp(2 * (x + 3)**4 * y) == "x[1] * (2 * pow(3 + x[0], 4))"
     # TODO: More tests covering all types and more combinations!
 
+
 def test_cpp_formatting_with_variables():
     x, y = ufl.SpatialCoordinate(ufl.triangle)
     # Test user-provided C variables for subexpressions
diff --git a/test/unit/elements/test.py b/test/unit/elements/test.py
deleted file mode 100644
index a8b625b..0000000
--- a/test/unit/elements/test.py
+++ /dev/null
@@ -1,43 +0,0 @@
-"""Unit tests for FFC finite elements"""
-
-# Copyright (C) 2013 Marie E. Rognes
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-
-import unittest
-
-from ufl import interval
-from ufl import FiniteElement
-
-from ffc import compile_element
-
-class TestCompileElements(unittest.TestCase):
-
-    def testRadau(self):
-        "Test that Radau elements compile."
-        for degree in range(3):
-            element = FiniteElement("Radau", interval, degree)
-            compile_element(element)
-
-    def testLobatto(self):
-        "Test that Lobatto elements compile."
-        for degree in range(1, 4):
-            element = FiniteElement("Lobatto", interval, degree)
-            compile_element(element)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/test/unit/evaluate_basis/__init__.py b/test/unit/evaluate_basis/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/test/unit/evaluate_basis/cppcode.py b/test/unit/evaluate_basis/cppcode.py
index 00c19c8..f6c039a 100644
--- a/test/unit/evaluate_basis/cppcode.py
+++ b/test/unit/evaluate_basis/cppcode.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 "This module provides simple C++ code for verification of UFC code."
 
 # Copyright (C) 2010 Kristian B. Oelgaard
@@ -16,54 +17,6 @@
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-18
-# Last changed: 2010-01-18
-
-#evaluate_basis_code = """\
-##include <iostream>
-##include <ufc.h>
-##include "test.h"
-
-#int main()
-#{
-#  // Create element
-#  %(element)s element;
-
-#  // Size of dof_values
-#  // FIXME: This will not work for TensorElements
-#  int N = element.value_dimension(0);
-
-#  // Create values
-#  double* dof_values = new double[N];
-#  for (unsigned int i = 0; i < N; i++)
-#    dof_values[i] = 0.0;
-
-#  // Create vertex coordinates and fill with some arbitrary data
-#  double coordinate_dofs[24] = {0.90, 0.34, 0.45,
-#                                   0.56, 0.76, 0.83,
-#                                   0.98, 0.78, 0.19,
-#                                   0.12, 0.56, 0.66,
-#                                   0.96, 0.78, 0.63,
-#                                   0.11, 0.35, 0.49,
-#                                   0.51, 0.88, 0.65,
-#                                   0.98, 0.45, 0.01};
-
-#  // Random coordinates where we want to evaluate the basis functions
-#  double coordinates[3] = {0.32, 0.51, 0.05};
-
-#  // Loop element space dimension and call evaluate_basis.
-#  for (unsigned int i = 0; i < element.space_dimension(); i++)
-#  {
-#    element.evaluate_basis(i, dof_values, coordinates, coordinate_dofs, 0);
-#    // Print values
-#    for (unsigned int j = 0; j < N; j++)
-#      std::cout << dof_values[j] << " ";
-#  }
-#  std::cout << std::endl;
-#  return 0;
-#}
-#"""
 
 evaluate_basis_code_fiat = """\
 #include <iostream>
@@ -76,31 +29,24 @@ int main(int argc, char* argv[])
   // Create element
   %(element)s element;
 
-
   // Get derivative order
   unsigned int n = std::atoi(argv[1]);
 
   // Value dimension
   int N;
   if (element.value_rank() == 0)
-  {
     N = 1;
-  }
   else
   {
     N = 1;
     for (unsigned int i = 0; i < element.value_rank(); i++)
-    {
       N = N * element.value_dimension(i);
-    }
   }
 
   // Compute number of derivatives.
   unsigned int  num_derivatives = 1;
   for (unsigned int r = 0; r < n; r++)
-  {
     num_derivatives *= %(dim)d;
-  }
 
   // Create values
   unsigned int num_dof_vals = N*num_derivatives;
@@ -133,9 +79,8 @@ int main(int argc, char* argv[])
     for (unsigned int p = 0; p < %(num_points)d; p++)
     {
       for (unsigned int d = 0; d < %(dim)d; d++)
-      {
         coordinates[d] = points[p][d];
-      }
+
       // Loop element space dimension and call evaluate_basis.
       for (unsigned int i = 0; i < element.space_dimension(); i++)
       {
@@ -155,13 +100,13 @@ int main(int argc, char* argv[])
     for (unsigned int p = element.space_dimension(); p < %(num_points)d; p++)
     {
       for (unsigned int d = 0; d < %(dim)d; d++)
-      {
         coordinates[d] = points[p][d];
-      }
+
       // Loop element space dimension and call evaluate_basis.
       for (unsigned int i = 0; i < element.space_dimension(); i++)
       {
-        element.evaluate_basis_derivatives(i, n, dof_values, coordinates, coordinate_dofs, 0);
+        element.evaluate_basis_derivatives(i, n, dof_values, coordinates,
+                                           coordinate_dofs, 0);
 
         // Print values
         for (unsigned int j = 0; j < num_dof_vals; j++)
diff --git a/test/unit/evaluate_basis/elements.py b/test/unit/evaluate_basis/elements.py
deleted file mode 100644
index 6977491..0000000
--- a/test/unit/evaluate_basis/elements.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2014-01-28
-# Last changed: 2014-01-28
-
-from ufl import FiniteElement, MixedElement
-from .test_common import xcomb
-__all__ = ["single_elements", "mixed_elements"]
-
-# Elements, supported by FFC and FIAT, and their supported shape and orders
-single_elements = [ {"family": "Lagrange",\
-                      "shapes": ["interval", "triangle", "tetrahedron"],\
-                      "orders": [1, 2, 3, 4]},\
-                    {"family": "Discontinuous Lagrange",\
-                      "shapes": ["interval", "triangle", "tetrahedron"],\
-                      "orders": [0, 1, 2, 3, 4]},\
-                    {"family": "Crouzeix-Raviart",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [1]},\
-                    {"family": "Raviart-Thomas",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [1, 2, 3]},\
-                    {"family": "Discontinuous Raviart-Thomas",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [1, 2, 3]},\
-                    {"family": "Brezzi-Douglas-Marini",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [1, 2, 3]},\
-                    {"family": "Brezzi-Douglas-Fortin-Marini",\
-                      "shapes": ["triangle"],\
-                      "orders": [2]},\
-                    {"family": "Nedelec 1st kind H(curl)",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [1, 2, 3]},\
-                    {"family": "Nedelec 2nd kind H(curl)",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [1, 2, 3]},\
-                    {"family": "Regge",\
-                      "shapes": ["triangle", "tetrahedron"],\
-                      "orders": [0, 1, 2, 3]}]
-
-# Create some mixed elements
-dg0_tri = FiniteElement("DG", "triangle", 0)
-dg1_tri = FiniteElement("DG", "triangle", 1)
-cg1_tri = FiniteElement("CG", "triangle", 1)
-cr1_tri = FiniteElement("CR", "triangle", 1)
-rt1_tri = FiniteElement("RT", "triangle", 1)
-drt2_tri = FiniteElement("DRT", "triangle", 2)
-bdm1_tri = FiniteElement("BDM", "triangle", 1)
-ned1_tri = FiniteElement("N1curl", "triangle", 1)
-reg0_tri = FiniteElement("Regge", "triangle", 0)
-
-dg0_tet = FiniteElement("DG", "tetrahedron", 0)
-dg1_tet = FiniteElement("DG", "tetrahedron", 1)
-cg1_tet = FiniteElement("CG", "tetrahedron", 1)
-cr1_tet = FiniteElement("CR", "tetrahedron", 1)
-rt1_tet = FiniteElement("RT", "tetrahedron", 1)
-drt2_tet = FiniteElement("DRT", "tetrahedron", 2)
-bdm1_tet = FiniteElement("BDM", "tetrahedron", 1)
-ned1_tet = FiniteElement("N1curl", "tetrahedron", 1)
-reg0_tet = FiniteElement("Regge", "tetrahedron", 0)
-
-# Create combinations in pairs.
-mix_tri = [MixedElement(e) for e in xcomb([dg0_tri, dg1_tri, cg1_tri, cr1_tri, rt1_tri, drt2_tri, bdm1_tri, ned1_tri, reg0_tri], 2)]
-mix_tet = [MixedElement(e) for e in xcomb([dg0_tet, dg1_tet, cg1_tet, cr1_tet, rt1_tet, drt2_tet, bdm1_tet, ned1_tet, reg0_tet], 2)]
-
-mixed_elements = [MixedElement([dg0_tri]*4), MixedElement([cg1_tri]*3), MixedElement([bdm1_tri]*2),\
-                  MixedElement([dg1_tri, cg1_tri, cr1_tri, rt1_tri, bdm1_tri, ned1_tri]),\
-                  MixedElement([MixedElement([rt1_tri, cr1_tri]), cg1_tri, ned1_tri]),\
-                  MixedElement([ned1_tri, dg1_tri, MixedElement([rt1_tri, cr1_tri])]),\
-                  MixedElement([drt2_tri, cg1_tri]),\
-                  MixedElement([dg0_tet]*4), MixedElement([cg1_tet]*3), MixedElement([bdm1_tet]*2),\
-                  MixedElement([dg1_tet, cg1_tet, cr1_tet, rt1_tet, bdm1_tet, ned1_tet]),\
-                  MixedElement([MixedElement([rt1_tet, cr1_tet]), cg1_tet, ned1_tet]),\
-                  MixedElement([ned1_tet, dg1_tet, MixedElement([rt1_tet, cr1_tet])]),\
-                  MixedElement([drt2_tet, cg1_tet]),\
-                  MixedElement([cg1_tet, cg1_tet, cg1_tet, reg0_tet])] + mix_tri + mix_tet
diff --git a/test/unit/evaluate_basis/test.py b/test/unit/evaluate_basis/test.py
deleted file mode 100644
index b80fd84..0000000
--- a/test/unit/evaluate_basis/test.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""Unit tests for FFC. This test compares values computed by the two UFC
-functions evaluate_basis and evaluate_basis_derivatives generated by FFC to the
-values tabulated by FIAT and to reference values computed by an older version of FFC."""
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-02-01
-# Last changed: 2010-02-01
-
-import unittest
-from .test_against_fiat import main as fiat_main
-from ffc.log import CRITICAL, INFO
-
-class EvaluateBasisTests(unittest.TestCase):
-
-    def testAgainstFiat(self):
-        "Test evaluate basis against FIAT.FiniteElement.tabulate()."
-
-        error = fiat_main(INFO)
-        self.assertEqual(error, 0, "Errors while testing evaluate_basis against FIAT, see fiat_errors.log for details")
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/test/unit/evaluate_basis/test_against_fiat.py b/test/unit/evaluate_basis/test_against_fiat.py
deleted file mode 100644
index e36f082..0000000
--- a/test/unit/evaluate_basis/test_against_fiat.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-29
-# Last changed: 2013-01-31
-
-from .cppcode import evaluate_basis_code_fiat
-from ufl import FiniteElement
-from ffc.fiatinterface import create_element, reference_cell
-from ffc.mixedelement import MixedElement as FFCMixedElement
-from ffc.log import info, error, debug
-import numpy
-import sys, os, numpy, shutil
-from .test_common import compile_element, print_results, compile_gcc_code,\
-                        run_code, get_element_name, verify_values
-import time
-from ffc.log import push_level, pop_level, CRITICAL, INFO
-from .elements import single_elements, mixed_elements
-
-# Some random points
-random_points = {1: [(0.114,), (0.349,), (0.986,)],
-                  2: [(0.114, 0.854), (0.349, 0.247), (0.986, 0.045)],
-                  3: [(0.114, 0.854, 0.126), (0.349, 0.247, 0.457), (0.986, 0.045, 0.127)]}
-
-ffc_fail = []
-gcc_fail = []
-run_fail = []
-dif_cri = []
-dif_acc = []
-correct = []
-log_file = "fiat_errors.log"
-def matrix(points):
-    return "{%s};" % ", ".join(["{%s}" % ", ".join([str(c) for c in p]) for p in points])
-
-def get_data(ufl_element):
-    "Get needed data to run tests."
-
-    # Create fiat element.
-    element = create_element(ufl_element)
-
-    # The derivative order that we are interested in is the degree of the element.
-    if isinstance(element, FFCMixedElement):
-        deriv_order = max([e.degree() for e in element.elements()])
-    else:
-        deriv_order = element.degree()
-
-    # Get coordinates of the reference cell.
-    cell = ufl_element.cell()
-    ref_coords = reference_cell(cell.cellname()).get_vertices()
-
-    # Get the locations of the fiat element dofs.
-    elem_points =  [list(L.pt_dict.keys())[0] for L in element.dual_basis()]
-
-    # Add some random points.
-    geo_dim = cell.geometric_dimension()
-    points = elem_points + random_points[geo_dim]
-
-    return (element, points, geo_dim, ref_coords, deriv_order)
-
-def compute_derivative_combinations(deriv_order, geo_dim):
-    "Compute combinations of derivatives in spatial directions (like code snippet)."
-
-    if deriv_order == 0:
-        return [(0,)*geo_dim]
-
-    num_derivatives = geo_dim**deriv_order
-    combinations = [[0]*deriv_order for n in range(num_derivatives)]
-
-    for i in range(1, num_derivatives):
-        for k in range(i):
-            j = deriv_order - 1
-            while j + 1 > 0:
-                j -= 1
-                if combinations[i][j] + 1 > geo_dim - 1:
-                    combinations[i][j] = 0
-                else:
-                    combinations[i][j] += 1
-                    break
-    # Convert to fiat tuples.
-    for i in range(num_derivatives):
-        combinations[i] = to_fiat_tuple(combinations[i], geo_dim)
-
-    return combinations
-
-def to_fiat_tuple(comb, geo_dim):
-    """Convert a list of combinations of derivatives to a fiat tuple of derivatives.
-    FIAT expects a list with the number of derivatives in each spatial direction.
-    E.g., in 2D: u_{xyy} --> [0, 1, 1] in FFC --> (1, 2) in FIAT."""
-    new_comb = [0]*geo_dim
-    if comb == []:
-        return tuple(new_comb)
-    for i in range(geo_dim):
-        new_comb[i] = comb.count(i)
-    return tuple(new_comb)
-
-def get_fiat_values(ufl_element):
-    """Create a FIAT element and use it to tabulate the values on the reference
-    element. The return values is a dictionary with keys equal to the derivative
-    order and values is a matrix where each row is the basis values at a point.
-    E.g., {0:[[1,0,0],[0,1,0], [0,0,1]]}."""
-
-    # Get data and tabulate basis values.
-    element, points, geo_dim, ref_coords, deriv_order = get_data(ufl_element)
-    values = element.tabulate(deriv_order, points)
-    return_values = {}
-    value_shape = element.value_shape()
-
-    # Rearrange values to match what we get from evaluate_basis*()
-    for n in range(deriv_order + 1):
-        combinations = compute_derivative_combinations(n, geo_dim)
-        vals = []
-        # If we're evaluating the basis functions, use all points, but if we're
-        # evaluating the derivatives, just use the 3 arbitrary points to avoid
-        # the number of tests exploding with spacedim**2.
-        if n == 0:
-            new_points = points
-        else:
-            new_points = points[-3:]
-        for p, point in enumerate(new_points):
-            if n != 0:
-                p += element.space_dimension()
-            row = [[] for i in range(element.space_dimension())]
-            for i in range(element.space_dimension()):
-                if value_shape == ():
-                    for deriv in combinations:
-                        deriv_vals = values[deriv]
-                        row[i].append(deriv_vals[i][p])
-                elif len(value_shape) == 1:
-                    for c in range(element.value_shape()[0]):
-                        for deriv in combinations:
-                            deriv_vals = values[deriv]
-                            row[i].append(deriv_vals[i][c][p])
-                elif len(value_shape) == 2:
-                    for j in range(element.value_shape()[0]):
-                        for k in range(element.value_shape()[1]):
-                            for deriv in combinations:
-                                deriv_vals = values[deriv]
-                                row[i].append(deriv_vals[i][j][k][p])
-                else:
-                    print(values)
-                    error("Did not expect tensor elements of rank > 2")
-            new_row = []
-            for r in row:
-                new_row += r
-            vals.append(new_row)
-        return_values[n] = numpy.array(vals)
-    return return_values
-
-def get_ffc_values(ufl_element):
-    "Get the values from evaluate_basis and evaluate_basis_derivatives."
-
-    # Get data and tabulate basis values.
-    element, points, geo_dim, ref_coords, deriv_order = get_data(ufl_element)
-
-    # Get relevant element name.
-    element_name = get_element_name(ufl_element)
-
-    # Create g++ code and compile.
-    num_coords = len(ref_coords)
-    options = {"element": element_name,
-               "dim": geo_dim,
-               "num_points": len(points),
-               "points": matrix(points),
-               "cell_ref_coords": "double cell_ref_coords[%d][%d] = %s" % (num_coords, geo_dim, matrix(ref_coords)),
-               "num_coords": num_coords}
-    error = compile_gcc_code(ufl_element, evaluate_basis_code_fiat % options, gcc_fail, log_file)
-    if error:
-        return None
-
-    # Loop derivative order and compute values.
-    ffc_values = {}
-    for n in range(deriv_order + 1):
-        values = run_code(ufl_element, n, run_fail, log_file)
-        if values is None:
-            return None
-        ffc_values[n] = values
-    return ffc_values
-
-def verify_element(num_elements, i, ufl_element):
-    info("\nVerifying element %d of %d: %s" % (i, num_elements, str(ufl_element)))
-    error = compile_element(ufl_element, ffc_fail, log_file)
-
-    # Return if test failed
-    if error:
-        return 1
-
-    # Get FIAT values that are formatted in the same way as the values from
-    # evaluate_basis and evaluate_basis_derivatives.
-    # t = time.time()
-    fiat_values = get_fiat_values(ufl_element)
-    # print "fiat_vals: ", time.time() - t
-
-    # Get FFC values.
-    t = time.time()
-    ffc_values = get_ffc_values(ufl_element)
-    if ffc_values is None:
-        return 1
-    debug("  time to compute FFC values: %f" % (time.time() - t))
-
-    # Compare values and return number of tests.
-    return verify_values(ufl_element, fiat_values, ffc_values, dif_cri, dif_acc, correct, log_file)
-
-def main(debug_level):
-    "Call evaluate basis for a range of different elements."
-
-    push_level(debug_level)
-
-    # Remove old log file.
-    if os.path.isfile(log_file):
-        os.remove(log_file)
-
-    # Change to temporary folder and copy form files
-    if not os.path.isdir("tmp"):
-        os.mkdir("tmp")
-    os.chdir("tmp")
-
-    # Create list of all elements that have to be tested.
-    elements = []
-    for element in single_elements:
-        for shape in element["shapes"]:
-            for order in element["orders"]:
-                elements.append(FiniteElement(element["family"], shape, order))
-
-    # Add the mixed elements
-    elements += mixed_elements
-    num_elements = len(elements)
-
-    # Test all elements
-    num_tests = 0
-    msg = "Verifying evaluate_basis and evaluate_basis_derivatives for elements"
-    info("\n" + msg + "\n" + len(msg)*"-")
-    for i, ufl_element in enumerate(elements):
-        num_tests += verify_element(num_elements, i + 1, ufl_element)
-
-    # print results
-    error = print_results(num_tests, ffc_fail, gcc_fail, run_fail, dif_cri, dif_acc, correct)
-
-    if not error:
-        # Remove temporary directory
-        os.chdir(os.pardir)
-        shutil.rmtree("tmp")
-    pop_level()
-
-    return error
-
-if __name__ == "__main__":
-    # sys.exit(main(sys.argv[1:]))
-    sys.exit(main(INFO))
diff --git a/test/unit/evaluate_basis/test_against_ref_values.py b/test/unit/evaluate_basis/test_against_ref_values.py
deleted file mode 100644
index e228b54..0000000
--- a/test/unit/evaluate_basis/test_against_ref_values.py
+++ /dev/null
@@ -1,237 +0,0 @@
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-18
-# Last changed: 2010-01-29
-
-from __future__ import print_function
-from .cppcode import evaluate_basis_code
-from ufl import FiniteElement, MixedElement
-from instant.output import get_status_output
-import sys, os, pickle, numpy, shutil
-from .elements import single_elements, mixed_elements
-
-ffc_failed = []
-gcc_failed = []
-run_failed = []
-
-def check_results(values, reference):
-    "Check results and print summary."
-
-    # Check if we have missing values.
-    missing_vals = []
-    num_refs = len(list(reference.keys()))
-    for element in list(reference.keys()):
-        if not element in values:
-            missing_vals.append(element)
-
-    missing_refs = []
-    diffs = []
-    correct = []
-    num_ok = 0
-    print("")
-    sorted_elements = sorted(values.keys())
-    for element in sorted_elements:
-        vals = values[element]
-        print("\nResults for %s:" % element)
-
-        if vals is None:
-            print("Error")
-            continue
-
-        # Get reference values
-        if not element in reference:
-            missing_refs.append(element)
-            print("Missing reference")
-            continue
-        refs = reference[element]
-        tol = 1e-12
-
-        e = max(abs(vals - refs))
-        if e < tol:
-            num_ok += 1
-            print("OK: (diff = %g)" % e)
-            correct.append(element)
-        else:
-            print("*** (diff = %g)" % e)
-            diffs.append(element)
-
-    if ffc_failed == gcc_failed == run_failed == missing_refs == diffs == missing_vals:
-        print("\nAll %d elements verified OK" % len(reference))
-        return 0
-    else:
-        print("\n*** The values were correct for the following elements:\n" + "\n\n".join(correct))
-    if len(ffc_failed) > 0:
-        print("\n*** FFC compilation failed for the following elements:\n" + "\n\n".join(ffc_failed))
-    if len(gcc_failed) > 0:
-        print("\n*** g++ compilation failed for the following elements:\n" + "\n\n".join(gcc_failed))
-    if len(run_failed) > 0:
-        print("\n*** Evaluation failed (seg. fault?) for the following elements:\n" + "\n\n".join(run_failed))
-    if len(missing_refs) > 0:
-        print("\n*** No reference values were found for the following elements:\n" + "\n\n".join(missing_refs))
-    if len(missing_vals) > 0:
-        print("\n*** No values were computed the following %d elements:\n" % len(missing_vals) +\
-              "\n\n".join(missing_vals))
-    if len(diffs) > 0:
-        print("\n*** Difference in values were found for the following elements:\n" + "\n\n".join(diffs))
-
-    num_ffc = len(ffc_failed)
-    num_gcc = len(gcc_failed)
-    num_run = len(run_failed)
-    num_ref = len(missing_refs)
-    num_val = len(missing_vals)
-    num_cor = len(correct)
-    num_dif = len(diffs)
-    print("\nNum ref elements: ", num_refs)
-    print("Num ffc fail:     ", num_ffc)
-    print("Num gcc fail:     ", num_gcc)
-    print("Num run fail:     ", num_run)
-    print("Num miss ref:     ", num_ref)
-    print("Num miss val:     ", num_val)
-    print("Num correct:      ", num_cor)
-    print("Num diff:         ", num_dif)
-    print("Total:            ", num_ffc + num_gcc + num_run + num_ref + num_val + num_cor + num_dif)
-
-    return 1
-
-def compile_element(ufl_element):
-    "Create UFL form file with a single element in it and compile it with FFC"
-    f = open("test.ufl", "w")
-    if isinstance(ufl_element, (FiniteElement, MixedElement)):
-        f.write("element = " + repr(ufl_element))
-    f.close()
-    error, out = get_status_output("ffc test.ufl")
-    if error:
-        ffc_failed.append(repr(ufl_element))
-    return error
-
-def get_element_name(ufl_element):
-    "Extract relevant element name from header file."
-    f = open("test.h")
-    lines = f.readlines()
-    f.close()
-
-    signature = repr(ufl_element)
-    name = None
-    for e, l in enumerate(lines):
-        if "class" in l and "finite_element" in l:
-            name = l
-        if signature in l:
-            break
-    if name is None:
-        raise RuntimeError("No finite element class found")
-    return name.split()[1][:-1]
-
-def compute_values(ufl_element):
-    "Compute values of basis functions for given element."
-
-    # Get relevant element name
-    element_name = get_element_name(ufl_element)
-
-    # Create g++ code
-    options = {"element": element_name}
-    code = evaluate_basis_code % options
-    f = open("evaluate_basis.cpp", "w")
-    f.write(code)
-    f.close()
-
-    # Get UFC flags
-    ufc_cflags = get_status_output("pkg-config --cflags ufc-1")[1].strip()
-
-    # Compile g++ code
-    c = "g++ %s -Wall -Werror -o evaluate_basis evaluate_basis.cpp" % ufc_cflags
-    error, output = get_status_output(c)
-    if error:
-        gcc_failed.append(repr(ufl_element))
-        return None
-
-    # Run compiled code and get values
-    error, output = get_status_output(".%sevaluate_basis" % os.path.sep)
-    if error:
-        run_failed.append(repr(ufl_element))
-        return None
-    values = [float(value) for value in output.split(" ") if len(value) > 0]
-    return numpy.array(values)
-
-def print_refs():
-    if os.path.isfile("reference.pickle"):
-        reference = pickle.load(open("reference.pickle", "r"))
-        for elem, vals in list(reference.items()):
-            print()
-            print(elem)
-            print(vals)
-    else:
-        raise RuntimeError("No references to print")
-
-def main(args):
-    "Call evaluate basis for a range of different elements."
-
-    if "refs" in args:
-        print_refs()
-        return 0
-
-    # Change to temporary folder and copy form files
-    if not os.path.isdir("tmp"):
-        os.mkdir("tmp")
-    os.chdir("tmp")
-
-    values = {}
-    # Evaluate basis for single elements
-    print("\nComputing evaluate_basis for single elements")
-    for element in single_elements:
-        for shape in element["shapes"]:
-            for order in element["orders"]:
-                ufl_element = FiniteElement(element["family"], shape, order)
-                print("Compiling element: ", str(ufl_element))
-                error = compile_element(ufl_element)
-                if error:
-                    values[repr(ufl_element)] = None
-                    continue
-                print("Computing values")
-                values[repr(ufl_element)] = compute_values(ufl_element)
-
-    # Evaluate basis for mixed elements
-    print("\nComputing evaluate_basis for mixed elements")
-    for ufl_element in mixed_elements:
-        print("Compiling element: ", str(ufl_element))
-        error = compile_element(ufl_element)
-        if error:
-            values[repr(ufl_element)] = None
-            continue
-        print("Computing values")
-        values[repr(ufl_element)] = compute_values(ufl_element)
-
-    # Load or update reference values
-    os.chdir(os.pardir)
-    if os.path.isfile("reference.pickle"):
-        reference = pickle.load(open("reference.pickle", "r"))
-    else:
-        print("Unable to find reference values, storing current values.")
-        pickle.dump(values, open("reference.pickle", "w"))
-        return 0
-
-    # Check results
-    error = check_results(values, reference)
-
-    if not error:
-        # Remove temporary directory
-        shutil.rmtree("tmp")
-
-    return error
-
-if __name__ == "__main__":
-    sys.exit(main(sys.argv[1:]))
diff --git a/test/unit/evaluate_basis/test_basis_against_fiat.py b/test/unit/evaluate_basis/test_basis_against_fiat.py
new file mode 100644
index 0000000..0fb6cb0
--- /dev/null
+++ b/test/unit/evaluate_basis/test_basis_against_fiat.py
@@ -0,0 +1,409 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010-2016 Kristian B. Oelgaard and Garth N. Wells
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+# Modified by Lizao Li, 2016
+
+import pytest
+import os
+import numpy
+import subprocess
+import time
+
+from ufl import FiniteElement, MixedElement
+from ufl.utils.py23 import as_native_str
+import ffc
+from ffc.log import error
+from ffc.mixedelement import MixedElement as FFCMixedElement
+from ffc.fiatinterface import create_element, reference_cell
+
+# Local imports
+from cppcode import evaluate_basis_code_fiat
+
+# Tolerances
+tol = 1e-14
+crit_tol = 1e-8
+# crit_tol = 1e-16
+
+# Some random points
+random_points = {1: [(0.114,), (0.349,), (0.986,)],
+                 2: [(0.114, 0.854), (0.349, 0.247), (0.986, 0.045)],
+                 3: [(0.114, 0.854, 0.126), (0.349, 0.247, 0.457),
+                     (0.986, 0.045, 0.127)]}
+
+
+def xcomb(items, n):
+    "Create n-tuples with combinations of items."
+    if n == 0:
+        yield []
+    else:
+        for i in range(len(items)):
+            for cc in xcomb(items[:i] + items[i + 1:], n - 1):
+                yield [items[i]] + cc
+
+
+# Elements, supported by FFC and FIAT, and their supported shape and orders
+# TODO: This is not used anywhere, was it before? What is the purpose?
+single_elements = [{"family": "Lagrange",
+                    "shapes": ["interval", "triangle", "tetrahedron"],
+                    "orders": [1, 2, 3, 4]},
+                   {"family": "Discontinuous Lagrange",
+                    "shapes": ["interval", "triangle", "tetrahedron"],
+                    "orders": [0, 1, 2, 3, 4]},
+                   {"family": "Crouzeix-Raviart",
+                    "shapes": ["triangle", "tetrahedron"],
+                    "orders": [1]},
+                   {"family": "Raviart-Thomas",
+                    "shapes": ["triangle", "tetrahedron"],
+                    "orders": [1, 2, 3]},
+                   {"family": "Discontinuous Raviart-Thomas",
+                    "shapes": ["triangle", "tetrahedron"],
+                    "orders": [1, 2, 3]},
+                   {"family": "Brezzi-Douglas-Marini",
+                    "shapes": ["triangle", "tetrahedron"],
+                    "orders": [1, 2, 3]},
+                   {"family": "Brezzi-Douglas-Fortin-Marini",
+                    "shapes": ["triangle"],
+                    "orders": [2]},
+                   {"family": "Nedelec 1st kind H(curl)",
+                    "shapes": ["triangle", "tetrahedron"],
+                    "orders": [1, 2, 3]},
+                   {"family": "Nedelec 2nd kind H(curl)",
+                    "shapes": ["triangle", "tetrahedron"],
+                    "orders": [1, 2, 3]},
+                   {"family": "Regge",
+                    "shapes": ["triangle", "tetrahedron"],
+                    "orders": [0, 1, 2, 3]},
+                   {"family": "HHJ",
+                    "shapes": ["triangle"],
+                    "orders": [0, 1, 2, 3]}]
+
+
+# Create some mixed elements
+dg0_tri = FiniteElement("DG", "triangle", 0)
+dg1_tri = FiniteElement("DG", "triangle", 1)
+cg1_tri = FiniteElement("CG", "triangle", 1)
+cr1_tri = FiniteElement("CR", "triangle", 1)
+rt1_tri = FiniteElement("RT", "triangle", 1)
+drt2_tri = FiniteElement("DRT", "triangle", 2)
+bdm1_tri = FiniteElement("BDM", "triangle", 1)
+ned1_tri = FiniteElement("N1curl", "triangle", 1)
+reg0_tri = FiniteElement("Regge", "triangle", 0)
+hhj0_tri = FiniteElement("HHJ", "triangle", 0)
+
+dg0_tet = FiniteElement("DG", "tetrahedron", 0)
+dg1_tet = FiniteElement("DG", "tetrahedron", 1)
+cg1_tet = FiniteElement("CG", "tetrahedron", 1)
+cr1_tet = FiniteElement("CR", "tetrahedron", 1)
+rt1_tet = FiniteElement("RT", "tetrahedron", 1)
+drt2_tet = FiniteElement("DRT", "tetrahedron", 2)
+bdm1_tet = FiniteElement("BDM", "tetrahedron", 1)
+ned1_tet = FiniteElement("N1curl", "tetrahedron", 1)
+reg0_tet = FiniteElement("Regge", "tetrahedron", 0)
+
+# Create combinations in pairs
+mix_tri = [MixedElement(e) for e in xcomb([dg0_tri, dg1_tri, cg1_tri, cr1_tri,
+                                           rt1_tri, drt2_tri, bdm1_tri,
+                                           ned1_tri, reg0_tri, hhj0_tri], 2)]
+mix_tet = [MixedElement(e) for e in xcomb([dg0_tet, dg1_tet, cg1_tet, cr1_tet,
+                                           rt1_tet, drt2_tet, bdm1_tet,
+                                           ned1_tet, reg0_tet], 2)]
+
+mixed_elements = [MixedElement([dg0_tri] * 4),
+                  MixedElement([cg1_tri] * 3),
+                  MixedElement([bdm1_tri] * 2),
+                  MixedElement([dg1_tri, cg1_tri, cr1_tri, rt1_tri, bdm1_tri,
+                                ned1_tri]),
+                  MixedElement([MixedElement([rt1_tri, cr1_tri]), cg1_tri,
+                                ned1_tri]),
+                  MixedElement([ned1_tri, dg1_tri, MixedElement([rt1_tri,
+                                                                 cr1_tri])]),
+                  MixedElement([drt2_tri, cg1_tri]),
+                  MixedElement([dg0_tet] * 4), MixedElement([cg1_tet] * 3),
+                  MixedElement([bdm1_tet] * 2),
+                  MixedElement([dg1_tet, cg1_tet, cr1_tet, rt1_tet, bdm1_tet,
+                                ned1_tet]),
+                  MixedElement([MixedElement([rt1_tet, cr1_tet]), cg1_tet,
+                                ned1_tet]),
+                  MixedElement([ned1_tet, dg1_tet, MixedElement([rt1_tet,
+                                                                 cr1_tet])]),
+                  MixedElement([drt2_tet, cg1_tet]),
+                  MixedElement([cg1_tet, cg1_tet, cg1_tet, reg0_tet]),
+                  MixedElement([reg0_tet] * 2),
+                  MixedElement([hhj0_tri, ned1_tri])] + mix_tri + mix_tet
+
+# Collect all elements here
+all_elements = mixed_elements
+
+
+def compile_gcc_code(ufl_element, code):
+    # Write code to file
+    with open("evaluate_basis.cpp", "w") as f:
+        f.write(code)
+
+    # Get location of UFC file
+    from ffc import get_include_path, get_ufc_cxx_flags
+    ufc_include_path = get_include_path()
+    ufc_cxx_flags = get_ufc_cxx_flags()
+
+    # Compile c++ code
+    c = "c++ -I{} {} -Wall -Werror -o evaluate_basis_test_code evaluate_basis.cpp".format(ufc_include_path, " ".join(ufc_cxx_flags))
+    with open("compile.sh", "w") as f:
+        f.write(c + "\n")
+    subprocess.check_call(c, shell=True)
+
+
+def run_code(ufl_element, deriv_order):
+    "Compute values of basis functions for given element."
+
+    # Run compiled code and get values
+    c = ".%sevaluate_basis_test_code %d" % (os.path.sep, deriv_order)
+    try:
+        output = as_native_str(subprocess.check_output(c, shell=True))
+    except subprocess.CalledProcessError as e:
+        print("Could not run compile code for element: {}".format(str(ufl_element)))
+        raise
+
+    values = [[float(value) for value in line.strip().split(" ") if value] for line in output.strip().split("\n")]
+    return numpy.array(values)
+
+
+def get_element_name(ufl_element):
+    "Extract relevant element name from header file."
+    with open("test.h") as f:
+        lines = f.readlines()
+
+    signature = repr(ufl_element)
+    name = None
+    for e, l in enumerate(lines):
+        if "class" in l and "finite_element" in l:
+            name = l
+        if signature in l:
+            break
+    if name is None:
+        raise RuntimeError("No finite element class found")
+    return name.split()[1][:-1]
+
+
+def generate_element(ufl_element):
+    "Create UFL form file with a single element in it and compile it with FFC"
+    with open("test.ufl", "w") as f:
+        f.write("element = " + repr(ufl_element))
+    r = ffc.main(["test.ufl"])
+    assert r == 0, "FFC compilation failed for element: {}.".format(str(ufl_element))
+
+
+def matrix(points):
+    return "{%s};" % ", ".join(["{%s}" % ", ".join([str(c) for c in p]) for p in points])
+
+
+def to_fiat_tuple(comb, gdim):
+    """Convert a list of combinations of derivatives to a fiat tuple of
+    derivatives.  FIAT expects a list with the number of derivatives
+    in each spatial direction.  E.g., in 2D: u_{xyy} --> [0, 1, 1] in
+    FFC --> (1, 2) in FIAT.
+
+    """
+    new_comb = [0] * gdim
+    if comb == []:
+        return tuple(new_comb)
+    for i in range(gdim):
+        new_comb[i] = comb.count(i)
+    return tuple(new_comb)
+
+
+def compute_derivative_combinations(deriv_order, geo_dim):
+    "Compute combinations of derivatives in spatial directions (like code snippet)."
+
+    if deriv_order == 0:
+        return [(0,) * geo_dim]
+
+    num_derivatives = geo_dim**deriv_order
+    combinations = [[0] * deriv_order for n in range(num_derivatives)]
+    for i in range(1, num_derivatives):
+        for k in range(i):
+            j = deriv_order - 1
+            while j + 1 > 0:
+                j -= 1
+                if combinations[i][j] + 1 > geo_dim - 1:
+                    combinations[i][j] = 0
+                else:
+                    combinations[i][j] += 1
+                    break
+    # Convert to fiat tuples.
+    for i in range(num_derivatives):
+        combinations[i] = to_fiat_tuple(combinations[i], geo_dim)
+
+    return combinations
+
+
+def get_data(ufl_element):
+    "Get needed data to run tests."
+
+    # Create fiat element.
+    element = create_element(ufl_element)
+
+    # The derivative order that we are interested in is the degree of
+    # the element.
+    if isinstance(element, FFCMixedElement):
+        deriv_order = max([e.degree() for e in element.elements()])
+    else:
+        deriv_order = element.degree()
+
+    # Get coordinates of the reference cell.
+    cell = ufl_element.cell()
+    ref_coords = reference_cell(cell.cellname()).get_vertices()
+
+    # Get the locations of the fiat element dofs.
+    elem_points = [list(L.pt_dict.keys())[0] for L in element.dual_basis()]
+
+    # Add some random points.
+    geo_dim = cell.geometric_dimension()
+    points = elem_points + random_points[geo_dim]
+
+    return (element, points, geo_dim, ref_coords, deriv_order)
+
+
+def get_ffc_values(ufl_element):
+    "Get the values from evaluate_basis and evaluate_basis_derivatives."
+
+    # Get data from element and tabulate basis values
+    element, points, geo_dim, ref_coords, deriv_order = get_data(ufl_element)
+
+    # Get relevant element name
+    element_name = get_element_name(ufl_element)
+
+    # Create g++ code and compile
+    num_coords = len(ref_coords)
+    options = {"element": element_name,
+               "dim": geo_dim,
+               "num_points": len(points),
+               "points": matrix(points),
+               "cell_ref_coords": "double cell_ref_coords[{}][{}] = {}".format(num_coords, geo_dim, matrix(ref_coords)),
+               "num_coords": num_coords}
+    compile_gcc_code(ufl_element, evaluate_basis_code_fiat % options)
+
+    # Loop over derivative order and compute values
+    ffc_values = {}
+    for n in range(deriv_order + 1):
+        values = run_code(ufl_element, n)
+        ffc_values[n] = values
+
+    return ffc_values
+
+
+def get_fiat_values(ufl_element):
+    """Create a FIAT element and use it to tabulate the values on the
+    reference element. The return values is a dictionary with keys
+    equal to the derivative order and values is a matrix where each
+    row is the basis values at a point.  E.g., {0:[[1,0,0],[0,1,0],
+    [0,0,1]]}.
+
+    """
+
+    # Get data and tabulate basis values.
+    element, points, geo_dim, ref_coords, deriv_order = get_data(ufl_element)
+    values = element.tabulate(deriv_order, points)
+    return_values = {}
+    value_shape = element.value_shape()
+
+    # Rearrange values to match what we get from evaluate_basis*()
+    for n in range(deriv_order + 1):
+        combinations = compute_derivative_combinations(n, geo_dim)
+        vals = []
+        # If we're evaluating the basis functions, use all points, but
+        # if we're evaluating the derivatives, just use the 3
+        # arbitrary points to avoid the number of tests exploding with
+        # spacedim**2.
+        if n == 0:
+            new_points = points
+        else:
+            new_points = points[-3:]
+        for p, point in enumerate(new_points):
+            if n != 0:
+                p += element.space_dimension()
+            row = [[] for i in range(element.space_dimension())]
+            for i in range(element.space_dimension()):
+                if value_shape == ():
+                    for deriv in combinations:
+                        deriv_vals = values[deriv]
+                        row[i].append(deriv_vals[i][p])
+                elif len(value_shape) == 1:
+                    for c in range(element.value_shape()[0]):
+                        for deriv in combinations:
+                            deriv_vals = values[deriv]
+                            row[i].append(deriv_vals[i][c][p])
+                elif len(value_shape) == 2:
+                    for j in range(element.value_shape()[0]):
+                        for k in range(element.value_shape()[1]):
+                            for deriv in combinations:
+                                deriv_vals = values[deriv]
+                                row[i].append(deriv_vals[i][j][k][p])
+                else:
+                    print(values)
+                    error("Did not expect tensor elements of rank > 2")
+            new_row = []
+            for r in row:
+                new_row += r
+            vals.append(new_row)
+        return_values[n] = numpy.array(vals)
+    return return_values
+
+
+def verify_values(ufl_element, ref_values, ffc_values):
+    "Check the values from evaluate_basis*() against some reference values."
+
+    num_tests = len(ffc_values)
+    if num_tests != len(ref_values):
+        raise RuntimeError("The number of computed values is not equal to the number of reference values.")
+
+    for deriv_order in range(num_tests):
+        s = "evaluate_basis_derivatives, order = %d" % deriv_order
+        e = abs(ffc_values[deriv_order] - ref_values[deriv_order])
+        error = e.max()
+
+        # Check that error is below critical tolerance
+        assert not error > crit_tol, "{} failed, error={} (crit_tol: {})".format(s, error, crit_tol)
+
+        # Print message if error is greater than tolerance
+        if error > tol:
+            print("{} ok: error={} (tol: {})".format(s, error, tol))
+
+    return num_tests
+
+
+# Test over different element types
+@pytest.mark.parametrize("ufl_element", all_elements)
+def test_element(ufl_element):
+    "Test FFC elements against FIAT"
+
+    print("\nVerifying element: {}".format(str(ufl_element)))
+
+    # Generate element
+    generate_element(ufl_element)
+
+    # Get FFC values
+    t = time.time()
+    ffc_values = get_ffc_values(ufl_element)
+    assert ffc_values is not None
+    # debug("  time to compute FFC values: %f" % (time.time() - t))
+
+    # Get FIAT values that are formatted in the same way as the values
+    # from evaluate_basis and evaluate_basis_derivatives.
+    fiat_values = get_fiat_values(ufl_element)
+
+    # Compare FIAT and FFC values
+    verify_values(ufl_element, fiat_values, ffc_values)
diff --git a/test/unit/evaluate_basis/test_common.py b/test/unit/evaluate_basis/test_common.py
deleted file mode 100644
index 8575292..0000000
--- a/test/unit/evaluate_basis/test_common.py
+++ /dev/null
@@ -1,194 +0,0 @@
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-29
-# Last changed: 2013-02-06
-
-from ffc.log import info, info_red, info_blue, info_green, debug
-from instant.output import get_status_output
-import numpy
-import os
-import sys
-from six.moves import xrange as range
-
-tol = 1e-14
-crit_tol = 1e-8
-
-def xcomb(items, n):
-    "Create n-tuples with combinations of items."
-    if n==0: yield []
-    else:
-        for i in range(len(items)):
-            for cc in xcomb(items[:i]+items[i+1:],n-1):
-                yield [items[i]]+cc
-
-# Global log file
-def log_error(message, log_file):
-    "Log error message."
-    log = open(os.path.join(os.pardir, log_file), "a")
-    log.write("\n" + "-"*79 + "\n" + message + "\n" + "-"*79 + "\n")
-    log.close()
-
-def print_results(num_tests, ffc_fail, gcc_fail, run_fail, dif_cri, dif_acc, correct):
-    "Check print summary."
-
-    num_ffc = len(ffc_fail)
-    num_gcc = len(gcc_fail)
-    num_run = len(run_fail)
-    num_cri = len(dif_cri)
-    num_acc = len(dif_acc)
-    num_cor = len(correct)
-
-    if ffc_fail == gcc_fail == run_fail == dif_cri == dif_acc == []:
-        info_green("\nAll %d tests OK" % num_tests)
-        return 0
-
-    num_tests = str(num_tests)
-    num_tot = str(num_ffc + num_gcc + num_run + num_cor + num_cri + num_acc)
-    num_ffc = str(num_ffc)
-    num_gcc = str(num_gcc)
-    num_run = str(num_run)
-    num_cor = str(num_cor)
-    num_cri = str(num_cri)
-    num_acc = str(num_acc)
-    num_ffc = " "*(len(num_tests) - len(num_ffc)) + num_ffc
-    num_gcc = " "*(len(num_tests) - len(num_gcc)) + num_gcc
-    num_run = " "*(len(num_tests) - len(num_run)) + num_run
-    num_cor = " "*(len(num_tests) - len(num_cor)) + num_cor
-    num_cri = " "*(len(num_tests) - len(num_cri)) + num_cri
-    num_acc = " "*(len(num_tests) - len(num_acc)) + num_acc
-    num_tot = " "*(len(num_tests) - len(num_tot)) + num_tot
-
-    info("\n\n*************** SUMMARY ***************")
-    info("\n  Number of tests:                  " + num_tests)
-    info("\n  Num ffc fail:                     " + num_ffc)
-    info("  Num gcc fail:                     " + num_gcc)
-    info("  Num run fail:                     " + num_run)
-    info(("  Num correct:        (tol. %g): " % tol) + num_cor)
-    info(("  Num diff. critical: (tol. %g): " % crit_tol) + num_cri)
-    info("  Num diff. acceptable:             " + num_acc)
-    info("  Total:                            " + num_tot)
-    info("")
-    # Return 0 if there was only acceptable errors.
-    if ffc_fail == gcc_fail == run_fail == dif_cri == []:
-        return 0
-    return 1
-
-def compile_element(ufl_element, ffc_fail, log_file):
-    "Create UFL form file with a single element in it and compile it with FFC"
-    f = open("test.ufl", "w")
-    f.write("element = " + repr(ufl_element))
-    f.close()
-    error, output = get_status_output("ffc test.ufl")
-    if error:
-        info_red("FFC compilation failed.")
-        log_error("element: %s,\n%s\n" % (str(ufl_element), output), log_file)
-        ffc_fail.append(str(ufl_element))
-    return error
-
-def get_element_name(ufl_element):
-    "Extract relevant element name from header file."
-    f = open("test.h")
-    lines = f.readlines()
-    f.close()
-
-    signature = repr(ufl_element)
-    name = None
-    for e, l in enumerate(lines):
-        if "class" in l and "finite_element" in l:
-            name = l
-        if signature in l:
-            break
-    if name is None:
-        raise RuntimeError("No finite element class found")
-    return name.split()[1][:-1]
-
-def compile_gcc_code(ufl_element, code, gcc_fail, log_file):
-
-    # Write code.
-    f = open("evaluate_basis.cpp", "w")
-    f.write(code)
-    f.close()
-
-    # Get UFC flags
-    ufc_cflags = get_status_output("pkg-config --cflags ufc-1")[1].strip()
-
-    # Compile g++ code
-    c = "g++ %s -Wall -Werror -o evaluate_basis evaluate_basis.cpp" % ufc_cflags
-    f = open("compile.sh", "w")
-    f.write(c + "\n")
-    f.close()
-    error, output = get_status_output(c)
-    if error:
-        info_red("GCC compilation failed.")
-        log_error("element: %s,\n%s\n" % (str(ufl_element), output), log_file)
-        gcc_fail.append(str(ufl_element))
-        if error and ("-f" in sys.argv or "--failfast" in sys.argv):
-            print("FAIL")
-            exit(1)
-        return error
-
-def run_code(ufl_element, deriv_order, run_fail, log_file):
-    "Compute values of basis functions for given element."
-
-    # Run compiled code and get values
-    error, output = get_status_output(".%sevaluate_basis %d" % (os.path.sep, deriv_order))
-    if error:
-        info_red("Runtime error (segmentation fault?).")
-        log_error("element: %s,\n%s\n" % (str(ufl_element), output), log_file)
-        run_fail.append(str(ufl_element))
-        return None
-    values = [[float(value) for value in line.strip().split(" ") if value] for line in output.strip().split("\n")]
-    return numpy.array(values)
-
-def verify_values(ufl_element, ref_values, ffc_values, dif_cri, dif_acc, correct, log_file):
-    "Check the values from evaluate_basis*() against some reference values."
-
-    num_tests = len(ffc_values)
-    if num_tests != len(ref_values):
-        raise RuntimeError("The number of computed values is not equal to the number of reference values.")
-
-    errors = [str(ufl_element)]
-    for deriv_order in range(num_tests):
-        s = ""
-        if deriv_order == 0:
-            s = "  evaluate_basis"
-        else:
-            s = "  evaluate_basis_derivatives, order = %d" % deriv_order
-        e = abs(ffc_values[deriv_order] - ref_values[deriv_order])
-        error = e.max()
-        if error > tol:
-            if error >  crit_tol:
-                m = "%s failed: error = %s (crit_tol: %s)" % (s, str(error), str(crit_tol))
-                info_red(m)
-                dif_cri.append(str(ufl_element))
-                s = s + "\n" + m
-            else:
-                m = "%s ok: error = %s (tol: %s)" % (s, str(error), str(tol))
-                info_blue(m)
-                dif_acc.append(str(ufl_element))
-                s = s + "\n" + m
-            errors.append(s)
-        else:
-            info_green("%s OK" % s)
-            correct.append(str(ufl_element))
-    # Log errors if any
-    if len(errors) > 1:
-        log_error("\n".join(errors), log_file)
-
-    return num_tests
-
diff --git a/test/unit/misc/__init__.py b/test/unit/misc/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/test/unit/misc/test.py b/test/unit/misc/test.py
deleted file mode 100644
index d37efb5..0000000
--- a/test/unit/misc/test.py
+++ /dev/null
@@ -1,364 +0,0 @@
-"Unit tests for FFC"
-
-# Copyright (C) 2007-2009 Anders Logg
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# Modified by Marie E. Rognes, 2010
-#
-# First added:  2007-02-06
-# Last changed: 2009-02-24
-from __future__ import print_function
-import unittest
-import sys
-import numpy
-import math
-import os
-import instant
-from time import time
-
-sys.path.append(os.path.join(os.pardir, os.pardir))
-
-from ufl import *
-from ffc.fiatinterface import create_element as create
-from ffc import jit
-
-interval = [(0,), (1,)]
-triangle = [(0, 0), (1, 0), (0, 1)]
-tetrahedron = [(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)]
-num_points = 5
-
-def random_point(shape):
-    w = numpy.random.random(len(shape))
-    return sum([numpy.array(shape[i])*w[i] for i in range(len(shape))]) / sum(w)
-
-class SpaceDimensionTests(unittest.TestCase):
-
-    def testContinuousLagrange(self):
-        "Test space dimensions of continuous Lagrange elements."
-
-        P1 = create(FiniteElement("Lagrange", "triangle", 1))
-        self.assertEqual(P1.space_dimension(), 3)
-
-        P2 = create(FiniteElement("Lagrange", "triangle", 2))
-        self.assertEqual(P2.space_dimension(), 6)
-
-        P3 = create(FiniteElement("Lagrange", "triangle", 3))
-        self.assertEqual(P3.space_dimension(), 10)
-
-    def testDiscontinuousLagrange(self):
-        "Test space dimensions of discontinuous Lagrange elements."
-
-        P0 = create(FiniteElement("DG", "triangle", 0))
-        self.assertEqual(P0.space_dimension(), 1)
-
-        P1 = create(FiniteElement("DG", "triangle", 1))
-        self.assertEqual(P1.space_dimension(), 3)
-
-        P2 = create(FiniteElement("DG", "triangle", 2))
-        self.assertEqual(P2.space_dimension(), 6)
-
-        P3 = create(FiniteElement("DG", "triangle", 3))
-        self.assertEqual(P3.space_dimension(), 10)
-
-class FunctionValueTests(unittest.TestCase):
-    """
-    These tests examine tabulate gives the correct answers for a the
-    supported (non-mixed) elements of polynomial degree less than or
-    equal to 3
-    """
-
-    # FIXME: Add tests for NED and BDM/RT in 3D.
-
-    def _check_function_values(self, points, element, reference):
-        for x in points:
-            table = element.tabulate(0, (x,))
-            basis = table[list(table.keys())[0]]
-            for i in range(len(basis)):
-                if element.value_shape() == ():
-                    self.assertAlmostEqual(float(basis[i]), reference[i](x))
-                else:
-                    for k in range(element.value_shape()[0]):
-                        self.assertAlmostEqual(basis[i][k][0],
-                                               reference[i](x)[k])
-
-    def testContinuousLagrange1D(self):
-        "Test values of continuous Lagrange functions in 1D."
-
-        element = create(FiniteElement("Lagrange", "interval", 1))
-        reference = [lambda x: 1 - x[0],
-                     lambda x: x[0]]
-
-        points = [random_point(interval) for i in range(num_points)]
-        self._check_function_values(points, element, reference)
-
-    def testContinuousLagrange2D(self):
-        "Test values of continuous Lagrange functions in 2D."
-
-        element = create(FiniteElement("Lagrange", "triangle", 1))
-        reference = [lambda x: 1 - x[0] - x[1],
-                     lambda x: x[0],
-                     lambda x: x[1]]
-
-        points = [random_point(triangle) for i in range(num_points)]
-        self._check_function_values(points, element, reference)
-
-    def testContinuousLagrange3D(self):
-        "Test values of continuous Lagrange functions in 3D."
-
-        element = create(FiniteElement("Lagrange", "tetrahedron", 1))
-        reference = [lambda x: 1 - x[0] - x[1] - x[2],
-                     lambda x: x[0],
-                     lambda x: x[1],
-                     lambda x: x[2]]
-
-        points = [random_point(tetrahedron) for i in range(num_points)]
-        self._check_function_values(points, element, reference)
-
-    def testDiscontinuousLagrange1D(self):
-        "Test values of discontinuous Lagrange functions in 1D."
-
-        element = create(FiniteElement("DG", "interval", 1))
-        reference = [lambda x: 1 - x[0],
-                     lambda x: x[0]]
-
-        points = [random_point(interval) for i in range(num_points)]
-        self._check_function_values(points, element, reference)
-
-
-    def testDiscontinuousLagrange2D(self):
-        "Test values of discontinuous Lagrange functions in 2D."
-
-        element = create(FiniteElement("DG", "triangle", 1))
-        reference = [lambda x: 1 - x[0] - x[1],
-                     lambda x: x[0],
-                     lambda x: x[1]]
-
-        points = [random_point(triangle) for i in range(num_points)]
-        self._check_function_values(points, element, reference)
-
-    def testDiscontinuousLagrange3D(self):
-        "Test values of discontinuous Lagrange functions in 3D."
-
-        element = create(FiniteElement("DG", "tetrahedron", 1))
-        reference = [lambda x: 1 - x[0] - x[1] - x[2],
-                     lambda x: x[0],
-                     lambda x: x[1],
-                     lambda x: x[2]]
-
-        points = [random_point(tetrahedron) for i in range(num_points)]
-        self._check_function_values(points, element, reference)
-
-    def testBDM1_2D(self):
-        "Test values of BDM1."
-
-        element = create(FiniteElement("Brezzi-Douglas-Marini", "triangle", 1))
-        reference = [lambda x: (2*x[0], -x[1]),
-                     lambda x: (-x[0], 2*x[1]),
-                     lambda x: (2 - 2*x[0] - 3*x[1], x[1]),
-                     lambda x: (- 1 + x[0] + 3*x[1], - 2*x[1]),
-                     lambda x: (-x[0], -2 + 3*x[0] + 2*x[1]),
-                     lambda x: (2*x[0], 1 - 3*x[0] - x[1])]
-
-        points = [random_point(triangle) for i in range(num_points)]
-        self._check_function_values(points, element, reference)
-
-
-    def testRT1_2D(self):
-        "Test values of RT1."
-
-        element = create(FiniteElement("Raviart-Thomas", "triangle", 1))
-        reference = [lambda x: (x[0], x[1]),
-                     lambda x: (1 - x[0], -x[1]),
-                     lambda x: (x[0], x[1] - 1)]
-
-        points = [random_point(triangle) for i in range(num_points)]
-        self._check_function_values(points, element, reference)
-
-    def testRT2_2D(self):
-        "Test values of RT2."
-
-        element = create(FiniteElement("Raviart-Thomas", "triangle", 2))
-
-        reference = [ lambda x: (-x[0] + 3*x[0]**2,
-                                 -x[1] + 3*x[0]*x[1]),
-                      lambda x: (-x[0] + 3*x[0]*x[1],
-                                 -x[1] + 3*x[1]**2),
-                      lambda x: ( 2 - 5*x[0] - 3*x[1] + 3*x[0]*x[1] + 3*x[0]**2,
-                                  -2*x[1] + 3*x[0]*x[1] + 3*x[1]**2),
-                      lambda x: (-1.0 + x[0] + 3*x[1] - 3*x[0]*x[1],
-                                 x[1] - 3*x[1]**2),
-                      lambda x: (2*x[0] - 3*x[0]*x[1] - 3*x[0]**2,
-                                 -2 + 3*x[0]+ 5*x[1] - 3*x[0]*x[1] - 3*x[1]**2),
-                      lambda x: (- x[0] + 3*x[0]**2,
-                                 + 1 - 3*x[0] - x[1] + 3*x[0]*x[1]),
-                      lambda x: (6*x[0] - 3*x[0]*x[1] - 6*x[0]**2,
-                                 3*x[1] - 6*x[0]*x[1] - 3*x[1]**2),
-                      lambda x: (3*x[0] - 6*x[0]*x[1] - 3*x[0]**2,
-                                 6*x[1]- 3*x[0]*x[1] - 6*x[1]**2),
-                      ]
-
-    def testDRT1_2D(self):
-        "Test values of DRT1."
-
-        element = create(FiniteElement("Discontinuous Raviart-Thomas", "triangle", 1))
-        reference = [lambda x: (x[0], x[1]),
-                     lambda x: (1 - x[0], -x[1]),
-                     lambda x: (x[0], x[1] - 1)]
-
-        points = [random_point(triangle) for i in range(num_points)]
-        self._check_function_values(points, element, reference)
-
-    def testDRT2_2D(self):
-        "Test values of DRT2."
-
-        element = create(FiniteElement("Discontinuous Raviart-Thomas", "triangle", 2))
-
-        reference = [ lambda x: (-x[0] + 3*x[0]**2,
-                                 -x[1] + 3*x[0]*x[1]),
-                      lambda x: (-x[0] + 3*x[0]*x[1],
-                                 -x[1] + 3*x[1]**2),
-                      lambda x: ( 2 - 5*x[0] - 3*x[1] + 3*x[0]*x[1] + 3*x[0]**2,
-                                  -2*x[1] + 3*x[0]*x[1] + 3*x[1]**2),
-                      lambda x: (-1.0 + x[0] + 3*x[1] - 3*x[0]*x[1],
-                                 x[1] - 3*x[1]**2),
-                      lambda x: (2*x[0] - 3*x[0]*x[1] - 3*x[0]**2,
-                                 -2 + 3*x[0]+ 5*x[1] - 3*x[0]*x[1] - 3*x[1]**2),
-                      lambda x: (- x[0] + 3*x[0]**2,
-                                 + 1 - 3*x[0] - x[1] + 3*x[0]*x[1]),
-                      lambda x: (6*x[0] - 3*x[0]*x[1] - 6*x[0]**2,
-                                 3*x[1] - 6*x[0]*x[1] - 3*x[1]**2),
-                      lambda x: (3*x[0] - 6*x[0]*x[1] - 3*x[0]**2,
-                                 6*x[1]- 3*x[0]*x[1] - 6*x[1]**2),
-                      ]
-
-        points = [random_point(triangle) for i in range(num_points)]
-        self._check_function_values(points, element, reference)
-
-    def testNED1_2D(self):
-        "Test values of NED1."
-
-        element = create(FiniteElement("N1curl", "triangle", 1))
-        reference = [ lambda x: (-x[1], x[0]),
-                      lambda x: ( x[1], 1 - x[0]),
-                      lambda x: ( 1 - x[1], x[0]),
-                      ]
-
-        points = [random_point(triangle) for i in range(num_points)]
-        self._check_function_values(points, element, reference)
-
-    def testRT1_3D(self):
-        element = create(FiniteElement("RT", "tetrahedron", 1))
-        reference = [lambda x: (-x[0], -x[1], -x[2]),
-                     lambda x: (-1.0 + x[0], x[1], x[2]),
-                     lambda x: (-x[0], 1.0 - x[1], -x[2]),
-                     lambda x: ( x[0], x[1], -1.0 + x[2])
-                     ]
-        points = [random_point(tetrahedron) for i in range(num_points)]
-        self._check_function_values(points, element, reference)
-
-    def testDRT1_3D(self):
-        element = create(FiniteElement("DRT", "tetrahedron", 1))
-        reference = [lambda x: (-x[0], -x[1], -x[2]),
-                     lambda x: (-1.0 + x[0], x[1], x[2]),
-                     lambda x: (-x[0], 1.0 - x[1], -x[2]),
-                     lambda x: ( x[0], x[1], -1.0 + x[2])
-                     ]
-        points = [random_point(tetrahedron) for i in range(num_points)]
-        self._check_function_values(points, element, reference)
-
-    def testBDM1_3D(self):
-        element = create(FiniteElement("BDM", "tetrahedron", 1))
-        reference = [ lambda x: (-3*x[0], x[1], x[2]),
-                      lambda x: (x[0], -3*x[1], x[2]),
-                      lambda x: (x[0], x[1], -3*x[2]),
-                      lambda x: (-3.0 + 3*x[0] + 4*x[1] + 4*x[2], -x[1], -x[2]),
-                      lambda x: (1.0 - x[0] - 4*x[1], 3*x[1], -x[2]),
-                      lambda x: (1.0 - x[0] - 4*x[2], -x[1], 3*x[2]),
-                      lambda x: (x[0], 3.0 - 4*x[0] - 3*x[1] - 4*x[2], x[2]),
-                      lambda x: (-3*x[0], -1.0 + 4*x[0] + x[1], x[2]),
-                      lambda x: (x[0], -1.0 + x[1] + 4*x[2], -3*x[2]),
-                      lambda x: (-x[0], -x[1], -3.0 + 4*x[0] + 4*x[1] + 3*x[2]),
-                      lambda x: (3*x[0], -x[1], 1.0 - 4*x[0] - x[2]),
-                      lambda x: (-x[0], 3*x[1], 1.0 - 4*x[1] - x[2])
-                      ]
-        points = [random_point(tetrahedron) for i in range(num_points)]
-        self._check_function_values(points, element, reference)
-
-    def testNED1_3D(self):
-        element = create(FiniteElement("N1curl", "tetrahedron", 1))
-        reference = [ lambda x: (0.0, -x[2], x[1]),
-                      lambda x: (-x[2], 0.0,  x[0]),
-                      lambda x: (-x[1],  x[0], 0.0),
-                      lambda x: ( x[2], x[2], 1.0 - x[0] - x[1]),
-                      lambda x: (x[1], 1.0 - x[0] - x[2], x[1]),
-                      lambda x: (1.0 - x[1] - x[2], x[0], x[0])
-                      ]
-        points = [random_point(tetrahedron) for i in range(num_points)]
-        self._check_function_values(points, element, reference)
-
-class JITTests(unittest.TestCase):
-
-    def testPoisson(self):
-        "Test that JIT compiler is fast enough."
-
-        # FIXME: Use local cache: cache_dir argument to instant.build_module
-        #options = {"log_level": INFO + 5}
-        #options = {"log_level": 5}
-        options = {"log_level": WARNING}
-
-        # Define two forms with the same signatures
-        element = FiniteElement("Lagrange", "triangle", 1)
-        v = TestFunction(element)
-        u = TrialFunction(element)
-        f = Coefficient(element)
-        g = Coefficient(element)
-        a0 = f*dot(grad(v), grad(u))*dx
-        a1 = g*dot(grad(v), grad(u))*dx
-
-        # Strange this needs to be done twice
-
-        # Compile a0 so it will be in the cache (both in-memory and disk)
-        jit(a0, options)
-        jit(a0, options)
-
-        # Compile a0 again (should be really fast, using in-memory cache)
-        t = time()
-        jit(a0, options)
-        dt0 = time() - t
-
-        print("")
-
-        # Compile a1 (should be fairly fast, using disk cache)
-        t = time()
-        jit(a1, options)
-        dt1 = time() - t
-
-        # Good values
-        dt0_good = 0.005
-        dt1_good = 0.01
-
-        print("")
-        print("JIT in-memory cache:", dt0)
-        print("JIT disk cache:     ", dt1)
-        print("Reasonable values are %g and %g" % (dt0_good, dt1_good))
-
-        # Check times
-        self.assertTrue(dt0 < 10*dt0_good)
-        self.assertTrue(dt1 < 10*dt1_good)
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/test/unit/misc/test_elements.py b/test/unit/misc/test_elements.py
new file mode 100644
index 0000000..1bdd56d
--- /dev/null
+++ b/test/unit/misc/test_elements.py
@@ -0,0 +1,223 @@
+# -*- coding: utf-8 -*-
+"Unit tests for FFC"
+
+# Copyright (C) 2007-2016 Anders Logg and Garth N. Wells
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+#
+# Modified by Marie E. Rognes, 2010
+# Modified by Lizao Li, 2016
+
+
+from __future__ import print_function
+import pytest
+import os
+import sys
+import numpy
+import math
+from time import time
+
+from ufl import *
+from ffc.fiatinterface import create_element
+from ffc import jit
+
+
+def element_coords(cell):
+    if cell == "interval":
+        return [(0,), (1,)]
+    elif cell == "triangle":
+        return [(0, 0), (1, 0), (0, 1)]
+    elif cell == "tetrahedron":
+        return [(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)]
+    else:
+        RuntimeError("Unknown cell type")
+
+
+def random_point(shape):
+    w = numpy.random.random(len(shape))
+    return sum([numpy.array(shape[i])*w[i] for i in range(len(shape))])/sum(w)
+
+
+ at pytest.mark.parametrize("degree, expected_dim", [(1, 3), (2, 6), (3, 10)])
+def test_continuous_lagrange(degree, expected_dim):
+    "Test space dimensions of continuous Lagrange elements."
+    P = create_element(FiniteElement("Lagrange", "triangle", degree))
+    assert P.space_dimension() == expected_dim
+
+
+ at pytest.mark.parametrize("degree, expected_dim", [(0, 1), (1, 3), (2, 6), (3, 10)])
+def test_discontinuous_lagrange(degree, expected_dim):
+    "Test space dimensions of discontinuous Lagrange elements."
+    P = create_element(FiniteElement("DG", "triangle", degree))
+    assert P.space_dimension() == expected_dim
+
+ at pytest.mark.parametrize("degree, expected_dim",
+                         [(0, 3), (1, 9), (2, 18), (3, 30)])
+def test_regge(degree, expected_dim):
+    "Test space dimensions of generalized Regge element."
+    P = create_element(FiniteElement("Regge", "triangle", degree))
+    assert P.space_dimension() == expected_dim
+
+ at pytest.mark.parametrize("degree, expected_dim",
+                         [(0, 3), (1, 9), (2, 18), (3, 30)])
+def test_hhj(degree, expected_dim):
+    "Test space dimensions of Hellan-Herrmann-Johnson element."
+    P = create_element(FiniteElement("HHJ", "triangle", degree))
+    assert P.space_dimension() == expected_dim
+
+
+class TestFunctionValues():
+    """These tests examine tabulate gives the correct answers for a the
+supported (non-mixed) for low degrees"""
+
+    # FIXME: Add tests for NED and BDM/RT in 3D.
+
+    # Shape (basis) functions on reference element
+    reference_interval_1 = [lambda x: 1 - x[0], lambda x: x[0]]
+    reference_triangle_1 = [lambda x: 1 - x[0] - x[1], lambda x: x[0], lambda x: x[1]]
+    reference_tetrahedron_1 = [lambda x: 1 - x[0] - x[1] - x[2], lambda x: x[0],
+                               lambda x: x[1], lambda x: x[2]]
+    reference_triangle_bdm1 = [lambda x: (2*x[0], -x[1]),
+                               lambda x: (-x[0], 2*x[1]),
+                               lambda x: (2 - 2*x[0] - 3*x[1], x[1]),
+                               lambda x: (- 1 + x[0] + 3*x[1], - 2*x[1]),
+                               lambda x: (-x[0], -2 + 3*x[0] + 2*x[1]),
+                               lambda x: (2*x[0], 1 - 3*x[0] - x[1])]
+    reference_triangle_rt1 = [lambda x: (x[0], x[1]), lambda x: (1 - x[0], -x[1]),
+                              lambda x: (x[0], x[1] - 1)]
+    reference_triangle_rt2 = [lambda x: (-x[0] + 3*x[0]**2, -x[1] + 3*x[0]*x[1]),
+                              lambda x: (-x[0] + 3*x[0]*x[1], -x[1] + 3*x[1]**2),
+                              lambda x: ( 2 - 5*x[0] - 3*x[1] + 3*x[0]*x[1] + 3*x[0]**2,
+                                          -2*x[1] + 3*x[0]*x[1] + 3*x[1]**2),
+                              lambda x: (-1.0 + x[0] + 3*x[1] - 3*x[0]*x[1], x[1] - 3*x[1]**2),
+                              lambda x: (2*x[0] - 3*x[0]*x[1] - 3*x[0]**2,
+                                         -2 + 3*x[0]+ 5*x[1] - 3*x[0]*x[1] - 3*x[1]**2),
+                              lambda x: (- x[0] + 3*x[0]**2,
+                                       + 1 - 3*x[0] - x[1] + 3*x[0]*x[1]),
+                              lambda x: (6*x[0] - 3*x[0]*x[1] - 6*x[0]**2,
+                                         3*x[1] - 6*x[0]*x[1] - 3*x[1]**2),
+                              lambda x: (3*x[0] - 6*x[0]*x[1] - 3*x[0]**2,
+                                         6*x[1]- 3*x[0]*x[1] - 6*x[1]**2)]
+    reference_triangle_ned1 = [lambda x: (-x[1], x[0]), lambda x: ( x[1], 1 - x[0]),
+                               lambda x: ( 1 - x[1], x[0])]
+    reference_tetrahedron_rt1 = [lambda x: (-x[0], -x[1], -x[2]),
+                                 lambda x: (-1.0 + x[0], x[1], x[2]),
+                                 lambda x: (-x[0], 1.0 - x[1], -x[2]),
+                                 lambda x: ( x[0], x[1], -1.0 + x[2])]
+    reference_tetrahedron_bdm1 = [lambda x: (-3*x[0], x[1], x[2]),
+                                  lambda x: (x[0], -3*x[1], x[2]),
+                                  lambda x: (x[0], x[1], -3*x[2]),
+                                  lambda x: (-3.0 + 3*x[0] + 4*x[1] + 4*x[2], -x[1], -x[2]),
+                                  lambda x: (1.0 - x[0] - 4*x[1], 3*x[1], -x[2]),
+                                  lambda x: (1.0 - x[0] - 4*x[2], -x[1], 3*x[2]),
+                                  lambda x: (x[0], 3.0 - 4*x[0] - 3*x[1] - 4*x[2], x[2]),
+                                  lambda x: (-3*x[0], -1.0 + 4*x[0] + x[1], x[2]),
+                                  lambda x: (x[0], -1.0 + x[1] + 4*x[2], -3*x[2]),
+                                  lambda x: (-x[0], -x[1], -3.0 + 4*x[0] + 4*x[1] + 3*x[2]),
+                                  lambda x: (3*x[0], -x[1], 1.0 - 4*x[0] - x[2]),
+                                  lambda x: (-x[0], 3*x[1], 1.0 - 4*x[1] - x[2])]
+    reference_tetrahedron_ned1 = [lambda x: (0.0, -x[2], x[1]),
+                                  lambda x: (-x[2], 0.0,  x[0]),
+                                  lambda x: (-x[1],  x[0], 0.0),
+                                  lambda x: ( x[2], x[2], 1.0 - x[0] - x[1]),
+                                  lambda x: (x[1], 1.0 - x[0] - x[2], x[1]),
+                                  lambda x: (1.0 - x[1] - x[2], x[0], x[0])]
+
+
+    # Tests to perform
+    tests = [("Lagrange", "interval", 1, reference_interval_1),
+             ("Lagrange", "triangle", 1, reference_triangle_1),
+             ("Lagrange", "tetrahedron", 1, reference_tetrahedron_1),
+             ("Discontinuous Lagrange", "interval", 1, reference_interval_1),
+             ("Discontinuous Lagrange", "triangle", 1, reference_triangle_1),
+             ("Discontinuous Lagrange", "tetrahedron", 1, reference_tetrahedron_1),
+             ("Brezzi-Douglas-Marini", "triangle", 1, reference_triangle_bdm1),
+             ("Raviart-Thomas", "triangle", 1, reference_triangle_rt1),
+             ("Raviart-Thomas", "triangle", 2, reference_triangle_rt2),
+             ("Discontinuous Raviart-Thomas", "triangle", 1, reference_triangle_rt1),
+             ("Discontinuous Raviart-Thomas", "triangle", 2, reference_triangle_rt2),
+             ("N1curl", "triangle", 1, reference_triangle_ned1),
+             ("Raviart-Thomas", "tetrahedron", 1, reference_tetrahedron_rt1),
+             ("Discontinuous Raviart-Thomas", "tetrahedron", 1, reference_tetrahedron_rt1),
+             ("Brezzi-Douglas-Marini", "tetrahedron", 1, reference_tetrahedron_bdm1),
+             ("N1curl", "tetrahedron", 1, reference_tetrahedron_ned1),
+        ]
+
+
+    @pytest.mark.parametrize("family, cell, degree, reference", tests)
+    def test_values(self, family, cell, degree, reference):
+        # Create element
+        element = create_element(FiniteElement(family, cell, degree))
+
+        # Get some points and check basis function values at points
+        points = [random_point(element_coords(cell)) for i in range(5)]
+        for x in points:
+            table = element.tabulate(0, (x,))
+            basis = table[list(table.keys())[0]]
+            for i in range(len(basis)):
+                if not element.value_shape():
+                    assert round(float(basis[i]) - reference[i](x), 10) == 0.0
+                else:
+                    for k in range(element.value_shape()[0]):
+                        assert round(basis[i][k][0] - reference[i](x)[k] , 10) == 0.0
+
+
+class Test_JIT():
+
+    def test_poisson(self):
+        "Test that JIT compiler is fast enough."
+
+        # FIXME: Use local cache: cache_dir argument to instant.build_module
+        #options = {"log_level": INFO + 5}
+        #options = {"log_level": 5}
+        options = {"log_level": WARNING}
+
+        # Define two forms with the same signatures
+        element = FiniteElement("Lagrange", "triangle", 1)
+        v = TestFunction(element)
+        u = TrialFunction(element)
+        f = Coefficient(element)
+        g = Coefficient(element)
+        a0 = f*dot(grad(v), grad(u))*dx
+        a1 = g*dot(grad(v), grad(u))*dx
+
+        # Strange this needs to be done twice
+
+        # Compile a0 so it will be in the cache (both in-memory and disk)
+        jit(a0, options)
+        jit(a0, options)
+
+        # Compile a0 again (should be really fast, using in-memory cache)
+        t = time()
+        jit(a0, options)
+        dt0 = time() - t
+
+        # Compile a1 (should be fairly fast, using disk cache)
+        t = time()
+        jit(a1, options)
+        dt1 = time() - t
+
+        # Good values
+        dt0_good = 0.005
+        dt1_good = 0.01
+
+        print("\nJIT in-memory cache:", dt0)
+        print("JIT disk cache:     ", dt1)
+        print("Reasonable values are %g and %g" % (dt0_good, dt1_good))
+
+        # Check times
+        assert dt0 < 10*dt0_good
+        assert dt1 < 10*dt1_good
diff --git a/test/unit/pytest.ini b/test/unit/pytest.ini
new file mode 100644
index 0000000..ab18d0f
--- /dev/null
+++ b/test/unit/pytest.ini
@@ -0,0 +1,9 @@
+[pytest]
+# We use fixture features requiring pytest 2.4
+minversion = 2.4
+
+# Make pytest ignore temp folders
+norecursedirs = .* __pycache__ test_*_tempdir
+
+# Make pytest ignore utility .py files
+python_files = test_*.py
diff --git a/test/unit/symbolics/__init__.py b/test/unit/symbolics/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/test/unit/symbolics/test.py b/test/unit/symbolics/test.py
deleted file mode 100755
index b7d2904..0000000
--- a/test/unit/symbolics/test.py
+++ /dev/null
@@ -1,137 +0,0 @@
-#!/usr/bin/env python
-"Test suite for the symbolic classes."
-
-# Copyright (C) 2009-2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2009-07-11
-# Last changed: 2010-02-01
-
-import unittest
-
-# FFC modules
-from ffc.quadrature.symbolics import *
-
-# Import tests
-from .testfloat import TestFloat
-from .testsymbol import TestSymbol
-from .testproduct import TestProduct
-from .testsum import TestSum
-from .testfraction import TestFraction
-from .testfloatoperators import TestFloatOperators
-from .testsymboloperators import TestSymbolOperators
-from .testproductoperators import TestProductOperators
-from .testsumoperators import TestSumOperators
-from .testfractionoperators import TestFractionOperators
-from .testmixedsymbols import TestMixedSymbols
-from .testexpandoperations import TestExpandOperations
-from .testreducevartype import TestReduceVarType
-from .testreduceoperations import TestReduceOperations
-from .testnotfinished import TestNotFinished
-from .testdgelastodyn import TestDGElastoDyn
-from .testreducegip import TestReduceGIP
-from .testpoisson import TestPoisson
-from .testelasticity2d import TestElasticity2D
-from .testelasticityterm import TestElasticityTerm
-from .testelasweighted import TestElasWeighted
-from .testelasweighted2 import TestElasWeighted2
-from .testrealexamples import TestRealExamples
-
-class TestSingle(unittest.TestCase):
-
-    def testSingle(self):
-        "Run a single test."
-        expr =\
-Fraction(
-  Symbol('W1', GEO),
-  Sum([
-    Fraction(
-      Sum([
-        Symbol('F1', IP),
-        Symbol('F2', IP)
-      ]),
-      Sum([
-        Symbol('K_00', GEO), Symbol('K_01', GEO)
-      ])
-    ),
-    Fraction(
-      Sum([
-        Symbol('F3', IP),
-        Symbol('F4', IP)
-      ]),
-      Sum([
-        Symbol('K_10', GEO), Symbol('K_11', GEO),
-      ])
-    )
-  ])
-)
-#        print "RED: ", expr
-        red = expr.expand().reduce_vartype(IP)
-        red = expr.reduce_vartype(IP)
-#        red = expr.reduce_vartype(IP)
-#        print "expr: ", expr.expand()
-#        comb = Sum([Product([f, r]) for f,r in red]).expand()
-#        print "\nsum: \n", comb
-#        print "eval expr: ", eval(str(expr))
-#        print "eval comb: ", eval(str(comb))
-#        self.assertAlmostEqual(eval(str(expr)), eval(str(comb)))
-
-def suite():
-
-    suite = unittest.TestSuite()
-    # Classes and member functions
-    suite.addTest(TestFloat('testFloat'))
-    suite.addTest(TestSymbol('testSymbol'))
-    suite.addTest(TestProduct('testProduct'))
-    suite.addTest(TestSum('testSum'))
-    suite.addTest(TestFraction('testFraction'))
-    suite.addTest(TestFloatOperators('testFloatOperators'))
-    suite.addTest(TestSymbolOperators('testSymbolOperators'))
-    suite.addTest(TestProductOperators('testProductOperators'))
-    suite.addTest(TestSumOperators('testSumOperators'))
-    suite.addTest(TestFractionOperators('testFractionOperators'))
-    suite.addTest(TestMixedSymbols('testMixedSymbols'))
-    suite.addTest(TestExpandOperations('testExpandOperations'))
-    suite.addTest(TestReduceVarType('testReduceVarType'))
-    suite.addTest(TestReduceOperations('testReduceOperations'))
-
-    # Misc.
-    suite.addTest(TestNotFinished('testNotFinished'))
-
-    # 'Real' expressions (expand and reduce)
-    suite.addTest(TestDGElastoDyn('testDGElastoDyn'))
-    suite.addTest(TestReduceGIP('testReduceGIP'))
-    suite.addTest(TestPoisson('testPoisson'))
-    suite.addTest(TestElasticity2D('testElasticity2D'))
-
-    # 'Real' expressions (generate code)
-    suite.addTest(TestElasticityTerm('testElasticityTerm'))
-    suite.addTest(TestElasWeighted('testElasWeighted'))
-    suite.addTest(TestElasWeighted2('testElasWeighted2'))
-
-    # Various bug encounters
-    suite.addTest(TestRealExamples('testRealExamples'))
-
-    return suite
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(suite())
-#    runner.run(TestSingle('testSingle'))
-
diff --git a/test/unit/symbolics/test_dg_elastodyn.py b/test/unit/symbolics/test_dg_elastodyn.py
new file mode 100755
index 0000000..2fdd3f0
--- /dev/null
+++ b/test/unit/symbolics/test_dg_elastodyn.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.symbolics import *
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+
+
+def testDGElastoDyn():
+    expr = Product([
+        Sum([
+            Symbol("F0", IP),
+            Symbol("F1", IP)
+        ]),
+        Fraction(
+            Symbol("w4", GEO),
+            Symbol("w3", GEO)
+        ),
+        Fraction(
+            Product([
+                Symbol("w2", GEO),
+                Symbol("w5", GEO)
+            ]),
+            Symbol("w6", GEO)
+        )
+    ])
+
+    expr_exp = expr.expand()
+
+    expr_red = expr_exp.reduce_ops()
+
+    F0, F1, w2, w3, w4, w5, w6 = (3.12, -8.1, -45.3, 17.5, 2.2, 5.3, 9.145)
+    assert round(eval(str(expr)) - eval(str(expr_exp)), 10) == 0
+    assert round(eval(str(expr)) - eval(str(expr_red)), 10) == 0
+    assert expr.ops() == 6
+    assert expr_exp.ops() == 11
+    assert expr_red.ops() == 6
diff --git a/test/unit/symbolics/test_elas_weighted.py b/test/unit/symbolics/test_elas_weighted.py
new file mode 100755
index 0000000..31ae2c0
--- /dev/null
+++ b/test/unit/symbolics/test_elas_weighted.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.symbolics import *
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+
+
+def testElasWeighted():
+    expr = Product([
+        Symbol('W4', IP),
+        Sum([
+            Product([
+                Symbol('FE0_C1_D01_ip_j', BASIS),
+                Symbol('FE0_C1_D01_ip_k', BASIS),
+                Symbol('Jinv_00', GEO),
+                Symbol('w1', GEO)
+            ]),
+            Product([
+                Symbol('FE0_C1_D01_ip_j', BASIS),
+                Symbol('FE0_C1_D01_ip_k', BASIS),
+                Symbol('Jinv_01', GEO),
+                Symbol('w0', GEO)
+            ]),
+            Product([
+                Symbol('w2', GEO),
+                Sum([
+                    Product([
+                        Symbol('FE0_C1_D01_ip_j', BASIS),
+                        Symbol('FE0_C1_D01_ip_k', BASIS),
+                        Symbol('Jinv_00', GEO),
+                        Symbol('w1', GEO)
+                    ]),
+                    Product([
+                        Symbol('FE0_C1_D01_ip_j', BASIS),
+                        Symbol('FE0_C1_D01_ip_k', BASIS),
+                        Symbol('Jinv_01', GEO),
+                        Symbol('w0', GEO)
+                    ])
+                ])
+            ])
+        ])
+    ])
+
+    expr_exp = expr.expand()
+    expr_red = expr_exp.reduce_ops()
+
+    det, W4, w0, w1, w2, Jinv_00, Jinv_01, Jinv_11, Jinv_10, FE0_C1_D01_ip_j, FE0_C1_D01_ip_k = [0.123 + i for i in range(11)]
+    assert round(eval(str(expr)) - eval(str(expr_exp))) == 0.0
+    assert round(eval(str(expr)) - eval(str(expr_red))) == 0.0
+    assert expr.ops() == 17
+    assert expr_exp.ops() == 21
+    assert expr_red.ops() == 10
+
+    # Generate code
+    ip_consts = {}
+    geo_consts = {}
+    trans_set = set()
+
+    start = time.time()
+    opt_code = optimise_code(expr, ip_consts, geo_consts, trans_set)
+
+    G = [eval(str(list(geo_consts.items())[0][0]))]
+    I = [eval(str(list(ip_consts.items())[0][0]))]
+    assert round(eval(str(expr)) - eval(str(opt_code))) == 0.0
diff --git a/test/unit/symbolics/test_elas_weighted2.py b/test/unit/symbolics/test_elas_weighted2.py
new file mode 100755
index 0000000..a871027
--- /dev/null
+++ b/test/unit/symbolics/test_elas_weighted2.py
@@ -0,0 +1,102 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.symbolics import *
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+
+
+def testElasWeighted2():
+
+    expr = Product([
+        Symbol('W4', IP),
+        Sum([
+            Product([
+                Symbol('FE0_C1_D01_ip_j', BASIS),
+                Symbol('FE0_C1_D01_ip_k', BASIS),
+                Symbol('Jinv_00', GEO),
+                Symbol('w1', GEO)
+            ]),
+            Product([
+                Symbol('FE0_C1_D01_ip_j', BASIS),
+                Symbol('Jinv_01', GEO),
+                Sum([
+                    Product([
+                        Symbol('FE0_C1_D01_ip_k', BASIS),
+                        Symbol('w0', GEO)
+                    ]),
+                    Product([
+                        Symbol('FE0_C1_D01_ip_k', BASIS),
+                        Symbol('w1', GEO)
+                    ])
+                ])
+            ]),
+            Product([
+                Symbol('w2', GEO),
+                Sum([
+                    Product([
+                        Symbol('FE0_C1_D01_ip_j', BASIS),
+                        Symbol('FE0_C1_D01_ip_k', BASIS),
+                        Symbol('Jinv_00', GEO),
+                        Symbol('w1', GEO)
+                    ]),
+                    Product([
+                        Symbol('FE0_C1_D01_ip_j', BASIS),
+                        Symbol('Jinv_01', GEO),
+                        Sum([
+                            Product([
+                                    Symbol('FE0_C1_D01_ip_k', BASIS),
+                                    Symbol('w0', GEO)
+                                    ]),
+                            Product([
+                                Symbol('FE0_C1_D01_ip_k', BASIS),
+                                Symbol('w1', GEO)
+                            ])
+                        ])
+                    ])
+                ])
+            ])
+        ])
+    ])
+
+    expr_exp = expr.expand()
+    expr_red = expr_exp.reduce_ops()
+
+    det, W4, w0, w1, w2, Jinv_00, Jinv_01, Jinv_11, Jinv_10, FE0_C1_D01_ip_j, FE0_C1_D01_ip_k = [0.123 + i for i in range(11)]
+    assert round(eval(str(expr)) - eval(str(expr_exp)), 10) == 0.0
+    assert round(eval(str(expr)) - eval(str(expr_red)), 10) == 0.0
+    assert expr.ops() == 21
+    assert expr_exp.ops() == 32
+    assert expr_red.ops() == 12
+
+    # Generate code
+    ip_consts = {}
+    geo_consts = {}
+    trans_set = set()
+
+    start = time.time()
+    opt_code = optimise_code(expr, ip_consts, geo_consts, trans_set)
+
+    G = [eval(str(list(geo_consts.items())[0][0]))]
+    I = [eval(str(list(ip_consts.items())[0][0]))]
+    assert round(eval(str(expr)) - eval(str(opt_code)), 10) == 0.0
diff --git a/test/unit/symbolics/test_elasticity_2d.py b/test/unit/symbolics/test_elasticity_2d.py
new file mode 100755
index 0000000..ac2a91a
--- /dev/null
+++ b/test/unit/symbolics/test_elasticity_2d.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.reduce_operations import operation_count, expand_operations, reduce_operations
+from ffc.quadrature.symbolics import *
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+
+
+def testElasticity2D():
+    """Regression test: symbolic optimisation of a 2D elasticity bilinear-form term.
+
+    Builds the same integrand twice -- once as a raw string (`elasticity`) and
+    once as a symbolic tree (`expr`) -- then checks that expansion and
+    operation-reduction preserve the numerical value and hit the pinned
+    operation counts for both representations.
+    """
+    # Reference integrand as a plain string, fed to the string-based
+    # expand_operations/reduce_operations pipeline below.
+    # NOTE(review): this line was truncated by the mailing-list archiver
+    # ("Ji [...]"), so the archived file is not valid Python as shown;
+    # compare against upstream commit f897d17 for the full literal.
+    elasticity = """(((Jinv_00*FE0_C0_D10_ip_j + Jinv_10*FE0_C0_D01_ip_j)*2*(Jinv_00*FE0_C0_D10_ip_k + Jinv_10*FE0_C0_D01_ip_k)*2 + ((Jinv_00*FE0_C1_D10_ip_j + Jinv_10*FE0_C1_D01_ip_j) + (Jinv_01*FE0_C0_D10_ip_j + Jinv_11*FE0_C0_D01_ip_j))*((Jinv_00*FE0_C1_D10_ip_k + Jinv_10*FE0_C1_D01_ip_k) + (Jinv_01*FE0_C0_D10_ip_k + Jinv_11*FE0_C0_D01_ip_k))) + ((Jinv_01*FE0_C1_D10_ip_j + Jinv_11*FE0_C1_D01_ip_j)*2*(Jinv_01*FE0_C1_D10_ip_k + Jinv_11*FE0_C1_D01_ip_k)*2 + ((Jinv_01*FE0_C0_D10_ip_j + Ji [...]
+
+    # The same integrand built as a symbolic tree:
+    # 0.25 * (sum of strain products) * W4_ip * det.
+    expr = Product([
+        Sum([
+            Sum([
+                Product([
+                    Sum([
+                        Product([Symbol("Jinv_00", GEO), Symbol("FE0_C0_D10_ip_j", BASIS)]),
+                        Product([Symbol("Jinv_10", GEO), Symbol("FE0_C0_D01_ip_j", BASIS)])
+                    ]),
+                    FloatValue(2),
+                    Sum([
+                        Product([Symbol("Jinv_00", GEO), Symbol("FE0_C0_D10_ip_k", BASIS)]),
+                        Product([Symbol("Jinv_10", GEO), Symbol("FE0_C0_D01_ip_k", BASIS)])
+                    ]),
+                    FloatValue(2)
+                ]),
+                Product([
+                        Sum([
+                            Sum([
+                                Product([Symbol("Jinv_00", GEO), Symbol("FE0_C1_D10_ip_j", BASIS)]),
+                                Product([Symbol("Jinv_10", GEO), Symbol("FE0_C1_D01_ip_j", BASIS)])
+                                ]),
+                            Sum([
+                                Product([Symbol("Jinv_01", GEO), Symbol("FE0_C0_D10_ip_j", BASIS)]),
+                                Product([Symbol("Jinv_11", GEO), Symbol("FE0_C0_D01_ip_j", BASIS)])
+                                ])
+                            ]),
+                Sum([
+                    Sum([
+                        Product([Symbol("Jinv_00", GEO), Symbol("FE0_C1_D10_ip_k", BASIS)]),
+                        Product([Symbol("Jinv_10", GEO), Symbol("FE0_C1_D01_ip_k", BASIS)])
+                    ]),
+                    Sum([
+                        Product([Symbol("Jinv_01", GEO), Symbol("FE0_C0_D10_ip_k", BASIS)]),
+                        Product([Symbol("Jinv_11", GEO), Symbol("FE0_C0_D01_ip_k", BASIS)])
+                    ])
+                ])
+                ])
+            ]),
+            Sum([
+                Product([
+                    Sum([
+                        Product([Symbol("Jinv_01", GEO), Symbol("FE0_C1_D10_ip_j", BASIS)]),
+                        Product([Symbol("Jinv_11", GEO), Symbol("FE0_C1_D01_ip_j", BASIS)])
+                    ]),
+                    FloatValue(2),
+                    Sum([
+                        Product([Symbol("Jinv_01", GEO), Symbol("FE0_C1_D10_ip_k", BASIS)]),
+                        Product([Symbol("Jinv_11", GEO), Symbol("FE0_C1_D01_ip_k", BASIS)])
+                    ]),
+                    FloatValue(2)
+                ]),
+                Product([
+                    Sum([
+                        Sum([
+                            Product([Symbol("Jinv_01", GEO), Symbol("FE0_C0_D10_ip_j", BASIS)]),
+                            Product([Symbol("Jinv_11", GEO), Symbol("FE0_C0_D01_ip_j", BASIS)])
+                        ]),
+                        Sum([
+                            Product([Symbol("Jinv_00", GEO), Symbol("FE0_C1_D10_ip_j", BASIS)]),
+                            Product([Symbol("Jinv_10", GEO), Symbol("FE0_C1_D01_ip_j", BASIS)])
+                        ])
+                    ]),
+                    Sum([
+                        Sum([
+                            Product([Symbol("Jinv_01", GEO), Symbol("FE0_C0_D10_ip_k", BASIS)]),
+                            Product([Symbol("Jinv_11", GEO), Symbol("FE0_C0_D01_ip_k", BASIS)])
+                        ]),
+                        Sum([
+                            Product([Symbol("Jinv_00", GEO), Symbol("FE0_C1_D10_ip_k", BASIS)]),
+                            Product([Symbol("Jinv_10", GEO), Symbol("FE0_C1_D01_ip_k", BASIS)])
+                        ])
+                    ])
+                ])
+            ])
+        ]),
+        FloatValue(0.25),
+        Symbol("W4_ip", IP),
+        Symbol("det", GEO)
+    ])
+
+    # Run both pipelines: tree-based (expand/reduce_ops) and string-based
+    # (expand_operations/reduce_operations); they should agree on value and cost.
+    expr_exp = expr.expand()
+    elasticity_exp = expand_operations(elasticity, format)
+    expr_red = expr_exp.reduce_ops()
+    elasticity_red = reduce_operations(elasticity, format)
+    elasticity_exp_ops = operation_count(elasticity_exp, format)
+    elasticity_red_ops = operation_count(elasticity_red, format)
+
+    # Arbitrary fixed values so eval(str(...)) can compare representations numerically.
+    Jinv_00, Jinv_01, Jinv_10, Jinv_11, W4_ip, det = (1.1, 1.5, -4.3, 1.7, 11, 52.3)
+    FE0_C0_D01_ip_j, FE0_C0_D10_ip_j, FE0_C0_D01_ip_k, FE0_C0_D10_ip_k = (1.12, 5.7, -9.3, 7.4)
+    FE0_C1_D01_ip_j, FE0_C1_D10_ip_j, FE0_C1_D01_ip_k, FE0_C1_D10_ip_k = (3.12, -8.1, -45.3, 17.5)
+
+    # All five representations must evaluate to the same number (1e-8 tolerance).
+    assert round(eval(str(expr)) - eval(str(expr_exp)), 8) == 0.0
+    assert round(eval(str(expr)) - eval(str(expr_red)), 8) == 0.0
+    assert round(eval(str(expr)) - eval(str(elasticity)), 8) == 0.0
+    assert round(eval(str(expr)) - eval(str(elasticity_exp)), 8) == 0.0
+    assert round(eval(str(expr)) - eval(str(elasticity_red)), 8) == 0.0
+    # Regression-pinned operation counts; both pipelines must agree.
+    assert expr.ops() == 52
+    assert elasticity_exp_ops == 159
+    assert expr_exp.ops() == 159
+    assert elasticity_red_ops == 71
+    assert expr_red.ops() == 71
diff --git a/test/unit/symbolics/test_elasticity_term.py b/test/unit/symbolics/test_elasticity_term.py
new file mode 100755
index 0000000..8bb45c3
--- /dev/null
+++ b/test/unit/symbolics/test_elasticity_term.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.symbolics import *
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+
+
+def test_Elasticity_Term():
+    # expr:  0.25*W1*det*(FE0_C2_D001[0][j]*FE0_C2_D001[0][k]*Jinv_00*Jinv_21 + FE0_C2_D001[0][j]*FE0_C2_D001[0][k]*Jinv_00*Jinv_21)
+    expr = Product([
+        FloatValue(0.25), Symbol('W1', GEO), Symbol('det', GEO),
+        Sum([Product([Symbol('FE0_C2_D001_0_j', BASIS), Symbol('FE0_C2_D001_0_k', BASIS),
+                      Symbol('Jinv_00', GEO), Symbol('Jinv_21', GEO)]),
+             Product([Symbol('FE0_C2_D001_0_j', BASIS), Symbol('FE0_C2_D001_0_k', BASIS),
+                      Symbol('Jinv_00', GEO), Symbol('Jinv_21', GEO)])
+             ])
+    ])
+
+    expr_exp = expr.expand()
+    expr_red = expr_exp.reduce_ops()
+
+    det, W1, Jinv_00, Jinv_21, FE0_C2_D001_0_j, FE0_C2_D001_0_k = [0.123 + i for i in range(6)]
+    assert round(eval(str(expr)) - eval(str(expr_exp)), 10) == 0.0
+    assert round(eval(str(expr)) - eval(str(expr_red)), 10) == 0.0
+    assert expr.ops() == 10
+    assert expr_exp.ops() == 6
+    assert expr_red.ops() == 6
+
+    # Generate code
+    ip_consts = {}
+    geo_consts = {}
+    trans_set = set()
+
+    start = time.time()
+    opt_code = optimise_code(expr, ip_consts, geo_consts, trans_set)
+
+    G = [eval(str(list(geo_consts.items())[0][0]))]
+    assert round(eval(str(expr)) - eval(str(opt_code))) == 0.0
diff --git a/test/unit/symbolics/test_expand_operations.py b/test/unit/symbolics/test_expand_operations.py
new file mode 100755
index 0000000..2c4be30
--- /dev/null
+++ b/test/unit/symbolics/test_expand_operations.py
@@ -0,0 +1,217 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.symbolics import *
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+
+
+def testExpandOperations():
+    """Regression test for expand() across FloatValue/Symbol/Product/Sum/Fraction.
+
+    Each expression is evaluated numerically before and after expansion (must
+    agree to 1e-10) and its operation count is pinned to catch regressions in
+    the expansion rules.
+    """
+    f0 = FloatValue(-1)
+    f1 = FloatValue(2)
+    f2 = FloatValue(1)
+    sx = Symbol("x", GEO)
+    sy = Symbol("y", GEO)
+    sz = Symbol("z", GEO)
+    s0 = Product([FloatValue(-1), Symbol("x", GEO)])
+    s1 = Symbol("y", GEO)
+    s2 = Product([FloatValue(5), Symbol("z", IP)])
+    s3 = Product([FloatValue(-4), Symbol("z", GEO)])
+
+    # Random variable values
+    x = 2.2
+    y = -0.2
+    z = 1.1
+
+    # Aux. expressions
+    P0 = Product([s2, s1])
+    P1 = Product([P0, s0])
+    P2 = Product([P1, s1, P0])
+    P3 = Product([P1, P2])
+
+    S0 = Sum([s2, s1])
+    S1 = Sum([S0, s0])
+    S2 = Sum([S1, s1, S0])
+    S3 = Sum([S1, S2])
+
+    F0 = Fraction(s2, s1)
+    F1 = Fraction(F0, s0)
+    F2 = Fraction(F1, F0)
+    F3 = Fraction(F1, F2)
+
+    # Special fractions
+    # Nested/compound fractions that exercise the fraction-simplification
+    # paths of expand() (fraction-of-fraction, common factors, sums in
+    # numerators).
+    F4 = Fraction(P0, F0)
+    F5 = Fraction(Fraction(s0, P0), P0)
+    F6 = Fraction(Fraction(Fraction(s1, s0), Fraction(s1, s2)), Fraction(Fraction(s2, s0), Fraction(s1, s0)))
+    F7 = Fraction(s1, Product([s1, Symbol("x", GEO)]))
+    F8 = Fraction(Sum([sx, Fraction(sy, sx)]), FloatValue(2))
+
+    F4x = F4.expand()
+    F5x = F5.expand()
+    F6x = F6.expand()
+    F7x = F7.expand()
+    F8x = F8.expand()
+
+    assert round(eval(str(F4)) - eval(str(F4x)), 10) == 0.0
+    assert round(eval(str(F5)) - eval(str(F5x)), 10) == 0.0
+    assert round(eval(str(F6)) - eval(str(F6x)), 10) == 0.0
+    assert round(eval(str(F7)) - eval(str(F7x)), 10) == 0.0
+    assert round(eval(str(F8)) - eval(str(F8x)), 10) == 0.0
+
+    # Note: expansion does not always reduce cost (F8: 3 -> 4 ops); these
+    # counts document the current behaviour, not an optimum.
+    assert F4.ops() == 5
+    assert F4x.ops() == 1
+    assert F5.ops() == 6
+    assert F5x.ops() == 5
+    assert F6.ops() == 9
+    assert F6x.ops() == 1
+    assert F7.ops() == 2
+    assert F7x.ops() == 1
+    assert F8.ops() == 3
+    assert F8x.ops() == 4
+
+    # Expressions that should be expanded
+    e0 = Product([P3, F2])
+    e1 = Product([S3, P2])
+    e2 = Product([F3, S1])
+
+    e3 = Sum([P3, F2])
+    e4 = Sum([S3, P2])
+    e5 = Sum([F3, S1])
+
+    e6 = Fraction(P3, F2)
+    e7 = Fraction(S3, P2)
+    e8 = Fraction(F3, S1)
+    e9 = Fraction(S0, s0)
+
+    e0x = e0.expand()
+    e1x = e1.expand()
+    e2x = e2.expand()
+    e3x = e3.expand()
+    e4x = e4.expand()
+    e5x = e5.expand()
+    e6x = e6.expand()
+    e7x = e7.expand()
+    e8x = e8.expand()
+    e9x = e9.expand()
+
+    assert round(eval(str(e0)) - eval(str(e0x)), 10) == 0.0
+    assert round(eval(str(e1)) - eval(str(e1x)), 10) == 0.0
+    assert round(eval(str(e2)) - eval(str(e2x)), 10) == 0.0
+    assert round(eval(str(e3)) - eval(str(e3x)), 10) == 0.0
+    assert round(eval(str(e4)) - eval(str(e4x)), 10) == 0.0
+    assert round(eval(str(e5)) - eval(str(e5x)), 10) == 0.0
+    assert round(eval(str(e6)) - eval(str(e6x)), 10) == 0.0
+    assert round(eval(str(e7)) - eval(str(e7x)), 10) == 0.0
+    assert round(eval(str(e8)) - eval(str(e8x)), 10) == 0.0
+    assert round(eval(str(e9)) - eval(str(e9x)), 10) == 0.0
+
+    assert e0.ops() == 16
+    assert e0x.ops() == 8
+    assert e1.ops() == 18
+    assert e1x.ops() == 23
+    assert e2.ops() == 14
+    assert e2x.ops() == 9
+    assert e3.ops() == 16
+    assert e3x.ops() == 11
+    assert e4.ops() == 18
+    assert e4x.ops() == 12
+    assert e5.ops() == 14
+    assert e5x.ops() == 6
+    assert e6.ops() == 16
+    assert e6x.ops() == 10
+    assert e7.ops() == 18
+    assert e7x.ops() == 17
+    assert e8.ops() == 14
+    assert e8x.ops() == 8
+    assert e9.ops() == 3
+    assert e9x.ops() == 4
+
+    # More expressions (from old expand tests)
+    PF = Product([F0, F1])
+    E0 = Product([s1, f0, S1])
+    E1 = Sum([P0, E0])
+    E2 = Fraction(Sum([Product([f1])]), f2)
+    E3 = Sum([F0, F0])
+    E4 = Product([Sum([Product([sx, Sum([sy, Product([Sum([sy, Product([sy, sz]), sy])]), sy])]),
+                       Product([sx, Sum([Product([sy, sz]), sy])])])])
+    P4 = Product([s1, Sum([s0, s1])])
+    P5 = Product([s0, E0])
+    P6 = Product([s1])
+    S4 = Sum([s1])
+
+    # Create 'real' term that caused me trouble
+    P00 = Product([Symbol("Jinv_00", GEO)] * 2)
+    P01 = Product([Symbol("Jinv_01", GEO)] * 2)
+    P20 = Product([Symbol("Jinv_00", GEO),
+                   Product([f1, Symbol("Jinv_20", GEO)])])
+    P21 = Product([Symbol("Jinv_01", GEO),
+                   Product([f1, Symbol("Jinv_21", GEO)])])
+    PS0 = Product([Symbol("Jinv_22", GEO),
+                   Sum([P00, P01])])
+    PS1 = Product([Product([f0, Symbol("Jinv_02", GEO)]),
+                   Sum([P20, P21])])
+    SP = Sum([PS0, PS1])
+
+    PFx = PF.expand()
+    E0x = E0.expand()
+    E1x = E1.expand()
+    E2x = E2.expand()
+    E3x = E3.expand()
+    E4x = E4.expand()
+    P4x = P4.expand()
+    P5x = P5.expand()
+    P6x = P6.expand()
+    S4x = S4.expand()
+    SPx = SP.expand()
+
+    # Values for eval() of the Jinv-based term above.
+    Jinv_00, Jinv_01, Jinv_10, Jinv_02, Jinv_20, Jinv_22, Jinv_21, W1, det = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+
+    assert round(eval(str(SP)) - eval(str(SPx)), 10) == 0.0
+    assert round(eval(str(E0)) - eval(str(E0x)), 10) == 0.0
+    assert round(eval(str(E1)) - eval(str(E1x)), 10) == 0.0
+    assert round(eval(str(E2)) - eval(str(E2x)), 10) == 0.0
+    assert round(eval(str(E3)) - eval(str(E3x)), 10) == 0.0
+    assert round(eval(str(E4)) - eval(str(E4x)), 10) == 0.0
+    assert round(eval(str(SP)) - eval(str(SPx)), 10) == 0.0
+    assert round(eval(str(P4)) - eval(str(P4x)), 10) == 0.0
+    assert round(eval(str(P5)) - eval(str(P5x)), 10) == 0.0
+    # Single-member Product/Sum must expand to the member itself.
+    assert P6x == s1
+    assert S4x == s1
+    assert PF.ops() == 6
+    assert PFx.ops() == 5
+    assert E0.ops() == 4
+    assert E0x.ops() == 6
+    assert E1.ops() == 7
+    assert E1x.ops() == 3
+    assert E2.ops() == 1
+    assert E2x.ops() == 0
+    assert E3.ops() == 5
+    assert E3x.ops() == 5
+    assert E4.ops() == 10
+    assert E4x.ops() == 6
+    assert SP.ops() == 11
+    assert SPx.ops() == 13
+    assert P4.ops() == 2
+    assert P4x.ops() == 3
+    assert P5.ops() == 5
+    assert P5x.ops() == 9
diff --git a/test/unit/symbolics/test_float.py b/test/unit/symbolics/test_float.py
new file mode 100755
index 0000000..5523165
--- /dev/null
+++ b/test/unit/symbolics/test_float.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.symbolics import *
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS["precision"])
+
+
+def testFloat():
+    "Test simple FloatValue instance."
+    f0 = FloatValue(1.5)
+    f1 = FloatValue(-5)
+    # Below the formatting precision: expected to collapse to exactly 0
+    # (checked via repr and .val below).
+    f2 = FloatValue(-1e-14)
+    # Above the formatting precision: must survive as -1e-11.
+    f3 = FloatValue(-1e-11)
+    f4 = FloatValue(1.5)  # equal to f0; used for the ==/hash tests
+
+    # repr goes through format["float"], so it reflects the configured precision.
+    assert repr(f0) == "FloatValue(%s)" % format["float"](1.5)
+    assert repr(f1) == "FloatValue(%s)" % format["float"](-5)
+    assert repr(f2) == "FloatValue(%s)" % format["float"](0)
+    assert repr(f3) == "FloatValue(%s)" % format["float"](-1e-11)
+
+    assert f2.val == 0
+    assert not f3.val == 0
+
+    # Bare float literals cost no operations.
+    assert f0.ops() == 0
+    assert f1.ops() == 0
+    assert f2.ops() == 0
+    assert f3.ops() == 0
+
+    # Equality/ordering are value-based, not identity-based.
+    assert f0 == f4
+    assert f1 != f3
+    assert not f0 < f1
+    assert f2 > f3
+
+    # Test hash
+    # Equal FloatValues must hash equal (list and dict membership via f4).
+    l = [f0]
+    d = {f0: 0}
+    assert f0 in l
+    assert f0 in d
+    assert f4 in l
+    assert f4 in d
+    assert f1 not in l
+    assert f1 not in d
diff --git a/test/unit/symbolics/test_float_operators.py b/test/unit/symbolics/test_float_operators.py
new file mode 100755
index 0000000..fd95ce6
--- /dev/null
+++ b/test/unit/symbolics/test_float_operators.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.reduce_operations import expand_operations, reduce_operations
+from ffc.quadrature.symbolics import *
+from ffc.quadrature.sumobj import _group_fractions
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+from ffc.log import error, push_level, pop_level, CRITICAL
+
+
+def test_Float_Operators():
+    "Test binary operators"
+
+    f0 = FloatValue(0.0)
+    f2 = FloatValue(2.0)
+    f3 = FloatValue(3.0)
+    fm1 = FloatValue(-1.0)
+    fm3 = FloatValue(-3.0)
+
+    x = Symbol("x", GEO)
+    y = Symbol("y", GEO)
+    z = Symbol("z", GEO)
+
+    p0 = Product([f2, x])
+    p1 = Product([x, y])
+    p2 = Product([f2, z])
+    p3 = Product([y, x, z])
+    p4 = Product([fm1, f2, x])
+
+    # Expected results of the operator expressions below, built explicitly.
+    S0 = Sum([p0, fm3])
+    S1 = Sum([x, y])
+    S2 = Sum([S1, fm3])
+    S3 = Sum([p4, fm3])
+    S4 = Sum([fm3, Product([fm1, Sum([x, y])])])
+
+    F0 = Fraction(f2, y)
+    F1 = Fraction(FloatValue(-1.5), x)
+    F2 = Fraction(fm3, S1)
+
+    SF0 = Sum([f3, F1])
+    SF1 = Sum([f3, Product([fm1, F1])])
+
+    # Test FloatValue '+'
+    # Comparison is on str() output, i.e. the generated-code form.
+    assert str(f2 + fm3) == str(fm1)
+    assert str(f2 + fm3 + fm3 + f2 + f2) == str(f0)
+    assert str(f0 + p0) == str(p0)
+    assert str(fm3 + p0) == str(S0)
+    assert str(fm3 + S1) == str(S2)
+    assert str(f3 + F1) == str(SF0)
+
+    # Test FloatValue '-'
+    assert str(f2 - fm3) == str(FloatValue(5))
+    assert str(f0 - p0) == str(p4)
+    assert str(fm3 - p0) == str(S3)
+    assert str(fm3 - S1) == str(S4)
+    assert str(f3 - F1) == str(SF1)
+
+    # Test FloatValue '*', only need one because all other cases are
+    # handled by 'other'
+    assert str(f2 * f2) == '%s' % format["float"](4)
+
+    # Test FloatValue '/'
+    assert str(fm3 / f2) == str(FloatValue(-1.5))
+    assert str(f2 / y) == str(F0)
+    assert str(fm3 / p0) == str(F1)
+    assert str(fm3 / S1) == str(F2)
+
+    # Silence output
+    # NOTE(review): 'truediv' is not defined by the imports visible in this
+    # file; if it is indeed undefined, these blocks pass via NameError rather
+    # than exercising division-by-zero/fraction handling.  Confirm that
+    # ffc.quadrature.symbolics exports 'truediv' (via the star import).
+    push_level(CRITICAL)
+    with pytest.raises(Exception):
+        truediv(f2, F0)
+    with pytest.raises(Exception):
+        truediv(f2, f0)
+    with pytest.raises(Exception):
+        truediv(f2, Product([f0, y]))
+    pop_level()
diff --git a/test/unit/symbolics/test_fraction.py b/test/unit/symbolics/test_fraction.py
new file mode 100755
index 0000000..c5da92b
--- /dev/null
+++ b/test/unit/symbolics/test_fraction.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.symbolics import *
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+from ffc.log import push_level, pop_level, CRITICAL
+
+
+def testFraction():
+    "Test simple fraction instance."
+
+    f0 = FloatValue(-2.0)
+    f1 = FloatValue(3.0)
+    f2 = FloatValue(0)
+    s0 = Symbol("x", BASIS)
+    s1 = Symbol("y", GEO)
+
+    # Constant/constant fractions are folded: the repr assert below shows
+    # F0 = 3/-2 is stored as Fraction(-1.5, 1).
+    F0 = Fraction(f1, f0)
+    F1 = Fraction(f2, f0)
+    F2 = Fraction(s0, s1)
+    F3 = Fraction(s0, f1)
+    F4 = Fraction(f0, s1)
+    F5 = Fraction(f2, s1)
+    F6 = Fraction(s0, s1)  # equal to F2; used for the ==/hash tests
+
+    # Silence output
+    # A zero denominator must raise, for both constant and symbolic numerators.
+    push_level(CRITICAL)
+    with pytest.raises(Exception):
+        Fraction(f0, f2)
+    with pytest.raises(Exception):
+        Fraction(s0, f2)
+    pop_level()
+
+    assert repr(F0) == "Fraction(FloatValue(%s), FloatValue(%s))"\
+        % (format["float"](-1.5), format["float"](1))
+    assert repr(F2) == "Fraction(Symbol('x', BASIS), Symbol('y', GEO))"
+
+    # str() is the generated-code form; constant fractions print as plain floats.
+    assert str(F0) == "%s" % format["float"](-1.5)
+    assert str(F1) == "%s" % format["float"](0)
+    assert str(F2) == "x/y"
+    assert str(F3) == "x/%s" % format["float"](3)
+    assert str(F4) == "-%s/y" % format["float"](2)
+    assert str(F5) == "%s" % format["float"](0)
+
+    assert F2 == F2
+    assert F2 != F3
+    assert F5 != F4
+    assert F2 == F6
+
+    # Folded-to-constant fractions cost 0 ops; a real division costs 1.
+    assert F0.ops() == 0
+    assert F1.ops() == 0
+    assert F2.ops() == 1
+    assert F3.ops() == 1
+    assert F4.ops() == 1
+    assert F5.ops() == 0
+
+    # Test hash
+    # Equal fractions must hash equal (membership via F6 == F2).
+    l = [F2]
+    d = {F2: 0}
+
+    assert F2 in l
+    assert F2 in d
+    assert F6 in l
+    assert F6 in d
diff --git a/test/unit/symbolics/test_fraction_operators.py b/test/unit/symbolics/test_fraction_operators.py
new file mode 100755
index 0000000..4a41437
--- /dev/null
+++ b/test/unit/symbolics/test_fraction_operators.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.reduce_operations import operation_count, expand_operations, reduce_operations
+from ffc.quadrature.symbolics import *
+from ffc.quadrature.sumobj import _group_fractions
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+from ffc.log import error, push_level, pop_level, CRITICAL
+
+
+def testFractionOperators():
+    "Test binary operators"
+
+    # Pre-formatted float literals used to build expected code strings.
+    f_0 = format["float"](0)
+    f_1 = format["float"](1)
+    f_2 = format["float"](2)
+    f_5 = format["float"](5)
+
+    f2 = FloatValue(2.0)
+    fm3 = FloatValue(-3.0)
+
+    x = Symbol("x", GEO)
+    y = Symbol("y", GEO)
+
+    p0 = Product([f2, x])
+    p1 = Product([x, y])
+
+    S0 = Sum([x, y])
+
+    F0 = Fraction(f2, y)
+    F1 = Fraction(x, y)
+    F2 = Fraction(x, S0)
+    F3 = Fraction(x, y)  # equal to F1; F1 - F3 must cancel to 0 below
+    F4 = Fraction(p0, y)
+    F5 = Fraction(Product([fm3, x]), y)
+
+    # Test Fraction '+'
+    # Comparisons are on str() output, i.e. the generated-code form.
+    assert str(F0 + f2) == '(%s + %s/y)' % (f_2, f_2)
+    assert str(F1 + x) == '(x + x/y)'
+    assert str(F1 + p0) == '(%s*x + x/y)' % f_2
+    assert str(F1 + S0) == '(x + y + x/y)'
+    assert str(F1 + F3) == '%s*x/y' % f_2
+    assert str(F0 + F1) == '(%s + x)/y' % f_2
+    assert str(F2 + F4) == '(%s*x/y + x/(x + y))' % f_2
+
+    # Test Fraction '-'
+    assert str(F0 - f2) == '(%s/y-%s)' % (f_2, f_2)
+    assert str(F1 - x) == '(x/y - x)'
+    assert str(F1 - p0) == '(x/y-%s*x)' % f_2
+    assert str(F1 - S0) == '(x/y - (x + y))'
+    assert str(F1 - F3) == '%s' % f_0
+    assert str(F4 - F1) == 'x/y'
+    assert str(F4 - F5) == '%s*x/y' % f_5
+    assert str(F0 - F1) == '(%s - x)/y' % f_2
+    assert str(F2 - F4) == '(x/(x + y) - %s*x/y)' % f_2
+
+    # Test Fraction '*'
+    assert str(F1 * f2) == '%s*x/y' % f_2
+    assert str(F1 * x) == 'x*x/y'
+    assert str(F1 * p1) == 'x*x'
+    assert str(F1 * S0) == '(x + x*x/y)'
+    assert repr(F1 * S0) == repr(Sum([x, Fraction(Product([x, x]), y)]))
+    assert str(F1 * F0) == '%s*x/(y*y)' % f_2
+
+    # Test Fraction '/'
+    assert str(F0 / f2) == '%s/y' % f_1
+    assert str(F1 / x) == '%s/y' % f_1
+    assert str(F4 / p1) == '%s/(y*y)' % f_2
+    assert str(F4 / x) == '%s/y' % f_2
+    assert str(F2 / y) == 'x/(x*y + y*y)'
+    assert str(F0 / S0) == '%s/(x*y + y*y)' % f_2
+
+    # NOTE(review): 'truediv(F0 / F0)' passes a single argument -- 'F0 / F0'
+    # is evaluated first, then 'truediv' is called on the result.  The raise
+    # this asserts may therefore come from calling 'truediv' with the wrong
+    # arity (or from it being undefined), not from fraction division itself.
+    # Likely intended 'truediv(F0, F0)'; confirm against upstream.
+    with pytest.raises(Exception):
+        truediv(F0 / F0)
diff --git a/test/unit/symbolics/test_mixed_symbols.py b/test/unit/symbolics/test_mixed_symbols.py
new file mode 100755
index 0000000..7ddc807
--- /dev/null
+++ b/test/unit/symbolics/test_mixed_symbols.py
@@ -0,0 +1,245 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.symbolics import *
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+
+
+def testMixedSymbols():
+
+    f_0 = format["float"](0)
+    f_2 = format["float"](2)
+    f_3 = format["float"](3)
+    f_4 = format["float"](4)
+    f_6 = format["float"](6)
+
+    f0 = FloatValue(-2.0)
+    f1 = FloatValue(3.0)
+    f2 = FloatValue(0)
+
+    s0 = Symbol("x", BASIS)
+    s1 = Symbol("y", GEO)
+    s2 = Symbol("z", GEO)
+
+    p0 = Product([s0, s1])
+    p1 = Product([f1, s0, s1])
+    p2 = Product([s0, f2, s2])
+    p3 = Product([s0, f0, s1, f1, s2])
+
+    S0 = Sum([s0, s1])
+    S1 = Sum([s0, s0])
+    S2 = Sum([f0, s0])
+    S3 = Sum([s0, f0, s0])
+
+    F0 = Fraction(f1, f0)
+    F1 = Fraction(s0, s1)
+    F2 = Fraction(s0, f1)
+    F3 = Fraction(f0, s1)
+
+    x = 1.2
+    y = 2.36
+    z = 6.75
+    # Mixed products
+    mpp0 = Product([p0, s0])
+    mpp1 = Product([p1, p0])
+    mpp2 = Product([p2, p3])
+    mpp3 = Product([p1, mpp1])
+
+    mps0 = Product([S0, s0])
+    mps1 = Product([S1, S0])
+    mps2 = Product([S2, S3])
+    mps3 = Product([S1, mps1])
+
+    mpf0 = Product([F1, s0])
+    mpf1 = Product([F1, F2])
+    mpf2 = Product([F2, F3])
+    mpf3 = Product([F1, mpf1])
+
+    assert round(eval(str(mpp0)) - eval(str(p0)) * eval(str(s0)), 10) == 0.0
+    assert round(eval(str(mpp1)) - eval(str(p1)) * eval(str(p0)), 10) == 0.0
+    assert round(eval(str(mpp2)) - eval(str(p2)) * eval(str(p3)), 10) == 0.0
+    assert round(eval(str(mpp3)) - eval(str(p1)) * eval(str(mpp1)), 10) == 0.0
+
+    assert round(eval(str(mps0)) - eval(str(S0)) * eval(str(s0)), 10) == 0.0
+    assert round(eval(str(mps1)) - eval(str(S1)) * eval(str(S0)), 10) == 0.0
+    assert round(eval(str(mps2)) - eval(str(S2)) * eval(str(S3)), 10) == 0.0
+    assert round(eval(str(mps3)) - eval(str(S1)) * eval(str(mps1)), 10) == 0.0
+
+    assert round(eval(str(mpf0)) - eval(str(F1)) * eval(str(s0)), 10) == 0.0
+    assert round(eval(str(mpf1)) - eval(str(F1)) * eval(str(F2)), 10) == 0.0
+    assert round(eval(str(mpf2)) - eval(str(F2)) * eval(str(F3)), 10) == 0.0
+    assert round(eval(str(mpf3)) - eval(str(F1)) * eval(str(mpf1)), 10) == 0.0
+
+    assert mpp0.ops() == 2
+    assert mpp1.ops() == 4
+    assert mpp2.ops() == 0
+    assert mpp3.ops() == 6
+
+    assert mps0.ops() == 2
+    assert mps1.ops() == 3
+    assert mps2.ops() == 4
+    assert mps3.ops() == 5
+
+    assert mpf0.ops() == 2
+    assert mpf1.ops() == 3
+    assert mpf2.ops() == 3
+    assert mpf3.ops() == 5
+
+    assert str(mpp0) == 'x*x*y'
+    assert str(mpp1) == '%s*x*x*y*y' % f_3
+    assert str(mpp2) == '%s' % f_0
+    assert str(mpp3) == '%s*x*x*x*y*y*y' % format["float"](9)
+    assert str(mps0) == 'x*(x + y)'
+    assert str(mps1) == '(x + x)*(x + y)'
+    assert str(mps2) == '(x + x-%s)*(x-%s)' % (f_2, f_2)
+    assert str(mps3) == '(x + x)*(x + x)*(x + y)'
+    assert str(mpf0) == 'x*x/y'
+    assert str(mpf1) == 'x/%s*x/y' % f_3
+    assert str(mpf2) == '-%s/y*x/%s' % (f_2, f_3)
+    assert str(mpf3) == 'x/%s*x/y*x/y' % f_3
+
+    # Mixed sums
+    msp0 = Sum([p0, s0])
+    msp1 = Sum([p1, p0])
+    msp2 = Sum([p2, p3])
+    msp3 = Sum([p1, msp1])
+    msp4 = Sum([f2, f2])
+
+    mss0 = Sum([S0, s0])
+    mss1 = Sum([S1, S0])
+    mss2 = Sum([S2, S3])
+    mss3 = Sum([S1, mps1])
+
+    msf0 = Sum([F1, s0])
+    msf1 = Sum([F1, F2])
+    msf2 = Sum([F2, F3])
+    msf3 = Sum([F1, msf1])
+
+    assert round(eval(str(msp0)) - (eval(str(p0)) + eval(str(s0))), 10) == 0.0
+    assert round(eval(str(msp1)) - (eval(str(p1)) + eval(str(p0))), 10) == 0.0
+    assert round(eval(str(msp2)) - (eval(str(p2)) + eval(str(p3))), 10) == 0.0
+    assert round(eval(str(msp3)) - (eval(str(p1)) + eval(str(msp1))), 10) == 0.0
+    assert str(msp4) == '%s' % f_0
+
+    assert round(eval(str(mss0)) - (eval(str(S0)) + eval(str(s0))), 10) == 0.0
+    assert round(eval(str(mss1)) - (eval(str(S1)) + eval(str(S0))), 10) == 0.0
+    assert round(eval(str(mss2)) - (eval(str(S2)) + eval(str(S3))), 10) == 0.0
+    assert round(eval(str(mss3)) - (eval(str(S1)) + eval(str(mps1))), 10) == 0.0
+
+    assert round(eval(str(msf0)) - (eval(str(F1)) + eval(str(s0))), 10) == 0.0
+    assert round(eval(str(msf1)) - (eval(str(F1)) + eval(str(F2))), 10) == 0.0
+    assert round(eval(str(msf2)) - (eval(str(F2)) + eval(str(F3))), 10) == 0.0
+    assert round(eval(str(msf3)) - (eval(str(F1)) + eval(str(msf1))), 10) == 0.0
+
+    assert msp0.ops() == 2
+    assert msp1.ops() == 4
+    assert msp2.ops() == 3
+    assert msp3.ops() == 7
+
+    assert mss0.ops() == 2
+    assert mss1.ops() == 3
+    assert mss2.ops() == 3
+    assert mss3.ops() == 5
+
+    assert msf0.ops() == 2
+    assert msf1.ops() == 3
+    assert msf2.ops() == 3
+    assert msf3.ops() == 5
+
+    assert str(msp0) == '(x + x*y)'
+    assert str(msp1) == '(%s*x*y + x*y)' % f_3
+    assert str(msp2) == '-%s*x*y*z' % f_6
+    assert str(msp3) == '(%s*x*y + %s*x*y + x*y)' % (f_3, f_3)
+    assert str(mss0) == '(x + x + y)'
+    assert str(mss1) == '(x + x + x + y)'
+    assert str(mss2) == '(x + x + x-%s)' % f_4
+    assert str(mss3) == '(x + x + (x + x)*(x + y))'
+    assert str(msf0) == '(x + x/y)'
+    assert str(msf1) == '(x/%s + x/y)' % f_3
+    assert str(msf2) == '(x/%s-%s/y)' % (f_3, f_2)
+    assert str(msf3) == '(x/%s + x/y + x/y)' % f_3
+
+    # Mixed fractions
+    mfp0 = Fraction(p0, s0)
+    mfp1 = Fraction(p1, p0)
+    mfp2 = Fraction(p2, p3)
+    mfp3 = Fraction(p1, mfp1)
+
+    mfs0 = Fraction(S0, s0)
+    mfs1 = Fraction(S1, S0)
+    mfs2 = Fraction(S2, S3)
+    mfs3 = Fraction(S1, mfs1)
+
+    mff0 = Fraction(F1, s0)
+    mff1 = Fraction(F1, F2)
+    mff2 = Fraction(F2, F3)
+    mff3 = Fraction(F1, mff1)
+
+    assert round(eval(str(mfp0)) - eval(str(p0)) / eval(str(s0)), 10) == 0.0
+    assert round(eval(str(mfp1)) - eval(str(p1)) / eval(str(p0)), 10) == 0.0
+    assert round(eval(str(mfp2)) - eval(str(p2)) / eval(str(p3)), 10) == 0.0
+    assert round(eval(str(mfp3)) - eval(str(p1)) / eval(str(mfp1)), 10) == 0.0
+
+    assert round(eval(str(mfs0)) - eval(str(S0)) / eval(str(s0)), 10) == 0.0
+    assert round(eval(str(mfs1)) - eval(str(S1)) / eval(str(S0)), 10) == 0.0
+    assert round(eval(str(mfs2)) - eval(str(S2)) / eval(str(S3)), 10) == 0.0
+    assert round(eval(str(mfs3)) - eval(str(S1)) / eval(str(mfs1)), 10) == 0.0
+
+    assert round(eval(str(mff0)) - eval(str(F1)) / eval(str(s0)), 10) == 0.0
+    assert round(eval(str(mff1)) - eval(str(F1)) / eval(str(F2)), 10) == 0.0
+    assert round(eval(str(mff2)) - eval(str(F2)) / eval(str(F3)), 10) == 0.0
+    assert round(eval(str(mff3)) - eval(str(F1)) / eval(str(mff1)), 10) == 0.0
+
+    assert mfp0.ops() == 2
+    assert mfp1.ops() == 4
+    assert mfp2.ops() == 0
+    assert mfp3.ops() == 7
+
+    assert mfs0.ops() == 2
+    assert mfs1.ops() == 3
+    assert mfs2.ops() == 4
+    assert mfs3.ops() == 5
+
+    assert mff0.ops() == 2
+    assert mff1.ops() == 3
+    assert mff2.ops() == 3
+    assert mff3.ops() == 5
+
+    assert str(mfp0) == 'x*y/x'
+    assert str(mfp1) == '%s*x*y/(x*y)' % f_3
+    assert str(mfp2) == '%s' % f_0
+    assert str(mfp3) == '%s*x*y/(%s*x*y/(x*y))' % (f_3, f_3)
+    assert str(mfs0) == '(x + y)/x'
+    assert str(mfs1) == '(x + x)/(x + y)'
+    assert str(mfs2) == '(x-%s)/(x + x-%s)' % (f_2, f_2)
+    assert str(mfs3) == '(x + x)/((x + x)/(x + y))'
+    assert str(mff0) == '(x/y)/x'
+    assert str(mff1) == '(x/y)/(x/%s)' % f_3
+    assert str(mff2) == '(x/%s)/(-%s/y)' % (f_3, f_2)
+    assert str(mff3) == '(x/y)/((x/y)/(x/%s))' % f_3
+
+    # Use p1 as a base expression for Symbol
+    s3 = Symbol(format["cos"](str(p1)), CONST, p1, 1)
+    assert str(s3) == 'std::cos(%s*x*y)' % f_3
+    assert s3.ops() == 3
diff --git a/test/unit/symbolics/test_not_finished.py b/test/unit/symbolics/test_not_finished.py
new file mode 100755
index 0000000..a77a89c
--- /dev/null
+++ b/test/unit/symbolics/test_not_finished.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.symbolics import *
+from ffc.quadrature.sumobj import _group_fractions
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+
+
def testNotFinished():
    "Stuff that would be nice to implement."

    f_1 = format["float"](1)
    f_2 = format["float"](2)
    f_4 = format["float"](4)
    f_8 = format["float"](8)

    # Operands.
    four = FloatValue(4)
    two = FloatValue(2)
    eight = FloatValue(8)
    x = Symbol("x", GEO)
    y = Symbol("y", GEO)
    z = Symbol("z", GEO)  # unused, kept for parity with the sibling tests
    a = Symbol("a", GEO)
    b = Symbol("b", GEO)
    c = Symbol("c", GEO)

    # Aux. expressions.
    two_x = Product([two, x])       # 2*x
    eight_y = Product([eight, y])   # 8*y
    xy = Product([x, y])            # x*y

    four_over_x = Fraction(four, x)

    sum_lin = Sum([two_x, eight_y])               # 2*x + 8*y
    sum_x_xy = Sum([x, xy])                       # x + x*y
    sum_1_y = Sum([FloatValue(1), y])             # 1 + y
    sum_frac = Sum([four_over_x, four_over_x])    # 4/x + 4/x

    # Thing to be implemented.
    e0 = four / sum_lin
    e1 = x / sum_x_xy
    e2 = sum_1_y / sum_x_xy
    e3 = _group_fractions(sum_frac)
    e4 = Sum([Fraction(two * x, a * b * c), Fraction(x, a * b)]).expand().reduce_ops()

    # Tests that pass the current implementation
    assert str(e0) == '%s/(%s*x + %s*y)' % (f_4, f_2, f_8)
    assert str(e1) == 'x/(x + x*y)'
    assert str(e2) == '(%s + y)/(x + x*y)' % f_1
    assert str(e3) == '%s/x' % f_8
    assert str(e4) == 'x*(%s/(a*b) + %s/(a*b*c))' % (f_1, f_2)

    # Tests that should pass in future implementations (change NotEqual to Equal)
    assert str(e0) != '%s/(x + %s*y)' % (f_2, f_4)
    assert str(e1) != '%s/(%s + y)' % (f_1, f_1)
    assert str(e2) != '%s/x' % f_1
    assert str(e4) != 'x*(%s/c + %s)/(a*b)' % (f_2, f_1)

    # TODO: Would it be a good idea to reduce expressions
    # wrt. var_type without first expanding?
    E0 = Product([Sum([Product([Symbol('B0', BASIS), Product([Symbol('B1', BASIS), Sum([x]), Sum([x])])]),
                       Product([Symbol('B0', BASIS), Symbol('B1', BASIS)])])])
    Er0 = E0.reduce_vartype(BASIS)
    Ex0 = E0.expand().reduce_vartype(BASIS)
    assert Ex0[0][1] != Er0[0][1].expand()

    # Both of these reductions should work at the same time
    # 1) 2/(x/(a+b) + y/(a+b)) --> 2(a+b)/(x+y)
    # 2) 2/(x + y/(a+b)) --> no reduction, or if divisions are more expensive
    # 3) 2/(x + y/(a+b)) --> 2(a+b)/((a+b)x + y)
diff --git a/test/unit/symbolics/test_poisson.py b/test/unit/symbolics/test_poisson.py
new file mode 100755
index 0000000..36006d8
--- /dev/null
+++ b/test/unit/symbolics/test_poisson.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.reduce_operations import operation_count, expand_operations, reduce_operations
+from ffc.quadrature.symbolics import *
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+
+
def testPoisson():
    """Check symbolic expand/reduce of a real Poisson bilinear-form integrand
    against the string-based reduce_operations pipeline."""

    poisson = """((Jinv_00*FE0_D10_ip_j + Jinv_10*FE0_D01_ip_j)*(Jinv_00*FE0_D10_ip_k + Jinv_10*FE0_D01_ip_k) + (Jinv_01*FE0_D10_ip_j + Jinv_11*FE0_D01_ip_j)*(Jinv_01*FE0_D10_ip_k + Jinv_11*FE0_D01_ip_k))*W4_ip*det"""

    def dphi(col, idx):
        # One physical-gradient component: Jinv_0c*dN/dX + Jinv_1c*dN/dY
        # for basis index `idx` ('j' or 'k') and Jacobian column `col`.
        return Sum([Product([Symbol("Jinv_0%d" % col, GEO),
                             Symbol("FE0_D10_ip_%s" % idx, BASIS)]),
                    Product([Symbol("Jinv_1%d" % col, GEO),
                             Symbol("FE0_D01_ip_%s" % idx, BASIS)])])

    # grad(phi_j) . grad(phi_k) * weight * det
    expr = Product([
        Sum([Product([dphi(0, "j"), dphi(0, "k")]),
             Product([dphi(1, "j"), dphi(1, "k")])]),
        Symbol("W4_ip", IP),
        Symbol("det", GEO)
    ])

    expanded = expr.expand()
    poisson_expanded = expand_operations(poisson, format)

    reduced = expanded.reduce_ops()
    poisson_reduced = reduce_operations(poisson, format)

    # Concrete values so the generated strings can be eval'ed and compared.
    Jinv_00, Jinv_01, Jinv_10, Jinv_11, W4_ip, det = (1.1, 1.5, -4.3, 1.7, 11, 52.3)
    FE0_D01_ip_j, FE0_D10_ip_j, FE0_D01_ip_k, FE0_D10_ip_k = (1.12, 5.7, -9.3, 7.4)

    # Every transformed form must agree numerically with the original.
    reference = eval(str(expr))
    for variant in (expanded, reduced, poisson, poisson_expanded, poisson_reduced):
        assert round(reference - eval(str(variant)), 10) == 0.0

    # Operation counts: expansion grows the count, reduction shrinks it,
    # and the symbolic pipeline matches the string-based one.
    assert expr.ops() == 17
    assert operation_count(poisson_expanded, format) == 47
    assert expanded.ops() == 47
    assert operation_count(poisson_reduced, format) == 23
    assert reduced.ops() == 23
diff --git a/test/unit/symbolics/test_product.py b/test/unit/symbolics/test_product.py
new file mode 100755
index 0000000..3f75abc
--- /dev/null
+++ b/test/unit/symbolics/test_product.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.symbolics import *
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+
+
def testProduct():
    "Test simple product instance."

    f_0 = format["float"](0)
    f_1 = format["float"](1)

    # Float operands.
    neg_two = FloatValue(-2.0)
    three = FloatValue(3.0)
    zero = FloatValue(0)
    neg_one = FloatValue(-1)
    one = FloatValue(1)
    neg_half = FloatValue(-0.5)
    two = FloatValue(2.0)

    # Symbol operands.
    x = Symbol("x", BASIS)
    y = Symbol("y", GEO)
    z = Symbol("z", GEO)

    # Products under test.
    p0 = Product([])
    p1 = Product([x])
    p2 = Product([x, y])
    p3 = Product([three, x, y])
    p4 = Product([x, zero, z])
    p5 = Product([x, neg_two, y, three, z])
    p6 = Product([x, neg_one, y])
    p7 = Product([x, one, y]).expand().reduce_ops()
    p8 = Product([x, neg_two, z, neg_half])
    p9 = Product([x, y])
    p10 = Product([p0, p1])
    p11 = Product([neg_half, neg_two])
    p12 = Product([two, neg_half])
    p13 = Product([two, neg_half]).expand()
    p14 = Product([three, zero])
    p_tmp = Product([three])
    p_tmp.expand()
    p15 = Product([p_tmp, x])

    # repr() output.
    assert repr(p0) == "Product([FloatValue(%s)])" % f_0
    assert repr(p1) == "Product([Symbol('x', BASIS)])"
    assert repr(p3) == "Product([FloatValue(%s), Symbol('x', BASIS), Symbol('y', GEO)])"\
        % format["float"](3)
    assert repr(p6) == "Product([FloatValue(-%s), Symbol('x', BASIS), Symbol('y', GEO)])" % f_1
    assert repr(p7) == "Product([Symbol('x', BASIS), Symbol('y', GEO)])"
    assert repr(p8) == "Product([Symbol('x', BASIS), Symbol('z', GEO)])"

    # str() output; products containing a zero collapse to zero, and
    # float-only products collapse to a single FloatValue.
    assert str(p2) == 'x*y'
    assert str(p4) == '%s' % f_0
    assert str(p5) == '-%s*x*y*z' % format["float"](6)
    assert str(p6) == ' - x*y'
    assert str(p7) == 'x*y'
    assert str(p8) == 'x*z'
    assert str(p9) == 'x*y'
    assert p0.val == 0
    assert str(p10) == '%s' % f_0
    assert str(p11) == '%s' % f_1
    assert str(p12) == '-%s' % f_1
    assert str(p13) == '-%s' % f_1
    assert repr(p14) == "Product([FloatValue(%s)])" % f_0
    assert repr(p14.expand()) == "FloatValue(%s)" % f_0

    # Equality is structural, not identity-based.
    assert p1 == p1
    assert p1 != p7
    assert p4 != p3
    assert p2 == p9
    assert p2 != p3

    # Operation counts (number of multiplications in the printed form).
    for prod, nops in ((p0, 0), (p1, 0), (p2, 1), (p3, 2), (p4, 0), (p5, 3),
                       (p6, 1), (p7, 1), (p8, 1), (p9, 1), (p10, 0), (p14, 0)):
        assert prod.ops() == nops

    # Test hash: an equal product built independently must be found in
    # list and dict containers.
    container = [p3]
    lookup = {p3: 0}
    p3_twin = Product([three, x, y])

    assert p3 in container
    assert p3 in lookup
    assert p3_twin in container
    assert p3_twin in lookup
    assert p2 not in container
    assert p2 not in lookup
diff --git a/test/unit/symbolics/test_product_operators.py b/test/unit/symbolics/test_product_operators.py
new file mode 100755
index 0000000..f47e0dd
--- /dev/null
+++ b/test/unit/symbolics/test_product_operators.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.reduce_operations import operation_count, expand_operations, reduce_operations
+from ffc.quadrature.symbolics import *
+from ffc.quadrature.sumobj import _group_fractions
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+from ffc.log import error, push_level, pop_level, CRITICAL
+
+
def testProductOperators():
    """Test binary operators ('+', '-', '*', '/') on Product objects."""

    f_0 = format["float"](0)
    f_2 = format["float"](2)
    f_4 = format["float"](4)

    # Float operands.
    f0 = FloatValue(0.0)
    f1 = FloatValue(1.0)
    f2 = FloatValue(2.0)
    fm1 = FloatValue(-1.0)
    fm3 = FloatValue(-3.0)

    # Symbol operands.
    x = Symbol("x", GEO)
    y = Symbol("y", GEO)
    z = Symbol("z", GEO)

    # Products, sums and fractions used as operands below.
    p0 = Product([f2, x])
    p1 = Product([x, y])
    p2 = Product([f2, z])
    p3 = Product([x, y, z])

    S0 = Sum([x, y])
    S1 = Sum([x, z])

    F0 = Fraction(f2, x)
    F1 = Fraction(x, y)
    F2 = Fraction(x, S0)
    F3 = Fraction(x, y)
    F4 = Fraction(p0, y)

    # Test Product '+'
    assert str(p0 + f2) == '(%s + %s*x)' % (f_2, f_2)
    assert str(p0 + x) == '%s*x' % format["float"](3)
    assert str(p0 + y) == '(y + %s*x)' % f_2
    assert str(p0 + p0) == '%s*x' % f_4
    assert str(p0 + p1) == '(%s*x + x*y)' % f_2
    assert p0 + Product([fm1, x]) == x
    assert Product([fm1, x]) + x == f0
    assert str(x + Product([fm1, x])) == '%s' % f_0
    assert str(p0 + S0) == '(x + y + %s*x)' % f_2
    assert str(p0 + F3) == '(%s*x + x/y)' % f_2

    # Test Product '-'
    assert str(p0 - f2) == '(%s*x-%s)' % (f_2, f_2)
    assert str(p0 - x) == 'x'
    assert str(p0 - y) == '(%s*x - y)' % f_2
    assert str(p0 - p0) == '%s' % f_0
    assert str(p0 - p1) == '(%s*x - x*y)' % f_2
    assert str(p0 - S0) == '(%s*x - (x + y))' % f_2
    assert str(p0 - F3) == '(%s*x - x/y)' % f_2

    # Test Product '*', only need to test float, symbol and product.
    # Sum and fraction are handled by 'other'
    assert str(p0 * f0) == '%s' % f_0
    assert str(p0 * fm3) == '-%s*x' % format["float"](6)
    assert str(p0 * y) == '%s*x*y' % f_2
    assert str(p0 * p1) == '%s*x*x*y' % f_2

    # Test Product '/'
    assert str(Product([f0, x]) / x) == '%s' % f_0
    assert str(p0 / S0) == '%s*x/(x + y)' % f_2
    assert p1 / y == x
    assert p1 / p2 == Fraction(Product([p1, FloatValue(0.5)]), z)
    assert p1 / z == Fraction(p1, z)
    assert p0 / Product([f2, p1]) == Fraction(f1, y)
    assert p1 / p0 == Product([FloatValue(0.5), y])
    assert p1 / p1 == f1
    assert p1 / p3 == Fraction(f1, z)
    assert str(p1 / p3) == '%s/z' % format["float"](1)

    # Division by zero and division by a Fraction are unsupported and must
    # raise.  NOTE(review): the original test called the undefined name
    # ``truediv`` here, so pytest.raises was satisfied by a NameError rather
    # than by FFC's own error path; invoke the division operator directly.
    with pytest.raises(Exception):
        p0 / f0
    with pytest.raises(Exception):
        p0 / F0
diff --git a/test/unit/symbolics/test_real_examples.py b/test/unit/symbolics/test_real_examples.py
new file mode 100755
index 0000000..b826c03
--- /dev/null
+++ b/test/unit/symbolics/test_real_examples.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import print_function
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.symbolics import *
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+
+
def testRealExamples():
    """Regression check on an expression taken from real generated code.

    Verifies that reducing the BASIS vartype directly on the product gives
    the same result as expanding first and then reducing.  The original
    version only printed diagnostics and *constructed* a RuntimeError
    without raising it, so a mismatch passed silently; the error is now
    actually raised.
    """
    # basis_derivative * (Jinv_10 + w[4][0])**2
    p = Product([Symbol('FE0_C1_D01[ip][k]', BASIS),
                 Sum([Symbol('Jinv_10', GEO),
                      Symbol('w[4][0]', GEO)]),
                 Sum([Symbol('Jinv_10', GEO),
                      Symbol('w[4][0]', GEO)])])

    br = p.reduce_vartype(BASIS)
    be = p.expand().reduce_vartype(BASIS)

    # Both reduction orders must agree on the (basis, remainder) pair.
    if len(be) == 1 and be[0][0] == br[0] and be[0][1] != br[1].expand():
        print("\nbe: ", repr(be[0][1]))
        print("\nbr: ", repr(br[1].expand()))
        print("\nbe: ", be[0][1])
        print("\nbr: ", br[1].expand())
        raise RuntimeError("Should not be here")
diff --git a/test/unit/symbolics/test_reduce_gip.py b/test/unit/symbolics/test_reduce_gip.py
new file mode 100755
index 0000000..20aabc3
--- /dev/null
+++ b/test/unit/symbolics/test_reduce_gip.py
@@ -0,0 +1,213 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.symbolics import *
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+
+
def testReduceGIP():
    """Regression test of expand()/reduce_ops() on a large real expression.

    The expression is a sum of 41 products of IP-valued factors (F*, W9)
    and geometry constants (G*), as produced by FFC's quadrature code
    generation.  The test checks that expansion preserves both value and
    operation count, and that reduce_ops() reaches the expected lower count
    while still evaluating to the same value.
    """

    expr = Sum([
        Product([
            Symbol("F17", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F18", IP),
            Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G0", GEO)
        ]),
        Product([
            Symbol("F11", IP), Symbol("F11", IP), Symbol("F13", IP), Symbol("F13", IP),
            Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G1", GEO)
        ]),
        Product([
            Symbol("F11", IP), Symbol("F11", IP), Symbol("F12", IP), Symbol("F13", IP),
            Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G2", GEO)
        ]),
        Product([
            Symbol("F10", IP), Symbol("F11", IP), Symbol("F12", IP), Symbol("F20", IP),
            Symbol("F3", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G2", GEO)
        ]),
        Product([
            Symbol("F17", IP), Symbol("F18", IP), Symbol("F20", IP), Symbol("F3", IP),
            Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G3", GEO)
        ]),
        Product([
            Symbol("F10", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F20", IP),
            Symbol("F8", IP), Symbol("W9", IP), Symbol("G4", GEO)
        ]),
        Product([
            Symbol("F10", IP), Symbol("F20", IP), Symbol("F8", IP), Symbol("F8", IP),
            Symbol("F9", IP), Symbol("W9", IP), Symbol("G4", GEO)
        ]),
        Product([
            Symbol("F11", IP), Symbol("F13", IP), Symbol("F17", IP), Symbol("F18", IP),
            Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G2", GEO)
        ]),
        Product([
            Symbol("F20", IP), Symbol("F8", IP), Symbol("F8", IP), Symbol("F9", IP),
            Symbol("F9", IP), Symbol("W9", IP), Symbol("G5", GEO)
        ]),
        Product([
            Symbol("F11", IP), Symbol("F12", IP), Symbol("F17", IP), Symbol("F18", IP),
            Symbol("F20", IP), Symbol("W9", IP), Symbol("G6", GEO)
        ]),
        Product([
            Symbol("F10", IP), Symbol("F10", IP), Symbol("F20", IP), Symbol("F3", IP),
            Symbol("F8", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G1", GEO)
        ]),
        Product([
            Symbol("F10", IP), Symbol("F10", IP), Symbol("F20", IP), Symbol("F8", IP),
            Symbol("F8", IP), Symbol("W9", IP), Symbol("G7", GEO)
        ]),
        Product([
            Symbol("F17", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F19", IP),
            Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G2", GEO)
        ]),
        Product([
            Symbol("F11", IP), Symbol("F12", IP), Symbol("F17", IP), Symbol("F19", IP),
            Symbol("F20", IP), Symbol("W9", IP), Symbol("G4", GEO)
        ]),
        Product([
            Symbol("F11", IP), Symbol("F11", IP), Symbol("F13", IP), Symbol("F13", IP),
            Symbol("F20", IP), Symbol("W9", IP), Symbol("G7", GEO)
        ]),
        Product([
            Symbol("F10", IP), Symbol("F17", IP), Symbol("F19", IP), Symbol("F20", IP),
            Symbol("F3", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G8", GEO)
        ]),
        Product([
            Symbol("F17", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F18", IP),
            Symbol("F20", IP), Symbol("W9", IP), Symbol("G5", GEO)
        ]),
        Product([
            Symbol("F10", IP), Symbol("F17", IP), Symbol("F19", IP), Symbol("F20", IP),
            Symbol("F8", IP), Symbol("W9", IP), Symbol("G9", GEO)
        ]),
        Product([
            Symbol("F10", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F20", IP),
            Symbol("F3", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G2", GEO)
        ]),
        Product([
            Symbol("F17", IP), Symbol("F18", IP), Symbol("F20", IP), Symbol("F8", IP),
            Symbol("F9", IP), Symbol("W9", IP), Symbol("G6", GEO)
        ]),
        Product([
            Symbol("F10", IP), Symbol("F11", IP), Symbol("F13", IP), Symbol("F20", IP),
            Symbol("F3", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G8", GEO)
        ]),
        Product([
            Symbol("F11", IP), Symbol("F11", IP), Symbol("F12", IP), Symbol("F13", IP),
            Symbol("F20", IP), Symbol("W9", IP), Symbol("G4", GEO)
        ]),
        Product([
            Symbol("F10", IP), Symbol("F11", IP), Symbol("F13", IP), Symbol("F20", IP),
            Symbol("F8", IP), Symbol("W9", IP), Symbol("G9", GEO)
        ]),
        Product([
            Symbol("F11", IP), Symbol("F13", IP), Symbol("F20", IP), Symbol("F3", IP),
            Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G2", GEO)
        ]),
        Product([
            Symbol("F11", IP), Symbol("F12", IP), Symbol("F17", IP), Symbol("F18", IP),
            Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G3", GEO)
        ]),
        Product([
            Symbol("F10", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("F8", IP),
            Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G2", GEO)
        ]),
        Product([
            Symbol("F17", IP), Symbol("F19", IP), Symbol("F20", IP), Symbol("F8", IP),
            Symbol("F9", IP), Symbol("W9", IP), Symbol("G4", GEO)
        ]),
        Product([
            Symbol("F11", IP), Symbol("F13", IP), Symbol("F17", IP), Symbol("F19", IP),
            Symbol("F20", IP), Symbol("W9", IP), Symbol("G9", GEO)
        ]),
        Product([
            Symbol("F11", IP), Symbol("F13", IP), Symbol("F17", IP), Symbol("F18", IP),
            Symbol("F20", IP), Symbol("W9", IP), Symbol("G4", GEO)
        ]),
        Product([
            Symbol("F11", IP), Symbol("F11", IP), Symbol("F12", IP), Symbol("F12", IP),
            Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G0", GEO)
        ]),
        Product([
            Symbol("F17", IP), Symbol("F17", IP), Symbol("F19", IP), Symbol("F19", IP),
            Symbol("F20", IP), Symbol("W9", IP), Symbol("G7", GEO)
        ]),
        Product([
            Symbol("F17", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F19", IP),
            Symbol("F20", IP), Symbol("W9", IP), Symbol("G4", GEO)
        ]),
        Product([
            Symbol("F20", IP), Symbol("F3", IP), Symbol("F8", IP), Symbol("F8", IP),
            Symbol("F9", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G0", GEO)
        ]),
        Product([
            Symbol("F11", IP), Symbol("F12", IP), Symbol("F20", IP), Symbol("F8", IP),
            Symbol("F9", IP), Symbol("W9", IP), Symbol("G6", GEO)
        ]),
        Product([
            Symbol("F11", IP), Symbol("F13", IP), Symbol("F17", IP), Symbol("F19", IP),
            Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G8", GEO)
        ]),
        Product([
            Symbol("F17", IP), Symbol("F19", IP), Symbol("F20", IP), Symbol("F3", IP),
            Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G2", GEO)
        ]),
        Product([
            Symbol("F10", IP), Symbol("F11", IP), Symbol("F12", IP), Symbol("F20", IP),
            Symbol("F8", IP), Symbol("W9", IP), Symbol("G4", GEO)
        ]),
        Product([
            Symbol("F11", IP), Symbol("F13", IP), Symbol("F20", IP), Symbol("F8", IP),
            Symbol("F9", IP), Symbol("W9", IP), Symbol("G4", GEO)
        ]),
        Product([
            Symbol("F11", IP), Symbol("F11", IP), Symbol("F12", IP), Symbol("F12", IP),
            Symbol("F20", IP), Symbol("W9", IP), Symbol("G5", GEO)
        ]),
        Product([
            Symbol("F11", IP), Symbol("F12", IP), Symbol("F20", IP), Symbol("F3", IP),
            Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G3", GEO)
        ]),
        Product([
            Symbol("F17", IP), Symbol("F17", IP), Symbol("F19", IP), Symbol("F19", IP),
            Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G1", GEO)
        ]),
        Product([
            Symbol("F11", IP), Symbol("F12", IP), Symbol("F17", IP), Symbol("F19", IP),
            Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G2", GEO)
        ])
    ])

    # Expand to a sum of monomials, then refactor to minimise operations.
    expr_exp = expr.expand()
    expr_red = expr_exp.reduce_ops()

    # Arbitrary but fixed values so the printed forms can be eval'ed and
    # compared numerically (F14/F15/F16 etc. are bound but unused).
    W9 = 9
    F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20 = [0.123 * i for i in range(1, 21)]
    G0, G1, G2, G3, G4, G5, G6, G7, G8, G9 = [2.64 + 1.0 / i for i in range(20, 30)]

    # Value is preserved to 10 decimals by both transformations; expansion
    # keeps the operation count while reduction lowers it.
    assert round(eval(str(expr)) - eval(str(expr_exp)), 10) == 0.0
    assert round(eval(str(expr)) - eval(str(expr_red)), 10) == 0.0
    assert expr.ops() == 314
    assert expr_exp.ops() == 314
    assert expr_red.ops() == 120
diff --git a/test/unit/symbolics/test_reduce_operations.py b/test/unit/symbolics/test_reduce_operations.py
new file mode 100755
index 0000000..4581233
--- /dev/null
+++ b/test/unit/symbolics/test_reduce_operations.py
@@ -0,0 +1,247 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.symbolics import *
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+
+
+def testReduceOperations():
+
+    f_1 = format["float"](1)
+    f_2 = format["float"](2)
+
+    # Aux. variables
+    f2 = FloatValue(2)
+    f0_5 = FloatValue(0.5)
+    f1 = FloatValue(1.0)
+    fm1 = FloatValue(-1.0)
+
+    x = Symbol("x", GEO)
+    y = Symbol("y", GEO)
+    z = Symbol("z", GEO)
+    a = Symbol("a", GEO)
+    b = Symbol("b", GEO)
+    c = Symbol("c", GEO)
+    d = Symbol("d", GEO)
+
+    # Expand and reduce plain float and symbol objects
+    fx2 = f2.expand()
+    xx = x.expand()
+
+    fr2 = fx2.reduce_ops()
+    xr = xx.reduce_ops()
+
+    assert f2 == fr2
+    assert x == xr
+
+    # Test product
+    p0 = f2 * x
+    p1 = y * x
+    p2 = x * f2 / y
+    p3 = x * Sum([x, y])
+
+    px0 = p0.expand()
+    px1 = p1.expand()
+
+    pr0 = px0.reduce_ops()
+    pr1 = px1.reduce_ops()
+
+    assert p0 == pr0
+    assert p1 == pr1
+
+    # Test fraction
+    F0 = Fraction(p0, y)
+    F1 = Fraction(x, p0)
+    F2 = Fraction(p0, p1)
+    F3 = Fraction(Sum([x * x, x * y]), y)
+    F4 = Fraction(Sum([f2 * x, x * y]), a)
+
+    Fx0 = F0.expand()
+    Fx1 = F1.expand()
+    Fx2 = F2.expand()
+    Fx3 = F3.expand()
+    Fx4 = F4.expand()
+
+    Fr0 = Fx0.reduce_ops()
+    Fr1 = Fx1.reduce_ops()
+    Fr2 = Fx2.reduce_ops()
+    Fr3 = Fx3.reduce_ops()
+    Fr4 = Fx4.reduce_ops()
+
+    assert Fr0 == F0
+    assert Fr1 == f0_5
+    assert Fr2 == Fraction(f2, y)
+    assert str(Fr3) == "x*(%s + x/y)" % f_1
+    assert str(Fr4) == "x*(%s + y)/a" % f_2
+
+    # Test sum
+    # TODO: Here we might have to add additional tests
+    S0 = Sum([x, y])
+    S1 = Sum([p0, p1])
+    S2 = Sum([x, p1])
+    S3 = Sum([p0, f2 * y])
+    S4 = Sum([f2 * p1, z * p1])
+    S5 = Sum([x, x * x, x * x * x])
+    S6 = Sum([a * x * x, b * x * x * x, c * x * x, d * x * x * x])
+    S7 = Sum([p0, p1, x * x, f2 * z, y * z])
+    S8 = Sum([a * y, b * y, x * x * x * y, x * x * x * z])
+    S9 = Sum([a * y, b * y, c * y, x * x * x * y, f2 * x * x, x * x * x * z])
+    S10 = Sum([f2 * x * x * y, x * x * y * z])
+    S11 = Sum([f2 * x * x * y * y, x * x * y * y * z])
+    S12 = Sum([f2 * x * x * y * y, x * x * y * y * z, a * z, b * z, c * z])
+    S13 = Sum([Fraction(f1, x), Fraction(f1, y)])
+    S14 = Sum([Fraction(fm1, x), Fraction(fm1, y)])
+    S15 = Sum([Fraction(f2, x), Fraction(f2, x)])
+    S16 = Sum([Fraction(f2 * x, y * z), Fraction(f0_5, y * z)])
+    S17 = Sum([(f2 * x * y) / a, (x * y * z) / b])
+    S18 = Sum([(x * y) / a, (x * z) / a, f2 / a, (f2 * x * y) / a])
+    S19 = Sum([(f2 * x) / a, (x * y) / a, z * x])
+    S20 = Product([Sum([x, y]), Fraction(a, b), Fraction(Product([c, d]), z)])
+    S21 = Sum([a * x, b * x, c * x, x * y, x * z, f2 * y, a * y, b * y, f2 * z, a * z, b * z])
+    S22 = Sum([FloatValue(0.5) * x / y, FloatValue(-0.5) * x / y])
+    S23 = Sum([x * y * z, x * y * y * y * z * z * z, y * y * y * z * z * z * z, z * z * z * z * z])
+
+    Sx0 = S0.expand()
+    Sx1 = S1.expand()
+    Sx2 = S2.expand()
+    Sx3 = S3.expand()
+    Sx4 = S4.expand()
+    Sx5 = S5.expand()
+    Sx6 = S6.expand()
+    Sx7 = S7.expand()
+    Sx8 = S8.expand()
+    Sx9 = S9.expand()
+    Sx10 = S10.expand()
+    Sx11 = S11.expand()
+    Sx12 = S12.expand()
+    Sx13 = S13.expand()
+    Sx14 = S14.expand()
+    Sx15 = S15.expand()
+    Sx16 = S16.expand()
+    Sx17 = S17.expand()
+    Sx18 = S18.expand()
+    Sx19 = S19.expand()
+    Sx20 = S20.expand()
+    Sx21 = S21.expand()
+    Sx22 = S22.expand()
+    Sx23 = S23.expand()
+
+    Sr0 = Sx0.reduce_ops()
+    Sr1 = Sx1.reduce_ops()
+    Sr2 = Sx2.reduce_ops()
+    Sr3 = Sx3.reduce_ops()
+    Sr4 = Sx4.reduce_ops()
+    Sr5 = Sx5.reduce_ops()
+    Sr6 = Sx6.reduce_ops()
+    Sr7 = Sx7.reduce_ops()
+    Sr8 = Sx8.reduce_ops()
+    Sr9 = Sx9.reduce_ops()
+    Sr10 = Sx10.reduce_ops()
+    Sr11 = Sx11.reduce_ops()
+    Sr12 = Sx12.reduce_ops()
+    Sr13 = Sx13.reduce_ops()
+    Sr14 = Sx14.reduce_ops()
+    Sr15 = Sx15.reduce_ops()
+    Sr16 = Sx16.reduce_ops()
+    Sr17 = Sx17.reduce_ops()
+    Sr18 = Sx18.reduce_ops()
+    Sr19 = Sx19.reduce_ops()
+    Sr20 = Sx20.reduce_ops()
+    Sr21 = Sx21.reduce_ops()
+    Sr22 = Sx22.reduce_ops()
+    Sr23 = Sx23.reduce_ops()
+
+    assert Sr0 == S0
+    assert str(Sr1) == "x*(%s + y)" % f_2
+    # TODO: Should this be (x + x*y)?
+    assert str(Sr2) == "x*(%s + y)" % f_1
+    # assert str(Sr2) == "(x + x*y)"
+    assert str(Sr3) == "%s*(x + y)" % f_2
+    assert str(Sr4) == "x*y*(%s + z)" % f_2
+    assert str(Sr5) == "x*(%s + x*(%s + x))" % (f_1, f_1)
+    assert str(Sr6) == "x*x*(a + c + x*(b + d))"
+    assert str(Sr7) == "(x*(%s + x + y) + z*(%s + y))" % (f_2, f_2)
+    assert str(Sr8) == "(x*x*x*(y + z) + y*(a + b))"
+    assert str(Sr9) == "(x*x*(%s + x*(y + z)) + y*(a + b + c))" % f_2
+    assert str(Sr10) == "x*x*y*(%s + z)" % f_2
+    assert str(Sr11) == "x*x*y*y*(%s + z)" % f_2
+    assert str(Sr12) == "(x*x*y*y*(%s + z) + z*(a + b + c))" % f_2
+    assert str(Sr13) == "(%s/x + %s/y)" % (f_1, f_1)
+    assert str(Sr14) == "(-%s/x-%s/y)" % (f_1, f_1)
+    assert str(Sr15) == "%s/x" % format["float"](4)
+    assert str(Sr16) == "(%s + %s*x)/(y*z)" % (format["float"](0.5), f_2)
+    assert str(Sr17) == "x*y*(%s/a + z/b)" % f_2
+    assert str(Sr18) == "(%s + x*(z + %s*y))/a" % (f_2, format["float"](3))
+    assert str(Sr19) == "x*(z + (%s + y)/a)" % f_2
+    assert str(Sr20) == "a*c*d*(x + y)/(b*z)"
+    assert str(Sr21) == "(x*(a + b + c + y + z) + y*(%s + a + b) + z*(%s + a + b))" % (f_2, f_2)
+    assert str(Sr22) == "%s" % format["float"](0)
+    assert str(Sr23) == "(x*y*z + z*z*z*(y*y*y*(x + z) + z*z))"
+
+    assert S0.ops() == 1
+    assert Sr0.ops() == 1
+    assert S1.ops() == 3
+    assert Sr1.ops() == 2
+    assert S2.ops() == 2
+    assert Sr2.ops() == 2
+    assert S3.ops() == 3
+    assert Sr3.ops() == 2
+    assert S4.ops() == 5
+    assert Sr4.ops() == 3
+    assert S5.ops() == 5
+    assert Sr5.ops() == 4
+    assert S6.ops() == 13
+    assert Sr6.ops() == 6
+    assert S7.ops() == 9
+    assert Sr7.ops() == 6
+    assert S8.ops() == 11
+    assert Sr8.ops() == 7
+    assert S9.ops() == 16
+    assert Sr9.ops() == 9
+    assert S10.ops() == 7
+    assert Sr10.ops() == 4
+    assert S11.ops() == 9
+    assert Sr11.ops() == 5
+    assert S12.ops() == 15
+    assert Sr12.ops() == 9
+    assert S13.ops() == 3
+    assert Sr13.ops() == 3
+    assert S14.ops() == 3
+    assert Sr14.ops() == 3
+    assert S15.ops() == 3
+    assert Sr15.ops() == 1
+    assert S16.ops() == 6
+    assert Sr16.ops() == 4
+    assert S17.ops() == 7
+    assert Sr17.ops() == 5
+    assert S18.ops() == 11
+    assert Sr18.ops() == 5
+    assert S19.ops() == 7
+    assert Sr19.ops() == 4
+    assert S20.ops() == 6
+    assert Sr20.ops() == 6
+    assert S21.ops() == 21
+    assert Sr21.ops() == 13
+    assert S23.ops() == 21
+    assert Sr23.ops() == 12
diff --git a/test/unit/symbolics/test_reduce_vartype.py b/test/unit/symbolics/test_reduce_vartype.py
new file mode 100755
index 0000000..eceec58
--- /dev/null
+++ b/test/unit/symbolics/test_reduce_vartype.py
@@ -0,0 +1,147 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.symbolics import *
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+
+
+def testReduceVarType():
+    f1 = FloatValue(1)
+    f2 = FloatValue(2)
+    f3 = FloatValue(3)
+    f5 = FloatValue(5)
+    fm4 = FloatValue(-4)
+
+    B0 = Symbol("B0", BASIS)
+    B1 = Symbol("B1", BASIS)
+    Bm4 = Product([fm4, B1])
+    B5 = Product([f5, B0])
+
+    I0 = Symbol("I0", IP)
+    I1 = Symbol("I1", IP)
+    I2 = Symbol("I2", IP)
+    I5 = Product([f5, I0])
+
+    G0 = Symbol("G0", GEO)
+    G1 = Symbol("G1", GEO)
+    G2 = Symbol("G2", GEO)
+    G3 = Product([f3, G0])
+
+    C0 = Symbol("C0", CONST)
+    C2 = Product([f2, C0])
+
+    p0 = Product([B0, I5])
+    p1 = Product([B0, B1])
+
+    S0 = Sum([B0, I5])
+    S1 = Sum([p0, p1])
+    S2 = Sum([B0, B1])
+    S3 = Sum([B0, p0])
+    S4 = Sum([f5, p0])
+    S5 = Sum([I0, G0])
+
+    F0 = Fraction(B0, I5).expand()
+    F1 = Fraction(p1, I5).expand()
+    F2 = Fraction(G3, S2).expand()
+    F3 = Fraction(G3, S3).expand()
+    F4 = Fraction(I1, Sum([I1, I0]))
+    F5 = Fraction(S5, I1)
+    F6 = Fraction(I0,
+                  Sum([
+                      Fraction(Sum([I0, I1]), Sum([G0, G1])),
+                      Fraction(Sum([I1, I2]), Sum([G1, G2])),
+                  ]))
+
+    r0 = B0.reduce_vartype(BASIS)
+    r1 = B0.reduce_vartype(CONST)
+
+    rp0 = p0.reduce_vartype(BASIS)
+    rp1 = p0.reduce_vartype(IP)
+    rp2 = p1.reduce_vartype(BASIS)
+    rp3 = p1.reduce_vartype(GEO)
+
+    rs0 = S0.reduce_vartype(BASIS)
+    rs1 = S0.reduce_vartype(IP)
+    rs2 = S1.reduce_vartype(BASIS)
+    rs3 = S4.reduce_vartype(BASIS)
+    rs4 = S4.reduce_vartype(CONST)
+
+    rf0 = F0.reduce_vartype(BASIS)
+    rf1 = F1.reduce_vartype(BASIS)
+    rf2 = F0.reduce_vartype(IP)
+    rf3 = F2.reduce_vartype(BASIS)
+    rf4 = F3.reduce_vartype(BASIS)
+    rf5 = F4.reduce_vartype(IP)
+    rf6 = F5.reduce_vartype(IP)
+    rf7 = F6.reduce_vartype(IP)
+
+    assert [(B0, f1)] == r0
+    assert [((), B0)] == r1
+
+    assert [(B0, I5)] == rp0
+    assert [(I0, B5)] == rp1
+    assert [(p1, f1)] == rp2
+    assert [((), p1)] == rp3
+
+    assert ((), I5) == rs0[0]
+    assert (B0, f1) == rs0[1]
+    assert (I0, f5) == rs1[1]
+    assert ((), B0) == rs1[0]
+    assert (Product([B0, B1]), f1) == rs2[1]
+    assert (B0, I5) == rs2[0]
+    assert ((), f5) == rs3[0]
+    assert (B0, I5) == rs3[1]
+    assert (f5, Sum([f1, Product([B0, I0])])) == rs4[0]
+
+    assert [(B0, Fraction(FloatValue(0.2), I0))] == rf0
+    assert [(Product([B0, B1]), Fraction(FloatValue(0.2), I0))] == rf1
+    assert [(Fraction(f1, I0), Product([FloatValue(0.2), B0]))] == rf2
+    assert [(Fraction(f1, S2), G3)] == rf3
+    assert [(Fraction(f1, B0), Fraction(G3, Sum([I5, f1])))] == rf4
+    assert F4 == rf5[0][0]
+    assert FloatValue(1) == rf5[0][1]
+    assert Fraction(I0, I1) == rf6[1][0]
+    assert f1 == rf6[1][1]
+    assert Fraction(f1, I1) == rf6[0][0]
+    assert G0 == rf6[0][1]
+    assert F6 == rf7[0][0]
+    assert f1 == rf7[0][1]
+
+    expr = Sum([Symbol('W1', GEO), Fraction(Symbol('det', GEO), Sum([Symbol('F0', IP), Symbol('K_11', GEO)]))])
+    red = expr.expand().reduce_vartype(IP)
+    vals = []
+    for ip in red:
+        ip_dec, geo = ip
+        if ip_dec and geo:
+            vals.append(Product([ip_dec, geo]))
+        elif geo:
+            vals.append(geo)
+        elif ip_dec:
+            vals.append(ip_dec)
+    comb = Sum(vals).expand()
+    K_11 = 1.4
+    F0 = 1.5
+    W1 = 1.9
+    det = 2.1
+    assert round(eval(str(expr)) - eval(str(comb)), 10) == 0.0
diff --git a/test/unit/symbolics/test_sum.py b/test/unit/symbolics/test_sum.py
new file mode 100755
index 0000000..8e870c6
--- /dev/null
+++ b/test/unit/symbolics/test_sum.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.symbolics import *
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+
+
+def testSum():
+    "Test simple sum instance."
+
+    f_0 = format["float"](0)
+    f_1 = format["float"](1)
+    f_2 = format["float"](2)
+    f_3 = format["float"](3)
+
+    f0 = FloatValue(-2.0)
+    f1 = FloatValue(3.0)
+    f2 = FloatValue(0)
+    s0 = Symbol("x", BASIS)
+    s1 = Symbol("y", GEO)
+    s2 = Symbol("z", GEO)
+
+    S0 = Sum([])
+    S1 = Sum([s0])
+    S2 = Sum([s0, s1])
+    S3 = Sum([s0, s0])
+    S4 = Sum([f0, s0])
+    S5 = Sum([s0, f0, s0])
+    S6 = Sum([s0, f0, s0, f1])
+    S7 = Sum([s0, f0, s1, f2])
+    S8 = Sum([s0, f1, s0])
+    S9 = Sum([f0, f0, f0, f1, f1, s1])
+    S10 = Sum([s1, s0])
+
+    assert repr(S0) == "Sum([FloatValue(%s)])" % f_0
+    assert S0.t == CONST
+    assert repr(S1) == "Sum([Symbol('x', BASIS)])"
+    assert repr(S4) == "Sum([FloatValue(-%s), Symbol('x', BASIS)])" % f_2
+    assert repr(S9) == "Sum([Symbol('y', GEO)])"
+
+    assert str(S2) == "(x + y)"
+    assert str(S3) == "(x + x)"
+    assert str(S5) == "(x + x-%s)" % f_2
+    assert str(S6) == "(%s + x + x)" % f_1
+    assert str(S7) == "(x + y-%s)" % f_2
+    assert str(S8) == "(%s + x + x)" % f_3
+    assert str(S9) == "y"
+
+    assert S2 == S2
+    assert S2 != S3
+    assert S5 != S6
+    assert S2 == S10
+
+    assert S0.ops() == 0
+    assert S1.ops() == 0
+    assert S2.ops() == 1
+    assert S3.ops() == 1
+    assert S4.ops() == 1
+    assert S5.ops() == 2
+    assert S6.ops() == 2
+    assert S7.ops() == 2
+    assert S8.ops() == 2
+    assert S9.ops() == 0
+
+    # Test hash
+    l = [S2]
+    d = {S2: 0}
+
+    assert S2 in l
+    assert S2 in d
+    assert S10 in l
+    assert S10 in d
diff --git a/test/unit/symbolics/test_sum_operators.py b/test/unit/symbolics/test_sum_operators.py
new file mode 100755
index 0000000..a73bf47
--- /dev/null
+++ b/test/unit/symbolics/test_sum_operators.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.reduce_operations import operation_count, expand_operations, reduce_operations
+from ffc.quadrature.symbolics import *
+from ffc.quadrature.sumobj import _group_fractions
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+from ffc.log import error, push_level, pop_level, CRITICAL
+
+
+def testSumOperators():
+    "Test binary operators"
+
+    f_0_5 = format["float"](0.5)
+    f_1 = format["float"](1)
+    f_2 = format["float"](2)
+    f_3 = format["float"](3)
+    f_6 = format["float"](6)
+    f2 = FloatValue(2.0)
+    fm3 = FloatValue(-3.0)
+
+    x = Symbol("x", GEO)
+    y = Symbol("y", GEO)
+    z = Symbol("z", GEO)
+
+    p0 = Product([f2, x])
+    p1 = Product([x, y])
+
+    S0 = Sum([x, y])
+    S1 = Sum([x, z])
+
+    F0 = Fraction(p0, y)
+
+    # Test Sum '+'
+    assert str(S0 + f2) == '(%s + x + y)' % f_2
+    assert str(S0 + x) == '(x + x + y)'
+    assert str(S0 + p0) == '(x + y + %s*x)' % f_2
+    assert str(S0 + S0) == '(x + x + y + y)'
+    assert str(S0 + F0) == '(x + y + %s*x/y)' % f_2
+
+    # Test Sum '-'
+    assert str(S0 - f2) == '(x + y-%s)' % f_2
+    assert str(S0 - fm3) == '(x + y + %s)' % f_3
+    assert str(S0 - x) == '(x + y - x)'
+    assert str(S0 - p0) == '(x + y-%s*x)' % f_2
+    assert str(S0 - Product([fm3, p0])) == '(x + y + %s*x)' % f_6
+    assert str(S0 - S0) == '(x + y - (x + y))'
+    assert str(S0 - F0) == '(x + y - %s*x/y)' % f_2
+
+    # Test Sum '*'
+    assert str(S0 * f2) == '(%s*x + %s*y)' % (f_2, f_2)
+    assert str(S0 * x) == '(x*x + x*y)'
+    assert str(S0 * p0) == '(%s*x*x + %s*x*y)' % (f_2, f_2)
+    assert str(S0 * S0) == '(%s*x*y + x*x + y*y)' % f_2
+    assert str(S0 * F0) == '(%s*x + %s*x*x/y)' % (f_2, f_2)
+
+    # Test Sum '/'
+    assert str(S0 / f2) == '(%s*x + %s*y)' % (f_0_5, f_0_5)
+    assert str(S0 / x) == '(%s + y/x)' % f_1
+    assert str(S0 / p0) == '(%s + %s*y/x)' % (f_0_5, f_0_5)
+    assert str(S0 / p1) == '(%s/x + %s/y)' % (f_1, f_1)
+    assert str(S0 / S0) == '(x + y)/(x + y)'
+    assert str(S0 / S1) == '(x + y)/(x + z)'
+
+    with pytest.raises(Exception):
+        truediv(S0, FloatValue(0))
+    with pytest.raises(Exception):
+        truediv(S0, F0)
diff --git a/test/unit/symbolics/test_symbol.py b/test/unit/symbolics/test_symbol.py
new file mode 100755
index 0000000..3a63b8a
--- /dev/null
+++ b/test/unit/symbolics/test_symbol.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.symbolics import *
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+
+
+def testSymbol():
+    "Test simple symbol instance."
+
+    s0 = Symbol("x", BASIS)
+    s1 = Symbol("y", IP)
+    s2 = Symbol("z", GEO)
+    s3 = Symbol("z", GEO)
+    s4 = Symbol("z", IP)
+
+    assert repr(s0) == "Symbol('x', BASIS)"
+    assert repr(s1) == "Symbol('y', IP)"
+    assert repr(s2) == "Symbol('z', GEO)"
+    assert repr(s4) == "Symbol('z', IP)"
+
+    assert s2 == s3
+    assert (s2 == s1) is False
+    assert (s2 == s4) is False
+    assert (s2 != s3) is False
+    assert s2 != s1
+
+    assert s0 < s1
+    assert s4 > s1
+
+    assert s0.ops() == 0
+    assert s1.ops() == 0
+    assert s2.ops() == 0
+    assert s3.ops() == 0
+    assert s4.ops() == 0
+
+    # Test hash
+    l = [s0]
+    d = {s0: 0}
+    s5 = Symbol('x', BASIS)
+
+    assert s0 in l
+    assert s0 in d
+    assert s5 in l
+    assert s5 in d
diff --git a/test/unit/symbolics/test_symbol_operators.py b/test/unit/symbolics/test_symbol_operators.py
new file mode 100755
index 0000000..98b739c
--- /dev/null
+++ b/test/unit/symbolics/test_symbol_operators.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2010 Kristian B. Oelgaard
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import pytest
+import time
+
+# FFC modules
+from ffc.quadrature.reduce_operations import operation_count, expand_operations, reduce_operations
+from ffc.quadrature.symbolics import *
+from ffc.quadrature.sumobj import _group_fractions
+from ffc.cpp import format, set_float_formatting
+from ffc.parameters import FFC_PARAMETERS
+set_float_formatting(FFC_PARAMETERS['precision'])
+from ffc.log import error, push_level, pop_level, CRITICAL
+
+
+def testSymbolOperators():
+    "Test binary operators"
+
+    f_0 = format["float"](0)
+    f_1 = format["float"](1)
+    f_2 = format["float"](2)
+    f_3 = format["float"](3)
+    f_0_5 = format["float"](0.5)
+    f0 = FloatValue(0.0)
+    f2 = FloatValue(2.0)
+    fm1 = FloatValue(-1.0)
+    fm3 = FloatValue(-3.0)
+
+    x = Symbol("x", GEO)
+    y = Symbol("y", GEO)
+    z = Symbol("z", GEO)
+
+    p0 = Product([f2, x])
+    p1 = Product([x, y])
+    p2 = Product([f2, z])
+    p3 = Product([y, x, z])
+
+    S0 = Sum([x, y])
+    S1 = Sum([x, z])
+
+    F0 = Fraction(f2, y)
+    F1 = Fraction(x, y)
+    F2 = Fraction(x, S0)
+    F3 = Fraction(x, y)
+    F4 = Fraction(p0, y)
+    F5 = Fraction(fm3, y)
+
+    # Test Symbol '+'
+    assert str(x + f2) == '(%s + x)' % f_2
+    assert str(x + x) == '%s*x' % f_2
+    assert str(x + y) == '(x + y)'
+    assert str(x + p0) == '%s*x' % f_3
+    assert str(x + p1) == '(x + x*y)'
+    assert str(x + S0) == '(x + x + y)'
+    assert str(x + F0) == '(x + %s/y)' % f_2
+
+    # Test Symbol '-'
+    assert str(x - f2) == '(x-%s)' % f_2
+    assert str(x - x) == '%s' % f_0
+    assert str(x - y) == '(x - y)'
+    assert str(x - p0) == ' - x'
+    assert str(x - p1) == '(x - x*y)'
+    assert str(x - S0) == '(x - (x + y))'
+    assert str(x - F5) == '(x - -%s/y)' % f_3
+
+    # Test Symbol '*'; we only need to test float, symbol and product. Sum and
+    # fraction are handled by the 'other' operand.
+    assert str(x * f2) == '%s*x' % f_2
+    assert str(x * y) == 'x*y'
+    assert str(x * p1) == 'x*x*y'
+
+    # Test Symbol '/'
+    assert str(x / f2) == '%s*x' % f_0_5
+    assert str(x / x) == '%s' % f_1
+    assert str(x / y) == 'x/y'
+    assert str(x / S0) == 'x/(x + y)'
+    assert str(x / p0) == '%s' % f_0_5
+    assert str(y / p1) == '%s/x' % f_1
+    assert str(z / p0) == '%s*z/x' % f_0_5
+    assert str(z / p1) == 'z/(x*y)'
+    with pytest.raises(Exception):
+        truediv(x, F0)
+    with pytest.raises(Exception):
+        truediv(y, FloatValue(0))
diff --git a/test/unit/symbolics/testdgelastodyn.py b/test/unit/symbolics/testdgelastodyn.py
deleted file mode 100755
index f419423..0000000
--- a/test/unit/symbolics/testdgelastodyn.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.symbolics import *
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-
-class TestDGElastoDyn(unittest.TestCase):
-
-    def testDGElastoDyn(self):
-        expr = Product([
-                       Sum([
-                            Symbol("F0", IP),
-                            Symbol("F1", IP)
-                          ]),
-                       Fraction(
-                                 Symbol("w4", GEO),
-                                 Symbol("w3", GEO)
-                                ),
-                       Fraction(
-                                 Product([
-                                          Symbol("w2", GEO),
-                                          Symbol("w5", GEO)
-                                         ]),
-                                 Symbol("w6", GEO)
-                                )
-                      ])
-
-#        print "\nDGElastoDyn"
-#        start = time.time()
-        expr_exp = expr.expand()
-#        print "DGElastoDyn: time, expand():     ", time.time() - start
-
-#        start = time.time()
-        expr_red = expr_exp.reduce_ops()
-#        print "DGElastoDyn: time, reduce_ops(): ", time.time() - start
-
-#        print "expr.ops():     ", expr.ops()
-#        print "expr_exp.ops(): ", expr_exp.ops()
-#        print "expr_red.ops(): ", expr_red.ops()
-
-#        print "expr:\n", expr
-#        print "exp:\n", expr_exp
-#        print "red:\n", expr_red
-
-        F0, F1, w2, w3, w4, w5, w6 = (3.12, -8.1, -45.3, 17.5, 2.2, 5.3, 9.145)
-        self.assertAlmostEqual(eval(str(expr)), eval(str(expr_exp)))
-        self.assertAlmostEqual(eval(str(expr)), eval(str(expr_red)))
-        self.assertEqual(expr.ops(), 6)
-        self.assertEqual(expr_exp.ops(), 11)
-        self.assertEqual(expr_red.ops(), 6)
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestDGElastoDyn('testDGElastoDyn'))
-
diff --git a/test/unit/symbolics/testelasticity2d.py b/test/unit/symbolics/testelasticity2d.py
deleted file mode 100755
index 05c50dd..0000000
--- a/test/unit/symbolics/testelasticity2d.py
+++ /dev/null
@@ -1,198 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.reduce_operations import operation_count, expand_operations, reduce_operations
-from ffc.quadrature.symbolics import *
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-class TestElasticity2D(unittest.TestCase):
-
-    def testElasticity2D(self):
-        elasticity = """(((Jinv_00*FE0_C0_D10_ip_j + Jinv_10*FE0_C0_D01_ip_j)*2*(Jinv_00*FE0_C0_D10_ip_k + Jinv_10*FE0_C0_D01_ip_k)*2 + ((Jinv_00*FE0_C1_D10_ip_j + Jinv_10*FE0_C1_D01_ip_j) + (Jinv_01*FE0_C0_D10_ip_j + Jinv_11*FE0_C0_D01_ip_j))*((Jinv_00*FE0_C1_D10_ip_k + Jinv_10*FE0_C1_D01_ip_k) + (Jinv_01*FE0_C0_D10_ip_k + Jinv_11*FE0_C0_D01_ip_k))) + ((Jinv_01*FE0_C1_D10_ip_j + Jinv_11*FE0_C1_D01_ip_j)*2*(Jinv_01*FE0_C1_D10_ip_k + Jinv_11*FE0_C1_D01_ip_k)*2 + ((Jinv_01*FE0_C0_D10_ip_j  [...]
-
-        expr = Product([
-                     Sum([
-                          Sum([
-                               Product([
-                                        Sum([
-                                             Product([Symbol("Jinv_00", GEO), Symbol("FE0_C0_D10_ip_j", BASIS)])
-                                             ,
-                                             Product([Symbol("Jinv_10", GEO), Symbol("FE0_C0_D01_ip_j", BASIS)])
-                                            ])
-                                        ,
-                                        FloatValue(2)
-                                        ,
-                                        Sum([
-                                             Product([Symbol("Jinv_00", GEO), Symbol("FE0_C0_D10_ip_k", BASIS)])
-                                             ,
-                                             Product([Symbol("Jinv_10", GEO), Symbol("FE0_C0_D01_ip_k", BASIS)])
-                                            ])
-                                        ,
-                                        FloatValue(2)
-                                        ])
-                               ,
-                               Product([
-                                        Sum([
-                                             Sum([
-                                                  Product([Symbol("Jinv_00", GEO), Symbol("FE0_C1_D10_ip_j", BASIS)])
-                                                  ,
-                                                  Product([Symbol("Jinv_10", GEO), Symbol("FE0_C1_D01_ip_j", BASIS)])
-                                                 ])
-                                             ,
-                                             Sum([
-                                                  Product([Symbol("Jinv_01", GEO), Symbol("FE0_C0_D10_ip_j", BASIS)])
-                                                  ,
-                                                  Product([Symbol("Jinv_11", GEO), Symbol("FE0_C0_D01_ip_j", BASIS)])
-                                                 ])
-                                            ])
-                                        ,
-                                        Sum([
-                                             Sum([
-                                                  Product([Symbol("Jinv_00", GEO), Symbol("FE0_C1_D10_ip_k", BASIS)])
-                                                  ,
-                                                  Product([Symbol("Jinv_10", GEO), Symbol("FE0_C1_D01_ip_k", BASIS)])
-                                                 ])
-                                             ,
-                                             Sum([
-                                                  Product([Symbol("Jinv_01", GEO), Symbol("FE0_C0_D10_ip_k", BASIS)])
-                                                  ,
-                                                  Product([Symbol("Jinv_11", GEO), Symbol("FE0_C0_D01_ip_k", BASIS)])
-                                                 ])
-                                            ])
-                                       ])
-                              ])
-                          ,
-                          Sum([
-                               Product([
-                                        Sum([
-                                             Product([Symbol("Jinv_01", GEO), Symbol("FE0_C1_D10_ip_j", BASIS)])
-                                             ,
-                                             Product([Symbol("Jinv_11", GEO), Symbol("FE0_C1_D01_ip_j", BASIS)])
-                                            ])
-                                        ,
-                                        FloatValue(2)
-                                        ,
-                                        Sum([
-                                             Product([Symbol("Jinv_01", GEO), Symbol("FE0_C1_D10_ip_k", BASIS)])
-                                             ,
-                                             Product([Symbol("Jinv_11", GEO), Symbol("FE0_C1_D01_ip_k", BASIS)])
-                                            ])
-                                        ,
-                                        FloatValue(2)
-                                       ])
-                               ,
-                               Product([
-                                        Sum([
-                                             Sum([
-                                                  Product([Symbol("Jinv_01", GEO), Symbol("FE0_C0_D10_ip_j", BASIS)])
-                                                  ,
-                                                  Product([Symbol("Jinv_11", GEO), Symbol("FE0_C0_D01_ip_j", BASIS)])
-                                                 ])
-                                             ,
-                                             Sum([
-                                                  Product([Symbol("Jinv_00", GEO), Symbol("FE0_C1_D10_ip_j", BASIS)])
-                                                  ,
-                                                  Product([Symbol("Jinv_10", GEO), Symbol("FE0_C1_D01_ip_j", BASIS)])
-                                                 ])
-                                            ])
-                                        ,
-                                        Sum([
-                                             Sum([
-                                                  Product([Symbol("Jinv_01", GEO), Symbol("FE0_C0_D10_ip_k", BASIS)])
-                                                  ,
-                                                  Product([Symbol("Jinv_11", GEO), Symbol("FE0_C0_D01_ip_k", BASIS)])
-                                                 ])
-                                             ,
-                                             Sum([
-                                                  Product([Symbol("Jinv_00", GEO), Symbol("FE0_C1_D10_ip_k", BASIS)])
-                                                  ,
-                                                  Product([Symbol("Jinv_10", GEO), Symbol("FE0_C1_D01_ip_k", BASIS)])
-                                                 ])
-                                            ])
-                                       ])
-                              ])
-                     ])
-                     ,
-                     FloatValue(0.25)
-                     ,
-                     Symbol("W4_ip", IP)
-                     ,
-                     Symbol("det", GEO)
-                     ])
-
-#        print "\nElasticity2D"
-#        start = time.time()
-        expr_exp = expr.expand()
-#        print "Elasticity2D: time, expand():     ", time.time() - start
-
-#        start = time.time()
-        elasticity_exp = expand_operations(elasticity, format)
-#        print "Elasticity2D: time, old expand(): ", time.time() - start
-
-#        start = time.time()
-        expr_red = expr_exp.reduce_ops()
-#        print "Elasticity2D: time, reduce_ops(): ", time.time() - start
-
-#        start = time.time()
-        elasticity_red = reduce_operations(elasticity, format)
-#        print "Elasticity2D: time, old reduce(): ", time.time() - start
-
-        elasticity_exp_ops = operation_count(elasticity_exp, format)
-        elasticity_red_ops = operation_count(elasticity_red, format)
-#        print "expr.ops():                ", expr.ops()
-#        print "Elasticity2D old exp: ops: ", elasticity_exp_ops
-#        print "expr_exp.ops():            ", expr_exp.ops()
-#        print "Elasticity2D old red: ops: ", elasticity_red_ops
-#        print "expr_red.ops():            ", expr_red.ops()
-
-#        print "expr:\n", expr
-#        print "exp:\n", expr_exp
-#        print "red:\n", expr_red
-#        print "old red:\n", elasticity_red
-
-        Jinv_00, Jinv_01, Jinv_10, Jinv_11, W4_ip, det = (1.1, 1.5, -4.3, 1.7, 11, 52.3)
-        FE0_C0_D01_ip_j, FE0_C0_D10_ip_j, FE0_C0_D01_ip_k, FE0_C0_D10_ip_k = (1.12, 5.7, -9.3, 7.4)
-        FE0_C1_D01_ip_j, FE0_C1_D10_ip_j, FE0_C1_D01_ip_k, FE0_C1_D10_ip_k = (3.12, -8.1, -45.3, 17.5)
-
-        self.assertAlmostEqual(eval(str(expr)), eval(str(expr_exp)))
-        self.assertAlmostEqual(eval(str(expr)), eval(str(expr_red)))
-        self.assertAlmostEqual(eval(str(expr)), eval(str(elasticity)))
-        self.assertAlmostEqual(eval(str(expr)), eval(str(elasticity_exp)))
-        self.assertAlmostEqual(eval(str(expr)), eval(str(elasticity_red)))
-        self.assertEqual(expr.ops(), 52)
-        self.assertEqual(elasticity_exp_ops, 159)
-        self.assertEqual(expr_exp.ops(), 159)
-        self.assertEqual(elasticity_red_ops, 71)
-        self.assertEqual(expr_red.ops(), 71)
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestElasticity2D('testElasticity2D'))
-
diff --git a/test/unit/symbolics/testelasticityterm.py b/test/unit/symbolics/testelasticityterm.py
deleted file mode 100755
index 39d8d36..0000000
--- a/test/unit/symbolics/testelasticityterm.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-03-11
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.symbolics import *
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-
-class TestElasticityTerm(unittest.TestCase):
-
-    def testElasticityTerm(self):
-        # expr:  0.25*W1*det*(FE0_C2_D001[0][j]*FE0_C2_D001[0][k]*Jinv_00*Jinv_21 + FE0_C2_D001[0][j]*FE0_C2_D001[0][k]*Jinv_00*Jinv_21)
-        expr = Product([
-                         FloatValue(0.25), Symbol('W1', GEO), Symbol('det', GEO),
-                         Sum([Product([Symbol('FE0_C2_D001_0_j', BASIS), Symbol('FE0_C2_D001_0_k', BASIS),
-                                       Symbol('Jinv_00', GEO), Symbol('Jinv_21', GEO)]),
-                              Product([Symbol('FE0_C2_D001_0_j', BASIS), Symbol('FE0_C2_D001_0_k', BASIS),
-                                  Symbol('Jinv_00', GEO), Symbol('Jinv_21', GEO)])
-                             ])
-                      ])
-
-#        print "\nElasticityTerm"
-#        start = time.time()
-        expr_exp = expr.expand()
-#        print "ElasticityTerm: time, expand():     ", time.time() - start
-
-#        start = time.time()
-        expr_red = expr_exp.reduce_ops()
-#        print "ElasticityTerm: time, reduce_ops(): ", time.time() - start
-
-#        print "expr.ops():     ", expr.ops()
-#        print "expr_exp.ops(): ", expr_exp.ops()
-#        print "expr_red.ops(): ", expr_red.ops()
-
-#        print "expr:\n", expr
-#        print "exp:\n", expr_exp
-#        print "red:\n", expr_red
-
-        det, W1, Jinv_00, Jinv_21, FE0_C2_D001_0_j, FE0_C2_D001_0_k = [0.123 + i for i in range(6)]
-        self.assertAlmostEqual(eval(str(expr)), eval(str(expr_exp)))
-        self.assertAlmostEqual(eval(str(expr)), eval(str(expr_red)))
-        self.assertEqual(expr.ops(), 10)
-        self.assertEqual(expr_exp.ops(), 6)
-        self.assertEqual(expr_red.ops(), 6)
-
-        # Generate code
-        ip_consts = {}
-        geo_consts = {}
-        trans_set = set()
-
-        start = time.time()
-        opt_code = optimise_code(expr, ip_consts, geo_consts, trans_set)
-#        print "ElasticityTerm, optimise_code(): ", time.time() - start
-
-        G = [eval(str(list(geo_consts.items())[0][0]))]
-        self.assertAlmostEqual(eval(str(expr)), eval(str(opt_code)))
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestElasticityTerm('testElasticityTerm'))
-
diff --git a/test/unit/symbolics/testelasweighted.py b/test/unit/symbolics/testelasweighted.py
deleted file mode 100755
index d0db52a..0000000
--- a/test/unit/symbolics/testelasweighted.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-03-11
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.symbolics import *
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-
-class TestElasWeighted(unittest.TestCase):
-
-    def testElasWeighted(self):
-        expr = Product([
-                          Symbol('W4', IP),
-                          Sum([
-                              Product([
-                                        Symbol('FE0_C1_D01_ip_j', BASIS),
-                                        Symbol('FE0_C1_D01_ip_k', BASIS),
-                                        Symbol('Jinv_00', GEO),
-                                        Symbol('w1', GEO)
-                                        ]),
-                              Product([
-                                        Symbol('FE0_C1_D01_ip_j', BASIS),
-                                        Symbol('FE0_C1_D01_ip_k', BASIS),
-                                        Symbol('Jinv_01', GEO),
-                                        Symbol('w0', GEO)
-                                        ]),
-                              Product([
-                                        Symbol('w2', GEO),
-                                        Sum([
-                                              Product([
-                                                      Symbol('FE0_C1_D01_ip_j', BASIS),
-                                                      Symbol('FE0_C1_D01_ip_k', BASIS),
-                                                      Symbol('Jinv_00', GEO),
-                                                      Symbol('w1', GEO)
-                                                      ]),
-                                              Product([
-                                                      Symbol('FE0_C1_D01_ip_j', BASIS),
-                                                      Symbol('FE0_C1_D01_ip_k', BASIS),
-                                                      Symbol('Jinv_01', GEO),
-                                                      Symbol('w0', GEO)
-                                                      ])
-                                            ])
-                                      ])
-                              ])
-                          ])
-                                                       
-#        print "\nElasticityWeighted"
-#        start = time.time()
-        expr_exp = expr.expand()
-#        print "ElasWeighted: time, expand():     ", time.time() - start
-
-#        start = time.time()
-        expr_red = expr_exp.reduce_ops()
-#        print "ElasWeighted: time, reduce_ops(): ", time.time() - start
-
-#        print "expr.ops():     ", expr.ops()
-#        print "expr_exp.ops(): ", expr_exp.ops()
-#        print "expr_red.ops(): ", expr_red.ops()
-
-#        print "expr:\n", expr
-#        print "exp:\n", expr_exp
-#        print "red:\n", expr_red
-
-        det, W4, w0, w1, w2, Jinv_00, Jinv_01, Jinv_11, Jinv_10, FE0_C1_D01_ip_j, FE0_C1_D01_ip_k = [0.123 + i for i in range(11)]
-        self.assertAlmostEqual(eval(str(expr)), eval(str(expr_exp)))
-        self.assertAlmostEqual(eval(str(expr)), eval(str(expr_red)))
-        self.assertEqual(expr.ops(), 17)
-        self.assertEqual(expr_exp.ops(), 21)
-        self.assertEqual(expr_red.ops(), 10)
-
-        # Generate code
-        ip_consts = {}
-        geo_consts = {}
-        trans_set = set()
-
-        start = time.time()
-        opt_code = optimise_code(expr, ip_consts, geo_consts, trans_set)
-#        print "ElasWeighted, optimise_code(): ", time.time() - start
-
-        G = [eval(str(list(geo_consts.items())[0][0]))]
-        I = [eval(str(list(ip_consts.items())[0][0]))]
-        self.assertAlmostEqual(eval(str(expr)), eval(str(opt_code)))
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestElasWeighted('testElasWeighted'))
-
diff --git a/test/unit/symbolics/testelasweighted2.py b/test/unit/symbolics/testelasweighted2.py
deleted file mode 100755
index 615b1cd..0000000
--- a/test/unit/symbolics/testelasweighted2.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-03-11
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.symbolics import *
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-
-class TestElasWeighted2(unittest.TestCase):
-
-    def testElasWeighted2(self):
-
-        expr = Product([
-                        Symbol('W4', IP),
-                                        Sum([
-                              Product([
-                                        Symbol('FE0_C1_D01_ip_j', BASIS),
-                                        Symbol('FE0_C1_D01_ip_k', BASIS),
-                                        Symbol('Jinv_00', GEO),
-                                        Symbol('w1', GEO)
-                                        ]),
-                              Product([
-                                        Symbol('FE0_C1_D01_ip_j', BASIS),
-                                        Symbol('Jinv_01', GEO),
-                                        Sum([
-                                              Product([
-                                                        Symbol('FE0_C1_D01_ip_k', BASIS),
-                                                        Symbol('w0', GEO)
-                                                        ]),
-                                              Product([
-                                                        Symbol('FE0_C1_D01_ip_k', BASIS),
-                                                        Symbol('w1', GEO)
-                                                        ])
-                                              ])
-                                        ]),
-                              Product([
-                                        Symbol('w2', GEO),
-                                        Sum([
-                                            Product([
-                                                    Symbol('FE0_C1_D01_ip_j', BASIS),
-                                                    Symbol('FE0_C1_D01_ip_k', BASIS),
-                                                    Symbol('Jinv_00', GEO),
-                                                    Symbol('w1', GEO)
-                                                    ]),
-                                            Product([
-                                                    Symbol('FE0_C1_D01_ip_j', BASIS),
-                                                    Symbol('Jinv_01', GEO),
-                                                    Sum([
-                                                          Product([
-                                                                  Symbol('FE0_C1_D01_ip_k', BASIS),
-                                                                  Symbol('w0', GEO)
-                                                                  ]),
-                                                          Product([
-                                                                   Symbol('FE0_C1_D01_ip_k', BASIS),
-                                                                   Symbol('w1', GEO)
-                                                                  ])
-                                                          ])
-                                                    ])
-                                            ])
-                                        ])
-                              ])
-                        ])
-
-#        print "\nElasticityWeighted2"
-        start = time.time()
-        expr_exp = expr.expand()
-#        print "ElasWeighted2: time, expand():     ", time.time() - start
-
-        start = time.time()
-        expr_red = expr_exp.reduce_ops()
-#        print "ElasWeighted2: time, reduce_ops(): ", time.time() - start
-
-#        print "expr.ops():     ", expr.ops()
-#        print "expr_exp.ops(): ", expr_exp.ops()
-#        print "expr_red.ops(): ", expr_red.ops()
-
-#        print "expr:\n", expr
-#        print "exp:\n", expr_exp
-#        print "red:\n", expr_red
-
-        det, W4, w0, w1, w2, Jinv_00, Jinv_01, Jinv_11, Jinv_10, FE0_C1_D01_ip_j, FE0_C1_D01_ip_k = [0.123 + i for i in range(11)]
-        self.assertAlmostEqual(eval(str(expr)), eval(str(expr_exp)))
-        self.assertAlmostEqual(eval(str(expr)), eval(str(expr_red)))
-        self.assertEqual(expr.ops(), 21)
-        self.assertEqual(expr_exp.ops(), 32)
-        self.assertEqual(expr_red.ops(), 12)
-
-        # Generate code
-        ip_consts = {}
-        geo_consts = {}
-        trans_set = set()
-
-        start = time.time()
-        opt_code = optimise_code(expr, ip_consts, geo_consts, trans_set)
-#        print "ElasWeighted2, optimise_code(): ", time.time() - start
-
-        G = [eval(str(list(geo_consts.items())[0][0]))]
-        I = [eval(str(list(ip_consts.items())[0][0]))]
-        self.assertAlmostEqual(eval(str(expr)), eval(str(opt_code)))
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestElasWeighted2('testElasWeighted2'))
-
diff --git a/test/unit/symbolics/testexpandoperations.py b/test/unit/symbolics/testexpandoperations.py
deleted file mode 100755
index c42b0fc..0000000
--- a/test/unit/symbolics/testexpandoperations.py
+++ /dev/null
@@ -1,287 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.symbolics import *
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-
-class TestExpandOperations(unittest.TestCase):
-
-    def testExpandOperations(self):
-        f0 = FloatValue(-1)
-        f1 = FloatValue(2)
-        f2 = FloatValue(1)
-        sx = Symbol("x", GEO)
-        sy = Symbol("y", GEO)
-        sz = Symbol("z", GEO)
-        s0 = Product([FloatValue(-1), Symbol("x", GEO)])
-        s1 = Symbol("y", GEO)
-        s2 = Product([FloatValue(5), Symbol("z", IP)])
-        s3 = Product([FloatValue(-4), Symbol("z", GEO)])
-
-        # Random variable values
-        x = 2.2
-        y = -0.2
-        z = 1.1
-
-        # Aux. expressions
-        P0 = Product([s2, s1])
-        P1 = Product([P0, s0])
-        P2 = Product([P1, s1, P0])
-        P3 = Product([P1, P2])
-
-        S0 = Sum([s2, s1])
-        S1 = Sum([S0, s0])
-        S2 = Sum([S1, s1, S0])
-        S3 = Sum([S1, S2])
-
-        F0 = Fraction(s2, s1)
-        F1 = Fraction(F0, s0)
-        F2 = Fraction(F1, F0)
-        F3 = Fraction(F1, F2)
-
-        # Special fractions
-        F4 = Fraction(P0, F0)
-        F5 = Fraction(Fraction(s0, P0), P0)
-        F6 = Fraction( Fraction( Fraction(s1, s0), Fraction(s1, s2)), Fraction( Fraction(s2, s0), Fraction(s1, s0)) )
-        F7 = Fraction(s1, Product([s1, Symbol("x", GEO)]))
-        F8 = Fraction( Sum([sx, Fraction(sy, sx)]), FloatValue(2))
-
-        F4x = F4.expand()
-        F5x = F5.expand()
-        F6x = F6.expand()
-        F7x = F7.expand()
-        F8x = F8.expand()
-
-#        print "\nF4: '%s'" %F4
-#        print "F4x: '%s'" %F4x
-#        print "\nF5: '%s'" %F5
-#        print "F5x: '%s'" %F5x
-#        print "\nF6: '%s'" %F6
-#        print "F6x: '%s'" %F6x
-#        print "\nF7: '%s'" %F7
-#        print "F7x: '%s'" %F7x
-#        print "\nF8: '%s'" %F8
-#        print "F8x: '%s'" %F8x
-
-        self.assertAlmostEqual(eval(str(F4)), eval(str(F4x)))
-        self.assertAlmostEqual(eval(str(F5)), eval(str(F5x)))
-        self.assertAlmostEqual(eval(str(F6)), eval(str(F6x)))
-        self.assertAlmostEqual(eval(str(F7)), eval(str(F7x)))
-        self.assertAlmostEqual(eval(str(F8)), eval(str(F8x)))
-
-        self.assertEqual(F4.ops(), 5)
-        self.assertEqual(F4x.ops(), 1)
-        self.assertEqual(F5.ops(), 6)
-        self.assertEqual(F5x.ops(), 5)
-        self.assertEqual(F6.ops(), 9)
-        self.assertEqual(F6x.ops(), 1)
-        self.assertEqual(F7.ops(), 2)
-        self.assertEqual(F7x.ops(), 1)
-        self.assertEqual(F8.ops(), 3)
-        self.assertEqual(F8x.ops(), 4)
-
-        # Expressions that should be expanded
-        e0 = Product([P3, F2])
-        e1 = Product([S3, P2])
-        e2 = Product([F3, S1])
-
-        e3 = Sum([P3, F2])
-        e4 = Sum([S3, P2])
-        e5 = Sum([F3, S1])
-
-        e6 = Fraction(P3, F2)
-        e7 = Fraction(S3, P2)
-        e8 = Fraction(F3, S1)
-        e9 = Fraction(S0, s0)
-
-        e0x = e0.expand()
-        e1x = e1.expand()
-        e2x = e2.expand()
-        e3x = e3.expand()
-        e4x = e4.expand()
-        e5x = e5.expand()
-        e6x = e6.expand()
-        e7x = e7.expand()
-        e8x = e8.expand()
-        e9x = e9.expand()
-
-#        print "\ne0: '%s'" %e0
-#        print "e0x: '%s'" %e0x
-#        print "\ne1: '%s'" %e1
-#        print "e1x: '%s'" %e1x
-#        print "\ne2: '%s'" %e2
-#        print "e2x: '%s'" %e2x
-#        print "\ne3: '%s'" %e3
-#        print "e3x: '%s'" %e3x
-#        print "\ne4: '%s'" %e4
-#        print "e4x: '%s'" %e4x
-#        print "\ne5: '%s'" %e5
-#        print "e5x: '%s'" %e5x
-#        print "\ne6: '%s'" %e6
-#        print "e6x: '%s'" %e6x
-#        print "\ne7: '%s'" %e7
-#        print "e7x: '%s'" %e7x
-#        print "\ne8: '%s'" %e8
-#        print "e8x: '%s'" %e8x
-#        print "\ne9: '%s'" %e9
-#        print "e9x: '%s'" %e9x
-
-        self.assertAlmostEqual(eval(str(e0)), eval(str(e0x)))
-        self.assertAlmostEqual(eval(str(e1)), eval(str(e1x)))
-        self.assertAlmostEqual(eval(str(e2)), eval(str(e2x)))
-        self.assertAlmostEqual(eval(str(e3)), eval(str(e3x)))
-        self.assertAlmostEqual(eval(str(e4)), eval(str(e4x)))
-        self.assertAlmostEqual(eval(str(e5)), eval(str(e5x)))
-        self.assertAlmostEqual(eval(str(e6)), eval(str(e6x)))
-        self.assertAlmostEqual(eval(str(e7)), eval(str(e7x)))
-        self.assertAlmostEqual(eval(str(e8)), eval(str(e8x)))
-        self.assertAlmostEqual(eval(str(e9)), eval(str(e9x)))
-
-        self.assertEqual(e0.ops(), 16)
-        self.assertEqual(e0x.ops(), 8)
-        self.assertEqual(e1.ops(), 18)
-        self.assertEqual(e1x.ops(), 23)
-        self.assertEqual(e2.ops(), 14)
-        self.assertEqual(e2x.ops(), 9)
-        self.assertEqual(e3.ops(), 16)
-        self.assertEqual(e3x.ops(), 11)
-        self.assertEqual(e4.ops(), 18)
-        self.assertEqual(e4x.ops(), 12)
-        self.assertEqual(e5.ops(), 14)
-        self.assertEqual(e5x.ops(), 6)
-        self.assertEqual(e6.ops(), 16)
-        self.assertEqual(e6x.ops(), 10)
-        self.assertEqual(e7.ops(), 18)
-        self.assertEqual(e7x.ops(), 17)
-        self.assertEqual(e8.ops(), 14)
-        self.assertEqual(e8x.ops(), 8)
-        self.assertEqual(e9.ops(), 3)
-        self.assertEqual(e9x.ops(), 4)
-
-        # More expressions (from old expand tests)
-        PF = Product([F0, F1])
-        E0 = Product([s1, f0, S1])
-        E1 = Sum([P0, E0])
-        E2 = Fraction(Sum([Product([f1])]), f2)
-        E3 = Sum([F0, F0])
-        E4 = Product([ Sum([ Product([sx, Sum([sy, Product([ Sum([sy, Product([sy, sz]), sy])]), sy])]),
-                             Product([sx, Sum([ Product([sy, sz]), sy])])])])
-        P4 = Product([s1,
-        Sum([s0, s1])])
-        P5 = Product([s0, E0])
-        P6 = Product([s1])
-        S4 = Sum([s1])
-
-
-        # Create 'real' term that caused me trouble
-        P00 = Product([Symbol("Jinv_00", GEO)]*2)
-        P01 = Product([Symbol("Jinv_01", GEO)]*2)
-        P20 = Product([Symbol("Jinv_00", GEO),
-        Product([f1, Symbol("Jinv_20", GEO)]) ])
-        P21 = Product([Symbol("Jinv_01", GEO),
-        Product([f1, Symbol("Jinv_21", GEO)]) ])
-        PS0 = Product([Symbol("Jinv_22", GEO),
-        Sum([P00, P01])])
-        PS1 = Product([ Product([f0, Symbol("Jinv_02", GEO)]),
-        Sum([P20, P21])])
-        SP = Sum([PS0, PS1])
-
-        PFx = PF.expand()
-        E0x = E0.expand()
-        E1x = E1.expand()
-        E2x = E2.expand()
-        E3x = E3.expand()
-        E4x = E4.expand()
-        P4x = P4.expand()
-        P5x = P5.expand()
-        P6x = P6.expand()
-        S4x = S4.expand()
-        SPx = SP.expand()
-
-#        print "\nPF: '%s'" %PF
-#        print "PFx: '%s'" %PFx
-#        print "\nE0: '%s'" %E0
-#        print "E0x: '%s'" %E0x
-#        print "\nE1: '%s'" %E1
-#        print "E1x: '%s'" %E1x
-#        print "\nE2: '%s'" %E2
-#        print "E2x: '%s'" %E2x
-#        print "\nE3: '%s'" %E3
-#        print "E3x: '%s'" %E3x
-#        print "\nE4: '%s'" %E4
-#        print "E4x: '%s'" %E4x
-#        print "\nP4: '%s'" %P4
-#        print "P4x: '%s'" %P4x
-#        print "\nP5: '%s'" %P5
-#        print "P5x: '%s'" %P5x
-#        print "\nP6: '%s'" %repr(P6)
-#        print "P6x: '%s'" %repr(P6x)
-#        print "\nS4: '%s'" %repr(S4)
-#        print "S4x: '%s'" %repr(S4x)
-#        print "\nSP: '%s'" %SP
-#        print "SPx: '%s'" %SPx
-
-        Jinv_00, Jinv_01, Jinv_10, Jinv_02, Jinv_20, Jinv_22, Jinv_21, W1, det = [1,2,3,4,5,6,7,8,9]
-
-        self.assertAlmostEqual(eval(str(SP)), eval(str(SPx)))
-        self.assertAlmostEqual(eval(str(E0)), eval(str(E0x)))
-        self.assertAlmostEqual(eval(str(E1)), eval(str(E1x)))
-        self.assertAlmostEqual(eval(str(E2)), eval(str(E2x)))
-        self.assertAlmostEqual(eval(str(E3)), eval(str(E3x)))
-        self.assertAlmostEqual(eval(str(E4)), eval(str(E4x)))
-        self.assertAlmostEqual(eval(str(SP)), eval(str(SPx)))
-        self.assertAlmostEqual(eval(str(P4)), eval(str(P4x)))
-        self.assertAlmostEqual(eval(str(P5)), eval(str(P5x)))
-        self.assertEqual(P6x, s1)
-        self.assertEqual(S4x, s1)
-        self.assertEqual(PF.ops(), 6)
-        self.assertEqual(PFx.ops(), 5)
-        self.assertEqual(E0.ops(), 4)
-        self.assertEqual(E0x.ops(), 6)
-        self.assertEqual(E1.ops(), 7)
-        self.assertEqual(E1x.ops(), 3)
-        self.assertEqual(E2.ops(), 1)
-        self.assertEqual(E2x.ops(), 0)
-        self.assertEqual(E3.ops(), 5)
-        self.assertEqual(E3x.ops(), 5)
-        self.assertEqual(E4.ops(), 10)
-        self.assertEqual(E4x.ops(), 6)
-        self.assertEqual(SP.ops(), 11)
-        self.assertEqual(SPx.ops(), 13)
-        self.assertEqual(P4.ops(), 2)
-        self.assertEqual(P4x.ops(), 3)
-        self.assertEqual(P5.ops(), 5)
-        self.assertEqual(P5x.ops(), 9)
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestExpandOperations('testExpandOperations'))
-
diff --git a/test/unit/symbolics/testfloat.py b/test/unit/symbolics/testfloat.py
deleted file mode 100755
index 2d7a2b6..0000000
--- a/test/unit/symbolics/testfloat.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.symbolics import *
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS["precision"])
-
-class TestFloat(unittest.TestCase):
-
-    def testFloat(self):
-            "Test simple FloatValue instance."
-            f0 = FloatValue(1.5)
-            f1 = FloatValue(-5)
-            f2 = FloatValue(-1e-14)
-            f3 = FloatValue(-1e-11)
-            f4 = FloatValue(1.5)
-
-    #        print "\nTesting FloatValue"
-    #        print "f0: '%s'" %f0
-    #        print "f1: '%s'" %f1
-    #        print "f2: '%s'" %f2
-    #        print "f3: '%s'" %f3
-
-            self.assertEqual(repr(f0), "FloatValue(%s)" % format["float"](1.5))
-            self.assertEqual(repr(f1), "FloatValue(%s)" % format["float"](-5))
-            self.assertEqual(repr(f2), "FloatValue(%s)" % format["float"](0))
-            self.assertEqual(repr(f3), "FloatValue(%s)" % format["float"](-1e-11))
-
-            self.assertEqual(f2.val == 0, True)
-            self.assertEqual(f3.val == 0, False)
-
-            self.assertEqual(f0.ops(), 0)
-            self.assertEqual(f1.ops(), 0)
-            self.assertEqual(f2.ops(), 0)
-            self.assertEqual(f3.ops(), 0)
-
-            self.assertEqual(f0 == f4, True)
-            self.assertEqual(f1 != f3, True)
-            self.assertEqual(f0 < f1, False)
-            self.assertEqual(f2 > f3, True)
-
-            # Test hash
-            l = [f0]
-            d = {f0:0}
-            self.assertEqual(f0 in l, True)
-            self.assertEqual(f0 in d, True)
-            self.assertEqual(f4 in l, True)
-            self.assertEqual(f4 in d, True)
-            self.assertEqual(f1 in l, False)
-            self.assertEqual(f1 in d, False)
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestFloat('testFloat'))
diff --git a/test/unit/symbolics/testfloatoperators.py b/test/unit/symbolics/testfloatoperators.py
deleted file mode 100755
index 4e7d2b8..0000000
--- a/test/unit/symbolics/testfloatoperators.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.reduce_operations import expand_operations, reduce_operations
-from ffc.quadrature.symbolics import *
-from ffc.quadrature.sumobj import _group_fractions
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-from ffc.log import error, push_level, pop_level, CRITICAL
-
-class TestFloatOperators(unittest.TestCase):
-
-    def testFloatOperators(self):
-        "Test binary operators"
-
-        f0 = FloatValue(0.0)
-        f2 = FloatValue(2.0)
-        f3= FloatValue(3.0)
-        fm1= FloatValue(-1.0)
-        fm3= FloatValue(-3.0)
-
-        x = Symbol("x", GEO)
-        y = Symbol("y", GEO)
-        z = Symbol("z", GEO)
-
-        p0 = Product([f2, x])
-        p1 = Product([x, y])
-        p2 = Product([f2, z])
-        p3 = Product([y, x, z])
-        p4 = Product([fm1, f2, x])
-
-        S0 = Sum([p0, fm3])
-        S1 = Sum([x, y])
-        S2 = Sum([S1, fm3])
-        S3 = Sum([p4, fm3])
-        S4 = Sum([fm3, Product([fm1, Sum([x, y])])])
-
-        F0 = Fraction(f2, y)
-        F1 = Fraction(FloatValue(-1.5), x)
-        F2 = Fraction(fm3, S1)
-
-        SF0 = Sum([f3, F1])
-        SF1 = Sum([f3, Product([fm1, F1])])
-
-        # Test FloatValue '+'
-        self.assertEqual(str(f2 + fm3), str(fm1))
-        self.assertEqual(str(f2 + fm3 + fm3 + f2 + f2), str(f0))
-        self.assertEqual(str(f0 + p0), str(p0))
-        self.assertEqual(str(fm3 + p0), str(S0))
-        self.assertEqual(str(fm3 + S1), str(S2))
-        self.assertEqual(str(f3 + F1), str(SF0))
-
-        # Test FloatValue '-'
-        self.assertEqual(str(f2 - fm3), str(FloatValue(5)))
-        self.assertEqual(str(f0 - p0), str(p4))
-        self.assertEqual(str(fm3 - p0), str(S3))
-        self.assertEqual(str(fm3 - S1), str(S4))
-        self.assertEqual(str(f3 - F1), str(SF1))
-
-        # Test FloatValue '*', only need one because all other cases are
-        # handled by 'other'
-        self.assertEqual(str(f2*f2), '%s' % format["float"](4))
-
-        # Test FloatValue '/'
-        self.assertEqual(str(fm3/f2), str(FloatValue(-1.5)))
-        self.assertEqual(str(f2/y), str(F0))
-        self.assertEqual(str(fm3/p0), str(F1))
-        self.assertEqual(str(fm3/S1), str(F2))
-        # Silence output
-        push_level(CRITICAL)
-        self.assertRaises(Exception, f2.__truediv__, F0)
-        self.assertRaises(Exception, f2.__truediv__, f0)
-        self.assertRaises(Exception, f2.__truediv__, Product([f0, y]))
-        pop_level()
-
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestFloatOperators('testFloatOperators'))
-
diff --git a/test/unit/symbolics/testfraction.py b/test/unit/symbolics/testfraction.py
deleted file mode 100755
index b8cc9aa..0000000
--- a/test/unit/symbolics/testfraction.py
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.symbolics import *
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-from ffc.log import push_level, pop_level, CRITICAL
-
-class TestFraction(unittest.TestCase):
-
-    def testFraction(self):
-        "Test simple fraction instance."
-
-        f0 = FloatValue(-2.0)
-        f1 = FloatValue(3.0)
-        f2 = FloatValue(0)
-        s0 = Symbol("x", BASIS)
-        s1 = Symbol("y", GEO)
-
-        F0 = Fraction(f1, f0)
-        F1 = Fraction(f2, f0)
-        F2 = Fraction(s0, s1)
-        F3 = Fraction(s0, f1)
-        F4 = Fraction(f0, s1)
-        F5 = Fraction(f2, s1)
-        F6 = Fraction(s0, s1)
-
-#        print "\nTesting Fractions"
-#        print "F0 = frac(%s, %s) = '%s'" %(f1, f0, F0)
-#        print "F1 = frac(%s, %s) = '%s'" %(f2, f0, F1)
-#        print "F2 = frac(%s, %s) = '%s'" %(s0, s1, F2)
-#        print "F3 = frac(%s, %s) = '%s'" %(s0, f1, F3)
-#        print "F4 = frac(%s, %s) = '%s'" %(f0, s1, F4)
-#        print "F5 = frac(%s, %s) = '%s'" %(f2, s1, F5)
-#        print "F6 = frac(%s, %s) = '%s'" %(s0, s1, F6)
-
-        # Silence output
-        push_level(CRITICAL)
-        self.assertRaises(Exception, Fraction, f0, f2)
-        self.assertRaises(Exception, Fraction, s0, f2)
-        pop_level()
-
-        self.assertEqual(repr(F0), "Fraction(FloatValue(%s), FloatValue(%s))"\
-                                    % (format["float"](-1.5), format["float"](1)))
-        self.assertEqual(repr(F2), "Fraction(Symbol('x', BASIS), Symbol('y', GEO))")
-
-        self.assertEqual(str(F0), "%s" % format["float"](-1.5))
-        self.assertEqual(str(F1), "%s" % format["float"](0))
-        self.assertEqual(str(F2), "x/y")
-        self.assertEqual(str(F3), "x/%s" % format["float"](3))
-        self.assertEqual(str(F4), "-%s/y" % format["float"](2))
-        self.assertEqual(str(F5), "%s" % format["float"](0))
-
-        self.assertEqual(F2 == F2, True)
-        self.assertEqual(F2 == F3, False)
-        self.assertEqual(F5 != F4, True)
-        self.assertEqual(F2 == F6, True)
-
-        self.assertEqual(F0.ops(), 0)
-        self.assertEqual(F1.ops(), 0)
-        self.assertEqual(F2.ops(), 1)
-        self.assertEqual(F3.ops(), 1)
-        self.assertEqual(F4.ops(), 1)
-        self.assertEqual(F5.ops(), 0)
-
-        # Test hash
-        l = [F2]
-        d = {F2:0}
-
-        self.assertEqual(F2 in l, True)
-        self.assertEqual(F2 in d, True)
-        self.assertEqual(F6 in l, True)
-        self.assertEqual(F6 in d, True)
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestFraction('testFraction'))
-
diff --git a/test/unit/symbolics/testfractionoperators.py b/test/unit/symbolics/testfractionoperators.py
deleted file mode 100755
index b7c0f34..0000000
--- a/test/unit/symbolics/testfractionoperators.py
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.reduce_operations import operation_count, expand_operations, reduce_operations
-from ffc.quadrature.symbolics import *
-from ffc.quadrature.sumobj import _group_fractions
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-from ffc.log import error, push_level, pop_level, CRITICAL
-
-class TestFractionOperators(unittest.TestCase):
-
-    def testFractionOperators(self):
-        "Test binary operators"
-
-        f_0 = format["float"](0)
-        f_1 = format["float"](1)
-        f_2 = format["float"](2)
-        f_5 = format["float"](5)
-
-        f2 = FloatValue(2.0)
-        fm3 = FloatValue(-3.0)
-
-        x = Symbol("x", GEO)
-        y = Symbol("y", GEO)
-
-        p0 = Product([f2, x])
-        p1 = Product([x, y])
-
-        S0 = Sum([x, y])
-
-        F0 = Fraction(f2, y)
-        F1 = Fraction(x, y)
-        F2 = Fraction(x, S0)
-        F3 = Fraction(x, y)
-        F4 = Fraction(p0, y)
-        F5 = Fraction(Product([fm3, x]), y)
-
-        # Test Fraction '+'
-        self.assertEqual(str(F0 + f2), '(%s + %s/y)' % (f_2, f_2))
-        self.assertEqual(str(F1 + x), '(x + x/y)')
-        self.assertEqual(str(F1 + p0), '(%s*x + x/y)' % f_2)
-        self.assertEqual(str(F1 + S0), '(x + y + x/y)')
-        self.assertEqual(str(F1 + F3), '%s*x/y' % f_2)
-        self.assertEqual(str(F0 + F1), '(%s + x)/y' % f_2)
-        self.assertEqual(str(F2 + F4), '(%s*x/y + x/(x + y))' % f_2)
-
-        # Test Fraction '-'
-        self.assertEqual(str(F0 - f2), '(%s/y-%s)' % (f_2, f_2))
-        self.assertEqual(str(F1 - x), '(x/y - x)')
-        self.assertEqual(str(F1 - p0), '(x/y-%s*x)' % f_2)
-        self.assertEqual(str(F1 - S0), '(x/y - (x + y))')
-        self.assertEqual(str(F1 - F3), '%s' % f_0)
-        self.assertEqual(str(F4 - F1), 'x/y')
-        self.assertEqual(str(F4 - F5), '%s*x/y' % f_5)
-        self.assertEqual(str(F0 - F1), '(%s - x)/y' % f_2)
-        self.assertEqual(str(F2 - F4), '(x/(x + y) - %s*x/y)' % f_2)
-
-        # Test Fraction '*'
-        self.assertEqual(str(F1 * f2), '%s*x/y' % f_2)
-        self.assertEqual(str(F1 * x), 'x*x/y')
-        self.assertEqual(str(F1 * p1), 'x*x')
-        self.assertEqual(str(F1 * S0), '(x + x*x/y)')
-        self.assertEqual(repr(F1 * S0), repr(Sum([x, Fraction( Product([x, x]), y)]) ))
-        self.assertEqual(str(F1 * F0), '%s*x/(y*y)' % f_2)
-
-        # Test Fraction '/'
-        self.assertEqual(str(F0 / f2), '%s/y' % f_1)
-        self.assertEqual(str(F1 / x), '%s/y' % f_1)
-        self.assertEqual(str(F4 / p1), '%s/(y*y)' % f_2)
-        self.assertEqual(str(F4 / x), '%s/y' % f_2)
-        self.assertEqual(str(F2 / y), 'x/(x*y + y*y)')
-        self.assertEqual(str(F0 / S0), '%s/(x*y + y*y)' % f_2)
-        # Silence output
-        push_level(CRITICAL)
-        self.assertRaises(Exception, F0.__truediv__, F0)
-        pop_level()
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestFractionOperators('testFractionOperators'))
-
diff --git a/test/unit/symbolics/testmixedsymbols.py b/test/unit/symbolics/testmixedsymbols.py
deleted file mode 100755
index 777d95e..0000000
--- a/test/unit/symbolics/testmixedsymbols.py
+++ /dev/null
@@ -1,303 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-03-11
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.symbolics import *
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-
-class TestMixedSymbols(unittest.TestCase):
-
-    def testMixedSymbols(self):
-
-        f_0 = format["float"](0)
-        f_2 = format["float"](2)
-        f_3 = format["float"](3)
-        f_4 = format["float"](4)
-        f_6 = format["float"](6)
-
-        f0 = FloatValue(-2.0)
-        f1 = FloatValue(3.0)
-        f2 = FloatValue(0)
-
-        s0 = Symbol("x", BASIS)
-        s1 = Symbol("y", GEO)
-        s2 = Symbol("z", GEO)
-
-        p0 = Product([s0, s1])
-        p1 = Product([f1, s0, s1])
-        p2 = Product([s0, f2, s2])
-        p3 = Product([s0, f0, s1, f1, s2])
-
-        S0 = Sum([s0, s1])
-        S1 = Sum([s0, s0])
-        S2 = Sum([f0, s0])
-        S3 = Sum([s0, f0, s0])
-
-        F0 = Fraction(f1, f0)
-        F1 = Fraction(s0, s1)
-        F2 = Fraction(s0, f1)
-        F3 = Fraction(f0, s1)
-
-        x = 1.2; y = 2.36; z = 6.75;
-        # Mixed products
-        mpp0 = Product([p0, s0])
-        mpp1 = Product([p1, p0])
-        mpp2 = Product([p2, p3])
-        mpp3 = Product([p1, mpp1])
-
-        mps0 = Product([S0, s0])
-        mps1 = Product([S1, S0])
-        mps2 = Product([S2, S3])
-        mps3 = Product([S1, mps1])
-
-        mpf0 = Product([F1, s0])
-        mpf1 = Product([F1, F2])
-        mpf2 = Product([F2, F3])
-        mpf3 = Product([F1, mpf1])
-
-#        print "\nMixed Products"
-#        print "\nmpp0: %s * %s = '%s'" % (p0, s0, mpp0)
-#        print "mpp1: %s * %s = '%s'" % (p1, p0, mpp1)
-#        print "mpp2: %s * %s = '%s'" % (p2, p3, mpp2)
-#        print "mpp3: %s * %s = '%s'" % (p1, mpp1, mpp3)
-#        print "\nmps0: %s * %s = '%s'" % (S0, s0, mps0)
-#        print "mps1: %s * %s = '%s'" % (S1, S0, mps1)
-#        print "mps2: %s * %s = '%s'" % (S2, S3, mps2)
-#        print "mps3: %s * %s = '%s'" % (S1, mps1, mps3)
-#        print "\nmpf0: %s * %s = '%s'" % (F1, s0, mpf0)
-#        print "mpf1: %s * %s = '%s'" % (F1, F2, mpf1)
-#        print "mpf2: %s * %s = '%s'" % (F2, F3, mpf2)
-#        print "mpf3: %s * %s = '%s'" % (F1, mpf1, mpf3)
-
-        self.assertAlmostEqual(eval(str(mpp0)), eval(str(p0))*eval(str(s0)))
-        self.assertAlmostEqual(eval(str(mpp1)), eval(str(p1))*eval(str(p0)))
-        self.assertAlmostEqual(eval(str(mpp2)), eval(str(p2))*eval(str(p3)))
-        self.assertAlmostEqual(eval(str(mpp3)), eval(str(p1))*eval(str(mpp1)))
-
-        self.assertAlmostEqual(eval(str(mps0)), eval(str(S0))*eval(str(s0)))
-        self.assertAlmostEqual(eval(str(mps1)), eval(str(S1))*eval(str(S0)))
-        self.assertAlmostEqual(eval(str(mps2)), eval(str(S2))*eval(str(S3)))
-        self.assertAlmostEqual(eval(str(mps3)), eval(str(S1))*eval(str(mps1)))
-
-        self.assertAlmostEqual(eval(str(mpf0)), eval(str(F1))*eval(str(s0)))
-        self.assertAlmostEqual(eval(str(mpf1)), eval(str(F1))*eval(str(F2)))
-        self.assertAlmostEqual(eval(str(mpf2)), eval(str(F2))*eval(str(F3)))
-        self.assertAlmostEqual(eval(str(mpf3)), eval(str(F1))*eval(str(mpf1)))
-
-        self.assertEqual(mpp0.ops(), 2)
-        self.assertEqual(mpp1.ops(), 4)
-        self.assertEqual(mpp2.ops(), 0)
-        self.assertEqual(mpp3.ops(), 6)
-
-        self.assertEqual(mps0.ops(), 2)
-        self.assertEqual(mps1.ops(), 3)
-        self.assertEqual(mps2.ops(), 4)
-        self.assertEqual(mps3.ops(), 5)
-
-        self.assertEqual(mpf0.ops(), 2)
-        self.assertEqual(mpf1.ops(), 3)
-        self.assertEqual(mpf2.ops(), 3)
-        self.assertEqual(mpf3.ops(), 5)
-
-        self.assertEqual(str(mpp0), 'x*x*y')
-        self.assertEqual(str(mpp1), '%s*x*x*y*y' % f_3)
-        self.assertEqual(str(mpp2), '%s' % f_0)
-        self.assertEqual(str(mpp3), '%s*x*x*x*y*y*y' % format["float"](9))
-        self.assertEqual(str(mps0), 'x*(x + y)')
-        self.assertEqual(str(mps1), '(x + x)*(x + y)')
-#        self.assertEqual(str(mps2), '(x-2)*(x + x-2)')
-        self.assertEqual(str(mps2), '(x + x-%s)*(x-%s)' % (f_2, f_2))
-        self.assertEqual(str(mps3), '(x + x)*(x + x)*(x + y)')
-        self.assertEqual(str(mpf0), 'x*x/y')
-        self.assertEqual(str(mpf1), 'x/%s*x/y' % f_3)
-        self.assertEqual(str(mpf2), '-%s/y*x/%s' % (f_2, f_3))
-        self.assertEqual(str(mpf3), 'x/%s*x/y*x/y' % f_3)
-
-
-        # Mixed sums
-        msp0 = Sum([p0, s0])
-        msp1 = Sum([p1, p0])
-        msp2 = Sum([p2, p3])
-        msp3 = Sum([p1, msp1])
-        msp4 = Sum([f2, f2])
-
-        mss0 = Sum([S0, s0])
-        mss1 = Sum([S1, S0])
-        mss2 = Sum([S2, S3])
-        mss3 = Sum([S1, mps1])
-
-        msf0 = Sum([F1, s0])
-        msf1 = Sum([F1, F2])
-        msf2 = Sum([F2, F3])
-        msf3 = Sum([F1, msf1])
-
-#        print "\nTesting Mixed Sums"
-#        print "\nmsp0: %s + %s = '%s'" % (p0, s0, msp0)
-#        print "msp1: %s + %s = '%s'" % (p1, p0, msp1)
-#        print "msp2: %s + %s = '%s'" % (p2, p3, msp2)
-#        print "msp3: %s + %s = '%s'" % (p1, msp1, msp3)
-#        print "msp4: %s + %s = '%s'" % (f2, f2, msp4)
-#        print "\nmss0: %s + %s = '%s'" % (S0, s0, mss0)
-#        print "mss1: %s + %s = '%s'" % (S1, S0, mss1)
-#        print "mss2: %s + %s = '%s'" % (S2, S3, mss2)
-#        print "mss3: %s + %s = '%s'" % (S1, mss1, mss3)
-#        print "\nmsf0: %s + %s = '%s'" % (F1, s0, msf0)
-#        print "msf1: %s + %s = '%s'" % (F1, F2, msf1)
-#        print "msf2: %s + %s = '%s'" % (F2, F3, msf2)
-#        print "msf3: %s + %s = '%s'" % (F1, msf1, msf3)
-#        print "msf3: %s + %s = '%s'" % (F1, msf1, msf3)
-
-        self.assertAlmostEqual(eval(str(msp0)), eval(str(p0))+eval(str(s0)))
-        self.assertAlmostEqual(eval(str(msp1)), eval(str(p1))+eval(str(p0)))
-        self.assertAlmostEqual(eval(str(msp2)), eval(str(p2))+eval(str(p3)))
-        self.assertAlmostEqual(eval(str(msp3)), eval(str(p1))+eval(str(msp1)))
-        self.assertEqual(str(msp4), '%s' % f_0)
-
-        self.assertAlmostEqual(eval(str(mss0)), eval(str(S0))+eval(str(s0)))
-        self.assertAlmostEqual(eval(str(mss1)), eval(str(S1))+eval(str(S0)))
-        self.assertAlmostEqual(eval(str(mss2)), eval(str(S2))+eval(str(S3)))
-        self.assertAlmostEqual(eval(str(mss3)), eval(str(S1))+eval(str(mps1)))
-
-        self.assertAlmostEqual(eval(str(msf0)), eval(str(F1))+eval(str(s0)))
-        self.assertAlmostEqual(eval(str(msf1)), eval(str(F1))+eval(str(F2)))
-        self.assertAlmostEqual(eval(str(msf2)), eval(str(F2))+eval(str(F3)))
-        self.assertAlmostEqual(eval(str(msf3)), eval(str(F1))+eval(str(msf1)))
-
-        self.assertEqual(msp0.ops(), 2)
-        self.assertEqual(msp1.ops(), 4)
-        self.assertEqual(msp2.ops(), 3)
-        self.assertEqual(msp3.ops(), 7)
-
-        self.assertEqual(mss0.ops(), 2)
-        self.assertEqual(mss1.ops(), 3)
-        self.assertEqual(mss2.ops(), 3)
-        self.assertEqual(mss3.ops(), 5)
-
-        self.assertEqual(msf0.ops(), 2)
-        self.assertEqual(msf1.ops(), 3)
-        self.assertEqual(msf2.ops(), 3)
-        self.assertEqual(msf3.ops(), 5)
-
-        self.assertEqual(str(msp0), '(x + x*y)')
-        self.assertEqual(str(msp1), '(%s*x*y + x*y)' % f_3)
-        self.assertEqual(str(msp2), '-%s*x*y*z' % f_6)
-        self.assertEqual(str(msp3), '(%s*x*y + %s*x*y + x*y)' % (f_3, f_3))
-        self.assertEqual(str(mss0), '(x + x + y)')
-        self.assertEqual(str(mss1), '(x + x + x + y)')
-        self.assertEqual(str(mss2), '(x + x + x-%s)' % f_4)
-        self.assertEqual(str(mss3), '(x + x + (x + x)*(x + y))')
-        self.assertEqual(str(msf0), '(x + x/y)')
-        self.assertEqual(str(msf1), '(x/%s + x/y)' % f_3)
-        self.assertEqual(str(msf2), '(x/%s-%s/y)' % (f_3, f_2))
-        self.assertEqual(str(msf3), '(x/%s + x/y + x/y)' % f_3)
-
-
-        # Mixed fractions
-        mfp0 = Fraction(p0, s0)
-        mfp1 = Fraction(p1, p0)
-        mfp2 = Fraction(p2, p3)
-        mfp3 = Fraction(p1, mfp1)
-
-        mfs0 = Fraction(S0, s0)
-        mfs1 = Fraction(S1, S0)
-        mfs2 = Fraction(S2, S3)
-        mfs3 = Fraction(S1, mfs1)
-
-        mff0 = Fraction(F1, s0)
-        mff1 = Fraction(F1, F2)
-        mff2 = Fraction(F2, F3)
-        mff3 = Fraction(F1, mff1)
-
-#        print "\nTesting Mixed Fractions"
-#        print "\nmfp0: %s / %s = '%s'" % (p0, s0, mfp0)
-#        print "mfp1: %s / %s = '%s'" % (p1, p0, mfp1)
-#        print "mfp2: %s / %s = '%s'" % (p2, p3, mfp2)
-#        print "mfp3: %s / %s = '%s'" % (p1, mfp1, mfp3)
-#        print "\nmfs0: %s / %s = '%s'" % (S0, s0, mfs0)
-#        print "mfs1: %s / %s = '%s'" % (S1, S0, mfs1)
-#        print "mfs2: %s / %s = '%s'" % (S2, S3, mfs2)
-#        print "mfs3: %s / %s = '%s'" % (S1, mfs1, mfs3)
-#        print "\nmff0: %s / %s = '%s'" % (F1, s0, mff0)
-#        print "mff1: %s / %s = '%s'" % (F1, F2, mff1)
-#        print "mff2: %s / %s = '%s'" % (F2, F3, mff2)
-#        print "mff3: %s / %s = '%s'" % (F1, mff1, mff3)
-
-        self.assertAlmostEqual(eval(str(mfp0)), eval(str(p0))/eval(str(s0)))
-        self.assertAlmostEqual(eval(str(mfp1)), eval(str(p1))/eval(str(p0)))
-        self.assertAlmostEqual(eval(str(mfp2)), eval(str(p2))/eval(str(p3)))
-        self.assertAlmostEqual(eval(str(mfp3)), eval(str(p1))/eval(str(mfp1)))
-
-        self.assertAlmostEqual(eval(str(mfs0)), eval(str(S0))/eval(str(s0)))
-        self.assertAlmostEqual(eval(str(mfs1)), eval(str(S1))/eval(str(S0)))
-        self.assertAlmostEqual(eval(str(mfs2)), eval(str(S2))/eval(str(S3)))
-        self.assertAlmostEqual(eval(str(mfs3)), eval(str(S1))/eval(str(mfs1)))
-
-        self.assertAlmostEqual(eval(str(mff0)), eval(str(F1))/eval(str(s0)))
-        self.assertAlmostEqual(eval(str(mff1)), eval(str(F1))/eval(str(F2)))
-        self.assertAlmostEqual(eval(str(mff2)), eval(str(F2))/eval(str(F3)))
-        self.assertAlmostEqual(eval(str(mff3)), eval(str(F1))/eval(str(mff1)))
-
-        self.assertEqual(mfp0.ops(), 2)
-        self.assertEqual(mfp1.ops(), 4)
-        self.assertEqual(mfp2.ops(), 0)
-        self.assertEqual(mfp3.ops(), 7)
-
-        self.assertEqual(mfs0.ops(), 2)
-        self.assertEqual(mfs1.ops(), 3)
-        self.assertEqual(mfs2.ops(), 4)
-        self.assertEqual(mfs3.ops(), 5)
-
-        self.assertEqual(mff0.ops(), 2)
-        self.assertEqual(mff1.ops(), 3)
-        self.assertEqual(mff2.ops(), 3)
-        self.assertEqual(mff3.ops(), 5)
-
-        self.assertEqual(str(mfp0), 'x*y/x')
-        self.assertEqual(str(mfp1), '%s*x*y/(x*y)' % f_3)
-        self.assertEqual(str(mfp2), '%s' % f_0)
-        self.assertEqual(str(mfp3), '%s*x*y/(%s*x*y/(x*y))' % (f_3, f_3))
-        self.assertEqual(str(mfs0), '(x + y)/x')
-        self.assertEqual(str(mfs1), '(x + x)/(x + y)')
-        self.assertEqual(str(mfs2), '(x-%s)/(x + x-%s)' % (f_2, f_2))
-        self.assertEqual(str(mfs3), '(x + x)/((x + x)/(x + y))')
-        self.assertEqual(str(mff0), '(x/y)/x')
-        self.assertEqual(str(mff1), '(x/y)/(x/%s)' % f_3)
-        self.assertEqual(str(mff2), '(x/%s)/(-%s/y)' % (f_3, f_2))
-        self.assertEqual(str(mff3), '(x/y)/((x/y)/(x/%s))' % f_3)
-
-        # Use p1 as a base expression for Symbol
-        s3 = Symbol(format["cos"](str(p1)), CONST, p1, 1)
-        self.assertEqual(str(s3), 'std::cos(%s*x*y)' % f_3)
-        self.assertEqual(s3.ops(), 3)
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestMixedSymbols('testMixedSymbols'))
-
diff --git a/test/unit/symbolics/testnotfinished.py b/test/unit/symbolics/testnotfinished.py
deleted file mode 100755
index a14a1d4..0000000
--- a/test/unit/symbolics/testnotfinished.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.symbolics import *
-from ffc.quadrature.sumobj import _group_fractions
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-
-class TestNotFinished(unittest.TestCase):
-
-    def testNotFinished(self):
-        "Stuff that would be nice to implement."
-
-        f_1 = format["float"](1)
-        f_2 = format["float"](2)
-        f_4 = format["float"](4)
-        f_8 = format["float"](8)
-
-        f0 = FloatValue(4)
-        f1 = FloatValue(2)
-        f2 = FloatValue(8)
-        s0 = Symbol("x", GEO)
-        s1 = Symbol("y", GEO)
-        s2 = Symbol("z", GEO)
-        a = Symbol("a", GEO)
-        b = Symbol("b", GEO)
-        c = Symbol("c", GEO)
-
-        # Aux. expressions
-        p0 = Product([f1, s0])
-        p1 = Product([f2, s1])
-        p2 = Product([s0, s1])
-
-        F0 = Fraction(f0, s0)
-
-        S0 = Sum([p0, p1])
-        S1 = Sum([s0, p2])
-        S2 = Sum([FloatValue(1), s1])
-        S3 = Sum([F0, F0])
-
-        # Thing to be implemented
-        e0 = f0 / S0
-        e1 = s0 / S1
-        e2 = S2 / S1
-        e3 = _group_fractions(S3)
-        e4 = Sum([Fraction(f1*s0, a*b*c), Fraction(s0, a*b)]).expand().reduce_ops()
-
-        # Tests that pass the current implementation
-        self.assertEqual(str(e0), '%s/(%s*x + %s*y)' % (f_4, f_2, f_8))
-        self.assertEqual(str(e1), 'x/(x + x*y)')
-        self.assertEqual(str(e2), '(%s + y)/(x + x*y)' % f_1)
-        self.assertEqual(str(e3), '%s/x' % f_8)
-        self.assertEqual(str(e4), 'x*(%s/(a*b) + %s/(a*b*c))' % (f_1, f_2))
-
-        # Tests that should pass in future implementations (change NotEqual to Equal)
-        self.assertNotEqual(str(e0), '%s/(x + %s*y)' % (f_2, f_4))
-        self.assertNotEqual(str(e1), '%s/(%s + y)' % (f_1, f_1))
-        self.assertNotEqual(str(e2), '%s/x' % f_1)
-        self.assertNotEqual(str(e4), 'x*(%s/c + %s)/(a*b)' % (f_2, f_1))
-
-        # TODO: Would it be a good idea to reduce expressions wrt. var_type
-        # without first expanding?
-        E0 = Product([ Sum([ Product([ Symbol('B0', BASIS), Product([Symbol('B1', BASIS), Sum([s0]), Sum([s0])]) ]),
-                             Product([Symbol('B0', BASIS), Symbol('B1', BASIS)]) ]) ])
-        Er0 = E0.reduce_vartype(BASIS)
-        Ex0 = E0.expand().reduce_vartype(BASIS)
-#        print "%s, red(BASIS): ('%s', '%s')" %(E0, Er0[0][0], Er0[0][1])
-#        print "%s, red(BASIS): ('%s', '%s')" %(E0, Ex0[0][0], Ex0[0][1])
-        self.assertNotEqual( Ex0[0][1], Er0[0][1].expand() )
-
-        # Both of these reductions should work at the same time
-        # 1) 2/(x/(a+b) + y/(a+b)) --> 2(a+b)/(x+y)
-        # 2) 2/(x + y/(a+b)) --> no reduction, or if divisions are more expensive
-        # 3) 2/(x + y/(a+b)) --> 2(a+b)/((a+b)x + y)
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestNotFinished('testNotFinished'))
-
diff --git a/test/unit/symbolics/testpoisson.py b/test/unit/symbolics/testpoisson.py
deleted file mode 100755
index 9843ea3..0000000
--- a/test/unit/symbolics/testpoisson.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.reduce_operations import operation_count, expand_operations, reduce_operations
-from ffc.quadrature.symbolics import *
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-
-class TestPoisson(unittest.TestCase):
-
-    def testPoisson(self):
-
-        poisson = """((Jinv_00*FE0_D10_ip_j + Jinv_10*FE0_D01_ip_j)*(Jinv_00*FE0_D10_ip_k + Jinv_10*FE0_D01_ip_k) + (Jinv_01*FE0_D10_ip_j + Jinv_11*FE0_D01_ip_j)*(Jinv_01*FE0_D10_ip_k + Jinv_11*FE0_D01_ip_k))*W4_ip*det"""
-
-        expr = Product([
-                     Sum([
-                          Product([
-                                   Sum([
-                                        Product([Symbol("Jinv_00", GEO), Symbol("FE0_D10_ip_j", BASIS)])
-                                        ,
-                                        Product([Symbol("Jinv_10", GEO), Symbol("FE0_D01_ip_j", BASIS)])
-                                       ]),
-                                   Sum([
-                                        Product([Symbol("Jinv_00", GEO), Symbol("FE0_D10_ip_k", BASIS)])
-                                        ,
-                                        Product([Symbol("Jinv_10", GEO), Symbol("FE0_D01_ip_k", BASIS)])
-                                       ])
-                                  ])
-                          ,
-                          Product([
-                                   Sum([
-                                        Product([Symbol("Jinv_01", GEO), Symbol("FE0_D10_ip_j", BASIS)])
-                                        ,
-                                        Product([Symbol("Jinv_11", GEO), Symbol("FE0_D01_ip_j", BASIS)])
-                                       ]),
-                                   Sum([
-                                        Product([Symbol("Jinv_01", GEO), Symbol("FE0_D10_ip_k", BASIS)])
-                                        ,
-                                        Product([Symbol("Jinv_11", GEO), Symbol("FE0_D01_ip_k", BASIS)])
-                                       ])
-                                  ])
-                         ])
-                     ,
-                     Symbol("W4_ip", IP)
-                     ,
-                     Symbol("det", GEO)
-                    ])
-
-#        print "\nPoisson"
-#        start = time.time()
-        expr_exp = expr.expand()
-#        print "Poisson: time, expand():     ", time.time() - start
-
-#        start = time.time()
-        poisson_exp = expand_operations(poisson, format)
-#        print "Poisson: time, old expand(): ", time.time() - start
-
-#        start = time.time()
-        expr_red = expr_exp.reduce_ops()
-#        print "Poisson: time, reduce_ops(): ", time.time() - start
-
-#        start = time.time()
-        poisson_red = reduce_operations(poisson, format)
-#        print "Poisson: time, old reduce(): ", time.time() - start
-
-        poisson_exp_ops = operation_count(poisson_exp, format)
-        poisson_red_ops = operation_count(poisson_red, format)
-#        print "expr.ops():           ", expr.ops()
-#        print "Poisson old exp: ops: ", poisson_exp_ops
-#        print "expr_exp.ops():       ", expr_exp.ops()
-#        print "Poisson old red: ops: ", poisson_red_ops
-#        print "expr_red.ops():       ", expr_red.ops()
-
-#        print "expr: ", expr
-#        print "exp:  ", expr_exp
-#        print "red:  ", expr_red
-
-        Jinv_00, Jinv_01, Jinv_10, Jinv_11, W4_ip, det = (1.1, 1.5, -4.3, 1.7, 11, 52.3)
-        FE0_D01_ip_j, FE0_D10_ip_j, FE0_D01_ip_k, FE0_D10_ip_k = (1.12, 5.7, -9.3, 7.4)
-        self.assertAlmostEqual(eval(str(expr)), eval(str(expr_exp)))
-        self.assertAlmostEqual(eval(str(expr)), eval(str(expr_red)))
-        self.assertAlmostEqual(eval(str(expr)), eval(str(poisson)))
-        self.assertAlmostEqual(eval(str(expr)), eval(str(poisson_exp)))
-        self.assertAlmostEqual(eval(str(expr)), eval(str(poisson_red)))
-        self.assertEqual(expr.ops(), 17)
-        self.assertEqual(poisson_exp_ops, 47)
-        self.assertEqual(expr_exp.ops(), 47)
-        self.assertEqual(poisson_red_ops, 23)
-        self.assertEqual(expr_red.ops(), 23)
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestPoisson('testPoisson'))
-
diff --git a/test/unit/symbolics/testproduct.py b/test/unit/symbolics/testproduct.py
deleted file mode 100755
index 6cd0e00..0000000
--- a/test/unit/symbolics/testproduct.py
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.symbolics import *
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-
-class TestProduct(unittest.TestCase):
-
-    def testProduct(self):
-        "Test simple product instance."
-
-        f_0 = format["float"](0)
-        f_1 = format["float"](1)
-        f0 = FloatValue(-2.0)
-        f1 = FloatValue(3.0)
-        f2 = FloatValue(0)
-        f3 = FloatValue(-1)
-        f4 = FloatValue(1)
-        f5 = FloatValue(-0.5)
-        f6 = FloatValue(2.0)
-        s0 = Symbol("x", BASIS)
-        s1 = Symbol("y", GEO)
-        s2 = Symbol("z", GEO)
-
-        p0 = Product([])
-        p1 = Product([s0])
-        p2 = Product([s0, s1])
-        p3 = Product([f1, s0, s1])
-        p4 = Product([s0, f2, s2])
-        p5 = Product([s0, f0, s1, f1, s2])
-        p6 = Product([s0, f3, s1])
-        p7 = Product([s0, f4, s1]).expand().reduce_ops()
-        p8 = Product([s0, f0, s2, f5])
-        p9 = Product([s0, s1])
-        p10 = Product([p0, p1])
-        p11 = Product([f5, f0])
-        p12 = Product([f6, f5])
-        p13 = Product([f6, f5]).expand()
-        p14 = Product([f1, f2])
-        p_tmp = Product([f1])
-        p_tmp.expand()
-        p15 = Product([p_tmp, s0])
-
-#        print "\nTesting Products"
-#        print "\np0: [] '%s'" % (p0)
-#        print "\np1: %s '%s'" % (s0, p1)
-#        print "\np2: %s * %s = '%s'" % (s0, s1, p2)
-#        print "\np3: %s * %s * %s = '%s'" % (f1, s0, s1, p3)
-#        print "\np4: %s * %s * %s = '%s'" % (s0, f2, s2, p4)
-#        print "\np5: %s * %s * %s * %s * %s = '%s'" % (s0, f0, s1, f1, s2, p5)
-#        print "\np6: %s * %s * %s = '%s'" % (s0, f3, s1, p6)
-#        print "\np7: %s * %s * %s = '%s'" % (s0, f4, s1, p7)
-#        print "\np8: %s * %s * %s * %s = '%s'" % (s0, f0, s2, f5, p8)
-#        print "\np9: %s * %s = '%s'" % (s0, s1, p9)
-#        print "\np10: %s * %s = '%s'" % (p0, p1, p10)
-#        print "\np11: %s * %s = '%s'" % (f6, f1, p11)
-#        print "\np12: %s * %s = '%s'" % (f6, f5, p12)
-#        print "\np13: %s * %s = '%s'" % (f6, f5, p13)
-#        print "\np14: %s * %s = '%s'" % (f1, f2, p14)
-
-        self.assertEqual(repr(p0), "Product([FloatValue(%s)])" % f_0)
-        self.assertEqual(repr(p1), "Product([Symbol('x', BASIS)])")
-        self.assertEqual(repr(p3), "Product([FloatValue(%s), Symbol('x', BASIS), Symbol('y', GEO)])"\
-                                    % format["float"](3))
-        self.assertEqual(repr(p6), "Product([FloatValue(-%s), Symbol('x', BASIS), Symbol('y', GEO)])" % f_1)
-        self.assertEqual(repr(p7), "Product([Symbol('x', BASIS), Symbol('y', GEO)])")
-        self.assertEqual(repr(p8), "Product([Symbol('x', BASIS), Symbol('z', GEO)])")
-        self.assertEqual(str(p2), 'x*y')
-        self.assertEqual(str(p4), '%s' % f_0)
-        self.assertEqual(str(p5), '-%s*x*y*z' % format["float"](6))
-        self.assertEqual(str(p6), ' - x*y')
-        self.assertEqual(str(p7), 'x*y')
-        self.assertEqual(str(p8), 'x*z')
-        self.assertEqual(str(p9), 'x*y')
-        self.assertEqual(p0.val, 0)
-        self.assertEqual(str(p10), '%s' % f_0)
-        self.assertEqual(str(p11), '%s' % f_1)
-        self.assertEqual(str(p12), '-%s' % f_1)
-        self.assertEqual(str(p13), '-%s' % f_1)
-        self.assertEqual(repr(p14), "Product([FloatValue(%s)])" % f_0)
-        self.assertEqual(repr(p14.expand()), "FloatValue(%s)" % f_0)
-
-        self.assertEqual(p1 == p1, True)
-        self.assertEqual(p1 == p7, False)
-        self.assertEqual(p4 != p3, True)
-        self.assertEqual(p2 == p9, True)
-        self.assertEqual(p2 == p3, False)
-
-        self.assertEqual(p0.ops(), 0)
-        self.assertEqual(p1.ops(), 0)
-        self.assertEqual(p2.ops(), 1)
-        self.assertEqual(p3.ops(), 2)
-        self.assertEqual(p4.ops(), 0)
-        self.assertEqual(p5.ops(), 3)
-        self.assertEqual(p6.ops(), 1)
-        self.assertEqual(p7.ops(), 1)
-        self.assertEqual(p8.ops(), 1)
-        self.assertEqual(p9.ops(), 1)
-        self.assertEqual(p10.ops(), 0)
-        self.assertEqual(p14.ops(), 0)
-
-        # Test hash
-        l = [p3]
-        d = {p3:0}
-        p10 = Product([f1, s0, s1])
-
-        self.assertEqual(p3 in l, True)
-        self.assertEqual(p3 in d, True)
-        self.assertEqual(p10 in l, True)
-        self.assertEqual(p10 in d, True)
-        self.assertEqual(p2 in l, False)
-        self.assertEqual(p2 in d, False)
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestProduct('testProduct'))
-
diff --git a/test/unit/symbolics/testproductoperators.py b/test/unit/symbolics/testproductoperators.py
deleted file mode 100755
index 609b8fe..0000000
--- a/test/unit/symbolics/testproductoperators.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.reduce_operations import operation_count, expand_operations, reduce_operations
-from ffc.quadrature.symbolics import *
-from ffc.quadrature.sumobj import _group_fractions
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-from ffc.log import error, push_level, pop_level, CRITICAL
-
-class TestProductOperators(unittest.TestCase):
-
-    def testProductOperators(self):
-        "Test binary operators"
-
-        f_0 = format["float"](0)
-        f_2 = format["float"](2)
-        f_4 = format["float"](4)
-
-        f0 = FloatValue(0.0)
-        f1 = FloatValue(1.0)
-        f2 = FloatValue(2.0)
-        fm1 = FloatValue(-1.0)
-        fm3 = FloatValue(-3.0)
-
-        x = Symbol("x", GEO)
-        y = Symbol("y", GEO)
-        z = Symbol("z", GEO)
-
-        p0 = Product([f2, x])
-        p1 = Product([x, y])
-        p2 = Product([f2, z])
-        p3 = Product([x, y, z])
-
-        S0 = Sum([x, y])
-        S1 = Sum([x, z])
-
-        F0 = Fraction(f2, x)
-        F1 = Fraction(x, y)
-        F2 = Fraction(x, S0)
-        F3 = Fraction(x, y)
-        F4 = Fraction(p0, y)
-
-        # Test Product '+'
-        self.assertEqual(str(p0 + f2), '(%s + %s*x)' % (f_2, f_2))
-        self.assertEqual(str(p0 + x), '%s*x' % format["float"](3))
-        self.assertEqual(str(p0 + y), '(y + %s*x)' % f_2)
-        self.assertEqual(str(p0 + p0), '%s*x' % f_4)
-        self.assertEqual(str(p0 + p1), '(%s*x + x*y)' % f_2)
-        self.assertEqual(p0 + Product([fm1, x]), x)
-        self.assertEqual(Product([fm1, x]) + x, f0)
-        self.assertEqual(str(x + Product([fm1, x])), '%s' % f_0)
-        self.assertEqual(str(p0 + S0), '(x + y + %s*x)' % f_2)
-        self.assertEqual(str(p0 + F3), '(%s*x + x/y)' % f_2)
-
-        # Test Product '-'
-        self.assertEqual(str(p0 - f2), '(%s*x-%s)' % (f_2, f_2))
-        self.assertEqual(str(p0 - x), 'x')
-        self.assertEqual(str(p0 - y), '(%s*x - y)' % f_2)
-        self.assertEqual(str(p0 - p0), '%s' % f_0)
-        self.assertEqual(str(p0 - p1), '(%s*x - x*y)' % f_2)
-        self.assertEqual(str(p0 - S0), '(%s*x - (x + y))' % f_2)
-        self.assertEqual(str(p0 - F3), '(%s*x - x/y)' % f_2)
-
-        # Test Product '*', only need to test float, symbol and product.
-        # Sum and fraction are handled by 'other'
-        self.assertEqual(str(p0 * f0), '%s' % f_0)
-        self.assertEqual(str(p0 * fm3), '-%s*x' % format["float"](6))
-        self.assertEqual(str(p0 * y), '%s*x*y' % f_2)
-        self.assertEqual(str(p0 * p1), '%s*x*x*y' % f_2)
-
-        # Test Product '/'
-        self.assertEqual(str(Product([f0, x])/x), '%s' % f_0)
-        self.assertEqual(str(p0/S0), '%s*x/(x + y)' % f_2)
-        self.assertEqual(p1/y, x)
-        self.assertEqual(p1/p2, Fraction(Product([p1, FloatValue(0.5)]), z))
-        self.assertEqual(p1/z, Fraction(p1, z))
-        self.assertEqual(p0/Product([f2, p1]), Fraction(f1, y))
-        self.assertEqual(p1/p0,
-        Product([FloatValue(0.5), y]))
-        self.assertEqual(p1/p1, f1)
-        self.assertEqual(p1/p3, Fraction(f1, z))
-        self.assertEqual(str(p1/p3), '%s/z' % format["float"](1))
-        # Silence output
-        push_level(CRITICAL)
-        self.assertRaises(Exception, p0.__truediv__, f0)
-        self.assertRaises(Exception, p0.__truediv__, F0)
-        pop_level()
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestProductOperators('testProductOperators'))
-
diff --git a/test/unit/symbolics/testrealexamples.py b/test/unit/symbolics/testrealexamples.py
deleted file mode 100755
index 5efbd03..0000000
--- a/test/unit/symbolics/testrealexamples.py
+++ /dev/null
@@ -1,158 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-from __future__ import print_function
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.symbolics import *
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-
-class TestRealExamples(unittest.TestCase):
-
-    def testRealExamples(self):
-
-#        p = Product([
-#                    Sum([
-#                        Product([
-#                                  Symbol('w[5][0]', GEO),
-#                                  Fraction(
-#                                            Product([
-#                                                    Symbol('FE0_C1_D01[ip][k]', BASIS), Symbol('Jinv_10', GEO)
-#                                                    ]),
-#                                            Product([
-#                                                    Symbol('w[5][0]', GEO), Symbol('w[5][0]', GEO)
-#                                                    ])
-#                                            )
-#                                                    
-#                                ]),
-#                        Product([
-#                                  Symbol('w[5][0]', GEO),
-#                                  Fraction(
-#                                          Product([
-#                                                    Symbol('FE0_C1_D01[ip][k]', BASIS), Symbol('Jinv_11', GEO)
-#                                                  ]),
-#                                          Product([
-#                                                    Symbol('w[5][0]', GEO), Symbol('w[5][0]', GEO)
-#                                                  ])
-#                                          )
-#                                ])
-#                        ])
-#                   ])
-
-#        p = Product([
-#                      Sum([
-#                            Product([
-#                                      Symbol('x', BASIS),
-#                                      Sum([
-#                                            Symbol('y', BASIS),
-#                                            Product([
-#                                                      Sum([
-#                                                           Symbol('y', BASIS),
-#                                                           Product([
-#                                                                    Symbol('y', BASIS),
-#                                                                    Symbol('z', GEO)
-#                                                                   ]),
-#                                                           Symbol('y', BASIS)
-#                                                         ])
-#                                                    ]),
-#                                           Symbol('y', BASIS)
-#                                          ])
-#                                    ]),
-#                          Product([
-#                                  Symbol('x', BASIS),
-#                                  Sum([
-#                                        Product([
-#                                                Symbol('y', BASIS),
-#                                                              Symbol('z', GEO)
-#                                              ]),
-#                                        Symbol('y', BASIS)
-#                                      ])
-#                                ])
-
-#                          ])
-#                      ])
-
-#        p = Product([
-#                     Sum([
-#                          Product([
-#                                    Symbol('FE0_C1_D01[ip][j]', BASIS),
-#                                    Product([
-#                                            Symbol('FE0_C1_D01[ip][k]', BASIS),
-#                                            Sum([
-#                                                 Symbol('w[4][0]', GEO)
-#                                                ]),
-#                                            Sum([
-#                                                  Symbol('w[4][0]', GEO)
-#                                                ])
-#                                          ])
-#                                    ]),
-#                        Product([
-#                                  Symbol('FE0_C1_D01[ip][j]', BASIS),
-#                                  Symbol('FE0_C1_D01[ip][k]', BASIS)
-#                                ])
-#                         ])
-#                      ])
-
-        p = Product([ Symbol('FE0_C1_D01[ip][k]', BASIS),
-                      Sum([
-                            Symbol('Jinv_10', GEO),
-                            Symbol('w[4][0]', GEO)
-                          ]),
-                      Sum([
-                            Symbol('Jinv_10', GEO),
-                            Symbol('w[4][0]', GEO)
-                          ])
-                    ])
-
-#        print "p: ", p
-#        print p.expand()
-
-        br = p.reduce_vartype(BASIS)
-#        print
-#        print br[0]
-#        print br[1]
-
-        be = p.expand().reduce_vartype(BASIS)
-#        print
-#        print be[0][0]
-#        print be[0][1]
-        if len(be) == 1:
-            if be[0][0] == br[0]:
-                if be[0][1] != br[1].expand():
-#                        print "\np: ", repr(p)
-                        print("\nbe: ", repr(be[0][1]))
-                        print("\nbr: ", repr(br[1].expand()))
-                        print("\nbe: ", be[0][1])
-                        print("\nbr: ", br[1].expand())
-                        error("here1")
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestRealExamples('testRealExamples'))
-
diff --git a/test/unit/symbolics/testreducegip.py b/test/unit/symbolics/testreducegip.py
deleted file mode 100755
index 3b75418..0000000
--- a/test/unit/symbolics/testreducegip.py
+++ /dev/null
@@ -1,240 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.symbolics import *
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-
-class TestReduceGIP(unittest.TestCase):
-
-    def testReduceGIP(self):
-
-        expr = Sum([
-                    Product([
-                              Symbol("F17", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F18", IP),
-                              Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G0", GEO)
-                            ]),
-                    Product([
-                              Symbol("F11", IP), Symbol("F11", IP), Symbol("F13", IP), Symbol("F13", IP),
-                              Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G1", GEO)
-                            ]),
-                    Product([
-                              Symbol("F11", IP), Symbol("F11", IP), Symbol("F12", IP), Symbol("F13", IP),
-                              Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G2", GEO)
-                            ]),
-                    Product([
-                              Symbol("F10", IP), Symbol("F11", IP), Symbol("F12", IP), Symbol("F20", IP),
-                              Symbol("F3", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G2", GEO)
-                            ]),
-                    Product([
-                              Symbol("F17", IP), Symbol("F18", IP), Symbol("F20", IP), Symbol("F3", IP),
-                              Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G3", GEO)
-                            ]),
-                    Product([
-                              Symbol("F10", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F20", IP),
-                              Symbol("F8", IP), Symbol("W9", IP), Symbol("G4", GEO)
-                            ]),
-                    Product([
-                              Symbol("F10", IP), Symbol("F20", IP), Symbol("F8", IP), Symbol("F8", IP),
-                              Symbol("F9", IP), Symbol("W9", IP), Symbol("G4", GEO)
-                            ]),
-                    Product([
-                              Symbol("F11", IP), Symbol("F13", IP), Symbol("F17", IP), Symbol("F18", IP),
-                              Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G2", GEO)
-                            ]),
-                    Product([
-                              Symbol("F20", IP), Symbol("F8", IP), Symbol("F8", IP), Symbol("F9", IP),
-                              Symbol("F9", IP), Symbol("W9", IP), Symbol("G5", GEO)
-                            ]),
-                    Product([
-                              Symbol("F11", IP), Symbol("F12", IP), Symbol("F17", IP), Symbol("F18", IP),
-                              Symbol("F20", IP), Symbol("W9", IP), Symbol("G6", GEO)
-                            ]),
-                    Product([
-                              Symbol("F10", IP), Symbol("F10", IP), Symbol("F20", IP), Symbol("F3", IP),
-                              Symbol("F8", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G1", GEO)
-                            ]),
-                    Product([
-                              Symbol("F10", IP), Symbol("F10", IP), Symbol("F20", IP), Symbol("F8", IP),
-                              Symbol("F8", IP), Symbol("W9", IP), Symbol("G7", GEO)
-                            ]),
-                    Product([
-                              Symbol("F17", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F19", IP),
-                              Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G2", GEO)
-                            ]),
-                    Product([
-                              Symbol("F11", IP), Symbol("F12", IP), Symbol("F17", IP), Symbol("F19", IP),
-                              Symbol("F20", IP), Symbol("W9", IP), Symbol("G4", GEO)
-                            ]),
-                    Product([
-                              Symbol("F11", IP), Symbol("F11", IP), Symbol("F13", IP), Symbol("F13", IP),
-                              Symbol("F20", IP), Symbol("W9", IP), Symbol("G7", GEO)
-                            ]),
-                    Product([
-                              Symbol("F10", IP), Symbol("F17", IP), Symbol("F19", IP), Symbol("F20", IP),
-                              Symbol("F3", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G8", GEO)
-                            ]),
-                    Product([
-                              Symbol("F17", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F18", IP),
-                              Symbol("F20", IP), Symbol("W9", IP), Symbol("G5", GEO)
-                            ]),
-                    Product([
-                              Symbol("F10", IP), Symbol("F17", IP), Symbol("F19", IP), Symbol("F20", IP),
-                              Symbol("F8", IP), Symbol("W9", IP), Symbol("G9", GEO)
-                            ]),
-                    Product([
-                              Symbol("F10", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F20", IP),
-                              Symbol("F3", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G2", GEO)
-                            ]),
-                    Product([
-                              Symbol("F17", IP), Symbol("F18", IP), Symbol("F20", IP), Symbol("F8", IP),
-                              Symbol("F9", IP), Symbol("W9", IP), Symbol("G6", GEO)
-                            ]),
-                    Product([
-                              Symbol("F10", IP), Symbol("F11", IP), Symbol("F13", IP), Symbol("F20", IP),
-                              Symbol("F3", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G8", GEO)
-                            ]),
-                    Product([
-                              Symbol("F11", IP), Symbol("F11", IP), Symbol("F12", IP), Symbol("F13", IP),
-                              Symbol("F20", IP), Symbol("W9", IP), Symbol("G4", GEO)
-                            ]),
-                    Product([
-                              Symbol("F10", IP), Symbol("F11", IP), Symbol("F13", IP), Symbol("F20", IP),
-                              Symbol("F8", IP), Symbol("W9", IP), Symbol("G9", GEO)
-                            ]),
-                    Product([
-                              Symbol("F11", IP), Symbol("F13", IP), Symbol("F20", IP), Symbol("F3", IP),
-                              Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G2", GEO)
-                            ]),
-                    Product([
-                              Symbol("F11", IP), Symbol("F12", IP), Symbol("F17", IP), Symbol("F18", IP),
-                              Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G3", GEO)
-                            ]),
-                    Product([
-                              Symbol("F10", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("F8", IP),
-                              Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G2", GEO)
-                            ]),
-                    Product([
-                              Symbol("F17", IP), Symbol("F19", IP), Symbol("F20", IP), Symbol("F8", IP),
-                              Symbol("F9", IP), Symbol("W9", IP), Symbol("G4", GEO)
-                            ]),
-                    Product([
-                              Symbol("F11", IP), Symbol("F13", IP), Symbol("F17", IP), Symbol("F19", IP),
-                              Symbol("F20", IP), Symbol("W9", IP), Symbol("G9", GEO)
-                            ]),
-                    Product([
-                              Symbol("F11", IP), Symbol("F13", IP), Symbol("F17", IP), Symbol("F18", IP),
-                              Symbol("F20", IP), Symbol("W9", IP), Symbol("G4", GEO)
-                            ]),
-                    Product([
-                              Symbol("F11", IP), Symbol("F11", IP), Symbol("F12", IP), Symbol("F12", IP),
-                              Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G0", GEO)
-                            ]),
-                    Product([
-                              Symbol("F17", IP), Symbol("F17", IP), Symbol("F19", IP), Symbol("F19", IP),
-                              Symbol("F20", IP), Symbol("W9", IP), Symbol("G7", GEO)
-                            ]),
-                    Product([
-                              Symbol("F17", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F19", IP),
-                              Symbol("F20", IP), Symbol("W9", IP), Symbol("G4", GEO)
-                            ]),
-                    Product([
-                              Symbol("F20", IP), Symbol("F3", IP), Symbol("F8", IP), Symbol("F8", IP),
-                              Symbol("F9", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G0", GEO)
-                            ]),
-                    Product([
-                              Symbol("F11", IP), Symbol("F12", IP), Symbol("F20", IP), Symbol("F8", IP),
-                              Symbol("F9", IP), Symbol("W9", IP), Symbol("G6", GEO)
-                            ]),
-                    Product([
-                              Symbol("F11", IP), Symbol("F13", IP), Symbol("F17", IP), Symbol("F19", IP),
-                              Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G8", GEO)
-                            ]),
-                    Product([
-                              Symbol("F17", IP), Symbol("F19", IP), Symbol("F20", IP), Symbol("F3", IP),
-                              Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G2", GEO)
-                            ]),
-                    Product([
-                              Symbol("F10", IP), Symbol("F11", IP), Symbol("F12", IP), Symbol("F20", IP),
-                              Symbol("F8", IP), Symbol("W9", IP), Symbol("G4", GEO)
-                            ]),
-                    Product([
-                              Symbol("F11", IP), Symbol("F13", IP), Symbol("F20", IP), Symbol("F8", IP),
-                              Symbol("F9", IP), Symbol("W9", IP), Symbol("G4", GEO)
-                            ]),
-                    Product([
-                              Symbol("F11", IP), Symbol("F11", IP), Symbol("F12", IP), Symbol("F12", IP),
-                              Symbol("F20", IP), Symbol("W9", IP), Symbol("G5", GEO)
-                            ]),
-                    Product([
-                              Symbol("F11", IP), Symbol("F12", IP), Symbol("F20", IP), Symbol("F3", IP),
-                              Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G3", GEO)
-                            ]),
-                    Product([
-                              Symbol("F17", IP), Symbol("F17", IP), Symbol("F19", IP), Symbol("F19", IP),
-                              Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G1", GEO)
-                            ]),
-                    Product([
-                              Symbol("F11", IP), Symbol("F12", IP), Symbol("F17", IP), Symbol("F19", IP),
-                              Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G2", GEO)
-                            ])
-                   ])
-
-#        print "\nReduceGIP"
-        start = time.time()
-        expr_exp = expr.expand()
-#        print "ReduceGIP: time, expand()      ", time.time() - start
-
-        start = time.time()
-        expr_red = expr_exp.reduce_ops()
-#        print "ReduceGIP: time, reduce_ops(): ", time.time() - start
-
-#        print "expr.ops():     ", expr.ops()
-#        print "expr_exp.ops(): ", expr_exp.ops()
-#        print "expr_red.ops(): ", expr_red.ops()
-
-#        print "expr: ", expr
-#        print "exp:  ", expr_exp
-#        print "red:  ", expr_red
-
-        W9 = 9
-        F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20 = [0.123 * i for i in range(1,21)]
-        G0, G1, G2, G3, G4, G5, G6, G7, G8, G9 = [2.64 + 1.0/i for i in range(20, 30)]
-
-        self.assertAlmostEqual(eval(str(expr)), eval(str(expr_exp)))
-        self.assertAlmostEqual(eval(str(expr)), eval(str(expr_red)))
-        self.assertEqual(expr.ops(), 314)
-        self.assertEqual(expr_exp.ops(), 314)
-        self.assertEqual(expr_red.ops(), 120)
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestReduceGIP('testReduceGIP'))
-
diff --git a/test/unit/symbolics/testreduceoperations.py b/test/unit/symbolics/testreduceoperations.py
deleted file mode 100755
index 777752f..0000000
--- a/test/unit/symbolics/testreduceoperations.py
+++ /dev/null
@@ -1,397 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.symbolics import *
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-
-class TestReduceOperations(unittest.TestCase):
-
-    def testReduceOperations(self):
-
-        f_1 = format["float"](1)
-        f_2 = format["float"](2)
-
-        # Aux. variables
-        f2 = FloatValue(2)
-        f0_5 = FloatValue(0.5)
-        f1 = FloatValue(1.0)
-        fm1 = FloatValue(-1.0)
-
-        x = Symbol("x", GEO)
-        y = Symbol("y", GEO)
-        z = Symbol("z", GEO)
-        a = Symbol("a", GEO)
-        b = Symbol("b", GEO)
-        c = Symbol("c", GEO)
-        d = Symbol("d", GEO)
-
-        # Simple expand and reduce simple float and symbol objects
-        fx2 = f2.expand()
-        xx = x.expand()
-
-        fr2 = fx2.reduce_ops()
-        xr = xx.reduce_ops()
-
-#        print "\nTest float and symbol"
-#        print "f0:  '%s'" %f2
-#        print "fx0: '%s'" %fx2
-#        print "fr0: '%s'" %fr2
-#        print
-#        print "x:  '%s'" %x
-#        print "xx: '%s'" %xx
-#        print "xr: '%s'" %xr
-
-        self.assertEqual(f2, fr2)
-        self.assertEqual(x, xr)
-
-        # Test product
-        p0 = f2*x
-        p1 = y*x
-        p2 = x*f2/y
-        p3 = x*Sum([x, y])
-
-        px0 = p0.expand()
-        px1 = p1.expand()
-
-        pr0 = px0.reduce_ops()
-        pr1 = px1.reduce_ops()
-
-#        print "\nTest product"
-#        print "p0:  '%s'" %p0
-#        print "px0: '%s'" %px0
-#        print "pr0: '%s'" %pr0
-#        print
-#        print "p1:  '%s'" %p1
-#        print "px1: '%s'" %px1
-#        print "pr1: '%s'" %pr1
-
-        self.assertEqual(p0, pr0)
-        self.assertEqual(p1, pr1)
-
-        # Test fraction
-        F0 = Fraction(p0, y)
-        F1 = Fraction(x, p0)
-        F2 = Fraction(p0, p1)
-        F3 = Fraction(Sum([x*x, x*y]), y)
-        F4 = Fraction(Sum([f2*x, x*y]), a)
-
-        Fx0 = F0.expand()
-        Fx1 = F1.expand()
-        Fx2 = F2.expand()
-        Fx3 = F3.expand()
-        Fx4 = F4.expand()
-
-        Fr0 = Fx0.reduce_ops()
-        Fr1 = Fx1.reduce_ops()
-        Fr2 = Fx2.reduce_ops()
-        Fr3 = Fx3.reduce_ops()
-        Fr4 = Fx4.reduce_ops()
-
-#        print "\nTest fraction"
-#        print "F0:  '%s'" %F0
-#        print "Fx0: '%s'" %Fx0
-#        print "Fr0: '%s'" %Fr0
-#        print
-#        print "F1:  '%s'" %F1
-#        print "Fx1: '%s'" %Fx1
-#        print "Fr1: '%s'" %Fr1
-#        print
-#        print "F2:  '%s'" %F2
-#        print "Fx2: '%s'" %Fx2
-#        print "Fr2: '%s'" %Fr2
-#        print
-#        print "F3:  '%s'" %F3
-#        print "Fx3: '%s'" %Fx3
-#        print "Fr3: '%s'" %Fr3
-#        print
-#        print "F4:  '%s'" %F4
-#        print "Fx4: '%s'" %Fx4
-#        print "Fr4: '%s'" %Fr4
-
-        self.assertEqual(Fr0, F0)
-        self.assertEqual(Fr1, f0_5)
-        self.assertEqual(Fr2, Fraction(f2, y))
-        self.assertEqual(str(Fr3), "x*(%s + x/y)" % f_1)
-        self.assertEqual(str(Fr4), "x*(%s + y)/a" % f_2)
-
-        # Test sum
-        # TODO: Here we might have to add additional tests
-        S0 = Sum([x, y])
-        S1 = Sum([p0, p1])
-        S2 = Sum([x, p1])
-        S3 = Sum([p0, f2*y])
-        S4 = Sum([f2*p1, z*p1])
-        S5 = Sum([x, x*x, x*x*x])
-        S6 = Sum([a*x*x, b*x*x*x, c*x*x, d*x*x*x])
-        S7 = Sum([p0, p1, x*x, f2*z, y*z])
-        S8 = Sum([a*y, b*y, x*x*x*y, x*x*x*z])
-        S9 = Sum([a*y, b*y, c*y, x*x*x*y, f2*x*x, x*x*x*z])
-        S10 = Sum([f2*x*x*y, x*x*y*z])
-        S11 = Sum([f2*x*x*y*y, x*x*y*y*z])
-        S12 = Sum([f2*x*x*y*y, x*x*y*y*z, a*z, b*z, c*z])
-        S13 = Sum([Fraction(f1, x), Fraction(f1, y)])
-        S14 = Sum([Fraction(fm1, x), Fraction(fm1, y)])
-        S15 = Sum([Fraction(f2, x), Fraction(f2, x)])
-        S16 = Sum([Fraction(f2*x, y*z), Fraction(f0_5, y*z)])
-        S17 = Sum([(f2*x*y)/a, (x*y*z)/b])
-        S18 = Sum([(x*y)/a, (x*z)/a, f2/a, (f2*x*y)/a])
-        S19 = Sum([(f2*x)/a, (x*y)/a, z*x])
-        S20 = Product([ Sum([x, y]), Fraction(a, b), Fraction( Product([c, d]), z ) ])
-        S21 = Sum([a*x, b*x, c*x, x*y, x*z, f2*y, a*y, b*y, f2*z, a*z, b*z])
-        S22 = Sum([ FloatValue(0.5)*x/y, FloatValue(-0.5)*x/y ])
-        S23 = Sum([x*y*z, x*y*y*y*z*z*z, y*y*y*z*z*z*z, z*z*z*z*z])
-
-
-        Sx0 = S0.expand()
-        Sx1 = S1.expand()
-        Sx2 = S2.expand()
-        Sx3 = S3.expand()
-        Sx4 = S4.expand()
-        Sx5 = S5.expand()
-        Sx6 = S6.expand()
-        Sx7 = S7.expand()
-        Sx8 = S8.expand()
-        Sx9 = S9.expand()
-        Sx10 = S10.expand()
-        Sx11 = S11.expand()
-        Sx12 = S12.expand()
-        Sx13 = S13.expand()
-        Sx14 = S14.expand()
-        Sx15 = S15.expand()
-        Sx16 = S16.expand()
-        Sx17 = S17.expand()
-        Sx18 = S18.expand()
-        Sx19 = S19.expand()
-        Sx20 = S20.expand()
-        Sx21 = S21.expand()
-        Sx22 = S22.expand()
-        Sx23 = S23.expand()
-
-        Sr0 = Sx0.reduce_ops()
-        Sr1 = Sx1.reduce_ops()
-        Sr2 = Sx2.reduce_ops()
-        Sr3 = Sx3.reduce_ops()
-        Sr4 = Sx4.reduce_ops()
-        Sr5 = Sx5.reduce_ops()
-        Sr6 = Sx6.reduce_ops()
-        Sr7 = Sx7.reduce_ops()
-        Sr8 = Sx8.reduce_ops()
-        Sr9 = Sx9.reduce_ops()
-        Sr10 = Sx10.reduce_ops()
-        Sr11 = Sx11.reduce_ops()
-        Sr12 = Sx12.reduce_ops()
-        Sr13 = Sx13.reduce_ops()
-        Sr14 = Sx14.reduce_ops()
-        Sr15 = Sx15.reduce_ops()
-        Sr16 = Sx16.reduce_ops()
-        Sr17 = Sx17.reduce_ops()
-        Sr18 = Sx18.reduce_ops()
-        Sr19 = Sx19.reduce_ops()
-        Sr20 = Sx20.reduce_ops()
-        Sr21 = Sx21.reduce_ops()
-        Sr22 = Sx22.reduce_ops()
-        Sr23 = Sx23.reduce_ops()
-
-#        print "Test sum"
-#        print "S0:  '%s'" %S0
-#        print "Sx0: '%s'" %Sx0
-#        print "Sr0: '%s'" %Sr0
-#        print
-#        print "S1:  '%s'" %S1
-#        print "Sx1: '%s'" %Sx1
-#        print "Sr1: '%s'" %Sr1
-#        print
-#        print "S2:  '%s'" %S2
-#        print "Sx2: '%s'" %Sx2
-#        print "Sr2: '%s'" %Sr2
-#        print
-#        print "S3:  '%s'" %S3
-#        print "Sx3: '%s'" %Sx3
-#        print "Sr3: '%s'" %Sr3
-#        print
-#        print "S4:  '%s'" %S4
-#        print "Sx4: '%s'" %Sx4
-#        print "Sr4: '%s'" %Sr4
-#        print
-#        print "S5:  '%s'" %S5
-#        print "Sx5: '%s'" %Sx5
-#        print "Sr5: '%s'" %Sr5
-#        print
-#        print "S6:  '%s'" %S6
-#        print "Sx6: '%s'" %Sx6
-#        print "Sr6: '%s'" %Sr6
-#        print
-#        print "S7:  '%s'" %S7
-#        print "Sx7: '%s'" %Sx7
-#        print "Sr7: '%s'" %Sr7
-#        print
-#        print "S8:  '%s'" %S8
-#        print "Sx8: '%s'" %Sx8
-#        print "Sr8: '%s'" %Sr8
-#        print
-#        print "S9:  '%s'" %S9
-#        print "Sx9: '%s'" %Sx9
-#        print "Sr9: '%s'" %Sr9
-#        print
-#        print "S10:  '%s'" %S10
-#        print "Sx10: '%s'" %Sx10
-#        print "Sr10: '%s'" %Sr10
-#        print
-#        print "S11:  '%s'" %S11
-#        print "Sx11: '%s'" %Sx11
-#        print "Sr11: '%s'" %Sr11
-#        print
-#        print "S12:  '%s'" %S12
-#        print "Sx12: '%s'" %Sx12
-#        print "Sr12: '%s'" %Sr12
-#        print
-#        print "S13:  '%s'" %S13
-#        print "Sx13: '%s'" %Sx13
-#        print "Sr13: '%s'" %Sr13
-#        print
-#        print "S14:  '%s'" %S14
-#        print "Sx14: '%s'" %Sx14
-#        print "Sr14: '%s'" %Sr14
-#        print
-#        print "S15:  '%s'" %S15
-#        print "Sx15: '%s'" %Sx15
-#        print "Sr15: '%s'" %Sr15
-#        print
-#        print "S16:  '%s'" %S16
-#        print "Sx16: '%s'" %Sx16
-#        print "Sr16: '%s'" %Sr16
-#        print
-#        print "S17:  '%s'" %S17
-#        print "Sx17: '%s'" %Sx17
-#        print "Sr17: '%s'" %Sr17
-#        print
-#        print "S18:  '%s'" %S18
-#        print "Sx18: '%s'" %Sx18
-#        print "Sr18: '%s'" %Sr18
-#        print
-#        print "S19:  '%s'" %S19
-#        print "Sx19: '%s'" %Sx19
-#        print "Sr19: '%s'" %Sr19
-#        print
-#        print "S20:  '%s'" %S20
-#        print "Sx20: '%s'" %Sx20
-#        print "Sr20: '%s'" %Sr20
-#        print
-#        print "S21:  '%s'" %S21
-#        print "Sx21: '%s'" %Sx21
-#        print "Sr21: '%s'" %Sr21
-#        print
-#        print "S22:  '%s'" %S22
-#        print "Sx22: '%s'" %Sx22
-#        print "Sr22: '%s'" %Sr22
-#        print
-#        print "S23:  '%s'" %S23
-#        print "Sx23: '%s'" %Sx23
-#        print "Sr23: '%s'" %Sr23
-
-        self.assertEqual(Sr0, S0)
-        self.assertEqual(str(Sr1), "x*(%s + y)" % f_2)
-        # TODO: Should this be (x + x*y)?
-        self.assertEqual(str(Sr2), "x*(%s + y)" % f_1)
-#        self.assertEqual(str(Sr2), "(x + x*y)")
-        self.assertEqual(str(Sr3), "%s*(x + y)" % f_2)
-        self.assertEqual(str(Sr4), "x*y*(%s + z)" % f_2)
-        self.assertEqual(str(Sr5), "x*(%s + x*(%s + x))" % (f_1, f_1))
-        self.assertEqual(str(Sr6), "x*x*(a + c + x*(b + d))")
-        self.assertEqual(str(Sr7), "(x*(%s + x + y) + z*(%s + y))" % (f_2, f_2))
-        self.assertEqual(str(Sr8), "(x*x*x*(y + z) + y*(a + b))")
-        self.assertEqual(str(Sr9), "(x*x*(%s + x*(y + z)) + y*(a + b + c))" % f_2)
-        self.assertEqual(str(Sr10), "x*x*y*(%s + z)" % f_2)
-        self.assertEqual(str(Sr11), "x*x*y*y*(%s + z)" % f_2)
-        self.assertEqual(str(Sr12), "(x*x*y*y*(%s + z) + z*(a + b + c))" % f_2)
-        self.assertEqual(str(Sr13), "(%s/x + %s/y)" % (f_1, f_1))
-        self.assertEqual(str(Sr14), "(-%s/x-%s/y)" % (f_1, f_1))
-        self.assertEqual(str(Sr15), "%s/x" % format["float"](4))
-        self.assertEqual(str(Sr16), "(%s + %s*x)/(y*z)" % (format["float"](0.5), f_2))
-        self.assertEqual(str(Sr17), "x*y*(%s/a + z/b)" % f_2)
-        self.assertEqual(str(Sr18), "(%s + x*(z + %s*y))/a" % (f_2, format["float"](3)))
-        self.assertEqual(str(Sr19), "x*(z + (%s + y)/a)" % f_2)
-        self.assertEqual(str(Sr20), "a*c*d*(x + y)/(b*z)")
-        self.assertEqual(str(Sr21), "(x*(a + b + c + y + z) + y*(%s + a + b) + z*(%s + a + b))" % (f_2, f_2))
-        self.assertEqual(str(Sr22), "%s" % format["float"](0))
-        self.assertEqual(str(Sr23), "(x*y*z + z*z*z*(y*y*y*(x + z) + z*z))")
-
-        self.assertEqual(S0.ops(), 1)
-        self.assertEqual(Sr0.ops(), 1)
-        self.assertEqual(S1.ops(), 3)
-        self.assertEqual(Sr1.ops(), 2)
-        self.assertEqual(S2.ops(), 2)
-        self.assertEqual(Sr2.ops(), 2)
-        self.assertEqual(S3.ops(), 3)
-        self.assertEqual(Sr3.ops(), 2)
-        self.assertEqual(S4.ops(), 5)
-        self.assertEqual(Sr4.ops(), 3)
-        self.assertEqual(S5.ops(), 5)
-        self.assertEqual(Sr5.ops(), 4)
-        self.assertEqual(S6.ops(), 13)
-        self.assertEqual(Sr6.ops(), 6)
-        self.assertEqual(S7.ops(), 9)
-        self.assertEqual(Sr7.ops(), 6)
-        self.assertEqual(S8.ops(), 11)
-        self.assertEqual(Sr8.ops(), 7)
-        self.assertEqual(S9.ops(), 16)
-        self.assertEqual(Sr9.ops(), 9)
-        self.assertEqual(S10.ops(), 7)
-        self.assertEqual(Sr10.ops(), 4)
-        self.assertEqual(S11.ops(), 9)
-        self.assertEqual(Sr11.ops(), 5)
-        self.assertEqual(S12.ops(), 15)
-        self.assertEqual(Sr12.ops(), 9)
-        self.assertEqual(S13.ops(), 3)
-        self.assertEqual(Sr13.ops(), 3)
-        self.assertEqual(S14.ops(), 3)
-        self.assertEqual(Sr14.ops(), 3)
-        self.assertEqual(S15.ops(), 3)
-        self.assertEqual(Sr15.ops(), 1)
-        self.assertEqual(S16.ops(), 6)
-        self.assertEqual(Sr16.ops(), 4)
-        self.assertEqual(S17.ops(), 7)
-        self.assertEqual(Sr17.ops(), 5)
-        self.assertEqual(S18.ops(), 11)
-        self.assertEqual(Sr18.ops(), 5)
-        self.assertEqual(S19.ops(), 7)
-        self.assertEqual(Sr19.ops(), 4)
-        self.assertEqual(S20.ops(), 6)
-        self.assertEqual(Sr20.ops(), 6)
-        self.assertEqual(S21.ops(), 21)
-        self.assertEqual(Sr21.ops(), 13)
-        self.assertEqual(S23.ops(), 21)
-        self.assertEqual(Sr23.ops(), 12)
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestReduceOperations('testReduceOperations'))
-
diff --git a/test/unit/symbolics/testreducevartype.py b/test/unit/symbolics/testreducevartype.py
deleted file mode 100755
index 1f47e64..0000000
--- a/test/unit/symbolics/testreducevartype.py
+++ /dev/null
@@ -1,189 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.symbolics import *
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-
-class TestReduceVarType(unittest.TestCase):
-
-    def testReduceVarType(self):
-        f1 = FloatValue(1)
-        f2 = FloatValue(2)
-        f3 = FloatValue(3)
-        f5 = FloatValue(5)
-        fm4 = FloatValue(-4)
-
-        B0 = Symbol("B0",BASIS)
-        B1 = Symbol("B1", BASIS)
-        Bm4 = Product([fm4, B1])
-        B5 = Product([f5, B0])
-
-        I0 = Symbol("I0", IP)
-        I1 = Symbol("I1", IP)
-        I2 = Symbol("I2", IP)
-        I5 = Product([f5, I0])
-
-        G0 = Symbol("G0", GEO)
-        G1 = Symbol("G1", GEO)
-        G2 = Symbol("G2", GEO)
-        G3 = Product([f3, G0])
-
-
-        C0 = Symbol("C0", CONST)
-        C2 = Product([f2, C0])
-
-        p0 = Product([B0,I5])
-        p1 = Product([B0,B1])
-
-        S0 = Sum([B0, I5])
-        S1 = Sum([p0, p1])
-        S2 = Sum([B0, B1])
-        S3 = Sum([B0, p0])
-        S4 = Sum([f5, p0])
-        S5 = Sum([I0, G0])
-
-        F0 = Fraction(B0,I5).expand()
-        F1 = Fraction(p1,I5).expand()
-        F2 = Fraction(G3,S2).expand()
-        F3 = Fraction(G3,S3).expand()
-        F4 = Fraction(I1, Sum([I1, I0]))
-        F5 = Fraction(S5, I1)
-        F6 = Fraction(I0,
-              Sum([
-                Fraction(Sum([I0,I1]), Sum([G0,G1])),
-                Fraction(Sum([I1,I2]), Sum([G1,G2])),
-              ]))
-
-        r0 = B0.reduce_vartype(BASIS)
-        r1 = B0.reduce_vartype(CONST)
-
-        rp0 = p0.reduce_vartype(BASIS)
-        rp1 = p0.reduce_vartype(IP)
-        rp2 = p1.reduce_vartype(BASIS)
-        rp3 = p1.reduce_vartype(GEO)
-
-        rs0 = S0.reduce_vartype(BASIS)
-        rs1 = S0.reduce_vartype(IP)
-        rs2 = S1.reduce_vartype(BASIS)
-        rs3 = S4.reduce_vartype(BASIS)
-        rs4 = S4.reduce_vartype(CONST)
-
-        rf0 = F0.reduce_vartype(BASIS)
-        rf1 = F1.reduce_vartype(BASIS)
-        rf2 = F0.reduce_vartype(IP)
-        rf3 = F2.reduce_vartype(BASIS)
-        rf4 = F3.reduce_vartype(BASIS)
-        rf5 = F4.reduce_vartype(IP)
-        rf6 = F5.reduce_vartype(IP)
-        rf7 = F6.reduce_vartype(IP)
-#        print
-#        print "%s, red(BASIS): ('%s', '%s')" %(B0, r0[0][0], r0[0][1])
-#        print "%s, red(CONST): ('%s', '%s')" %(B0, r1[0][0], r1[0][1])
-
-#        print "\n%s, red(BASIS): ('%s', '%s')" %(p0, rp0[0][0], rp0[0][1])
-#        print "%s, red(IP):    ('%s', '%s')" %(p0, rp1[0][0], rp1[0][1])
-#        print "%s, red(BASIS): ('%s', '%s')" %(p1, rp2[0][0], rp2[0][1])
-#        print "%s, red(CONST): ('%s', '%s')" %(p1, rp3[0][0], rp3[0][1])
-
-#        print "\n%s, red(BASIS): ('%s', '%s')" %(S0, rs0[0][0], rs0[0][1])
-#        print "%s, red(IP):    ('%s', '%s')" %(S0, rs1[0][0], rs1[0][1])
-#        print "%s, red(BASIS): '%s', '%s'" %(S1, rs2[0][0], rs2[0][1])
-#        print "%s, red(BASIS): '%s', '%s'" %(S4, rs3[0][0], rs3[0][1])
-#        print "%s, red(BASIS): '%s'" %(S4, rs4[0][0])
-
-#        print "\nrf0: %s, red(BASIS): ('%s', '%s')" %(F0, rf0[0][0], rf0[0][1])
-#        print "rf1: %s, red(BASIS): ('%s', '%s')" %(F1, rf1[0][0], rf1[0][1])
-#        print "rf2: %s, red(IP): ('%s', '%s')" %(F0, rf2[0][0], rf2[0][1])
-#        print "rf3: %s, red(BASIS): ('%s', '%s')" %(F2, rf3[0][0], rf3[0][1])
-#        print "rf4: %s, red(BASIS): ('%s', '%s')" %(F3, rf4[0][0], rf4[0][1])
-#        print "rf5: %s, red(IP): ('%s', '%s')" %(F4, rf5[0][0], rf5[0][1])
-#        print "rf6: %s, red(IP): ('%s', '%s') + ('%s', '%s')" %(F5, rf6[0][0], rf6[0][1], rf6[1][0], rf6[1][1])
-#        print "rf7: %s, red(IP): ('%s', '%s')" %(F6, rf7[0][0], rf7[0][1])
-
-        self.assertEqual([(B0, f1)], r0)
-        self.assertEqual([((), B0)], r1)
-
-        self.assertEqual([(B0, I5)], rp0)
-        self.assertEqual([(I0, B5)],  rp1)
-        self.assertEqual([(p1, f1)], rp2)
-        self.assertEqual([((), p1)],  rp3)
-
-        self.assertEqual(((), I5), rs0[0])
-        self.assertEqual((B0, f1), rs0[1])
-        self.assertEqual((I0, f5), rs1[1])
-        self.assertEqual(((), B0), rs1[0])
-        self.assertEqual((
-        Product([B0, B1]), f1), rs2[1])
-        self.assertEqual((B0, I5), rs2[0])
-        self.assertEqual(((), f5), rs3[0])
-        self.assertEqual((B0, I5), rs3[1])
-        self.assertEqual((f5,
-        Sum([f1,
-        Product([B0, I0])])), rs4[0])
-
-        self.assertEqual([(B0, Fraction(FloatValue(0.2), I0))], rf0)
-        self.assertEqual([(
-        Product([B0, B1]), Fraction(FloatValue(0.2), I0))], rf1)
-        self.assertEqual( [( Fraction(f1, I0),
-        Product([FloatValue(0.2), B0]) )], rf2)
-        self.assertEqual([(Fraction(f1, S2), G3)], rf3)
-        self.assertEqual( [( Fraction(f1, B0), Fraction( G3, Sum([I5, f1])))], rf4)
-        self.assertEqual(F4, rf5[0][0])
-        self.assertEqual(FloatValue(1), rf5[0][1])
-        self.assertEqual(Fraction(I0,I1), rf6[1][0])
-        self.assertEqual(f1, rf6[1][1])
-        self.assertEqual(Fraction(f1,I1), rf6[0][0])
-        self.assertEqual(G0, rf6[0][1])
-        self.assertEqual(F6, rf7[0][0])
-        self.assertEqual(f1, rf7[0][1])
-
-        expr = Sum([Symbol('W1', GEO), Fraction(Symbol('det', GEO), Sum([Symbol('F0', IP), Symbol('K_11', GEO)]))])
-        red = expr.expand().reduce_vartype(IP)
-        vals = []
-        for ip in red:
-            ip_dec, geo = ip
-            if ip_dec and geo:
-                vals.append(Product([ip_dec, geo]))
-            elif geo:
-                vals.append(geo)
-            elif ip_dec:
-                vals.append(ip_dec)
-        comb = Sum(vals).expand()
-        K_11 = 1.4
-        F0   = 1.5
-        W1   = 1.9
-        det  = 2.1
-        self.assertAlmostEqual(eval(str(expr)), eval(str(comb)))
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestReduceVarType('testReduceVarType'))
-
diff --git a/test/unit/symbolics/testsum.py b/test/unit/symbolics/testsum.py
deleted file mode 100755
index 54347ff..0000000
--- a/test/unit/symbolics/testsum.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.symbolics import *
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-
-class TestSum(unittest.TestCase):
-
-    def testSum(self):
-        "Test simple sum instance."
-
-        f_0 = format["float"](0)
-        f_1 = format["float"](1)
-        f_2 = format["float"](2)
-        f_3 = format["float"](3)
-
-        f0 = FloatValue(-2.0)
-        f1 = FloatValue(3.0)
-        f2 = FloatValue(0)
-        s0 = Symbol("x", BASIS)
-        s1 = Symbol("y", GEO)
-        s2 = Symbol("z", GEO)
-
-        S0 = Sum([])
-        S1 = Sum([s0])
-        S2 = Sum([s0, s1])
-        S3 = Sum([s0, s0])
-        S4 = Sum([f0, s0])
-        S5 = Sum([s0, f0, s0])
-        S6 = Sum([s0, f0, s0, f1])
-        S7 = Sum([s0, f0, s1, f2])
-        S8 = Sum([s0, f1, s0])
-        S9 = Sum([f0, f0, f0, f1, f1, s1])
-        S10 = Sum([s1, s0])
-
-#        print "\nTesting Sum"
-#        print "\nS0: [] '%s'" % (S0)
-#        print "\nS1: %s = '%s'" %(s0, S1)
-#        print "\nS2: %s + %s  = '%s'" %(s0, s1, S2)
-#        print "\nS3: %s + %s  = '%s'" %(s0, s0, S3)
-#        print "\nS4: %s + %s  = '%s'" %(f0, s0, S4)
-#        print "\nS5: %s + %s + %s = '%s'" %(s0, f0, s0, S5)
-#        print "\nS6: %s + %s + %s + %s = '%s'" %(s0, f0, s0, f1, S6)
-#        print "\nS7: %s + %s + %s + %s = '%s'" %(s0, f0, s1, f2, S7)
-#        print "\nS8: %s + %s + %s = '%s'" %(s0, f1, s0, S8)
-#        print "\nS9: %s + %s + %s + %s + %s + %s = '%s'" %(f0, f0, f0, f1, f1, s1, S9)
-#        print "\nS10: %s + %s  = '%s'" %(s1, s0, S10)
-
-        self.assertEqual(repr(S0), "Sum([FloatValue(%s)])" % f_0)
-        self.assertEqual(S0.t, CONST)
-        self.assertEqual(repr(S1), "Sum([Symbol('x', BASIS)])")
-#        self.assertEqual(repr(S4), "Sum([Symbol('x', BASIS), FloatValue(-2)])")
-        self.assertEqual(repr(S4), "Sum([FloatValue(-%s), Symbol('x', BASIS)])" % f_2)
-        self.assertEqual(repr(S9), "Sum([Symbol('y', GEO)])")
-
-        self.assertEqual(str(S2), "(x + y)")
-        self.assertEqual(str(S3), "(x + x)")
-        self.assertEqual(str(S5), "(x + x-%s)" % f_2)
-        self.assertEqual(str(S6), "(%s + x + x)" % f_1)
-        self.assertEqual(str(S7), "(x + y-%s)" % f_2)
-        self.assertEqual(str(S8), "(%s + x + x)" % f_3)
-        self.assertEqual(str(S9), "y")
- 
-        self.assertEqual(S2 == S2, True)
-        self.assertEqual(S2 == S3, False)
-        self.assertEqual(S5 != S6, True)
-        self.assertEqual(S2 == S10, True)
-
-        self.assertEqual(S0.ops(), 0)
-        self.assertEqual(S1.ops(), 0)
-        self.assertEqual(S2.ops(), 1)
-        self.assertEqual(S3.ops(), 1)
-        self.assertEqual(S4.ops(), 1)
-        self.assertEqual(S5.ops(), 2)
-        self.assertEqual(S6.ops(), 2)
-        self.assertEqual(S7.ops(), 2)
-        self.assertEqual(S8.ops(), 2)
-        self.assertEqual(S9.ops(), 0)
-
-        # Test hash
-        l = [S2]
-        d = {S2:0}
-
-        self.assertEqual(S2 in l, True)
-        self.assertEqual(S2 in d, True)
-        self.assertEqual(S10 in l, True)
-        self.assertEqual(S10 in d, True)
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestSum('testSum'))
-
diff --git a/test/unit/symbolics/testsumoperators.py b/test/unit/symbolics/testsumoperators.py
deleted file mode 100755
index 972967d..0000000
--- a/test/unit/symbolics/testsumoperators.py
+++ /dev/null
@@ -1,102 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.reduce_operations import operation_count, expand_operations, reduce_operations
-from ffc.quadrature.symbolics import *
-from ffc.quadrature.sumobj import _group_fractions
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-from ffc.log import error, push_level, pop_level, CRITICAL
-
-class TestSumOperators(unittest.TestCase):
-
-    def testSumOperators(self):
-        "Test binary operators"
-
-        f_0_5 = format["float"](0.5)
-        f_1 = format["float"](1)
-        f_2 = format["float"](2)
-        f_3 = format["float"](3)
-        f_6 = format["float"](6)
-        f2 = FloatValue(2.0)
-        fm3 = FloatValue(-3.0)
-
-        x = Symbol("x", GEO)
-        y = Symbol("y", GEO)
-        z = Symbol("z", GEO)
-
-        p0 = Product([f2, x])
-        p1 = Product([x, y])
-
-        S0 = Sum([x, y])
-        S1 = Sum([x, z])
-
-        F0 = Fraction(p0, y)
-
-        # Test Sum '+'
-        self.assertEqual(str(S0 + f2), '(%s + x + y)' % f_2)
-        self.assertEqual(str(S0 + x), '(x + x + y)')
-        self.assertEqual(str(S0 + p0), '(x + y + %s*x)' % f_2)
-        self.assertEqual(str(S0 + S0), '(x + x + y + y)')
-        self.assertEqual(str(S0 + F0), '(x + y + %s*x/y)' % f_2)
-
-        # Test Sum '-'
-        self.assertEqual(str(S0 - f2), '(x + y-%s)' % f_2)
-        self.assertEqual(str(S0 - fm3), '(x + y + %s)' % f_3)
-        self.assertEqual(str(S0 - x), '(x + y - x)')
-        self.assertEqual(str(S0 - p0), '(x + y-%s*x)' % f_2)
-        self.assertEqual(str(S0 - Product([fm3, p0])), '(x + y + %s*x)' % f_6)
-        self.assertEqual(str(S0 - S0), '(x + y - (x + y))')
-        self.assertEqual(str(S0 - F0), '(x + y - %s*x/y)' % f_2)
-
-        # Test Sum '*'
-        self.assertEqual(str(S0 * f2), '(%s*x + %s*y)' % (f_2, f_2))
-        self.assertEqual(str(S0 * x), '(x*x + x*y)')
-        self.assertEqual(str(S0 * p0), '(%s*x*x + %s*x*y)' % (f_2, f_2))
-        self.assertEqual(str(S0 * S0), '(%s*x*y + x*x + y*y)' % f_2)
-        self.assertEqual(str(S0 * F0), '(%s*x + %s*x*x/y)' % (f_2, f_2))
-
-        # Test Sum '/'
-        self.assertEqual(str(S0 / f2), '(%s*x + %s*y)' % (f_0_5, f_0_5))
-        self.assertEqual(str(S0 / x), '(%s + y/x)' % f_1)
-        self.assertEqual(str(S0 / p0), '(%s + %s*y/x)' % (f_0_5, f_0_5))
-        self.assertEqual(str(S0 / p1), '(%s/x + %s/y)' % (f_1, f_1))
-        self.assertEqual(str(S0 / S0), '(x + y)/(x + y)')
-        self.assertEqual(str(S0 / S1), '(x + y)/(x + z)')
-        # Silence output
-        push_level(CRITICAL)
-        self.assertRaises(Exception, S0.__truediv__, FloatValue(0))
-        self.assertRaises(Exception, S0.__truediv__, F0)
-        pop_level()
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestSumOperators('testSumOperators'))
-
diff --git a/test/unit/symbolics/testsymbol.py b/test/unit/symbolics/testsymbol.py
deleted file mode 100755
index 5a7abd7..0000000
--- a/test/unit/symbolics/testsymbol.py
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.symbolics import *
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-
-class TestSymbol(unittest.TestCase):
-
-    def testSymbol(self):
-        "Test simple symbol instance."
-
-        s0 = Symbol("x", BASIS)
-        s1 = Symbol("y", IP)
-        s2 = Symbol("z", GEO)
-        s3 = Symbol("z", GEO)
-        s4 = Symbol("z", IP)
-
-#        print "\nTesting Symbols"
-#        print "s0: '%s'" %s0
-#        print "s1: '%s'" %s1
-#        print "s2: '%s'" %s2
-#        print "s3: '%s'" %s3
-#        print "s4: '%s'" %s4
-
-        self.assertEqual(repr(s0), "Symbol('x', BASIS)")
-        self.assertEqual(repr(s1), "Symbol('y', IP)")
-        self.assertEqual(repr(s2), "Symbol('z', GEO)")
-        self.assertEqual(repr(s4), "Symbol('z', IP)")
-
-        self.assertEqual(s2 == s3, True)
-        self.assertEqual(s2 == s1, False)
-        self.assertEqual(s2 == s4, False)
-        self.assertEqual(s2 != s3, False)
-        self.assertEqual(s2 != s1, True)
-
-        self.assertEqual(s0 < s1, True)
-        self.assertEqual(s4 > s1, True)
-
-        self.assertEqual(s0.ops(), 0)
-        self.assertEqual(s1.ops(), 0)
-        self.assertEqual(s2.ops(), 0)
-        self.assertEqual(s3.ops(), 0)
-        self.assertEqual(s4.ops(), 0)
-
-        # Test hash
-        l = [s0]
-        d = {s0:0}
-        s5 = Symbol('x', BASIS)
-
-        self.assertEqual(s0 in l, True)
-        self.assertEqual(s0 in d, True)
-        self.assertEqual(s5 in l, True)
-        self.assertEqual(s5 in d, True)
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestSymbol('testSymbol'))
-
diff --git a/test/unit/symbolics/testsymboloperators.py b/test/unit/symbolics/testsymboloperators.py
deleted file mode 100755
index 9957f1e..0000000
--- a/test/unit/symbolics/testsymboloperators.py
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2010 Kristian B. Oelgaard
-#
-# This file is part of FFC.
-#
-# FFC is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# FFC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# First added:  2010-01-06
-# Last changed: 2010-02-01
-
-# Pyhton modules
-import unittest
-import time
-
-# FFC modules
-from ffc.quadrature.reduce_operations import operation_count, expand_operations, reduce_operations
-from ffc.quadrature.symbolics import *
-from ffc.quadrature.sumobj import _group_fractions
-from ffc.cpp import format, set_float_formatting
-from ffc.parameters import FFC_PARAMETERS
-set_float_formatting(FFC_PARAMETERS['precision'])
-from ffc.log import error, push_level, pop_level, CRITICAL
-
-class TestSymbolOperators(unittest.TestCase):
-
-    def testSymbolOperators(self):
-        "Test binary operators"
-
-        f_0 = format["float"](0)
-        f_1 = format["float"](1)
-        f_2 = format["float"](2)
-        f_3 = format["float"](3)
-        f_0_5 = format["float"](0.5)
-        f0 = FloatValue(0.0)
-        f2 = FloatValue(2.0)
-        fm1 = FloatValue(-1.0)
-        fm3 = FloatValue(-3.0)
-
-        x = Symbol("x", GEO)
-        y = Symbol("y", GEO)
-        z = Symbol("z", GEO)
-
-        p0 = Product([f2, x])
-        p1 = Product([x, y])
-        p2 = Product([f2, z])
-        p3 = Product([y, x, z])
-
-        S0 = Sum([x, y])
-        S1 = Sum([x, z])
-
-        F0 = Fraction(f2, y)
-        F1 = Fraction(x, y)
-        F2 = Fraction(x, S0)
-        F3 = Fraction(x, y)
-        F4 = Fraction(p0, y)
-        F5 = Fraction(fm3, y)
-
-        # Test Symbol '+'
-        self.assertEqual(str(x + f2), '(%s + x)' % f_2)
-        self.assertEqual(str(x + x), '%s*x' % f_2)
-        self.assertEqual(str(x + y), '(x + y)')
-        self.assertEqual(str(x + p0), '%s*x' % f_3)
-        self.assertEqual(str(x + p1), '(x + x*y)')
-        self.assertEqual(str(x + S0), '(x + x + y)')
-        self.assertEqual(str(x + F0), '(x + %s/y)' % f_2)
-
-        # Test Symbol '-'
-        self.assertEqual(str(x - f2), '(x-%s)' % f_2)
-        self.assertEqual(str(x - x), '%s' % f_0)
-        self.assertEqual(str(x - y), '(x - y)')
-        self.assertEqual(str(x - p0), ' - x')
-        self.assertEqual(str(x - p1), '(x - x*y)')
-        self.assertEqual(str(x - S0), '(x - (x + y))')
-        self.assertEqual(str(x - F5), '(x - -%s/y)' % f_3)
-
-        # Test Symbol '*', only need to test float, symbol and product. Sum and
-        # fraction are handled by 'other'
-        self.assertEqual(str(x*f2), '%s*x' % f_2)
-        self.assertEqual(str(x*y), 'x*y')
-        self.assertEqual(str(x*p1), 'x*x*y')
-
-        # Test Symbol '/'
-        self.assertEqual(str(x/f2), '%s*x' % f_0_5)
-        self.assertEqual(str(x/x), '%s' % f_1)
-        self.assertEqual(str(x/y), 'x/y')
-        self.assertEqual(str(x/S0), 'x/(x + y)')
-        self.assertEqual(str(x/p0), '%s' % f_0_5)
-        self.assertEqual(str(y/p1), '%s/x' % f_1)
-        self.assertEqual(str(z/p0), '%s*z/x' % f_0_5)
-        self.assertEqual(str(z/p1), 'z/(x*y)')
-        # Silence output
-        push_level(CRITICAL)
-        self.assertRaises(Exception, x.__truediv__, F0)
-        self.assertRaises(Exception, y.__truediv__, FloatValue(0))
-        pop_level()
-
-if __name__ == "__main__":
-
-    # Run all returned tests
-    runner = unittest.TextTestRunner()
-    runner.run(TestSymbolOperators('testSymbolOperators'))
-
diff --git a/test/unit/test.py b/test/unit/test.py
index 29dac3b..5c9dc67 100644
--- a/test/unit/test.py
+++ b/test/unit/test.py
@@ -1,6 +1,5 @@
-from misc.test import *
-from symbolics.test import *
-from evaluate_basis.test import *
+# -*- coding: utf-8 -*-
+import pytest
 
 if __name__ == "__main__":
-    unittest.main()
+    pytest.main()
diff --git a/ufc-merge-into-ffc/COPYING.GPL-2 b/ufc-merge-into-ffc/COPYING.GPL-2
deleted file mode 100644
index d159169..0000000
--- a/ufc-merge-into-ffc/COPYING.GPL-2
+++ /dev/null
@@ -1,339 +0,0 @@
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 2, June 1991
-
- Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-                            Preamble
-
-  The licenses for most software are designed to take away your
-freedom to share and change it.  By contrast, the GNU General Public
-License is intended to guarantee your freedom to share and change free
-software--to make sure the software is free for all its users.  This
-General Public License applies to most of the Free Software
-Foundation's software and to any other program whose authors commit to
-using it.  (Some other Free Software Foundation software is covered by
-the GNU Lesser General Public License instead.)  You can apply it to
-your programs, too.
-
-  When we speak of free software, we are referring to freedom, not
-price.  Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-this service if you wish), that you receive source code or can get it
-if you want it, that you can change the software or use pieces of it
-in new free programs; and that you know you can do these things.
-
-  To protect your rights, we need to make restrictions that forbid
-anyone to deny you these rights or to ask you to surrender the rights.
-These restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
-
-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must give the recipients all the rights that
-you have.  You must make sure that they, too, receive or can get the
-source code.  And you must show them these terms so they know their
-rights.
-
-  We protect your rights with two steps: (1) copyright the software, and
-(2) offer you this license which gives you legal permission to copy,
-distribute and/or modify the software.
-
-  Also, for each author's protection and ours, we want to make certain
-that everyone understands that there is no warranty for this free
-software.  If the software is modified by someone else and passed on, we
-want its recipients to know that what they have is not the original, so
-that any problems introduced by others will not reflect on the original
-authors' reputations.
-
-  Finally, any free program is threatened constantly by software
-patents.  We wish to avoid the danger that redistributors of a free
-program will individually obtain patent licenses, in effect making the
-program proprietary.  To prevent this, we have made it clear that any
-patent must be licensed for everyone's free use or not licensed at all.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.
-
-                    GNU GENERAL PUBLIC LICENSE
-   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-  0. This License applies to any program or other work which contains
-a notice placed by the copyright holder saying it may be distributed
-under the terms of this General Public License.  The "Program", below,
-refers to any such program or work, and a "work based on the Program"
-means either the Program or any derivative work under copyright law:
-that is to say, a work containing the Program or a portion of it,
-either verbatim or with modifications and/or translated into another
-language.  (Hereinafter, translation is included without limitation in
-the term "modification".)  Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope.  The act of
-running the Program is not restricted, and the output from the Program
-is covered only if its contents constitute a work based on the
-Program (independent of having been made by running the Program).
-Whether that is true depends on what the Program does.
-
-  1. You may copy and distribute verbatim copies of the Program's
-source code as you receive it, in any medium, provided that you
-conspicuously and appropriately publish on each copy an appropriate
-copyright notice and disclaimer of warranty; keep intact all the
-notices that refer to this License and to the absence of any warranty;
-and give any other recipients of the Program a copy of this License
-along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and
-you may at your option offer warranty protection in exchange for a fee.
-
-  2. You may modify your copy or copies of the Program or any portion
-of it, thus forming a work based on the Program, and copy and
-distribute such modifications or work under the terms of Section 1
-above, provided that you also meet all of these conditions:
-
-    a) You must cause the modified files to carry prominent notices
-    stating that you changed the files and the date of any change.
-
-    b) You must cause any work that you distribute or publish, that in
-    whole or in part contains or is derived from the Program or any
-    part thereof, to be licensed as a whole at no charge to all third
-    parties under the terms of this License.
-
-    c) If the modified program normally reads commands interactively
-    when run, you must cause it, when started running for such
-    interactive use in the most ordinary way, to print or display an
-    announcement including an appropriate copyright notice and a
-    notice that there is no warranty (or else, saying that you provide
-    a warranty) and that users may redistribute the program under
-    these conditions, and telling the user how to view a copy of this
-    License.  (Exception: if the Program itself is interactive but
-    does not normally print such an announcement, your work based on
-    the Program is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole.  If
-identifiable sections of that work are not derived from the Program,
-and can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works.  But when you
-distribute the same sections as part of a whole which is a work based
-on the Program, the distribution of the whole must be on the terms of
-this License, whose permissions for other licensees extend to the
-entire whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program
-with the Program (or with a work based on the Program) on a volume of
-a storage or distribution medium does not bring the other work under
-the scope of this License.
-
-  3. You may copy and distribute the Program (or a work based on it,
-under Section 2) in object code or executable form under the terms of
-Sections 1 and 2 above provided that you also do one of the following:
-
-    a) Accompany it with the complete corresponding machine-readable
-    source code, which must be distributed under the terms of Sections
-    1 and 2 above on a medium customarily used for software interchange; or,
-
-    b) Accompany it with a written offer, valid for at least three
-    years, to give any third party, for a charge no more than your
-    cost of physically performing source distribution, a complete
-    machine-readable copy of the corresponding source code, to be
-    distributed under the terms of Sections 1 and 2 above on a medium
-    customarily used for software interchange; or,
-
-    c) Accompany it with the information you received as to the offer
-    to distribute corresponding source code.  (This alternative is
-    allowed only for noncommercial distribution and only if you
-    received the program in object code or executable form with such
-    an offer, in accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for
-making modifications to it.  For an executable work, complete source
-code means all the source code for all modules it contains, plus any
-associated interface definition files, plus the scripts used to
-control compilation and installation of the executable.  However, as a
-special exception, the source code distributed need not include
-anything that is normally distributed (in either source or binary
-form) with the major components (compiler, kernel, and so on) of the
-operating system on which the executable runs, unless that component
-itself accompanies the executable.
-
-If distribution of executable or object code is made by offering
-access to copy from a designated place, then offering equivalent
-access to copy the source code from the same place counts as
-distribution of the source code, even though third parties are not
-compelled to copy the source along with the object code.
-
-  4. You may not copy, modify, sublicense, or distribute the Program
-except as expressly provided under this License.  Any attempt
-otherwise to copy, modify, sublicense or distribute the Program is
-void, and will automatically terminate your rights under this License.
-However, parties who have received copies, or rights, from you under
-this License will not have their licenses terminated so long as such
-parties remain in full compliance.
-
-  5. You are not required to accept this License, since you have not
-signed it.  However, nothing else grants you permission to modify or
-distribute the Program or its derivative works.  These actions are
-prohibited by law if you do not accept this License.  Therefore, by
-modifying or distributing the Program (or any work based on the
-Program), you indicate your acceptance of this License to do so, and
-all its terms and conditions for copying, distributing or modifying
-the Program or works based on it.
-
-  6. Each time you redistribute the Program (or any work based on the
-Program), the recipient automatically receives a license from the
-original licensor to copy, distribute or modify the Program subject to
-these terms and conditions.  You may not impose any further
-restrictions on the recipients' exercise of the rights granted herein.
-You are not responsible for enforcing compliance by third parties to
-this License.
-
-  7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot
-distribute so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you
-may not distribute the Program at all.  For example, if a patent
-license would not permit royalty-free redistribution of the Program by
-all those who receive copies directly or indirectly through you, then
-the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under
-any particular circumstance, the balance of the section is intended to
-apply and the section as a whole is intended to apply in other
-circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system, which is
-implemented by public license practices.  Many people have made
-generous contributions to the wide range of software distributed
-through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing
-to distribute software through any other system and a licensee cannot
-impose that choice.
-
-This section is intended to make thoroughly clear what is believed to
-be a consequence of the rest of this License.
-
-  8. If the distribution and/or use of the Program is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Program under this License
-may add an explicit geographical distribution limitation excluding
-those countries, so that distribution is permitted only in or among
-countries not thus excluded.  In such case, this License incorporates
-the limitation as if written in the body of this License.
-
-  9. The Free Software Foundation may publish revised and/or new versions
-of the General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-Each version is given a distinguishing version number.  If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and conditions
-either of that version or of any later version published by the Free
-Software Foundation.  If the Program does not specify a version number of
-this License, you may choose any version ever published by the Free Software
-Foundation.
-
-  10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the author
-to ask for permission.  For software which is copyrighted by the Free
-Software Foundation, write to the Free Software Foundation; we sometimes
-make exceptions for this.  Our decision will be guided by the two goals
-of preserving the free status of all derivatives of our free software and
-of promoting the sharing and reuse of software generally.
-
-                            NO WARRANTY
-
-  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
-FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
-OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
-PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
-OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
-TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
-PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
-REPAIR OR CORRECTION.
-
-  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
-REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
-INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
-OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
-TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
-YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
-PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGES.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-convey the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License along
-    with this program; if not, write to the Free Software Foundation, Inc.,
-    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
-    Gnomovision version 69, Copyright (C) year name of author
-    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, the commands you use may
-be called something other than `show w' and `show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary.  Here is a sample; alter the names:
-
-  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
-  `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
-  <signature of Ty Coon>, 1 April 1989
-  Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs.  If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.
diff --git a/ufc-merge-into-ffc/COPYING.LGPL b/ufc-merge-into-ffc/COPYING.LGPL
deleted file mode 100644
index 65c5ca8..0000000
--- a/ufc-merge-into-ffc/COPYING.LGPL
+++ /dev/null
@@ -1,165 +0,0 @@
-                   GNU LESSER GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
-  This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
-  0. Additional Definitions.
-
-  As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
-  "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
-  An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
-  A "Combined Work" is a work produced by combining or linking an
-Application with the Library.  The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
-  The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
-  The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
-  1. Exception to Section 3 of the GNU GPL.
-
-  You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
-  2. Conveying Modified Versions.
-
-  If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
-   a) under this License, provided that you make a good faith effort to
-   ensure that, in the event an Application does not supply the
-   function or data, the facility still operates, and performs
-   whatever part of its purpose remains meaningful, or
-
-   b) under the GNU GPL, with none of the additional permissions of
-   this License applicable to that copy.
-
-  3. Object Code Incorporating Material from Library Header Files.
-
-  The object code form of an Application may incorporate material from
-a header file that is part of the Library.  You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
-   a) Give prominent notice with each copy of the object code that the
-   Library is used in it and that the Library and its use are
-   covered by this License.
-
-   b) Accompany the object code with a copy of the GNU GPL and this license
-   document.
-
-  4. Combined Works.
-
-  You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
-   a) Give prominent notice with each copy of the Combined Work that
-   the Library is used in it and that the Library and its use are
-   covered by this License.
-
-   b) Accompany the Combined Work with a copy of the GNU GPL and this license
-   document.
-
-   c) For a Combined Work that displays copyright notices during
-   execution, include the copyright notice for the Library among
-   these notices, as well as a reference directing the user to the
-   copies of the GNU GPL and this license document.
-
-   d) Do one of the following:
-
-       0) Convey the Minimal Corresponding Source under the terms of this
-       License, and the Corresponding Application Code in a form
-       suitable for, and under terms that permit, the user to
-       recombine or relink the Application with a modified version of
-       the Linked Version to produce a modified Combined Work, in the
-       manner specified by section 6 of the GNU GPL for conveying
-       Corresponding Source.
-
-       1) Use a suitable shared library mechanism for linking with the
-       Library.  A suitable mechanism is one that (a) uses at run time
-       a copy of the Library already present on the user's computer
-       system, and (b) will operate properly with a modified version
-       of the Library that is interface-compatible with the Linked
-       Version.
-
-   e) Provide Installation Information, but only if you would otherwise
-   be required to provide such information under section 6 of the
-   GNU GPL, and only to the extent that such information is
-   necessary to install and execute a modified version of the
-   Combined Work produced by recombining or relinking the
-   Application with a modified version of the Linked Version. (If
-   you use option 4d0, the Installation Information must accompany
-   the Minimal Corresponding Source and Corresponding Application
-   Code. If you use option 4d1, you must provide the Installation
-   Information in the manner specified by section 6 of the GNU GPL
-   for conveying Corresponding Source.)
-
-  5. Combined Libraries.
-
-  You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
-   a) Accompany the combined library with a copy of the same work based
-   on the Library, uncombined with any other library facilities,
-   conveyed under the terms of this License.
-
-   b) Give prominent notice with the combined library that part of it
-   is a work based on the Library, and explaining where to find the
-   accompanying uncombined form of the same work.
-
-  6. Revised Versions of the GNU Lesser General Public License.
-
-  The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
-  Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
-  If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/ufc-merge-into-ffc/LICENSE b/ufc-merge-into-ffc/LICENSE
deleted file mode 100644
index 566870e..0000000
--- a/ufc-merge-into-ffc/LICENSE
+++ /dev/null
@@ -1,48 +0,0 @@
-The header file ufc.h and the UFC Python utils are released into the
-public domain.
-
--------------------------------------------------------------------------
-
-Other files with different licenses attached:
-
-Files: doc/manual/code/Poisson.ufl
-       doc/manual/fenicsmanual.cls
-Copyright: © 2004-2007 Anders Logg <logg at simula.no>
-License:
- This file is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License, version 2, as
- published by the Free Software Foundation.
-
- This file is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA.
-
-Files: doc/manual/algorithm.sty
-Copyright: © 1994 Peter Williams
-License:
- This style file is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later version.
-
- This style file is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this style file; if not, write to the
- Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- Boston, MA  02111-1307, USA.
-
-Files: doc/manual/eps/insertion.eps
-Copyright: © 2004 artofcode LLC, Benicia, CA.
-License:
- This file was drawn with Inkscape by Anders Logg and is released into
- the public domain.
diff --git a/ufc-merge-into-ffc/README.merge b/ufc-merge-into-ffc/README.merge
deleted file mode 100644
index fdf54fd..0000000
--- a/ufc-merge-into-ffc/README.merge
+++ /dev/null
@@ -1,20 +0,0 @@
-Merge of UFC into FFC 2014-02-18 performed using this recipe:
-
-http://stackoverflow.com/questions/1683531/how-to-import-existing-git-repository-into-another
-
-cd <fenics-sources>
-cd ffc
-git remote add ufc ../ufc
-git fetch ufc
-git checkout -b ufc-merge-into-ffc ufc/master
-mkdir ufc-merge-into-ffc
-git mv <all UFC files> ufc-merge-into-ffc
-git commit -a
-git checkout master
-git merge ufc-merge-into-ffc
-git commit -a
-git remote rm ufc
-git branch -d ufc-merge-into-ffc
-
-This is followed by manually moving back files from the directory
-ufc-merge-into-ffc into the top level directory (in progress).
diff --git a/ufc-merge-into-ffc/README.rst b/ufc-merge-into-ffc/README.rst
deleted file mode 100644
index c8193f3..0000000
--- a/ufc-merge-into-ffc/README.rst
+++ /dev/null
@@ -1,84 +0,0 @@
-==========
-UFC 2.3.0+
-==========
-
-Introduction
-============
-
-UFC (Unified Form-assembly Code) is a unified framework for finite
-element assembly. More precisely, it defines a fixed interface for
-communicating low level routines (functions) for evaluating and
-assembling finite element variational forms. The UFC interface
-consists of a single header file ufc.h that specifies a C++ interface
-that must be implemented by code that complies with the UFC
-specification. Examples of form compilers that support the UFC
-interface are FFC and SyFi. For more information, visit the FEniCS web
-page at
-
-    http://www.fenicsproject.org
-
-or refer to the UFC Specification and User Manual in
-
-    doc/manual/ufc-user-manual.pdf
-
-in this source tree.
-
-
-Installation
-============
-
-To install UFC, run::
-
-  cmake .
-  make
-  make install
-
-This installs the header file ufc.h and a small set of Python
-utilities (templates) for generating UFC code. Files will be installed
-under the default prefix. The installation prefix may be optionally
-specified, for example::
-
-  cmake -DCMAKE_INSTALL_PREFIX=$HOME/local .
-  make install
-
-Alternatively, just copy the single header file src/ufc/ufc.h into a
-suitable include directory. If you do not want to build and install
-the python extenstion module of UFC, needed by, e.g., PyDOLFIN, you
-can write::
-
-  cmake -DCMAKE_INSTALL_PREFIX=~/local -D UFC_ENABLE_PYTHON:BOOL=OFF .
-  make
-  make install
-
-For more options, it is convenient to use a CMake GUI. To use a GUI (if
-installed) for an out-of-source build, simply type::
-
-  mkdir build
-  cd build
-  cmake-gui ../
-  make
-  make install
-
-
-AUTHORS
-=======
-
-A list of authors can be found in the file AUTHORS.
-
-
-License
-=======
-
-Details about the license can be found the file LICENSE.
-
-
-Feedback
-========
-
-Feedback, comments and suggestions should be sent to
-
-  fenics-ufc at lists.launchpad.net
-
-For questions and bug reports, visit the UFC Launchpad page:
-
-  http://www.launchpad.net/ufc
diff --git a/ufc-merge-into-ffc/doc/manual/Makefile b/ufc-merge-into-ffc/doc/manual/Makefile
deleted file mode 100644
index 81d4f6d..0000000
--- a/ufc-merge-into-ffc/doc/manual/Makefile
+++ /dev/null
@@ -1,30 +0,0 @@
-FILENAME=ufc-user-manual
-
-all:
-	latex $(FILENAME).tex
-
-final:
-	latex $(FILENAME).tex
-	bibtex $(FILENAME)
-	latex $(FILENAME).tex
-	makeindex $(FILENAME)
-	latex $(FILENAME)
-	dvips -P pdf -o $(FILENAME).ps $(FILENAME).dvi
-	ps2pdf $(FILENAME).ps $(FILENAME).pdf
-
-clean:
-	rm -f $(FILENAME).aux
-	rm -f $(FILENAME).idx
-	rm -f $(FILENAME).log
-	rm -f $(FILENAME).out
-	rm -f $(FILENAME).toc
-	rm -f $(FILENAME).bbl
-	rm -f $(FILENAME).blg
-	rm -f $(FILENAME).ilg
-	rm -f $(FILENAME).ind
-
-purge: clean
-	rm -f $(FILENAME).dvi
-	rm -f $(FILENAME).ps
-	rm -f $(FILENAME).pdf
-
diff --git a/ufc-merge-into-ffc/doc/manual/algorithm.sty b/ufc-merge-into-ffc/doc/manual/algorithm.sty
deleted file mode 100644
index 34e1f1f..0000000
--- a/ufc-merge-into-ffc/doc/manual/algorithm.sty
+++ /dev/null
@@ -1,96 +0,0 @@
-% ALGORITHM STYLE -- Released 8 April 1996
-%    for LaTeX-2e
-% Copyright -- 1994 Peter Williams
-%
-% E-mail pwil3058 at bigpond.net.au
-%
-% This style file is free software; you can redistribute it and/or
-% modify it under the terms of the GNU Lesser General Public
-% License as published by the Free Software Foundation; either
-% version 2 of the License, or (at your option) any later version.
-%
-% This style file is distributed in the hope that it will be useful,
-% but WITHOUT ANY WARRANTY; without even the implied warranty of
-% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-% Lesser General Public License for more details.
-%
-% You should have received a copy of the GNU Lesser General Public
-% License along with this style file; if not, write to the
-% Free Software Foundation, Inc., 59 Temple Place - Suite 330,
-% Boston, MA  02111-1307, USA.
-%
-\NeedsTeXFormat{LaTeX2e}
-\ProvidesPackage{algorithm}
-\typeout{Document Style `algorithm' - floating environment}
-
-\RequirePackage{float}
-\RequirePackage{ifthen}
-\newcommand{\ALG at within}{nothing}
-\newboolean{ALG at within}
-\setboolean{ALG at within}{false}
-\newcommand{\ALG at floatstyle}{ruled}
-\newcommand{\ALG at name}{Algorithm}
-\newcommand{\listalgorithmname}{List of \ALG at name s}
-
-% Declare Options
-% first appearance
-\DeclareOption{plain}{
-  \renewcommand{\ALG at floatstyle}{plain}
-}
-\DeclareOption{ruled}{
-  \renewcommand{\ALG at floatstyle}{ruled}
-}
-\DeclareOption{boxed}{
-  \renewcommand{\ALG at floatstyle}{boxed}
-}
-% then numbering convention
-\DeclareOption{part}{
-  \renewcommand{\ALG at within}{part}
-  \setboolean{ALG at within}{true}
-}
-\DeclareOption{chapter}{
-  \renewcommand{\ALG at within}{chapter}
-  \setboolean{ALG at within}{true}
-}
-\DeclareOption{section}{
-  \renewcommand{\ALG at within}{section}
-  \setboolean{ALG at within}{true}
-}
-\DeclareOption{subsection}{
-  \renewcommand{\ALG at within}{subsection}
-  \setboolean{ALG at within}{true}
-}
-\DeclareOption{subsubsection}{
-  \renewcommand{\ALG at within}{subsubsection}
-  \setboolean{ALG at within}{true}
-}
-\DeclareOption{nothing}{
-  \renewcommand{\ALG at within}{nothing}
-  \setboolean{ALG at within}{true}
-}
-\DeclareOption*{\edef\ALG at name{\CurrentOption}}
-
-% ALGORITHM
-%
-\ProcessOptions
-\floatstyle{\ALG at floatstyle}
-\ifthenelse{\boolean{ALG at within}}{
-  \ifthenelse{\equal{\ALG at within}{part}}
-     {\newfloat{algorithm}{htbp}{loa}[part]}{}
-  \ifthenelse{\equal{\ALG at within}{chapter}}
-     {\newfloat{algorithm}{htbp}{loa}[chapter]}{}
-  \ifthenelse{\equal{\ALG at within}{section}}
-     {\newfloat{algorithm}{htbp}{loa}[section]}{}
-  \ifthenelse{\equal{\ALG at within}{subsection}}
-     {\newfloat{algorithm}{htbp}{loa}[subsection]}{}
-  \ifthenelse{\equal{\ALG at within}{subsubsection}}
-     {\newfloat{algorithm}{htbp}{loa}[subsubsection]}{}
-  \ifthenelse{\equal{\ALG at within}{nothing}}
-     {\newfloat{algorithm}{htbp}{loa}}{}
-}{
-  \newfloat{algorithm}{htbp}{loa}
-}
-\floatname{algorithm}{\ALG at name}
-
-\newcommand{\listofalgorithms}{\listof{algorithm}{\listalgorithmname}}
-
diff --git a/ufc-merge-into-ffc/doc/manual/bibliography.bib b/ufc-merge-into-ffc/doc/manual/bibliography.bib
deleted file mode 100644
index dc81cef..0000000
--- a/ufc-merge-into-ffc/doc/manual/bibliography.bib
+++ /dev/null
@@ -1,180 +0,0 @@
- at misc{www:dolfin,
-        title = {{DOLFIN}},
-        author = {J. Hoffman and J. Jansson and A. Logg and G. N. Wells},
-        year = {2006},
-        note = {URL: \url{http://www.fenics.org/dolfin/}}
-}
-
- at misc{www:ffc,
-    author = {A. Logg},
-    title = {{FFC}},
-    year = {2007},
-    note = {URL: \url{http://www.fenics.org/ffc/}},
-}
-
- at misc{www:syfi,
-    author = {M. Aln\ae{}s and K--A Mardal},
-    title = {{S}y{F}i},
-    year = {2007},
-    note = {URL: \url{http://www.fenics.org/syfi/}},
-}
-
- at misc{www:fenics,
-    author = {J. Hoffman and J. Jansson and C. Johnson and M. G. Knepley and R. C. Kirby and A. Logg and L. R. Scott and G. N. Wells},
-    title = {{FE}ni{CS}},
-    year = {2006},
-    note = {URL: \url{http://www.fenics.org/}},
-}
-
- at misc{www:sundance,
-	author = {Kevin Long},
-	title = {Sundance},
-	year = {2006},
-	note = {URL: \url{http://software.sandia.gov/sundance/}}
-}
-
- at misc{www:deal.II,
-	author = {Wolfgang Bangerth and Ralf Hartmann and Guido Kanschat},
-	title = {{\tt deal.{I}{I}} {D}ifferential {E}quations {A}nalysis {L}ibrary},
-	year = {2006},
-	note = {URL: \url{http://www.dealii.org/}}
-}
-
- at misc{www:petsc,
-	author = {Satish Balay and Kris Buschelman and William D. Gropp and Dinesh Kaushik and Matthew G. Knepley and Lois Curfman McInnes and Barry F. Smith and Hong Zhang},
-	title = {{PETS}c},
-	year = {2006},
-	note = {URL: \url{http://www.mcs.anl.gov/petsc/}}
-}
-
- at misc{www:trilinos,
-  title = {Trilinos},
-  note  = {URL: \url{http://software.sandia.gov/trilinos/}},
-}
-
- at manual{www:diffpack,
-       title = {{Diffpack}},
-       author = {A. M. Bruaset and H. P. Langtangen and others},
-       year = {2006},
-       note = {URL: \url{http://www.diffpack.com/}}
-}
-
- at article{logg:article:07,
-        author = {R. C. Kirby and M. G. Knepley and A. Logg and L. R. Scott},
-        title = {Optimizing the Evaluation of Finite Element Matrices},
-        journal = {SIAM J. Sci. Comput.},
-        year = {2005},
-        volume = {27},
-        number = {3},
-        pages = {741--758},
-        issn = {1064-8275}
-
-}
-
- at article{logg:article:09,
-        author = {R. C. Kirby and A. Logg and L. R. Scott and A. R. Terrel},
-        title = {Topological Optimization of the Evaluation of Finite Element Ma
-trices},
-        journal = {{SIAM} J. Sci. Comput.},
-        year = {2006},
-        volume = {28},
-        number = {1},
-        pages = {224--240},
-        issn = {1064-8275}
-
-}
-
- at article{logg:article:10,
-  author =       "R. C. Kirby and A. Logg",
-  title =        "A Compiler for Variational Forms",
-  journal =      "{ACM} Transactions on Mathematical Software",
-  volume =       "32",
-  number =       "3",
-  pages =        "417--444",
-  year =         "2006",
-  accepted =     "13 November 2005",
-  abstract =     "As a key step towards a complete automation of the finite elem
-ent method, 
-                 we present a new algorithm for automatic and efficient evaluati
-on of 
-                 multilinear variational forms. The algorithm has been implement
-ed in the 
-                 form of a compiler, the FEniCS Form Compiler FFC. We present be
-nchmark 
-                 results for a series of standard variational forms, including t
-he 
-                 incompressible Navier-Stokes equations and linear elasticity. T
-he speedup 
-                 compared to the standard quadrature-based approach is impressiv
-e; in some 
-                 cases the speedup is as large as a factor 1000.",
-  issn = {0098-3500},
-
-}
- at article{logg:article:11,
-  author =       "R. C. Kirby and A. Logg",
-  title =        "Efficient Compilation of a Class of Variational Forms",
-  journal =      "{ACM} Transactions on Mathematical Software",
-  volume =       "33",
-  number =       "3",
-  year =         "2007",
-  accepted =     "31 August 2006",
-  abstract =     "We investigate the compilation of general multilinear variatio
-nal
-                 forms over affines simplices and prove a representation theorem
- for
-                 the representation of the element tensor (element stiffness mat
-rix)
-                 as the contraction of a constant reference tensor and a geometr
-y
-                 tensor that accounts for geometry and variable coefficients. Ba
-sed
-                 on this representation theorem, we design an algorithm for effi
-cient
-                 pretabulation of the reference tensor.  The new algorithm has b
-een
-                 implemented in the FEniCS Form Compiler~(FFC) and improves on a
-                 previous loop-based implementation by several orders of magnitu
-de,
-                 thus shortening compile-times and development cycles for users 
-of
-                 FFC.",
-  year = "2007",
-  issn = {0098-3500},
-}
-
- at book{ZieTay67,
-        author = {O. C. Zienkiewicz and R. L. Taylor and J. Z. Zhu},
-        title = {The Finite Element Method --- Its Basis and Fundamentals, 6th e
-dition},
-        publisher = {Elsevier},
-        year = {2005, first published in 1967},
-
-}
-
- at book{Hug87,
-        author = {T. J. R. Hughes},
-        title = {The Finite Element Method: Linear Static and Dynamic Finite Ele
-ment Analysis},
-        publisher = {Prentice-Hall},
-        year = {1987},
-
-}
-
- at book{Lan99,
-        author = {H. P. Langtangen},
-        title = {Computational Partial Differential Equations -- Numerical Metho
-ds and Diffpack Programming},
-        publisher = {Springer},
-        year = {1999},
-        series = {Lecture Notes in Computational Science and Engineering},
-
-}
-
- at book{Cia78,
-        author = {P. G. Ciarlet},
-        title = {The Finite Element Method for Elliptic Problems},
-        publisher = {North-Holland, Amsterdam, New York, Oxford},
-        year = {1978},
-
-}
diff --git a/ufc-merge-into-ffc/doc/manual/chapters/assembly.tex b/ufc-merge-into-ffc/doc/manual/chapters/assembly.tex
deleted file mode 100644
index b7f2b9a..0000000
--- a/ufc-merge-into-ffc/doc/manual/chapters/assembly.tex
+++ /dev/null
@@ -1,397 +0,0 @@
-\chapter{Finite element assembly}
-\label{sec:assembly}
-
-\newtheorem{example}{\small{\sc{Example}}}[section]
-
-In this section, we present a general algorithm for assembly of finite
-element variational forms and define the concepts that the UFC
-interface is based on.
-
-\section{Finite Element Discretization}
-\label{sec:fem}
-
-\subsection{The Finite Element}
-\index{finite element}
-
-A finite element is mathematically defined as a triplet consisting of
-a polygon, a polynomial function space, and a set of linear
-functionals, see~\cite{Cia78}. Given that the dimension of the
-function space and the number of the (linearly independent) linear
-functionals are equal, the finite element is uniquely defined. Hence,
-we will refer to a finite element as a collection of
-\begin{itemize}
-\item a polygon $K$,
-\item a polynomial space $\mathcal{P}_K$ on $K$,
-\item a set of linearly independent linear functionals, the
-\emph{degrees of freedom}, $L_i : \mathcal{P}_K \rightarrow \R, \, i =
-1, 2, \ldots, n$.
-\end{itemize}
-
-\subsection{Variational Forms}
-\index{variational form}
-
-Consider the weighted Poisson problem $- \nabla \cdot (w \nabla u) =
-f$ with Dirichlet boundary conditions on a domain $\Omega \subset
-\R^d$.  Multiplying by a test function $v \in V_h$ and integrating by
-parts, one obtains the variational problem
-\begin{equation} \label{eq:weightedpoisson}
-  \int_{\Omega} w \nabla v \cdot \nabla u \dx = \int_{\Omega} v f \dx
-  \quad \forall v \in V_h,
-\end{equation}
-for $u \in V_h$. If $w, f \in W_h$ for some discrete finite element space
-$W_h$ (which may be different from $V_h$), we may thus
-write~(\ref{eq:weightedpoisson}) as
-\begin{equation}
-  a(v, u; w) = L(v; f) \quad \forall v \in V_h,
-\end{equation}
-where the trilinear form $a : V_h \times V_h \times W_h \rightarrow \R$ is given by
-\begin{equation}
-  a(v, u; w) = \int_{\Omega} w \nabla v \cdot \nabla u \dx
-\end{equation}
-and the bilinear form $L : V_h \times W_h \rightarrow R$ is given by
-\begin{equation}
-  L(v; f) = \int_{\Omega} v f \dx.
-\end{equation}
-Note here that $a$ is \emph{bilinear} for any given fixed $w \in W_h$
-and $L$ is \emph{linear} for any given fixed $f \in W_h$.
-
-In general, we shall be concerned with the discretization of
-finite element variational forms of general arity~$r + n > 0$,
-\begin{equation} \label{eq:variationalform}
-  a : V_h^1 \times V_h^2 \times \cdots \times V_h^r \times
-  W_h^1 \times W_h^2 \times \cdots \times W_h^n \rightarrow \R,
-\end{equation}
-defined on the product space $V_h^1 \times V_h^2 \times \cdots \times
-V_h^r \times W_h^1 \times W_h^2 \times \cdots \times W_h^n$ of two
-sets $\{V_h^j\}_{j=1}^r, \{W_h^j\}_{j=1}^n$ of discrete finite element
-function spaces on $\Omega$. We refer to
-$(v_1, v_2, \ldots, v_r) \in V_h^1 \times V_h^2 \times \cdots \times V_h^r$
-as \emph{primary arguments},
-and to
-$(w_1, w_2, \ldots, w_n) \in W_h^1 \times W_h^2 \times \cdots \times W_h^n$
-as \emph{coefficients} and write
-\begin{equation}
-a = a(v_1, \ldots, v_r; w_1, \ldots, w_n).
-\label{eq:gen_form}
-\end{equation}
-In the simplest case, all function spaces are equal but there are many
-important examples, such as mixed methods, where the
-arguments come from different function spaces.
-
-\subsection{Discretization}
-\label{sec:Discretization}
-
-To discretize the form $a$, we introduce bases
-$\{\phi_i^1\}_{i=1}^{N^1},
- \{\phi_i^2\}_{i=1}^{N^2}, \ldots,
- \{\phi_i^r\}_{i=1}^{N^r}$
-for the function spaces $V_h^1, V_h^2, \ldots, V_h^r$ respectively and let $i =
-(i_1, i_2, \ldots, i_r)$ be a multiindex of length $|i| = r$. The
-form $a$ then defines a rank~$r$ tensor given by
-\begin{equation} \label{eq:tensor}
-  A_i = a(\phi_{i_1}^1, \phi_{i_2}^2, \ldots, \phi_{i_r}^r; w_1, w_2, \ldots, w_n)
-  \quad \forall i \in \mathcal{I},
-\end{equation}
-where $\mathcal{I}$ is the index set
-\begin{equation}
-  \begin{split}
-  & \mathcal{I} =  \prod_{j=1}^r[1,|V^j_h|] =  \\
-  & \{(1,1,\ldots,1), (1,1,\ldots,2), \ldots,
-  (N^1,N^2,\ldots,N^r)\}.
-  \end{split}
-\end{equation}
-We refer to the tensor~$A$ as the \emph{discrete operator} generated
-by the form~$a$ and the particular choice of basis functions.  For any
-given form of arity~$r + n$, the tensor~$A$ is a (typically sparse)
-tensor of rank~$r$ and dimension $|V_h^1| \times |V_h^2| \times \ldots
-\times |V_h^r| = N^1 \times N^2 \times \ldots \times N^r$.
-\index{global tensor}
-
-Typically, the rank $r$ is 0, 1, or 2. When $r = 0$, the
-tensor $A$ is a scalar (a tensor of rank zero), when $r = 1$, the
-tensor $A$ is a vector (the ``load vector'') and when $r = 2$, the
-tensor $A$ is a matrix (the ``stiffness matrix''). Forms of higher
-arity also appear, though they are rarely assembled as a
-higher-dimensional sparse tensor.
-
-Note here that we consider the functions $w_1, w_2, \ldots, w_n$ as
-fixed in the sense that the discrete operator~$A$ is computed for a
-given set of functions, which we refer to as \emph{coefficients}. As
-an example, consider again the variational
-problem~(\ref{eq:weightedpoisson}) for the weighted Poisson's
-equation. For the trilinear form~$a$, the rank is $r = 2$ and
-the number of coefficients is $n = 1$, while for the linear form~$L$,
-the rank is $r = 1$ and the number of coefficients is $n = 1$. We may
-also choose to directly compute the \emph{action} of the form
-$a$ obtained by assembling a vector from the form
-\begin{equation}
-  a(v_1; w_1, w2) = \int_{\Omega} w_1 \nabla v_1 \cdot \nabla w_2 \dx,
-\end{equation}
-where now $r = 1$ and $n = 2$.
-
-We list below a few other examples to illustrate the notation.
-
-\begin{example}
-\label{example:div}
-Our first example is related
-to the divergence constraint in fluid flow. Let the form~$a$ be given by
-\begin{equation}
-a(q, u) = \int_{\Omega} q \nabla \cdot u \dx, \quad q\in V_h^1, \quad u\in V_h^2, 
-\end{equation}
-where $V_h^1$ is a space of scalar-valued functions and where $V_h^2$
-is a space of vector-valued functions.  The form $a : V_h^1 \times
-V_h^2 \rightarrow \R$ has two primary arguments and thus $r = 2$.
-Furthermore, the form does not depend on any coefficients and thus $n=0$.
-\end{example}
-
-\begin{example}
-\label{example:linearconv}
-Another common form in fluid flow (with variable density) is
-\begin{equation}
-a(v,u;w,\varrho) = \int_{\Omega} v \, \varrho \, w \cdot \nabla  u \dx. 
-\end{equation}
-Here, $v\in V_h^1,\ u \in V_h^2,\ w\in W_h^1, \ \varrho \in W_h^2$, where
-$V_h^1$, $V_h^2$, and $W_h^1$ are spaces of vector-valued functions, while $W_h^2$ is a space of  
-scalar-valued functions. 
-The form takes four arguments, where two of the arguments
-are coefficients,
-\begin{equation}
-a : V_h^1 \times V_h^2 \times W_h^1 \times W_h^2 \rightarrow \R.
-\end{equation}
-Hence, $r=2$ and $n=2$. 
-\end{example}
-
-\begin{example}
-The $H^1(\Omega)$ norm of the error $e = u - u_h$ squared is
-\begin{equation}
-a(;u, u_h) = \int_{\Omega} (u - u_h)^2 + |\nabla (u - u_h)|^2 \dx.
-\end{equation}
-The form takes two arguments and both are coefficients,
-\begin{equation}
-a : W_h^1 \times  W_h^2 \rightarrow \R.
-\end{equation}
-Hence, $r=0$ and $n=2$. 
-\end{example}
-
-\section{Finite Element Assembly}
-\index{assembly}
-
-The standard algorithm for computing the global sparse tensor~$A$ is
-known as \emph{assembly}, see~\cite{ZieTay67,Hug87}. By this
-algorithm, the tensor~$A$ may be computed by assembling (summing) the
-contributions from the local entities of a finite element mesh.  To
-express this algorithm for assembly of the global sparse tensor~$A$ for
-a general finite element variational form of arity~$r$, we introduce
-the following notation and assumptions.
-
-Let $\mathcal{T} = \{K\}$ be a set of disjoint \emph{cells} (a
-triangulation) partitioning the domain $\Omega =
-\cup_{K\in\mathcal{T}} K$. Further, let $\partial_e \mathcal{T}$
-denote the set of \emph{exterior facets} (the set of cell facets
-incident with the boundary $\partial \Omega$), and let $\partial_i
-\mathcal{T}$ denote the set of $\emph{interior facets}$ (the set of
-cell facets non-incident with the boundary $\partial \Omega$).  For
-each discrete function space $V_h^j, \, j=1,2,\ldots,r$, we assume
-that the global basis~$\{\phi_i^j\}_{i=1}^{N^j}$ is obtained by
-patching together local function spaces $\mathcal{P}_K^j$ on each
-cell~$K$ as determined by a local-to-global mapping.
-
-We shall further assume that the variational
-form~(\ref{eq:variationalform}) may be expressed as a sum of integrals
-over the cells~$\mathcal{T}$, the exterior facets~$\partial_e
-\mathcal{T}$ and the interior facets~$\partial_i \mathcal{T}$. We
-shall allow integrals expressed on disjoint subsets
-$\mathcal{T} = \cup_{k=1}^{n_c} \mathcal{T}_k$,
-$\partial_e \mathcal{T} = \cup_{k=1}^{n_e} \partial_e \mathcal{T}_k$
-and
-$\partial_i \mathcal{T} = \cup_{k=1}^{n_i} \partial_i \mathcal{T}_k$
-respectively.
-
-We thus assume that the form $a$ is given by
-\begin{equation}
-  \begin{split}
-    & a(v_1, \ldots, v_r; w_1, \ldots,  w_n) =  \\
-    &\ \ \   \sum_{k=1}^{n_c} \sum_{K\in\mathcal{T}_k} \int_{K}
-    I^c_k(v_1, \ldots, v_r; w_1, \ldots w_n) \dx \\
-    &+
-    \sum_{k=1}^{n_e} \sum_{S\in\partial_e\mathcal{T}_k} \int_{S}
-    I^e_k(v_1, \ldots, v_r; w_1, \ldots,  w_n) \ds \\
-    &+
-    \sum_{k=1}^{n_i} \sum_{S\in\partial_i\mathcal{T}_k} \int_{S}
-    I^i_k(v_1, \ldots, v_r; w_1, \ldots, w_n) \ds.
-  \end{split} \label{eq:form_integrals}
-\end{equation}
-We refer to an integral over a cell~$K$ as a \emph{cell integral},
-an integral over an exterior facet~$S$ as an \emph{exterior facet integral}
-(typically used to implement Neumann and Robin type boundary conditions),
-and to an integral over an interior facet~$S$ as an \emph{interior facet integral} (typically used in discontinuous Galerkin methods).
-
-For simplicity, we consider here initially assembly of the global
-sparse tensor~$A$ corresponding to a form~$a$ given by a single
-integral over all cells $\mathcal{T}$, and later extend to the general
-case where we must also account for contributions from several cell
-integrals, interior facet integrals and exterior facet integrals.
-
-We thus consider the form
-\begin{equation}
-  \begin{split}
-    &a(v_1, \ldots, v_r; w_1, \ldots, w_n) = \\
-    & \ \ \ \sum_{K\in\mathcal{T}} \int_K
-    I^c(v_1, \ldots, v_r; w_1, \ldots, w_n) \dx,
-  \end{split}
-\end{equation}
-for which the global sparse tensor~$A$ is given by
-\begin{equation}
-  A_i = \sum_{K\in\mathcal{T}} \int_K
-  I^c(\phi^1_{i_1}, \ldots, \phi^r_{i_r}; w_1, \ldots, w_n) \dx.
-\end{equation}
-To see how to compute the tensor $A$ by summing the local
-contributions from each cell~$K$, we let $n^j_K = |\mathcal{P}^j_K|$
-denote the dimension of the local finite element space on $K$ for the
-$j$th primary argument $v_j \in V_h^j$ for $j = 1,2,\ldots,r$. Furthermore, let
-\begin{equation}
-  \iota_K^j : [1,n_K^j] \rightarrow [1,N^j] \label{eq:iota_K}
-\end{equation}
-denote the local-to-global mapping for~$V_h^j$, that is, on any given
-$K\in\mathcal{T}$, the mapping $\iota_K^j$ maps the number of a local
-degree of freedom (or, equivalently, local basis function) to the
-number of the corresponding global degree of freedom (or,
-equivalently, global basis function). We then define for each $K \in
-\mathcal{T}$ the collective local-to-global mapping $\iota_K :
-\mathcal{I}_K \rightarrow \mathcal{I}$ by
-\begin{equation}
-  \iota_K(i) =
-  (\iota_K^1(i_1),\iota_K^2(i_2),\ldots,\iota_K^r(i_r))
-  \quad \forall i \in \mathcal{I}_K,
-\end{equation}
-where $\mathcal{I}_K$ is the index set
-\begin{equation}
-\begin{split}
-  & \mathcal{I}_K = \prod_{j=1}^r[1,|\mathcal{P}_K^j|] \\ 
-  & = \{(1,1,\ldots,1), (1,1,\ldots,2), \ldots,
-  (n_K^1,n_K^2,\ldots,n_K^r)\}.
-\end{split}
-\end{equation}
-Furthermore, for each $V_h^j$ we let $\{\phi^{K,j}_i\}_{i=1}^{n_K^j}$
-denote the restriction to an element $K$ of the subset of the basis
-$\{\phi_i^j\}_{i=1}^{N^j} \subset \mathcal{P}_K^j$ of $V_h^j$ supported on $K$.
-
-We may now compute~$A$ by summing the contributions from
-the local cells,
-\begin{equation}
-  \begin{split}
-  A_i
-  &=
-  \sum_{K\in\mathcal{T}_i} \int_K
-  I^c(\phi_{i_1}^1, \ldots, \phi_{i_r}^r; w_1, \ldots, w_n) \dx \\
-  &=
-  \sum_{K\in\mathcal{T}_i} \int_K
-  I^c(\phi_{(\iota_K^1)^{-1}(i_1)}^{K,1},
-      \ldots,
-      \phi_{(\iota_K^r)^{-1}(i_r)}^{K,r}; w_1, \ldots, w_n) \dx \\
-  &=
-  \sum_{K\in\mathcal{T}_i}
-  A^K_{\iota_K^{-1}(i)},
-  \end{split}
-\end{equation}
-where $A^K$ is the local \emph{cell tensor} on cell $K$ (the ``element
-stiffness matrix''), given by
-\begin{equation}
-  A^K_i = \int_K
-  I^c(\phi_{i_1}^{K,1},
-  \ldots,
-  \phi_{i_r}^{K,r}; w_1, \ldots, w_n) \dx, \\
-\end{equation}
-and where $\mathcal{T}_i$ denotes the set of cells on which all basis
-functions $\phi_{i_1}^1, \phi_{i_2}^2, \ldots, \phi_{i_r}^r$ are supported.
-Similarly, we may sum the local contributions
-from the exterior and interior facets in the form of local
-\emph{exterior facet tensors} and \emph{interior facet tensors}.
-\index{cell tensor}
-\index{exterior facet tensor}
-\index{interior facet tensor}
-
-\begin{figure}[htbp]
-  \begin{center}
-    \psfrag{i0}{\hspace{-0.5cm}$\iota_K^1(1)$}
-    \psfrag{i1}{\hspace{-0.5cm}$\iota_K^1(2)$}
-    \psfrag{i2}{\hspace{-0.5cm}$\iota_K^1(3)$}
-    \psfrag{j0}{\hspace{-0.3cm}$\iota_K^2(1)$}
-    \psfrag{j1}{\hspace{-0.5cm}$\iota_K^2(2)$}
-    \psfrag{j2}{\hspace{-0.1cm}$\iota_K^2(3)$}
-    \psfrag{A21}{$A^K_{32}$}
-    \psfrag{1}{$1$}
-    \psfrag{2}{$2$}
-    \psfrag{3}{$3$}
-    \includegraphics[height=3in]{eps/insertion.eps}
-    \caption{Adding the entries of a cell tensor~$A^K$ to the
-      global tensor~$A$ using the  local-to-global mapping
-      $\iota_K$, illustrated here for a rank two
-      tensor (a matrix).}
-    \label{fig:insertion}
-  \end{center}
-\end{figure}
-
-In Algorithm~\ref{alg:assembly}, we present a general algorithm for
-assembling the contributions from the local cell, exterior facet and
-interior facet tensors into a global sparse tensor.  In all cases, we
-iterate over all entities (cells, exterior or interior facets),
-compute the local cell tensor $A^K$ (or exterior/interior facet tensor
-$A^S$) and add it to the global sparse tensor as determined by the
-local-to-global mapping, see~Figure~\ref{fig:insertion}.
-
-
-\begin{algorithm}
-\footnotesize
-$A = 0$ \\
-(i) \emph{Assemble contributions from all cells} \\
-\textbf{for each} $K \in \mathcal{T}$ \\
-\\
-\tab \textbf{for} $j = 1,2,\ldots,r$: \\
-\tab\tab Tabulate the local-to-global mapping $\iota_K^j$ \\
-\\
-\tab \textbf{for} $j = 1,2,\ldots,n$: \\
-\tab\tab Extract the values of $w_j$ on $K$
-\\
-\\
-\tab Take $0 \leq k \leq n_c$ such that $K \in \mathcal{T}_k$ \\
-\tab Tabulate the cell tensor $A^K$ for $I^c_k$ \\
-\tab Add $A^K_i$ to $A_{\iota_K^1(i_1), \iota_K^2(i_2), \ldots, \iota_K^r(i_r)}$ for $i\in I_K$ \\
-\\
-(ii) \emph{Assemble contributions from all exterior facets} \\
-\textbf{for each} $S \in \partial_e\mathcal{T}$ \\
-\\
-\tab \textbf{for} $j = 1,2,\ldots,r$: \\
-\tab\tab Tabulate the local-to-global mapping $\iota_{K(S)}^j$ \\
-\\
-\tab \textbf{for} $j = 1,2,\ldots,n$: \\
-\tab\tab Extract the values of $w_j$ on $K(S)$
-\\
-\\
-\tab Take $0 \leq k \leq n_e$ such that $S \in \partial_e \mathcal{T}_k$ \\
-\tab Tabulate the exterior facet tensor $A^S$ for $I^e_k$ \\
-\tab Add $A^S_i$ to $A_{\iota_{K(S)}^1(i_1), \iota_{K(S)}^2(i_2), \ldots, \iota_{K(S)}^r(i_r)}$ for $i\in I_{K(S)}$ \\
-\\
-\\
-(iii) \emph{Assemble contributions from all interior facets} \\
-\textbf{for each} $S \in \partial_i\mathcal{T}$ \\
-\\
-\tab \textbf{for} $j = 1,2,\ldots,r$: \\
-\tab\tab Tabulate the local-to-global mapping $\iota_{K(S)}^j$ \\
-\\
-\tab \textbf{for} $j = 1,2,\ldots,n$: \\
-\tab\tab Extract the values of $w_j$ on $K(S)$
-\\
-\\
-\tab Take $0 \leq k \leq n_i$ such that $S \in \partial_i \mathcal{T}_k$ \\
-\tab Tabulate the interior facet tensor $A^S$ for $I^i_k$ \\
-\tab Add $A^S_i$ to $A_{\iota_{K(S)}^1(i_1), \iota_{K(S)}^2(i_2), \ldots, \iota_{K(S)}^r(i_r)}$ for $i\in I_{K(S)}$ \\
-\caption{Assembling the global tensor~$A$ from the local contributions
-  on all cells, exterior and interior facets. For assembly over
-  exterior facets, $K(S)$ refers to the cell $K\in\mathcal{T}$ incident
-  to the exterior facet~$S$, and for assembly over interior facets,
-  $K(S)$ refers to the ``macro cell'' consisting of the pair of cells
-  $K^+$ and $K^-$ incident to the interior facet~$S$.}
-\label{alg:assembly}
-\end{algorithm}
-\normalsize
diff --git a/ufc-merge-into-ffc/doc/manual/chapters/assembly_cpp.tex b/ufc-merge-into-ffc/doc/manual/chapters/assembly_cpp.tex
deleted file mode 100644
index f2e278d..0000000
--- a/ufc-merge-into-ffc/doc/manual/chapters/assembly_cpp.tex
+++ /dev/null
@@ -1,111 +0,0 @@
-\chapter{A basic UFC-based assembler}
-\label{app:assembly}
-
-Below, we include a sketch of a UFC-based implementation of the assembly
-of the global tensor~$A$ by summing the local contributions from all
-cells. The contributions from all exterior and interior facets may be
-computed similarly.
-
-The implementation is incomplete and system specific details such as
-interaction with mesh and linear algebra libraries have been
-omitted.\footnote{For an example of a complete implementation of a
-UFC-based assembler, we refer to the source code of
-DOLFIN~\cite{www:dolfin}, in particular class \texttt{Assembler} as
-implemented in \texttt{Assembler.cpp}.}
-
-\scriptsize
-\begin{code}
-void assemble(..., ufc::form& form, ...)
-{
-  ...
-
-  // Initialize mesh data structure
-  ufc::mesh mesh;
-  mesh.num_entities = new unsigned int[...];
-  ...
-
-  // Initialize cell data structure
-  ufc::cell cell;
-  cell.entity_indices = new unsigned int[...];
-  cell.coordinates = new double[...];
-  ...
-
-  // Create cell integrals
-  ufc::cell_integral** cell_integrals;
-  cell_integrals = new ufc::cell_integral*[form.num_cell_domains()];
-  for (unsigned int i = 0; i < form.num_cell_domains(); i++)
-    cell_integrals[i] = form.create_cell_integral(i);
-
-  // Create dofmaps
-  ufc::dofmap** dofmaps;
-  dofmaps = new ufc::dofmap*[form.rank() + form.num_coefficients()];
-  for (unsigned int i = 0; i < form.rank() + form.num_coefficients(); i++)
-  {
-    dofmaps[i] = form.create_dofmap(i);
-
-    // Initialize dofmap
-    if (dofmaps[i]->init_mesh(mesh))
-    {
-      // Iterate over cells
-      for (...)
-      {
-        // Update cell data structure to current cell
-        cell.entity_indices[...] = ...
-        cell.coordinates[...] = ...
-        ...
-
-        // Initialize dofmap for cell
-        dofmaps[i]->init_cell(mesh, cell);
-      }
-
-      dofmaps[i]->init_cell_finalize();
-    }
-  }
-
-  // Initialize array of values for the cell tensor
-  unsigned int size = 1;
-  for (unsigned int i = 0; i < form.rank(); i++)
-    size *= dofmaps[i]->max_local_dimension();
-  double* AK = new double[size];
-
-  // Initialize array of local to global dofmaps
-  unsigned int** dofs = new unsigned int*[form.rank()];
-  for (unsigned int i = 0; i < form.rank(); i++)
-    dofs[i] = new unsigned int[dofmaps[i]->max_local_dimension()];
-
-  // Initialize array of coefficient values
-  double** w = new double*[form.num_coefficients()];
-  for (unsigned int i = 0; i < form.num_coefficients(); i++)
-    w[i] = new double[dofmaps[form.rank() + i]->max_local_dimension()];
-
-  // Iterate over cells
-  for (...)
-  {
-    // Get number of subdomain for current cell
-    const unsigned int sub_domain = ...
-
-    // Update cell data structure to current cell
-    cell.entity_indices[...] = ...
-    cell.coordinates[...] = ...
-    ...
-
-    // Interpolate coefficients (library specific so omitted here)
-    ...
-
-    // Tabulate dofs for each dimension
-    for (unsigned int i = 0; i < form.rank(); i++)
-      dofmaps[i]->tabulate_dofs(dofs[i], mesh, cell);
-
-    // Tabulate cell tensor
-    cell_integrals[sub_domain]->tabulate_tensor(AK, w, cell);
-
-    // Add entries to global tensor (library specific so omitted here)
-    ...
-  }
-
-  // Delete data structures
-  delete [] mesh.num_entities;
-  ...
-}
-\end{code}
-\normalsize
diff --git a/ufc-merge-into-ffc/doc/manual/chapters/examples.tex b/ufc-merge-into-ffc/doc/manual/chapters/examples.tex
deleted file mode 100644
index 35a2892..0000000
--- a/ufc-merge-into-ffc/doc/manual/chapters/examples.tex
+++ /dev/null
@@ -1,76 +0,0 @@
-\chapter{Complete UFC code for Poisson's equation}
-\index{Poisson's equation}
-\index{example code}
-
-In this section, a simple example is given of UFC code generated by
-the form compilers
-FFC~\cite{www:ffc,logg:article:07,logg:article:09,logg:article:10,logg:article:11}
-and SyFi~\cite{www:syfi} for Poisson's equation. The code presented
-below is generated for the bilinear form of Poisson's equation for
-standard continuous piecewise linear Lagrange finite elements on a
-two-dimensional domain $\Omega$,
-\begin{equation}
-  a(v, u) = \int_{\Omega} \nabla v \cdot \nabla u \dx.
-\end{equation}
-
-Although FFC and SyFi are two different form compilers, with very
-different approaches to code generation, both generate code that
-conforms to the UFC specification and may thus be used interchangeably
-within any UFC-based system.
-
-In the generated code, we have omitted the two functions
-\texttt{evaluate\_basis} and
-\texttt{evaluate\_basis\_derivatives}\footnotemark{} to save space.
-
-\footnotetext{For FFC, this may be done by using the compiler flags
-  \texttt{-fno-evaluate\_basis} and
-  \texttt{-fno-evaluate\_basis\_derivatives}.}
-
-\section{Code generated by FFC}
-\index{FFC}
-
-\scriptsize
-\VerbatimInput[frame=single,rulecolor=\color{blue}]{code/poisson_ffc.h}
-\normalsize
-
-\section{Code generated by SyFi}
-\index{SyFi}
-
-In the following we list the complete code for the finite element,
-the dofmap and the variational form for computing a stiffness matrix
-based on linear Lagrangian elements in 2D.
-
-The code below is generated for the assembler in PyCC and it therefore
-includes some PyCC files, since the option \texttt{SFC.options.include\_from = "pycc"} was used during the code generation. If PyCC is not present, the option \texttt{SFC.options.include\_from = "syfi"} can be used, and this will result in some additional files that define the numbering scheme.
-
-\subsection{Header file for linear Lagrange element in 2D}
-
-\scriptsize
-\VerbatimInput[frame=single,rulecolor=\color{blue}]{code/stiffness_syfi/fe_Lagrange_1_2D.h}
-\normalsize
-
-\subsection{Source file for linear Lagrange element in 2D}
-\scriptsize
-\VerbatimInput[frame=single,rulecolor=\color{blue}]{code/stiffness_syfi/fe_Lagrange_1_2D.cpp}
-\normalsize
-
-\subsection{Header file for the dofmap}
-\scriptsize
-\VerbatimInput[frame=single,rulecolor=\color{blue}]{code/stiffness_syfi/dof_map_Lagrange_1_2D.h}
-\normalsize
-
-\subsection{Source file for the dofmap}
-\scriptsize
-\VerbatimInput[frame=single,rulecolor=\color{blue}]{code/stiffness_syfi/dof_map_Lagrange_1_2D.cpp}
-\normalsize
-
-\subsection{Header file for the stiffness matrix form}
-\scriptsize
-\VerbatimInput[frame=single,rulecolor=\color{blue}]{code/stiffness_syfi/form__stiffness_form__Lagrange_1_2D.h}
-\normalsize
-
-
-\subsection{Source file for the stiffness matrix form}
-\scriptsize
-\VerbatimInput[frame=single,rulecolor=\color{blue}]{code/stiffness_syfi/form__stiffness_form__Lagrange_1_2D.cpp}
-\normalsize
diff --git a/ufc-merge-into-ffc/doc/manual/chapters/installation.tex b/ufc-merge-into-ffc/doc/manual/chapters/installation.tex
deleted file mode 100644
index 8044020..0000000
--- a/ufc-merge-into-ffc/doc/manual/chapters/installation.tex
+++ /dev/null
@@ -1,47 +0,0 @@
-\chapter{Installation}
-\label{app:installation}
-\index{installation}
-
-The \ufc{} package consists of two parts, the main part being a single
-header file called \texttt{ufc.h}. In addition, a set of Python
-utilities to simplify the generation of \ufc{} code is provided.
-
-Questions, bug reports and patches concerning the installation should
-be directed to the \ufc{} mailing list at the address
-\begin{code}
-fenics at lists.launchpad.net
-\end{code}
-
-\section{Installing \ufc{}}
-
-To install UFC, simply run
-\begin{code}
-scons
-sudo scons install
-\end{code}
-
-This installs the header file ufc.h and a small set of Python
-utilities (templates) for generating UFC code. Files will be installed
-under the default prefix.
-
-The installation prefix may be optionally specified, for example
-
-\begin{code}
-scons install prefix=~/local
-\end{code}
-
-Alternatively, just copy the single header file \texttt{src/ufc/ufc.h}
-into a suitable include directory.
-
-If you do not want to build and install the Python extension module of \ufc{},
-needed by, e.g., PyDOLFIN, you can write
-
-\begin{code}
-scons enablePyUFC=No
-sudo scons install
-\end{code}
-
-Help with available options and default arguments can be viewed by
-\begin{code}
-scons -h
-\end{code}
diff --git a/ufc-merge-into-ffc/doc/manual/chapters/interface.tex b/ufc-merge-into-ffc/doc/manual/chapters/interface.tex
deleted file mode 100644
index 2c8fed5..0000000
--- a/ufc-merge-into-ffc/doc/manual/chapters/interface.tex
+++ /dev/null
@@ -1,1210 +0,0 @@
-\chapter{Interface specification}
-\label{sec:interface}
-\index{interface}
-
-\section{A short remark on design}
-\index{design}
-
-UFC is organized as a minimalistic set of abstract C++ classes
-representing low-level abstractions of the finite element method. The
-functions in these classes are mainly of two types: (i) functions
-returning dimensions, which are typically used to allocate an array,
-and (ii) functions that fill an array with values.
-
-It is considered the assembly routine's responsibility
-to allocate and deallocate arrays of proper size.
-Consider for example the function for evaluating the $i$th basis
-function in the class \texttt{ufc::finite\_element}:
-\begin{code}
-virtual void evaluate_basis(unsigned int i,
-                            double* values,
-                            const double* coordinates,
-                            const cell& c) const = 0;
-\end{code}
-This function assumes that the array \texttt{values} has the correct
-size, which may be obtained by calling the functions
-\texttt{value\_rank} and \texttt{value\_dimension} as described in
-detail below.
-
-Thus, the UFC interface is a low-level interface that should be simple
-to integrate into an existing C++ finite element code, but which is
-probably not suitable to be exposed as part of an end-user interface.
-
-The UFC interface is defined by a single header file \texttt{ufc.h}
-which defines the central interface class \texttt{ufc::form} and a
-small set of auxiliary interface classes. In addition, a pair of data
-structures \texttt{ufc::mesh} and \texttt{ufc::cell} are defined and
-used for passing data to the interface functions. All functions
-defined by the UFC interface are \emph{pure virtual}, meaning that all
-these functions must be overloaded in each implementation of the
-classes.  All but two functions (\texttt{init\_mesh} and
-\texttt{init\_cell}) are \texttt{const}, meaning that calling these
-\texttt{const} functions will leave the UFC objects unchanged.  Input
-argument (pointers) are always \texttt{const}, while output arguments
-(pointers) are always non-\texttt{const}.  \index{ufc.h}
-
-The interface is presented below in the same order as it is defined in
-the header file \texttt{ufc.h}. Thus, the interface is presented
-bottom-up, starting with the definition of basic data structures and
-ending with the definition of the main \texttt{ufc::form} interface
-class.
-
-\section{Cell shapes}
-\index{Cell shapes}
-\index{\texttt{interval}}
-\index{\texttt{triangle}}
-\index{\texttt{quadrilateral}}
-\index{\texttt{tetrahedron}}
-\index{\texttt{hexahedron}}
-
-\begin{code}
-enum shape {interval,
-            triangle,
-            quadrilateral,
-            tetrahedron,
-            hexahedron};
-\end{code}
-
-This enumeration includes all cell shapes that are covered by the UFC
-specification, see Chapter~\ref{sec:referencecells}.
-
-\section{The class \texttt{ufc::mesh}}
-\index{\texttt{ufc::mesh}}
-
-The class \texttt{ufc::mesh} defines a data structure containing basic
-information about an unstructured mesh. It is used for passing a
-minimal amount of information about the global mesh to UFC functions.
-
-\subsection{The integer \texttt{topological\_dimension}}
-
-\begin{code}
-unsigned int topological_dimension;
-\end{code}
-
-The unsigned integer \texttt{topological\_dimension} holds the
-topological dimension of the mesh, that is, the topological dimension
-of the cells of the mesh. For the supported cell shapes defined above,
-the topological dimensions are as follows: \texttt{interval} has
-dimension one, \texttt{triangle} and \texttt{quadri\-lateral} have
-dimension two, and \texttt{tetrahedron} and \texttt{hexahedron} have
-dimension three.
-
-\subsection{The integer \texttt{geometric\_dimension}}
-
-\begin{code}
-unsigned int geometric_dimension;
-\end{code}
-
-The unsigned integer \texttt{geometric\_dimension} holds the geometric
-dimension of the mesh, that is, the dimension of the coordinates of
-the mesh vertices.  Often, the geometric dimension is equal to the
-topological dimension, but they may differ. For example, one may have
-a topologically two-dimensional mesh embedded in three-dimensional
-space.
-
-\subsection{The array \texttt{num\_entities}}
-
-\begin{code}
-unsigned int* num_entities;
-\end{code}
-
-The array \texttt{num\_entities} should contain the number of entities
-within each topological dimension of the mesh (see
-Chapter~\ref{sec:referencecells}). The size of the array should be
-equal to the topological dimension of the mesh plus one.
-
-Thus, for a mesh of tetrahedral cells, \texttt{num\_entities[0]}
-should contain the number of vertices, \texttt{num\_entities[1]}
-should contain the number of edges (if they are needed, see
-\texttt{ufc::dofmap::needs\_mesh\_entities} below),
-\texttt{num\_en\-tities[2]} should contain the number of faces, and
-\texttt{num\_entities[3]} should contain the number of volumes.  If
-\texttt{d} is the topological dimension of the mesh,
-\texttt{num\_entities[d]} should contain the number of cells or
-elements.
-
-\section{The class \texttt{ufc::cell}}
-\index{\texttt{ufc::cell}}
-
-The class \texttt{ufc::cell} defines the data structure for a cell in
-a mesh. Its intended use is not as a building block in a mesh data
-structure, but merely as a view of specific data for a single cell.
-It is used to pass cell data to UFC functions with a minimal amount of
-assumptions on how the computational mesh is represented and stored.
-
-\subsection{The enum variable \texttt{cell\_shape}}
-
-\begin{code}
-shape cell_shape;
-\end{code}
-
-The variable \texttt{cell\_shape} should be set to the corresponding
-\texttt{ufc::shape} for the cell.
-
-\subsection{The integer \texttt{topological\_dimension}}
-
-\begin{code}
-unsigned int topological_dimension;
-\end{code}
-
-The integer \texttt{topological\_dimension} should be set to the
-topological dimension of the cell (see \texttt{ufc::mesh} above).
-
-\subsection{The integer \texttt{geometric\_dimension}}
-
-\begin{code}
-unsigned int geometric_dimension;
-\end{code}
-
-The integer \texttt{geometric\_dimension} should be set to the
-geometric dimension of the cell (see \texttt{ufc::mesh} above).
-
-\subsection{The array \texttt{entity\_indices}}
-
-\begin{code}
-unsigned int** entity_indices;
-\end{code}
-
-The array \texttt{entity\_indices} should contain the global indices
-for all entities of the cell (see Chapter~\ref{sec:referencecells}).
-The length of the array \texttt{entity\_indices} should be equal to
-the value of the variable \texttt{topological\_dimension} plus one.
-
-Thus, \texttt{entity\_indices[0]} should be an array containing the
-global indices of all the vertices of the cell,
-\texttt{entity\_indices[1]} should be an array containing the global
-indices of all the edges of the cell, etc. The sizes of these arrays
-are implicitly defined by the cell type.
-
-Note that the entity indices are not always needed for all entities of
-the cell. Which entities are required is specified by the
-\texttt{ufc::dofmap} class (see
-\texttt{ufc::dofmap::needs\_mesh\_entities} below).
-
-\subsection{The array \texttt{coordinates}}
-
-\begin{code}
-double** coordinates;
-\end{code}
-
-The array \texttt{coordinates} should contain the global coordinates
-for all vertices of the cell and thus its length should be equal to
-number of vertices of the cell. The length of the array
-\texttt{coordinates[0]} should be equal to the value of the variable
-\texttt{geometric\_dimension} and it should contain the $x$, $y$,
-\ldots coordinates of the first vertex etc.
-
-\subsection{The integer \texttt{index}}
-Introduced in UFC version 2.0.
-
-\begin{code}
-unsigned int index;
-\end{code}
-
-The integer \texttt{index} should contain the global index for the
-cell. This is a short-cut for
-\texttt{entity\_indices[topological\_dimension][0]}.
-
-\subsection{The integer \texttt{local\_facet}}
-Introduced in UFC version 2.0.
-
-\begin{code}
-int local_facet;
-\end{code}
-
-The integer \texttt{local\_facet} can be used during callbacks to the
-\texttt{evaluate} function of \texttt{ufc::function} to specify the
-local index of a facet (relative to the cell) on which the function is
-being evaluated. In all other cases, this variable should be set to
-\texttt{-1}.
-
-\subsection{The integer \texttt{mesh\_identifier}}
-Introduced in UFC version 2.0.
-
-\begin{code}
-int mesh_identifier;
-\end{code}
-
-The integer \texttt{mesh\_identifier} can be used during callbacks to
-the \texttt{evaluate} function of \texttt{ufc::function} to specify a
-global identifier for the mesh on which the function is being
-evaluated. This allows \texttt{ufc::function} subclasses to handle
-evaluation differently on different meshes. If not used, this variable
-should be set to \texttt{-1}.
-
-\section{The class \texttt{ufc::function}}
-\index{\texttt{ufc::function}}
-
-The class \texttt{ufc::function} is an interface for evaluation of
-general tensor-valued functions on the cells of a mesh.
-
-\subsection{The function \texttt{evaluate}}
-
-\begin{code}
-virtual void evaluate(double* values,
-                      const double* coordinates,
-                      const cell& c) const = 0;
-\end{code}
-
-The only function in this class is \texttt{evaluate}, which evaluates
-all the value components of the function at a given point in a given
-cell of the mesh.
-
-The output of \texttt{evaluate} should be written to the array
-\texttt{values}. For a scalar-valued function, a single value should be
-written to \texttt{values[0]}. For general tensor-valued functions,
-the values should be written in a flattened row-major ordering of the
-tensor values. Thus, for a function $f : K \rightarrow \R^{2 \times 2}$ (where $A =
-f(x)$ is a $2 \times 2$ matrix), the array \texttt{values} should contain
-the values $A_{11}, A_{12}, A_{21}, A_{22}$ in this order.
-
-The input to \texttt{evaluate} are the coordinates of a point in a cell
-and the UFC view of the cell containing that point.
-
-See also the description of
-\texttt{ufc::finite\_element::evaluate\_dof} below.
-
-\section{The class \texttt{ufc::finite\_element}}
-\index{\texttt{ufc::finite\_element}}
-
-The class \texttt{ufc::finite\_element} represents a finite element in
-the classical Ciarlet sense~\cite{Cia78}, or rather a particular
-instance of a finite element for a particular choice of nodal basis
-functions. Thus, a \texttt{ufc::finite\_element} has functions for
-accessing the shape of the finite element, the dimension of the
-polynomial function space, the basis functions of the function space
-(and their derivatives), and the linear functionals defining the
-degrees of freedom. In addition, a \texttt{ufc::finite\_element}
-provides functionality for interpolation.
-
-\subsection{The function \texttt{signature}}
-
-\begin{code}
-virtual const char* signature() const = 0;
-\end{code}
-
-This function returns a signature string that uniquely identifies the
-finite element. This can be used to compare whether or not two given
-\texttt{ufc::fi\-nite\_element} objects are identical.
-
-\subsection{The function \texttt{cell\_shape}}
-
-\begin{code}
-virtual shape cell_shape() const = 0;
-\end{code}
-
-This function returns the shape of the cell the finite element is
-defined on.
-
-\subsection{The function \texttt{topological\_dimension}}
-Introduced in UFC version 2.0.
-
-\begin{code}
-virtual unsigned int topological_dimension() const = 0;
-\end{code}
-
-This function returns the topological dimension of the cell the finite
-element is defined on.
-
-\subsection{The function \texttt{geometric\_dimension}}
-Introduced in UFC version 2.0.
-
-\begin{code}
-virtual unsigned int geometric_dimension() const = 0;
-\end{code}
-
-This function returns the geometric dimension of the cell the finite
-element is defined on.
-
-\subsection{The function \texttt{space\_dimension}}
-
-\begin{code}
-virtual unsigned int space_dimension() const = 0;
-\end{code}
-
-This function returns the dimension of the local finite element space
-($|V_h^K|$), which is equal to the number of basis functions. This
-should also be equal to the value of \texttt{local\_dimension()} for
-the corresponding \texttt{ufc::dofmap} (see below).
-
-\subsection{The function \texttt{value\_rank}}
-
-\begin{code}
-virtual unsigned int value_rank() const = 0;
-\end{code}
-
-A finite element can have general tensor-valued basis functions.  The
-function \texttt{value\_rank} returns the rank of the value space of
-the basis functions. For a scalar element, this function should return
-zero, for vector-valued functions it should return one, for
-matrix-valued functions it should return two, etc. For mixed elements, this
-may not always make sense, for example with a tensor-vector-scalar element.
-Thus the value rank of a mixed element must be 1 if any of the subelements have
-different value ranks.
-
-\subsection{The function \texttt{value\_dimension}}
-
-\begin{code}
-virtual unsigned int
-value_dimension(unsigned int i) const = 0;
-\end{code}
-
-This function returns the dimension of the value space of the finite
-element basis functions for the given axis, where the given axis must
-be a number between zero and the value rank minus one.
-
-Note that the total size (number of values) of the value space is
-obtained as the product of \texttt{value\_dimension(i)} for $0 \le i <$
-\texttt{value\_rank()}. Since the value rank of a mixed element must be 1
-if any of the subelements have different value ranks,
-\texttt{value\_dimension(0)} is then the total value size of all the
-subelements.
-
-\subsection{The function \texttt{evaluate\_basis}}
-
-\begin{code}
-virtual void evaluate_basis(unsigned int i,
-                            double* values,
-                            const double* coordinates,
-                            const cell& c) const = 0;
-\end{code}
-
-This function evaluates basis function \texttt{i} at the given
-\texttt{coordinates} within the given cell \texttt{c}, and stores the
-values in the array \texttt{values}. The size of the output array
-should be equal to size of the value space (see
-\texttt{value\_dimension} above).
-
-The point defined by \texttt{coordinates} should be located inside the
-cell~\texttt{c}. If the point is located outside the cell, then the
-result is undefined.~\footnote{In particular, the basis functions
-  generated by FIAT/FFC are undefined along the line $y = 1$ on the
-  UFC reference element since the collapsed cube mapping used by FIAT
-  is singular along that line.}
-
-\subsection{The function \texttt{evaluate\_basis\_all}}
-Introduced in UFC version 1.1.
-
-\begin{code}
-virtual void evaluate_basis_all(double* values,
-                                const double* coordinates,
-                                const cell& c) const = 0;
-\end{code}
-
-As \texttt{evaluate\_basis}, but evaluates all basis functions at
-once, which can be implemented much more effectively than multiple
-calls to \texttt{evaluate\_basis}. The size of the output array
-should be equal to size of the value space times the number of
-basis functions. The computed values for a single basis function
-are placed contiguously in the array.
-
-Note that just as for \texttt{evaluate\_basis}, the point defined by
-\texttt{coordinates} should be located inside the cell~\texttt{c}. The
-result is otherwise undefined.
-
-\subsection{The function \texttt{evaluate\_basis\_derivatives}}
-
-\begin{code}
-virtual void
-evaluate_basis_derivatives(unsigned int i,
-                           unsigned int n,
-                           double* values,
-                           const double* coordinates,
-                           const cell& c) const = 0;
-\end{code}
-
-This function evaluates all order \texttt{n} derivatives of basis
-function \texttt{i} at the given \texttt{coordinates} within the given
-\texttt{cell}, and stores the values in the array \texttt{values}.
-Derivatives may be obtained up to the polynomial degree of the finite
-element function space with higher degree derivatives evaluating to
-zero.
-
-The number of derivatives is given by $d^n$ where $d$ is the geometric
-dimension of the cell. For $n = 1$, $d = 3$, the order of the
-derivatives is naturally $\partial/\partial x$, $\partial/\partial y$,
-$\partial/\partial z$. For $n = 2$, $d = 3$, the order of the
-derivatives is $\frac{\partial^2}{\partial x\partial x},
-\frac{\partial^2}{\partial x\partial y}, \ldots,
-\frac{\partial^2}{\partial z\partial z}$. Thus, the derivatives are
-stored in a flattened row-major ordering based on the derivative
-spatial dimensions.
-
-For tensor-valued basis functions, the ordering of derivatives is
-row-major based on the value space dimensions followed by the
-derivative spatial dimensions.
-
-Note that just as for \texttt{evaluate\_basis}, the point defined by
-\texttt{coordinates} should be located inside the cell~\texttt{c}. The
-result is otherwise undefined.
-
-\subsection{The function \texttt{evaluate\_basis\_derivatives\_all}}
-Introduced in UFC version 1.1.
-
-\begin{code}
-virtual void
-evaluate_basis_derivatives_all(unsigned int n,
-                               double* values,
-                               const double* coordinates,
-                               const cell& c) const = 0;
-\end{code}
-
-As \texttt{evaluate\_basis\_derivatives}, but evaluated for all
-basis functions at once, which can be implemented much more
-effectively than multiple calls to \texttt{evaluate\_basis\_derivatives}.
-The size of the output array should be equal to the corresponding size
-defined for \texttt{evaluate\_basis\_derivatives} times the number of
-basis functions. The computed values for a single basis function
-are placed contiguously in the array.
-
-Note that just as for \texttt{evaluate\_basis}, the point defined by
-\texttt{coordinates} should be located inside the cell~\texttt{c}. The
-result is otherwise undefined.
-
-\subsection{The function \texttt{evaluate\_dof}}
-
-\begin{code}
-virtual double evaluate_dof(unsigned int i,
-                            const function& f,
-                            const cell& c) const = 0;
-\end{code}
-
-This function evaluates and returns the value of the degree of freedom
-\texttt{i} (which is a linear functional) on the given function
-\texttt{f}.
-
-For example, the degrees of freedom for Lagrange finite elements are
-given by evaluation of \texttt{f} at a set of points. Other examples
-of degrees of freedom include weighted integrals over facets or normal
-components on facets.
-
-\subsection{The function \texttt{evaluate\_dofs}}
-Introduced in UFC version 1.1.
-
-\begin{code}
-virtual void evaluate_dofs(double* values,
-                           const function& f,
-                           const cell& c) const = 0;
-\end{code}
-
-Vectorized version of \texttt{evaluate\_dof} for efficiency,
-returning the values of all degrees of freedom in the array \texttt{values}.
-
-\subsection{The function \texttt{interpolate\_vertex\_values}}
-
-\begin{code}
-virtual void
-interpolate_vertex_values(double* vertex_values,
-                          const double* dof_values,
-                          const cell& c) const = 0;
-\end{code}
-
-This function takes as input the array \texttt{dof\_values} containing
-the expansion coefficients for some function in the nodal basis and
-computes the values of that function at the vertices of the given
-cell, storing those values in the array \texttt{vertex\_values}. If
-the function is tensor-valued, the values are stored in the array
-\texttt{vertex\_values} row-major on the list of vertices followed by the
-row-major ordering of the tensor values as described above.
-
-\subsection{The function \texttt{map\_from\_reference\_cell}}
-Introduced in UFC version 2.0.
-
-\begin{code}
-virtual void map_from_reference_cell(double* x,
-                                     const double* xhat,
-                                     const cell& c) = 0;
-\end{code}
-
-This function maps a given coordinate \texttt{xhat} on the reference
-cell to a coordinate \texttt{x} on a given \texttt{ufc::cell}.
-
-\subsection{The function \texttt{map\_to\_reference\_cell}}
-Introduced in UFC version 2.0.
-
-\begin{code}
-virtual void map_to_reference_cell(double* xhat,
-                                   const double* x,
-                                   const cell& c) = 0;
-\end{code}
-
-This function maps a given coordinate \texttt{x} on a given
-\texttt{ufc::cell} to a coordinate \texttt{xhat} on the reference
-cell.
-
-\subsection{The function \texttt{num\_sub\_elements}}
-
-\begin{code}
-virtual unsigned int num_sub_elements() const = 0;
-\end{code}
-
-This function returns the number of subelements for a nested (mixed)
-element. For simple elements (non-nested), this function should return
-one.
-
-A nested element is an element that is defined from a set of elements
-by taking the direct sum (tensor product) of the polynomial spaces of
-those elements. For example, the basis functions $\psi_1, \psi_2,
-\ldots, \psi_m$ of a vector-valued Lagrange element may be constructed
-from a scalar Lagrange element by repeating the basis functions
-$\phi_1, \phi_2, \ldots, \phi_n$ of the scalar element and padding
-with zeros: $\psi_1 = (\phi_1, 0), \psi_2 = (\phi_2, 0), \ldots,
-\psi_n = (\phi_n, 0), \psi_{n+1} = (0, \phi_1), \psi_{n+2} = (0,
-\phi_2), \ldots$.
-
-Finite elements may be nested at arbitrary depth. For example, a mixed
-Taylor--Hood element may be created by combining a vector-valued
-quadratic Lagrange finite element with a scalar linear Lagrange finite
-element, and the vector-valued element may in turn be created by
-combining a set of scalar quadratic Lagrange elements.
-
-\subsection{The function \texttt{create\_sub\_element}}
-
-\begin{code}
-virtual finite_element*
-create_sub_element(unsigned int i) const = 0;
-\end{code}
-
-This factory function constructs a \texttt{ufc::finite\_element}
-object for subelement \texttt{i}. The argument \texttt{i} must be an
-integer between zero and the number of subelements
-(\texttt{num\_sub\_elements}) minus one. If the element is simple
-(non-nested), this function returns a zero pointer. The caller is
-responsible for deleting the returned object.
-
-Note that in earlier versions of UFC, this function returned a copy of
-the element itself in the case of a simple element. To create a copy
-of the element, use the \texttt{create} function instead.
-
-\subsection{The function \texttt{create}}
-Introduced in UFC version 2.0.
-
-\begin{code}
-virtual finite_element* create() const = 0;
-\end{code}
-
-This factory function creates a new instance of the generated
-\texttt{ufc::finite\_\-element} subclass.
-
-\section{The class \texttt{ufc::dofmap}}
-\index{\texttt{ufc::dofmap}}
-
-This class represents the local-to-global mapping of degrees of
-freedom (dofs), or rather one particular instance of such a mapping
-(there are many possible local-to-global mappings) as defined in
-Equation \eqref{eq:iota_K}. The most central function of this class is
-\texttt{tabulate\_dofs}, which tabulates the local-to-global mapping
-from the degree of freedom indices on a local cell to a global vector of
-degree of freedom indices.
-
-\subsection{The function \texttt{signature}}
-
-\begin{code}
-virtual const char* signature() const = 0;
-\end{code}
-
-This function returns a signature string that uniquely identifies the
-dofmap. This can be used to compare whether or not two given
-\texttt{ufc::dofmap} objects are identical. (This may be used to
-optimize the assembly of forms by caching previously computed
-dofmaps.)
-
-\subsection{The function \texttt{needs\_mesh\_entities}}
-
-\begin{code}
-virtual bool needs_mesh_entities(unsigned int d) const = 0;
-\end{code}
-
-This function returns true if the \texttt{ufc::dofmap} requires mesh
-entities of topological dimension \texttt{d} to be available in
-\texttt{ufc::cell} arguments. Thus, if this function returns false for
-a given value of \texttt{d}, then the array
-\texttt{entity\_indices[d]} of the \texttt{ufc::cell} data structure
-will not be used during calls to \texttt{ufc::dofmap} functions. In
-those cases, the array \texttt{entity\_indices[d]} may be set to zero.
-
-This may be used to check which entities must be generated to tabulate
-the local-to-global mapping. For example, linear Lagrange elements
-will only need to know the vertices of each cell in the mesh, while
-quadratic Lagrange elements will also need to know the edges of each
-cell in the mesh.
-
-\subsection{The function \texttt{init\_mesh}}
-
-\begin{code}
-virtual bool init_mesh(const mesh& mesh) = 0;
-\end{code}
-
-This function initializes the dofmap for a given mesh. If it returns
-true, calls to \texttt{init\_cell} and \texttt{init\_cell\_finalize}
-are required to complete the initialization. The function
-\texttt{global\_dimension} be may only be called when the
-initialization is complete.
-
-\subsection{The function \texttt{init\_cell}}
-
-\begin{code}
-virtual void init_cell(const mesh& m,
-                       const cell& c) = 0;
-\end{code}
-
-For \texttt{ufc::dofmap} objects where \texttt{init\_mesh} returns true,
-this function must be called for each cell in the mesh to initialize
-the dofmap.
-
-\subsection{The function \texttt{init\_cell\_finalize}}
-
-\begin{code}
-virtual void init_cell_finalize() = 0;
-\end{code}
-
-For \texttt{ufc::dofmap} objects where \texttt{init\_mesh} returns
-true, this function must be called after \texttt{init\_cell} is called
-for each cell in the mesh to complete initialization of the dofmap.
-
-\subsection{The function \texttt{topological\_dimension}}
-Introduced in UFC version 1.1.
-
-\begin{code}
-virtual unsigned int topological_dimension() const = 0;
-\end{code}
-
-This function returns the topological dimension of the associated cell
-shape.
-
-\subsection{The function \texttt{geometric\_dimension}}
-Introduced in UFC version 2.0.
-
-\begin{code}
-virtual unsigned int geometric_dimension() const = 0;
-\end{code}
-
-This function returns the geometric dimension of the associated cell
-shape.
-
-\subsection{The function \texttt{global\_dimension}}
-
-\begin{code}
-virtual unsigned int global_dimension() const = 0;
-\end{code}
-
-This function returns the dimension of the global finite element space
-on the mesh that the \texttt{ufc::dofmap} has been initialized
-for. The result of calling this function before the initialization is
-complete is undefined.
-
-\subsection{The function \texttt{local\_dimension}}
-Changed in version 1.2.
-
-\begin{code}
-virtual unsigned int local_dimension(const cell& c) const = 0;
-\end{code}
-
-This function returns the dimension of the local finite element space
-on a given cell.
-
-\subsection{The function \texttt{max\_local\_dimension}}
-Introduced in UFC version 1.2.
-
-\begin{code}
-virtual unsigned int max_local_dimension() const = 0;
-\end{code}
-
-This function returns the maximum dimension of the local finite element space
-on a single cell.
-
-\subsection{The function \texttt{num\_facet\_dofs}}
-
-\begin{code}
-virtual unsigned int num_facet_dofs() const = 0;
-\end{code}
-
-This function returns the number of dofs associated with a single facet
-of a cell, including all dofs associated with mesh entities of lower
-dimension incident with the facet. For example on a tetrahedron this
-will include dofs associated with edges and vertices of the triangle face.
-This is also the number of dofs that should be set if a Dirichlet
-boundary condition is applied to a single facet.
-
-\subsection{The function \texttt{num\_entity\_dofs}}
-Introduced in UFC version 1.1.
-
-\begin{code}
-virtual unsigned int num_entity_dofs(unsigned int d) const = 0;
-\end{code}
-
-This function returns the number of dofs associated with a single
-mesh entity of dimension \texttt{d} in a cell, not including
-dofs associated with incident entities of lower dimension
-(unlike \texttt{num\_facet\_dofs()}).
-It is assumed that all cells of the mesh have the same number
-of degrees of freedom on each mesh entity of the same dimension.
-
-\subsection{The function \texttt{tabulate\_dofs}}
-
-\begin{code}
-virtual void tabulate_dofs(unsigned int* dofs,
-                           const mesh& m,
-                           const cell& c) const = 0;
-\end{code}
-
-This function tabulates the global dof indices corresponding to each
-dof on the given cell. The size of the output array \texttt{dofs}
-should be equal to the value returned by \texttt{local\_dimension()}.
-
-\subsection{The function \texttt{tabulate\_facet\_dofs}}
-
-\begin{code}
-virtual void
-tabulate_facet_dofs(unsigned int* dofs,
-                    unsigned int facet) const = 0;
-\end{code}
-
-This function tabulates the local dof indices associated with a given local
-facet, including all dofs associated with mesh entities of lower dimension
-incident with the facet. The size of the output array \texttt{dofs} should
-equal the value returned by \texttt{num\_facet\_dofs}.
-
-\subsection{The function \texttt{tabulate\_entity\_dofs}}
-Introduced in UFC version 1.1.
-
-\begin{code}
-virtual void tabulate_entity_dofs(unsigned int* dofs,
-                                  unsigned int d,
-                                  unsigned int i) const = 0;
-\end{code}
-
-This function tabulates the local dof indices associated with a given local
-mesh entity \texttt{i} of dimension \texttt{d}, i.e. mesh entity (d, i),
-not including dofs associated with incident entities of lower dimension
-(unlike \texttt{tabulate\_facet\_dofs}).
-The size of the output array \texttt{dofs} should equal
-the value returned by the function \texttt{num\_entity\_dofs(d)}.
-
-As an example, calling \texttt{tabulate\_entity\_dofs} for a face (d = 2)
-should yield only the dofs associated with the face that are not associated
-with vertices and edges. Thus \texttt{tabulate\_entity\_dofs} can
-be used to build connectivity information.
-
-
-\subsection{The function \texttt{tabulate\_coordinates}}
-
-\begin{code}
-virtual void tabulate_coordinates(double** coordinates,
-                                  const cell& c) const = 0;
-\end{code}
-
-This function tabulates the coordinates for each dof on the given
-cell. For Lagrange elements, this function will tabulate a set of
-points on the given cell such that the dofs of the finite element are
-given by evaluation at those points.
-
-For elements that do not have a direct relationship between
-coordinates and dofs, an attempt should be made at a sensible
-implementation of this function. For example, if a dof is defined as
-the integral over a facet, the midpoint of the facet can be used. If
-no other choice makes sense, the midpoint of the cell can be used as a
-last resort. This function must thus be used with care if
-non-Lagrangian elements are used.
-
-The size of the output array \texttt{coordinates} should be equal to
-the value returned by \texttt{local\_dimension()} and the size of each
-subarray \texttt{coordi\-nates[0]}, \texttt{coordinates[1]} etc should
-be equal to the geometric dimension of the mesh, which can be obtained
-with the function \texttt{dofmap::geometric\_\-dimension()}.
-
-\subsection{The function \texttt{num\_sub\_dofmaps}}
-
-\begin{code}
-virtual unsigned int num_sub_dofmaps() const = 0;
-\end{code}
-
-This function returns the number of sub-dofmaps for a nested (mixed)
-element. For a discussion on the subelement concept, see the
-documentation of the function
-\texttt{ufc::finite\_element::num\_sub\_elements}. For simple
-elements (non-nested), this function should return one.
-
-\subsection{The function \texttt{create\_sub\_dofmap}}
-
-\begin{code}
-virtual dofmap* create_sub_dofmap(unsigned int i) const = 0;
-\end{code}
-
-This factory function constructs a \texttt{ufc::dofmap} object for
-subelement \texttt{i}. The argument \texttt{i} must be a number
-between zero and the number of sub-dofmaps
-(\texttt{num\_sub\_dofmaps}) minus one. If the dofmap is simple
-(non-nested), this function returns a zero pointer. The caller is
-responsible for deleting the returned object.
-
-Note that in earlier versions of UFC, this function returned a copy of
-the dofmap itself in the case of a simple element. To create a copy
-of the element, use the \texttt{create} function instead.
-
-\subsection{The function \texttt{create}}
-Introduced in UFC version 2.0.
-
-\begin{code}
-virtual dofmap* create() const = 0;
-\end{code}
-
-This factory function creates a new instance of the generated
-\texttt{ufc::dofmap} subclass.
-
-\section{The integral classes}
-
-As described in Section~\ref{sec:assembly}, and in particular Equation
-\eqref{eq:form_integrals}, the global sparse tensor
-(the ``stiffness matrix'') representing a given form (differential
-operator) may be assembled by summing the contributions from the local
-cells, exterior facets and interior facets of the mesh.
-
-These contributions are represented in the UFC interface by the
-classes \texttt{cell\_integral}, \texttt{exterior\_facet\_integral}
-and \texttt{interior\_facet\_integral}. Each of these three integral
-classes has a single function \texttt{tabulate\_tensor} which may be
-called to compute the corresponding local contribution (cell tensor,
-exterior facet tensor or interior facet tensor).
-
-\section{The class \texttt{ufc::cell\_integral}}
-\index{\texttt{ufc::cell\_integral}}
-
-The class \texttt{ufc::cell\_integral} represents the integral of a
-form over a local cell in a finite element mesh. It has a single
-function \texttt{tabulate\_tensor} which may be called to tabulate the
-values of the cell tensor for a given cell.
-
-\subsection{The function \texttt{tabulate\_tensor}}
-\index{\texttt{tabulate\_tensor}}
-
-\begin{code}
-virtual void tabulate_tensor(double* A,
-                             const double * const * w,
-                             const cell& c) const = 0;
-\end{code}
-
-This function tabulates the values of the cell tensor for a form into
-the given array \texttt{A}. The size of this array should be equal to
-the product of the local space dimensions for the set of finite
-element function spaces corresponding to the arguments of the
-form. For example, when computing the matrix for a bilinear form
-defined on piecewise linear scalar elements on triangles, the space dimension
-of the local finite element is three and so the size of the array
-\texttt{A} should be $3 \times 3 = 9$.
-
-The array \texttt{w} should contain the expansion coefficients for all
-\emph{coefficients} of the form in the finite element nodal basis for
-each corresponding function space. Thus, the size of the array
-\texttt{w} should be equal to the number of coefficients~$n$, and the
-size of each each array \texttt{w[0]}, \texttt{w[1]} etc should be
-equal to the space dimension of the corresponding local finite element
-space.
-
-\subsection{The function \texttt{tabulate\_tensor} (quadrature version)}
-\index{\texttt{tabulate\_tensor} (quadrature version)}
-Introduced in UFC version version 2.0
-
-\begin{code}
-virtual void
-tabulate_tensor(double* A,
-                const double * const * w,
-                const cell& c,
-                unsigned int num_quadrature_points,
-                const double * const * quadrature_points,
-                const double* quadrature_weights) const = 0;
-\end{code}
-
-This function is identical to \texttt{tabulate\_tensor} described
-above but computes the integral over the cell using the given set of
-quadrature points and weights. Note that the quadrature points should
-be specified on the reference cell. By a careful choice of quadrature
-points, this function can be used to integrate over subsets of cells.
-
-\section{The class \texttt{ufc::exterior\_facet\_integral}}
-\index{\texttt{ufc::exterior\_facet\_integral}}
-
-The class \texttt{ufc::exterior\_facet\_integral} represents the
-integral of a form over a local exterior facet (boundary facet) in a
-finite element mesh. It has a single function
-\texttt{tabulate\_tensor} which may be called to tabulate the values
-of the exterior facet tensor for a given facet.
-
-\subsection{The function \texttt{tabulate\_tensor}}
-\index{\texttt{tabulate\_tensor}}
-
-\begin{code}
-virtual void tabulate_tensor(double* A,
-                             const double * const * w,
-                             const cell& c,
-                             unsigned int facet) const = 0;
-\end{code}
-
-The arrays \texttt{A} and \texttt{w} have the same function and should
-have the same sizes as described in the documentation for
-\texttt{cell\_integral::tabulate\_tensor}. Thus, the values of the
-exterior facet integral will be tabulated into the array \texttt{A}
-and the nodal basis expansions of all coefficients should be provided
-in the array \texttt{w}.
-
-The additional argument \texttt{facet} should specify the local number
-of the facet with respect to its (single) incident cell. Thus, when
-the facet is an edge of a triangle, the argument \texttt{facet} should
-be an integer between zero and two (0, 1, 2) and when the facet is a
-facet of a tetrahedron, the argument \texttt{facet} should be an
-integer between zero and three (0, 1, 2, 3).
-
-\subsection{The function \texttt{tabulate\_tensor} (quadrature version)}
-\index{\texttt{tabulate\_tensor} (quadrature version)}
-Introduced in version version 2.0
-
-\begin{code}
-virtual void
-tabulate_tensor(double* A,
-                const double * const * w,
-                const cell& c,
-                unsigned int num_quadrature_points,
-                const double * const * quadrature_points,
-                const double* quadrature_weights) const = 0;
-\end{code}
-
-This function is identical to \texttt{tabulate\_tensor} described
-above but computes the integral over the cell using the given set of
-quadrature points and weights. Note that the quadrature points should
-be specified on the reference cell. By a careful choice of quadrature
-points, this function can be used to integrate over subsets of facets.
-
-\section{The class \texttt{ufc::interior\_facet\_integral}}
-\index{\texttt{ufc::interior\_facet\_integral}}
-
-The class \texttt{ufc::interior\_facet\_integral} represents the
-integral of a form over a local interior facet in a finite element
-mesh. It has a single function \texttt{tabulate\_tensor} which may be
-called to tabulate the values of the interior facet tensor for a given
-facet.
-
-\subsection{The function \texttt{tabulate\_tensor}}
-
-\begin{code}
-virtual void tabulate_tensor(double* A,
-                             const double * const * w,
-                             const cell& c0,
-                             const cell& c1,
-                             unsigned int facet0,
-                             unsigned int facet1) const = 0;
-\end{code}
-
-Just as for the \texttt{cell\_integral} and
-\texttt{exterior\_facet\_integral} classes, the
-\texttt{tabulate\_tensor} function for the class
-\texttt{interior\_facet\_integral} tabulates the values of the local
-(interior facet) tensor into the array \texttt{A}, given the nodal
-basis expansions of the form coefficients in the array \texttt{w}.
-However, the interior facet tensor contains contributions from the two
-incident cells of an interior facet and thus the dimensions of these
-arrays are different.
-
-On each interior facet, the two incident (neighboring) cells form a
-``macro cell'' consisting of the total set of local basis functions on
-the two cells. The set of basis functions on the macro element is
-obtained by extending the basis functions on each of the two cells by
-zero to the macro cell. Thus, the space dimension of the finite
-element function space on the macro element is twice the size of the
-finite element function space on a single cell. The ordering of basis
-functions and degrees of freedom on the macro cell is obtained by
-first enumerating the basis functions and degrees of freedom on one of
-the two cells and then the basis functions and degrees of freedom on
-the second cell.
-
-Thus the size of the array \texttt{A} should be equal to the product
-of twice the local space dimensions for the set of finite element
-function spaces corresponding to the arguments of the form. For
-example, when computing the matrix for a bilinear form defined on
-piecewise linear elements on triangles, the space dimension of the
-local finite element is three and so the size of the array \texttt{A}
-should be $6 \times 6 = 36$.
-
-Similarly, the array \texttt{w} should contain the expansion
-coefficients for all \emph{coefficients} of the form in the finite
-element nodal basis for each corresponding function space on the macro
-cell. Thus, the size of the array \texttt{w} should be equal to the
-number of coefficients~$n$ and the size of each each array
-\texttt{w[0]}, \texttt{w[1]} etc should be equal to twice the space
-dimension of the corresponding local finite element space.
-
-The additional arguments \texttt{facet0} and \texttt{facet1} should
-specify the local number of the facet with respect to its two incident
-cells. Thus, when the facet is an edge of a triangle, each of these
-arguments may be an integer between zero and two (0, 1, 2) and when
-the facet is a face of a tetrahedron, each of these arguments may be
-an integer between zero and three (0, 1, 2, 3).
-
-\subsection{The function \texttt{tabulate\_tensor} (quadrature version)}
-\index{\texttt{tabulate\_tensor} (quadrature version)}
-Introduced in version version 2.0
-
-\begin{code}
-virtual void
-tabulate_tensor(double* A,
-                const double * const * w,
-                const cell& c,
-                unsigned int num_quadrature_points,
-                const double * const * quadrature_points,
-                const double* quadrature_weights) const = 0;
-\end{code}
-
-This function is identical to \texttt{tabulate\_tensor} described
-above but computes the integral over the cell using the given set of
-quadrature points and weights. Note that the quadrature points should
-be specified on the reference cell. By a careful choice of quadrature
-points, this function can be used to integrate over subsets of facets.
-
-\section{The class \texttt{ufc::form}}
-\index{\texttt{ufc::form}}
-
-The \texttt{ufc::form} class is the central part of the UFC interface
-and it represents a form
-\begin{equation}
-  a = a(v_1, \ldots, v_r; w_1, \ldots, w_n),
-\end{equation}
-defined on the product space $V_h^1 \times V_h^2 \times \cdots \times
-V_h^r \times W_h^1 \times W_h^2 \times \cdots \times W_h^n$ of two
-sets $\{V_h^j\}_{j=1}^r, \{W_h^j\}_{j=1}^n$ of finite element function
-spaces on a triangulation $\mathcal{T}$ of a domain $\Omega \subset
-\R^d$.
-
-A \texttt{ufc::form} provides functions for accessing the rank~$r$ and
-number of coefficients~$n$ for a form, and factory functions for
-creating UFC objects for the corresponding cell integrals, exterior
-facet integrals, interior facet integrals, and all associated finite
-elements and dofmaps (local-to-global mappings).
-
-\subsection{The function \texttt{signature}}
-
-\begin{code}
-virtual const char* signature() const = 0;
-\end{code}
-
-This function returns a signature string that uniquely identifies the
-form. This can be used to compare whether or not two given
-\texttt{ufc::form} objects are identical.
-
-\subsection{The function \texttt{rank}}
-
-\begin{code}
-virtual unsigned int rank() const = 0;
-\end{code}
-
-This function returns the rank~$r$ of the global tensor generated by
-the form (the arity of the form).
-
-\subsection{The function \texttt{num\_coefficients}}
-
-\begin{code}
-virtual unsigned int num_coefficients() const = 0;
-\end{code}
-
-This function returns the number of coefficients~$n$ for the form.
-Note that all integral terms of a form must have the same
-coefficients, even if not all coefficients are present in each term of
-the form.
-
-\subsection{The function \texttt{num\_cell\_domains}}
-
-\begin{code}
-virtual unsigned int num_cell_domains() const = 0;
-\end{code}
-
-This function returns the number of different cell domains for the
-form. A form may have an arbitrary number of integrals over disjoint
-subdomains of the mesh.
-
-\subsection{The function \texttt{num\_exterior\_facet\_domains}}
-
-\begin{code}
-virtual unsigned int num_exterior_facet_domains() const = 0;
-\end{code}
-
-This function returns the number of different exterior facet domains
-for the form. A form may have an arbitrary number of integrals over
-disjoint subdomains of the mesh boundary.
-
-\subsection{The function \texttt{num\_interior\_facet\_domains}}
-
-\begin{code}
-virtual unsigned int num_interior_facet_domains() const = 0;
-\end{code}
-
-This function returns the number of different interior facet domains
-for the form. A form may have an arbitrary number of integrals over
-disjoint subsets of the interior facets of the mesh.
-
-\subsection{The function \texttt{create\_finite\_element}}
-
-\begin{code}
-virtual finite_element*
-create_finite_element(unsigned int i) const = 0;
-\end{code}
-
-This factory function constructs a \texttt{ufc::finite\_element}
-object for form argument \texttt{i}. A form with rank~$r$ and number
-of coefficients~$n$ has $r + n$ arguments, so this function returns
-the finite element object for tensor axis $i$ if $i < r$, or the
-finite element for coefficient $i - r$ if $i \geq r$.  The caller is
-responsible for deleting the returned object.
-
-\subsection{The function \texttt{create\_dofmap}}
-
-\begin{code}
-virtual dofmap*
-create_dofmap(unsigned int i) const = 0;
-\end{code}
-
-This factory function constructs a \texttt{ufc::dofmap} object for
-form argument \texttt{i}. A form with rank~$r$ and number of
-coefficients~$n$ has $r + n$ arguments, so this function returns the
-dofmap object for tensor axis $i$ if $i < r$, or the dofmap for
-coefficient $i - r$ if $i \geq r$.  The caller is responsible for
-deleting the returned object.
-
-\subsection{The function \texttt{create\_cell\_integral}}
-
-\begin{code}
-virtual cell_integral*
-create_cell_integral(unsigned int i) const = 0;
-\end{code}
-
-This factory function constructs a \texttt{cell\_integral} object for
-cell domain \texttt{i}. The caller is responsible for deleting the
-returned object.
-
-If the integral evaluates to zero, this function may return a null
-pointer.
-
-\subsection{The function \texttt{create\_exterior\_facet\_integral}}
-
-\begin{code}
-virtual exterior_facet_integral*
-create_exterior_facet_integral(unsigned int i) const = 0;
-\end{code}
-
-This factory function constructs an \texttt{exterior\_facet\_integral}
-object for exterior facet domain \texttt{i}. The caller is responsible
-for deleting the returned object.
-
-If the integral evaluates to zero, this function may return a null
-pointer.
-
-\subsection{The function \texttt{create\_interior\_facet\_integral}}
-
-\begin{code}
-virtual interior_facet_integral*
-create_interior_facet_integral(unsigned int i) const = 0;
-\end{code}
-
-This factory function constructs an \texttt{interior\_facet\_integral}
-object for interior facet domain \texttt{i}. The caller is responsible
-for deleting the returned object.
-
-If the integral evaluates to zero, this function may return a null
-pointer.
diff --git a/ufc-merge-into-ffc/doc/manual/chapters/interface_cpp.tex b/ufc-merge-into-ffc/doc/manual/chapters/interface_cpp.tex
deleted file mode 100644
index d55cdeb..0000000
--- a/ufc-merge-into-ffc/doc/manual/chapters/interface_cpp.tex
+++ /dev/null
@@ -1,8 +0,0 @@
-\chapter{C++ Interface}
-
-Below follows a verbatim copy of the complete UFC interface which is
-specified in the header file~\texttt{ufc.h}.
-
-\scriptsize
-\VerbatimInput[frame=single,rulecolor=\color{blue}]{../../src/ufc/ufc.h}
-\normalsize
diff --git a/ufc-merge-into-ffc/doc/manual/chapters/introduction.tex b/ufc-merge-into-ffc/doc/manual/chapters/introduction.tex
deleted file mode 100644
index 282824c..0000000
--- a/ufc-merge-into-ffc/doc/manual/chapters/introduction.tex
+++ /dev/null
@@ -1,80 +0,0 @@
-\chapter{Introduction}
-\label{sec:introduction}
-
-Large parts of a finite element program are similar from problem to
-problem and can therefore be coded as a general, reusable library.
-Mesh data structures, linear algebra and finite element assembly are
-examples of operations that are naturally coded in a
-problem-independent way and made available in reusable
-libraries~\cite{www:fenics,www:petsc,www:sundance,www:deal.II,www:trilinos,www:diffpack}.
-However, some parts of a finite element program are difficult to code
-in a problem-independent way. In particular, this includes the
-evaluation of the \emph{element tensor} (the `element stiffness
-matrix'), that is, the evaluation of the local contribution from a
-finite element to a global sparse tensor (the ``stiffness matrix'')
-representing a discretized differential operator. These parts must
-thus be implemented by the application programmer for each specific
-combination of differential equation and discretization (finite
-element spaces).
-
-\index{form compilers} \index{FFC} \index{SyFi} However,
-domain-specific compilers such as
-FFC~\cite{www:ffc,logg:article:07,logg:article:09,logg:article:10,logg:article:11}
-and SyFi~\cite{www:syfi} make it possible to automatically generate
-the code for the evaluation of the element tensor. These \emph{form
-compilers} accept as input a high-level description of a finite
-element variational form and generate low-level code code for
-efficient evaluation of the element tensor and associated quantities.
-It thus becomes important to specify the \emph{interface} between form
-compilers and finite element assemblers such that the code generated
-by FFC, SyFi and other form compilers can be used to assemble finite
-element matrices and vectors (and in general tensors).
-
-\section{Unified Form-assembly Code}
-
-UFC (Unified Form-assembly Code) is a unified framework for finite
-element assembly. More precisely, it defines a fixed interface for
-communicating low level routines (functions) for evaluating and
-assembling finite element variational forms. The UFC interface
-consists of a single header file \texttt{ufc.h} that specifies a C++
-interface that must be implemented by code that complies with the UFC
-specification.
-
-Both FFC (since version 0.4.0) and SyFi (since version 0.3.4)
-generate code that complies with the UFC specification. Thus, code
-generated by FFC and SyFi may be used interchangeably by any UFC-based
-finite element assembler, such as DOLFIN~\cite{www:dolfin}.
-
-\section{Aim and scope}
-
-The UFC interface has been designed to make a minimal amount of
-assumptions on the form compilers generating the UFC code and the
-assemblers built on top of the UFC specification. Thus, the UFC
-specification provides a minimal amount of abstractions and data
-structures. Programmers wishing to implement the UFC specification
-will typically want to create system-specific (but simple) wrappers
-for the generated code.
-
-Few assumptions have also been made on the underlying finite element
-methodology. The current specification is limited to affinely mapped
-cells, but does not restrict the mapping of finite element function
-spaces. Thus, UFC code may be generated for elements where basis
-functions are transformed from the reference cell by the affine
-mapping, as well as for elements where the basis functions must be
-transformed by the Piola mapping. UFC code has been successfully
-generated and used in finite element codes for standard continuous
-Galerkin methods (Lagrange finite elements of arbitrary order),
-discontinuous Galerkin methods (including integrals of jumps and
-averages over interior facets) and mixed methods (including
-Brezzi--Douglas--Marini and Raviart--Thomas elements).
-
-\section{Outline}
-
-In the next section, we give an overview of finite element assembly
-and explain how the code generated by form compilers may be used as
-the basic building blocks in the assembly algorithm. We then present
-the UFC interface in detail in Section~\ref{sec:interface}. In
-Section~\ref{sec:referencecells} and Section~\ref{sec:numbering}, we
-define the reference cells and numbering conventions that must be
-followed by UFC-based form compilers and assemblers.
-
diff --git a/ufc-merge-into-ffc/doc/manual/chapters/license.tex b/ufc-merge-into-ffc/doc/manual/chapters/license.tex
deleted file mode 100644
index 30d7c2f..0000000
--- a/ufc-merge-into-ffc/doc/manual/chapters/license.tex
+++ /dev/null
@@ -1,5 +0,0 @@
-\chapter{License}
-\index{license}
-
-The UFC specification, and in particular the header file
-\texttt{ufc.h}, is released into the public domain.
diff --git a/ufc-merge-into-ffc/doc/manual/chapters/numbering.tex b/ufc-merge-into-ffc/doc/manual/chapters/numbering.tex
deleted file mode 100644
index ec31f7a..0000000
--- a/ufc-merge-into-ffc/doc/manual/chapters/numbering.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-\chapter{Numbering of mesh entities}
-\label{sec:numbering}
-
-\input{chapters/numbering_common}
diff --git a/ufc-merge-into-ffc/doc/manual/chapters/numbering_common.tex b/ufc-merge-into-ffc/doc/manual/chapters/numbering_common.tex
deleted file mode 100644
index 928d760..0000000
--- a/ufc-merge-into-ffc/doc/manual/chapters/numbering_common.tex
+++ /dev/null
@@ -1,412 +0,0 @@
-\index{numbering}
-
-The UFC specification dictates a certain numbering of the vertices,
-edges etc. of the cells of a finite element mesh. First, an \emph{ad
-hoc} numbering is picked for the vertices of each cell. Then, the
-remaining entities are ordered based on a simple rule, as described in
-detail below.
-
-\section{Basic concepts}
-\index{mesh entity}
-\index{topological dimension}
-
-The topological entities of a cell (or mesh) are referred to as
-\emph{mesh entities}. A mesh entity can be identified by a pair
-$(d, i)$, where $d$ is the topological dimension of the mesh entity and $i$
-is a unique index of the mesh entity. Mesh entities are numbered
-within each topological dimension from $0$ to $n_d-1$, where $n_d$ is
-the number of mesh entities of topological dimension $d$.
-
-For convenience, mesh entities of topological dimension $0$ are
-referred to as \emph{vertices}, entities of dimension $1$
-as \emph{edges}, entities of dimension $2$ as \emph{faces}, entities of
-\emph{codimension} $1$ as \emph{facets} and entities of codimension
-$0$ as \emph{cells}. These concepts are summarized in
-Table~\ref{tab:entities}.
-
-Thus, the vertices of a tetrahedron are identified as
-$v_0 = (0, 0)$, $v_1 = (0, 1)$ and $v_2 = (0, 2)$,
-the edges are
-$e_0 = (1, 0)$, $e_1 = (1, 1)$, $e_2 = (1, 2)$,
-$e_3 = (1, 3)$, $e_4 = (1, 4)$ and $e_5 = (1, 5)$,
-the faces (facets) are
-$f_0 = (2, 0)$, $f_1 = (2, 1)$, $f_2 = (2, 2)$ and $f_3 = (2, 3)$,
-and the cell itself is
-$c_0 = (3, 0)$.
-
-\begin{table}
-\linespread{1.2}\selectfont
-  \begin{center}
-    \begin{tabular}{|l|c|c|}
-      \hline
-      Entity & Dimension & Codimension \\
-      \hline
-      Vertex & $0$       & -- \\
-      Edge   & $1$       & -- \\
-      Face   & $2$       & -- \\
-      & & \\
-      Facet  & --      &  $1$ \\
-      Cell   & --      &  $0$ \\
-      \hline
-    \end{tabular}
-    \caption{Named mesh entities.}
-    \label{tab:entities}
-  \end{center}
-\end{table}
-
-\section{Numbering of vertices}
-\index{vertex numbering}
-
-For simplicial cells (intervals, triangles and tetrahedra) of a finite
-element mesh, the vertices are numbered locally based on the
-corresponding global vertex numbers. In particular, a tuple of
-increasing local vertex numbers corresponds to a tuple of increasing
-global vertex numbers.  This is illustrated in
-Figure~\ref{fig:numbering_example_triangles} for a mesh consisting of
-two triangles.
- 
-\begin{figure}[htbp]
-  \begin{center}
-    \psfrag{v0}{$v_0$}
-    \psfrag{v1}{$v_1$}
-    \psfrag{v2}{$v_2$}
-    \psfrag{0}{$0$}
-    \psfrag{1}{$1$}
-    \psfrag{2}{$2$}
-    \psfrag{3}{$3$}
-    \includegraphics[width=8cm]{eps/numbering_example_triangles.eps}
-    \caption{The vertices of a simplicial mesh are numbered locally
-      based on the corresponding global vertex numbers.}
-    \label{fig:numbering_example_triangles}
-  \end{center}
-\end{figure}
-
-For non-simplicial cells (quadrilaterals and hexahedra), the numbering
-is arbitrary, as long as each cell is isomorphic to the corresponding
-reference cell by matching each vertex with the corresponding vertex
-in the reference cell. This is illustrated in
-Figure~\ref{fig:numbering_example_quadrilaterals} for a mesh
-consisting of two quadrilaterals.
-
-\begin{figure}[htbp]
-  \begin{center}
-    \psfrag{v0}{$v_0$}
-    \psfrag{v1}{$v_1$}
-    \psfrag{v2}{$v_2$}
-    \psfrag{v3}{$v_3$}
-    \psfrag{0}{$0$}
-    \psfrag{1}{$1$}
-    \psfrag{2}{$2$}
-    \psfrag{3}{$3$}
-    \psfrag{4}{$4$}
-    \psfrag{5}{$5$}
-    \includegraphics[width=8cm]{eps/numbering_example_quadrilaterals.eps}
-    \caption{The local numbering of vertices of a non-simplicial mesh
-      is arbitrary, as long as each cell is isomorphic to the
-      reference cell by matching each vertex to the corresponding
-      vertex of the reference cell.}
-    \label{fig:numbering_example_quadrilaterals}
-  \end{center}
-\end{figure}
-
-\section{Numbering of other mesh entities}
-
-When the vertices have been numbered, the remaining mesh entities are
-numbered within each topological dimension based on a
-\emph{lexicographical ordering} of the corresponding ordered tuples of
-\emph{non-incident vertices}.
-
-As an illustration, consider the numbering of edges (the mesh entities
-of topological dimension one) on the reference triangle in
-Figure~\ref{fig:orderingexample,triangle}. To number the edges of the
-reference triangle, we identify for each edge the corresponding
-non-incident vertices. For each edge, there is only one such vertex
-(the vertex opposite to the edge). We thus identify the three edges in
-the reference triangle with the tuples $(v_0)$, $(v_1)$ and $(v_2)$. The
-first of these is edge $e_0$ between vertices $v_1$ and $v_2$ opposite
-to vertex $v_0$, the second is edge $e_1$ between vertices $v_0$ and
-$v_2$ opposite to vertex $v_1$, and the third is edge $e_2$ between
-vertices $v_0$ and $v_1$ opposite to vertex $v_2$.
-
-Similarly, we identify the six edges of the reference tetrahedron with
-the corresponding non-incident tuples $(v_0, v_1)$, $(v_0, v_2)$,
-$(v_0, v_3)$, $(v_1, v_2)$, $(v_1, v_3)$ and $(v_2, v_3)$. The first of these is
-edge $e_0$ between vertices $v_2$ and $v_3$ opposite to vertices $v_0$
-and $v_1$ as shown in Figure~\ref{fig:orderingexample,tetrahedron}.
-
-\begin{figure}[htbp]
-  \begin{center}
-    \psfrag{v0}{$v_0$}
-    \psfrag{v1}{$v_1$}
-    \psfrag{v2}{$v_2$}
-    \psfrag{e0}{$e_0$}
-    \includegraphics[width=5cm]{eps/ordering_example_triangle.eps}
-    \caption{Mesh entities are ordered based on a lexicographical ordering
-      of the corresponding ordered tuples of non-incident vertices.
-      The first edge $e_0$ is non-incident to vertex $v_0$.}
-    \label{fig:orderingexample,triangle}
-  \end{center}
-\end{figure}
-
-\begin{figure}[htbp]
-  \begin{center}
-    \psfrag{v0}{$v_0$}
-    \psfrag{v1}{$v_1$}
-    \psfrag{v2}{$v_2$}
-    \psfrag{v3}{$v_3$}
-    \psfrag{e0}{$e_0$}
-    \includegraphics[width=5cm]{eps/ordering_example_tetrahedron.eps}
-    \caption{Mesh entities are ordered based on a lexicographical ordering
-      of the corresponding ordered tuples of non-incident vertices.
-      The first edge $e_0$ is non-incident to vertices $v_0$ and $v_1$.}
-    \label{fig:orderingexample,tetrahedron}
-  \end{center}
-\end{figure}
-
-\subsection{Relative ordering}
-
-The relative ordering of mesh entities with respect to other incident
-mesh entities follows by sorting the entities by their (global)
-indices. Thus, the pair of vertices incident to the first edge $e_0$
-of a triangular cell is $(v_1, v_2)$, not $(v_2, v_1)$. Similarly, the
-first face $f_0$ of a tetrahedral cell is incident to vertices $(v_1,
-v_2, v_3)$.
-
-For simplicial cells, the relative ordering in combination with the
-convention of numbering the vertices locally based on global vertex
-indices means that two incident cells will always agree on the
-orientation of incident subsimplices. Thus, two incident triangles
-will agree on the orientation of the common edge and two incident
-tetrahedra will agree on the orientation of the common edge(s) and the
-orientation of the common face (if any). This is illustrated in
-Figure~\ref{fig:orientation_example_triangles} for two incident
-triangles sharing a common edge.
-
-\begin{figure}[htbp]
-  \begin{center}
-    \psfrag{v0}{$v_0$}
-    \psfrag{v1}{$v_1$}
-    \psfrag{v2}{$v_2$}
-    \psfrag{v3}{$v_3$}
-    \includegraphics[width=9cm]{eps/orientation_example_triangles.eps}
-    \caption{Two incident triangles will always agree on the
-      orientation of the common edge.}
-    \label{fig:orientation_example_triangles}
-  \end{center}
-\end{figure}
-
-\subsection{Limitations}
- 
-The UFC specification is only concerned with the ordering of mesh
-entities with respect to entities of larger topological dimension. In
-other words, the UFC specification is only concerned with the ordering
-of incidence relations of the class $d - d'$ where $d > d'$. For
-example, the UFC specification is not concerned with the ordering of
-incidence relations of the class $0 - 1$, that is, the ordering of
-edges incident to vertices.
-
-\newpage
-
-\section{Numbering schemes for reference cells}
-
-The numbering scheme is demonstrated below for cells
-isomorphic to each of the five reference cells.
-
-\subsection{Numbering of mesh entities on intervals}
-
-\begin{minipage}{\textwidth}
-\linespread{1.2}\selectfont
-  \begin{center}
-    \begin{tabular}{|c|c|c|}
-      \hline
-      Entity & Incident vertices & Non-incident vertices \\
-      \hline
-      \hline
-      $v_0 = (0, 0)$ & $(v_0)$ & $(v_1)$ \\
-      \hline
-      $v_1 = (0, 1)$ & $(v_1)$ & $(v_0)$ \\
-      \hline
-      $c_0 = (1, 0)$ & $(v_0, v_1)$ & $\emptyset$ \\
-      \hline
-    \end{tabular}
-  \end{center}
-\end{minipage}
-
-\subsection{Numbering of mesh entities on triangular cells}
-%
-\begin{minipage}{\textwidth}
-\linespread{1.2}\selectfont
-  \begin{center}
-    \begin{tabular}{|c|c|c|}
-      \hline
-      Entity & Incident vertices & Non-incident vertices \\
-      \hline
-      \hline
-      $v_0 = (0, 0)$ & $(v_0)$ & $(v_1, v_2)$ \\
-      \hline
-      $v_1 = (0, 1)$ & $(v_1)$ & $(v_0, v_2)$ \\
-      \hline
-      $v_2 = (0, 2)$ & $(v_2)$ & $(v_0, v_1)$ \\
-      \hline
-      $e_0 = (1, 0)$ & $(v_1, v_2)$ & $(v_0)$ \\
-      \hline
-      $e_1 = (1, 1)$ & $(v_0, v_2)$ & $(v_1)$ \\
-      \hline
-      $e_2 = (1, 2)$ & $(v_0, v_1)$ & $(v_2)$ \\
-      \hline
-      $c_0 = (2, 0)$ & $(v_0, v_1, v_2)$ & $\emptyset$ \\
-      \hline
-    \end{tabular}
-  \end{center}
-\end{minipage}
-
-\subsection{Numbering of mesh entities on quadrilateral cells}
-%
-\begin{minipage}{\textwidth}
-\linespread{1.1}\selectfont
-  \begin{center}
-    \begin{tabular}{|c|c|c|}
-      \hline
-      Entity & Incident vertices & Non-incident vertices \\
-      \hline
-      \hline
-      $v_0 = (0, 0)$ & $(v_0)$ & $(v_1, v_2, v_3)$ \\
-      \hline
-      $v_1 = (0, 1)$ & $(v_1)$ & $(v_0, v_2, v_3)$ \\
-      \hline
-      $v_2 = (0, 2)$ & $(v_2)$ & $(v_0, v_1, v_3)$ \\
-      \hline
-      $v_3 = (0, 3)$ & $(v_3)$ & $(v_0, v_1, v_2)$ \\
-      \hline
-      $e_0 = (1, 0)$ & $(v_2, v_3)$ & $(v_0, v_1)$ \\
-      \hline
-      $e_1 = (1, 1)$ & $(v_1, v_2)$ & $(v_0, v_3)$ \\
-      \hline
-      $e_2 = (1, 2)$ & $(v_0, v_3)$ & $(v_1, v_2)$ \\
-      \hline
-      $e_3 = (1, 3)$ & $(v_0, v_1)$ & $(v_2, v_3)$ \\
-      \hline
-      $c_0 = (2, 0)$ & $(v_0, v_1, v_2, v_3)$ & $\emptyset$ \\
-      \hline
-    \end{tabular}
-  \end{center}
-\end{minipage}
-
-
-\subsection{Numbering of mesh entities on tetrahedral cells}
-%
-\begin{minipage}{\textwidth}
-\linespread{1.1}\selectfont
-  \begin{center}
-    \begin{tabular}{|c|c|c|}
-      \hline
-      Entity & Incident vertices & Non-incident vertices \\
-      \hline
-      \hline
-      $v_0 = (0, 0)$ & $(v_0)$ & $(v_1, v_2, v_3)$ \\
-      \hline
-      $v_1 = (0, 1)$ & $(v_1)$ & $(v_0, v_2, v_3)$ \\
-      \hline
-      $v_2 = (0, 2)$ & $(v_2)$ & $(v_0, v_1, v_3)$ \\
-      \hline
-      $v_3 = (0, 3)$ & $(v_3)$ & $(v_0, v_1, v_2)$ \\
-      \hline
-      $e_0 = (1, 0)$ & $(v_2, v_3)$ & $(v_0, v_1)$ \\
-      \hline
-      $e_1 = (1, 1)$ & $(v_1, v_3)$ & $(v_0, v_2)$ \\
-      \hline
-      $e_2 = (1, 2)$ & $(v_1, v_2)$ & $(v_0, v_3)$ \\
-      \hline
-      $e_3 = (1, 3)$ & $(v_0, v_3)$ & $(v_1, v_2)$ \\
-      \hline
-      $e_4 = (1, 4)$ & $(v_0, v_2)$ & $(v_1, v_3)$ \\
-      \hline
-      $e_5 = (1, 5)$ & $(v_0, v_1)$ & $(v_2, v_3)$ \\
-      \hline
-      $f_0 = (2, 0)$ & $(v_1, v_2, v_3)$ & $(v_0)$ \\
-      \hline
-      $f_1 = (2, 1)$ & $(v_0, v_2, v_3)$ & $(v_1)$ \\
-      \hline
-      $f_2 = (2, 2)$ & $(v_0, v_1, v_3)$ & $(v_2)$ \\
-      \hline
-      $f_3 = (2, 3)$ & $(v_0, v_1, v_2)$ & $(v_3)$ \\
-      \hline
-      $c_0 = (3, 0)$ & $(v_0, v_1, v_2, v_3)$ & $\emptyset$ \\
-      \hline
-    \end{tabular}
-  \end{center}
-\end{minipage}
-
-\vfill
-
-\newpage
-
-\subsection{Numbering of mesh entities on hexahedral cells}
-
-\begin{minipage}{\textwidth}
-\small
-\linespread{1.2}\selectfont
-  \begin{center}
-    \begin{tabular}{|c|c|c|}
-      \hline
-      Entity & Incident vertices & Non-incident vertices \\
-      \hline
-      \hline
-      $v_0 = (0, 0)$ & $(v_0)$ & $(v_1, v_2, v_3, v_4, v_5, v_6, v_7)$ \\
-      \hline
-      $v_1 = (0, 1)$ & $(v_1)$ & $(v_0, v_2, v_3, v_4, v_5, v_6, v_7)$ \\
-      \hline
-      $v_2 = (0, 2)$ & $(v_2)$ & $(v_0, v_1, v_3, v_4, v_5, v_6, v_7)$ \\
-      \hline
-      $v_3 = (0, 3)$ & $(v_3)$ & $(v_0, v_1, v_2, v_4, v_5, v_6, v_7)$ \\
-      \hline
-      $v_4 = (0, 4)$ & $(v_4)$ & $(v_0, v_1, v_2, v_3, v_5, v_6, v_7)$ \\
-      \hline
-      $v_5 = (0, 5)$ & $(v_5)$ & $(v_0, v_1, v_2, v_3, v_4, v_6, v_7)$ \\
-      \hline
-      $v_6 = (0, 6)$ & $(v_6)$ & $(v_0, v_1, v_2, v_3, v_4, v_5, v_7)$ \\
-      \hline
-      $v_7 = (0, 7)$ & $(v_7)$ & $(v_0, v_1, v_2, v_3, v_4, v_5, v_6)$ \\
-      \hline
-      $e_0 = (1, 0)$ & $(v_6, v_7)$ & $(v_0, v_1, v_2, v_3, v_4, v_5)$ \\
-      \hline
-      $e_1 = (1, 1)$ & $(v_5, v_6)$ & $(v_0, v_1, v_2, v_3, v_4, v_7)$ \\
-      \hline
-      $e_2 = (1, 2)$ & $(v_4, v_7)$ & $(v_0, v_1, v_2, v_3, v_5, v_6)$ \\
-      \hline
-      $e_3 = (1, 3)$ & $(v_4, v_5)$ & $(v_0, v_1, v_2, v_3, v_6, v_7)$ \\
-      \hline
-      $e_4 = (1, 4)$ & $(v_3, v_7)$ & $(v_0, v_1, v_2, v_4, v_5, v_6)$ \\
-      \hline
-      $e_5 = (1, 5)$ & $(v_2, v_6)$ & $(v_0, v_1, v_3, v_4, v_5, v_7)$ \\
-      \hline
-      $e_6 = (1, 6)$ & $(v_2, v_3)$ & $(v_0, v_1, v_4, v_5, v_6, v_7)$ \\
-      \hline
-      $e_7 = (1, 7)$ & $(v_1, v_5)$ & $(v_0, v_2, v_3, v_4, v_6, v_7)$ \\
-      \hline
-      $e_8 = (1, 8)$ & $(v_1, v_2)$ & $(v_0, v_3, v_4, v_5, v_6, v_7)$ \\
-      \hline
-      $e_9 = (1, 9)$ & $(v_0, v_4)$ & $(v_1, v_2, v_3, v_5, v_6, v_7)$ \\
-      \hline
-      $e_{10} = (1, 10)$ & $(v_0, v_3)$ & $(v_1, v_2, v_4, v_5, v_6, v_7)$ \\
-      \hline
-      $e_{11} = (1, 11)$ & $(v_0, v_1)$ & $(v_2, v_3, v_4, v_5, v_6, v_7)$ \\
-      \hline
-      $f_0 = (2, 0)$ & $(v_4, v_5, v_6, v_7)$ & $(v_0, v_1, v_2, v_3)$ \\
-      \hline
-      $f_1 = (2, 1)$ & $(v_2, v_3, v_6, v_7)$ & $(v_0, v_1, v_4, v_5)$ \\
-      \hline
-      $f_2 = (2, 2)$ & $(v_1, v_2, v_5, v_6)$ & $(v_0, v_3, v_4, v_7)$ \\
-      \hline
-      $f_3 = (2, 3)$ & $(v_0, v_3, v_4, v_7)$ & $(v_1, v_2, v_5, v_6)$ \\
-      \hline
-      $f_4 = (2, 4)$ & $(v_0, v_1, v_4, v_5)$ & $(v_2, v_3, v_6, v_7)$ \\
-      \hline
-      $f_5 = (2, 5)$ & $(v_0, v_1, v_2, v_3)$ & $(v_4, v_5, v_6, v_7)$ \\
-      \hline
-      $c_0 = (3, 0)$ & $(v_0, v_1, v_2, v_3, v_4, v_5, v_6, v_7)$ & $\emptyset$ \\
-      \hline
-    \end{tabular}
-  \end{center}
-\end{minipage}
-
diff --git a/ufc-merge-into-ffc/doc/manual/chapters/pythonutils.tex b/ufc-merge-into-ffc/doc/manual/chapters/pythonutils.tex
deleted file mode 100644
index 4d82be8..0000000
--- a/ufc-merge-into-ffc/doc/manual/chapters/pythonutils.tex
+++ /dev/null
@@ -1,43 +0,0 @@
-\chapter{Python utilities}
-\index{Python utilities}
-\index{code generation}
-
-The UFC distribution includes a set of Python utilities for generating
-code that conforms to the UFC specification. These utilities consist
-of format string templates for C++ header files (\texttt{.h} files),
-implementation files (\texttt{.cpp}) and combined header and
-implementation files (\texttt{.h} files containing both the
-declaration and definition of the UFC functions).
-
-The following format strings are provided:
-
-\begin{code}
-function_combined_{header, implementation, combined}
-finite_element_{header, implementation, combined}
-dofmap_{header, implementation, combined}
-cell_integral_{header, implementation, combined}
-exterior_facet_integral_{header, implementation, combined}
-interior_facet_integral_{header, implementation, combined}
-form_{header, implementation, combined}
-\end{code}
-
-We demonstrate below how to use the format string
-\texttt{form\_combined} together with a dictionary that specifies the
-code to be inserted into the format string. Typically, a form compiler
-will first generate the code to be inserted into the dictionary and
-then in a later stage write the generated code to file in UFC format
-using the provided format strings.
-
-\begin{code}
-from ufc import form_combined
-
-code = {}
-code["classname"] = "Poisson",
-...
-code["rank"] = "    return 2;",
-code["num_coefficients"] = "    return 0;",
-code["num_cell_domains"] = "    return 1;",
-...
-
-print form_combined % code
-\end{code}
diff --git a/ufc-merge-into-ffc/doc/manual/chapters/referencecells.tex b/ufc-merge-into-ffc/doc/manual/chapters/referencecells.tex
deleted file mode 100644
index a87eab7..0000000
--- a/ufc-merge-into-ffc/doc/manual/chapters/referencecells.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-\chapter{Reference cells}
-\label{sec:referencecells}
-
-\input{chapters/referencecells_common}
diff --git a/ufc-merge-into-ffc/doc/manual/chapters/referencecells_common.tex b/ufc-merge-into-ffc/doc/manual/chapters/referencecells_common.tex
deleted file mode 100644
index d024415..0000000
--- a/ufc-merge-into-ffc/doc/manual/chapters/referencecells_common.tex
+++ /dev/null
@@ -1,250 +0,0 @@
-\index{reference cells}
-
-The following five reference cells are covered by the UFC specification:
-the reference \emph{interval},
-the reference \emph{triangle},
-the reference \emph{quadrilateral},
-the reference \emph{tetrahedron} and
-the reference \emph{hexahedron} (see Table~\ref{tab:ufc_reference_cells}).
-
-\begin{table}
-\linespread{1.2}\selectfont
-  \begin{center}
-    \begin{tabular}{|l|c|c|c|}
-      \hline
-      Reference cell & Dimension & \#Vertices & \#Facets \\
-      \hline
-      \hline
-      The reference interval      & 1 & 2 & 2 \\
-      \hline
-      The reference triangle      & 2 & 3 & 3 \\
-      \hline
-      The reference quadrilateral & 2 & 4 & 4 \\
-      \hline
-      The reference tetrahedron   & 3 & 4 & 4 \\
-      \hline
-      The reference hexahedron    & 3 & 8 & 6 \\
-      \hline
-    \end{tabular}
-    \caption{Reference cells covered by the UFC specification.}
-    \label{tab:ufc_reference_cells}
-  \end{center}
-\end{table}
-
-The UFC specification assumes that each cell in a finite element mesh
-is always isomorphic to one of the reference cells.
-
-\section{The reference interval}
-\index{interval}
-
-The reference interval is shown in Figure~\ref{fig:interval} and is
-defined by its two vertices with coordinates as specified in
-Table~\ref{tab:interval,vertices}.
-
-\begin{figure}
-  \begin{center}
-    \psfrag{0}{$0$}
-    \psfrag{1}{$1$}
-    \includegraphics[width=10cm]{eps/interval.eps}
-    \caption{The reference interval.}
-    \label{fig:interval}
-  \end{center}
-\end{figure}
-
-\begin{table}
-\linespread{1.2}\selectfont
-  \begin{center}
-    \begin{tabular}{|c|c|}
-      \hline
-      Vertex & Coordinate \\
-      \hline
-      \hline
-      $v_0$ & $x = 0$ \\
-      \hline
-      $v_1$ & $x = 1$ \\
-      \hline
-    \end{tabular}
-    \caption{Vertex coordinates of the reference interval.}
-    \label{tab:interval,vertices}
-  \end{center}
-\end{table}
-
-\section{The reference triangle}
-\index{triangle}
-
-The reference triangle is shown in Figure~\ref{fig:triangle} and is
-defined by its three vertices with coordinates as specified in
-Table~\ref{tab:triangle,vertices}.
-
-\begin{figure}
-  \begin{center}
-    \psfrag{v0}{$(0, 0)$}
-    \psfrag{v1}{$(1, 0)$}
-    \psfrag{v2}{$(0, 1)$}
-    \includegraphics[width=8cm]{eps/triangle.eps}
-    \caption{The reference triangle.}
-    \label{fig:triangle}
-  \end{center}
-\end{figure}
-
-\begin{table}
-\linespread{1.2}\selectfont
-  \begin{center}
-    \begin{tabular}{|c|c|}
-      \hline
-      Vertex & Coordinate \\
-      \hline
-      \hline
-      $v_0$ & $x = (0, 0)$ \\
-      \hline
-      $v_1$ & $x = (1, 0)$ \\
-      \hline
-      $v_2$ & $x = (0, 1)$ \\
-      \hline
-    \end{tabular}
-    \caption{Vertex coordinates of the reference triangle.}
-    \label{tab:triangle,vertices}
-  \end{center}
-\end{table}
-
-\section{The reference quadrilateral}
-\index{quadrilateral}
-
-The reference quadrilateral is shown in Figure~\ref{fig:quadrilateral}
-and is defined by its four vertices with coordinates as specified in
-Table~\ref{tab:quadrilateral,vertices}.
-
-\begin{figure}
-  \begin{center}
-    \psfrag{v0}{$(0, 0)$}
-    \psfrag{v1}{$(1, 0)$}
-    \psfrag{v2}{$(1, 1)$}
-    \psfrag{v3}{$(0, 1)$}
-    \includegraphics[width=8cm]{eps/quadrilateral.eps}
-    \caption{The reference quadrilateral.}
-    \label{fig:quadrilateral}
-  \end{center}
-\end{figure}
-
-\begin{table}
-\linespread{1.2}\selectfont
-  \begin{center}
-    \begin{tabular}{|c|c|}
-      \hline
-      Vertex & Coordinate \\
-      \hline
-      \hline
-      $v_0$ & $x = (0, 0)$ \\
-      \hline
-      $v_1$ & $x = (1, 0)$ \\
-      \hline
-      $v_2$ & $x = (1, 1)$ \\
-      \hline
-      $v_3$ & $x = (0, 1)$ \\
-      \hline
-    \end{tabular}
-    \caption{Vertex coordinates of the reference quadrilateral.}
-    \label{tab:quadrilateral,vertices}
-  \end{center}
-\end{table}
-
-\section{The reference tetrahedron}
-\index{tetrahedron}
-
-The reference tetrahedron is shown in Figure~\ref{fig:tetrahedron} and
-is defined by its four vertices with coordinates as specified in
-Table~\ref{tab:tetrahedron,vertices}.
-
-\begin{figure}
-  \begin{center}
-    \psfrag{v0}{$(0, 0, 0)$}
-    \psfrag{v1}{$(1, 0, 0)$}
-    \psfrag{v2}{$(0, 1, 0)$}
-    \psfrag{v3}{$(0, 0, 1)$}
-    \includegraphics[width=6cm]{eps/tetrahedron.eps}
-    \caption{The reference tetrahedron.}
-    \label{fig:tetrahedron}
-  \end{center}
-\end{figure}
-
-\begin{table}
-\linespread{1.2}\selectfont
-  \begin{center}
-    \begin{tabular}{|c|c|}
-      \hline
-      Vertex & Coordinate \\
-      \hline
-      \hline
-      $v_0$ & $x = (0, 0, 0)$ \\
-      \hline
-      $v_1$ & $x = (1, 0, 0)$ \\
-      \hline
-      $v_2$ & $x = (0, 1, 0)$ \\
-      \hline
-      $v_3$ & $x = (0, 0, 1)$ \\
-      \hline
-    \end{tabular}
-    \caption{Vertex coordinates of the reference tetrahedron.}
-    \label{tab:tetrahedron,vertices}
-  \end{center}
-\end{table}
-
-\section{The reference hexahedron}
-\index{hexahedron}
-
-The reference hexahedron is shown in Figure~\ref{fig:hexahedron} and
-is defined by its eight vertices with coordinates as specified in
-Table~\ref{tab:hexahedron,vertices}.
-
-\begin{figure}
-\linespread{1.2}\selectfont
-  \begin{center}
-    \psfrag{v0}{$(0, 0, 0)$}
-    \psfrag{v1}{$(1, 0, 0)$}
-    \psfrag{v2}{$(1, 1, 0)$}
-    \psfrag{v3}{$(0, 1, 0)$}
-    \psfrag{v4}{$(0, 0, 1)$}
-    \psfrag{v5}{$(1, 0, 1)$}
-    \psfrag{v6}{$(1, 1, 1)$}
-    \psfrag{v7}{$(0, 1, 1)$}
-    \includegraphics[width=9cm]{eps/hexahedron.eps}
-    \caption{The reference hexahedron.}
-    \label{fig:hexahedron}
-  \end{center}
-\end{figure}
-
-\begin{table}
-\linespread{1.2}\selectfont
-  \begin{center}
-    \begin{tabular}{|c|c|}
-      \hline
-      Vertex & Coordinate \\
-      \hline
-      \hline
-      $v_0$ & $x = (0, 0, 0)$ \\
-      \hline
-      $v_1$ & $x = (1, 0, 0)$ \\
-      \hline
-      $v_2$ & $x = (1, 1, 0)$ \\
-      \hline
-      $v_3$ & $x = (0, 1, 0)$ \\
-      \hline
-    \end{tabular}
-    \begin{tabular}{|c|c|}
-      \hline
-      Vertex & Coordinate \\
-      \hline
-      \hline
-      $v_4$ & $x = (0, 0, 1)$ \\
-      \hline
-      $v_5$ & $x = (1, 0, 1)$ \\
-      \hline
-      $v_6$ & $x = (1, 1, 1)$ \\
-      \hline
-      $v_7$ & $x = (0, 1, 1)$ \\
-      \hline
-    \end{tabular}
-    \caption{Vertex coordinates of the reference hexahedron.}
-    \label{tab:hexahedron,vertices}
-  \end{center}
-\end{table}
diff --git a/ufc-merge-into-ffc/doc/manual/chapters/versions.tex b/ufc-merge-into-ffc/doc/manual/chapters/versions.tex
deleted file mode 100644
index f6500b8..0000000
--- a/ufc-merge-into-ffc/doc/manual/chapters/versions.tex
+++ /dev/null
@@ -1,92 +0,0 @@
-\chapter{\ufc{} versions}
-\label{app:versions}
-\index{versions}
-
-To keep things simple, the UFC classes do not have any run time
-version control. To upgrade to a new UFC version, all libraries and
-applications must therefore be recompiled with the new header file
-\texttt{ufc.h}.
-
-\section{Version 1.0}
-
-Initial release.
-
-\section{Version 1.1}
-
-The following six functions have been added to the interface:
-\begin{itemize}
-\item \texttt{ufc::finite\_element::evaluate\_dofs}
-\item \texttt{ufc::finite\_element::evaluate\_basis\_all}
-\item \texttt{ufc::finite\_element::evaluate\_basis\_derivatives\_all}
-\item \texttt{ufc::dof\_map::geometric\_dimension}
-\item \texttt{ufc::dof\_map::num\_entity\_dofs}
-\item \texttt{ufc::dof\_map::tabulate\_entity\_dofs}
-\end{itemize}
-
-An implementation of UFC version 1.0 can be recompiled with the header
-file from UFC version 1.1 without changes to the source code. The new
-functions introduced in 1.1 will then simply throw an informative
-exception. (The new functions are virtual but not pure virtual.)
-
-\section{Version 1.2}
-
-The following functions have been modified:
-\begin{itemize}
-\item \texttt{ufc::dof\_map::local\_dimension}
-\end{itemize}
-The following functions have been added to the interface:
-\begin{itemize}
-\item \texttt{ufc::dof\_map::max\_local\_dimension}
-\end{itemize}
-
-\section{Version 1.4}
-
-The behavior of the functions
-\texttt{ufc::form::create\_cell\_integral},
-\texttt{ufc::form::create\_exterior\_facet\_integral} and
-\texttt{ufc::form::create\_interior\_facet\_integral} were changed to
-return a zero pointer when there is no integral on the given domain.
-
-\section{Version 2.0}
-
-The following names have been changed:
-\begin{itemize}
-\item
-  \texttt{ufc::dof\_map} $\rightarrow$ \texttt{ufc::dofmap}
-\item
-  \texttt{ufc::form::num\_cell\_integrals} $\rightarrow$ \texttt{ufc::form::num\_cell\_domains}
-\item
-  \texttt{ufc::form::num\_exterior\_facet\_integrals} $\rightarrow$ \texttt{ufc::form::num\_exterior\_facet\_domains}
-\item
-  \texttt{ufc::form::num\_interior\_facet\_integrals} $\rightarrow$ \texttt{ufc::form::num\_interior\_facet\_domains}
-\end{itemize}
-
-The following new data members have been added:
-\begin{itemize}
-\item
-  \texttt{ufc::cell:index}
-\item
-  \texttt{ufc::cell:local\_facet}
-\item
-  \texttt{ufc::cell:mesh\_identifier}
-\end{itemize}
-
-The following new functions have been added:
-\begin{itemize}
-\item
-  \texttt{ufc::finite\_element::topological\_dimension}
-\item
-  \texttt{ufc::finite\_element::geometric\_dimension}
-\item
-  \texttt{ufc::finite\_element::create}
-\item
-  \texttt{ufc::dofmap::topological\_dimension}
-\item
-  \texttt{ufc::dofmap::create}
-\item
-  \texttt{ufc::cell\_integral::tabulate\_tensor} (quadrature version)
-\item
-  \texttt{ufc::exterior\_facet\_integral::tabulate\_tensor} (quadrature version)
-\item
-  \texttt{ufc::interior\_integral::tabulate\_tensor} (quadrature version)
-\end{itemize}
diff --git a/ufc-merge-into-ffc/doc/manual/code/Poisson.ufl b/ufc-merge-into-ffc/doc/manual/code/Poisson.ufl
deleted file mode 100644
index c006742..0000000
--- a/ufc-merge-into-ffc/doc/manual/code/Poisson.ufl
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright (C) 2004-2007 Anders Logg (logg at simula.no)
-# Licensed under the GNU GPL Version 2
-#
-# The bilinear form a(v, u) and linear form L(v) for
-# Poisson's equation.
-#
-# Compile this form with FFC: ffc Poisson.form
-
-element = FiniteElement("Lagrange", "triangle", 1)
-
-v = TestFunction(element)
-u = TrialFunction(element)
-f = Function(element)
-
-a = dot(grad(v), grad(u))*dx
-#L = v*f*dx
diff --git a/ufc-merge-into-ffc/doc/manual/code/poisson_ffc.h b/ufc-merge-into-ffc/doc/manual/code/poisson_ffc.h
deleted file mode 100644
index 1ec56b0..0000000
--- a/ufc-merge-into-ffc/doc/manual/code/poisson_ffc.h
+++ /dev/null
@@ -1,1418 +0,0 @@
-// This code conforms with the UFC specification version 1.0
-// and was automatically generated by FFC version 0.6.2.
-
-#ifndef __POISSON_H
-#define __POISSON_H
-
-#include <cmath>
-#include <stdexcept>
-#include <ufc.h>
-
-/// This class defines the interface for a finite element.
-
-class PoissonBilinearForm_finite_element_0: public ufc::finite_element
-{
-public:
-
-  /// Constructor
-  PoissonBilinearForm_finite_element_0() : ufc::finite_element()
-  {
-    // Do nothing
-  }
-
-  /// Destructor
-  virtual ~PoissonBilinearForm_finite_element_0()
-  {
-    // Do nothing
-  }
-
-  /// Return a string identifying the finite element
-  virtual const char* signature() const
-  {
-    return "FiniteElement('Lagrange', 'triangle', 1)";
-  }
-
-  /// Return the cell shape
-  virtual ufc::shape cell_shape() const
-  {
-    return ufc::triangle;
-  }
-
-  /// Return the dimension of the finite element function space
-  virtual unsigned int space_dimension() const
-  {
-    return 3;
-  }
-
-  /// Return the rank of the value space
-  virtual unsigned int value_rank() const
-  {
-    return 0;
-  }
-
-  /// Return the dimension of the value space for axis i
-  virtual unsigned int value_dimension(unsigned int i) const
-  {
-    return 1;
-  }
-
-  /// Evaluate basis function i at given point in cell
-  virtual void evaluate_basis(unsigned int i,
-                              double* values,
-                              const double* coordinates,
-                              const ufc::cell& c) const
-  {
-    // Extract vertex coordinates
-    const double * const * element_coordinates = c.coordinates;
-    
-    // Compute Jacobian of affine map from reference cell
-    const double J_00 = element_coordinates[1][0] - element_coordinates[0][0];
-    const double J_01 = element_coordinates[2][0] - element_coordinates[0][0];
-    const double J_10 = element_coordinates[1][1] - element_coordinates[0][1];
-    const double J_11 = element_coordinates[2][1] - element_coordinates[0][1];
-      
-    // Compute determinant of Jacobian
-    const double detJ = J_00*J_11 - J_01*J_10;
-    
-    // Compute inverse of Jacobian
-    
-    // Get coordinates and map to the reference (UFC) element
-    double x = (element_coordinates[0][1]*element_coordinates[2][0] -\
-                element_coordinates[0][0]*element_coordinates[2][1] +\
-                J_11*coordinates[0] - J_01*coordinates[1]) / detJ;
-    double y = (element_coordinates[1][1]*element_coordinates[0][0] -\
-                element_coordinates[1][0]*element_coordinates[0][1] -\
-                J_10*coordinates[0] + J_00*coordinates[1]) / detJ;
-    
-    // Map coordinates to the reference square
-    if (std::abs(y - 1.0) < 1e-14)
-      x = -1.0;
-    else
-      x = 2.0 *x/(1.0 - y) - 1.0;
-    y = 2.0*y - 1.0;
-    
-    // Reset values
-    *values = 0;
-    
-    // Map degree of freedom to element degree of freedom
-    const unsigned int dof = i;
-    
-    // Generate scalings
-    const double scalings_y_0 = 1;
-    const double scalings_y_1 = scalings_y_0*(0.5 - 0.5*y);
-    
-    // Compute psitilde_a
-    const double psitilde_a_0 = 1;
-    const double psitilde_a_1 = x;
-    
-    // Compute psitilde_bs
-    const double psitilde_bs_0_0 = 1;
-    const double psitilde_bs_0_1 = 1.5*y + 0.5;
-    const double psitilde_bs_1_0 = 1;
-    
-    // Compute basisvalues
-    const double basisvalue0 = 0.707106781186548*psitilde_a_0*scalings_y_0*psitilde_bs_0_0;
-    const double basisvalue1 = 1.73205080756888*psitilde_a_1*scalings_y_1*psitilde_bs_1_0;
-    const double basisvalue2 = psitilde_a_0*scalings_y_0*psitilde_bs_0_1;
-    
-    // Table(s) of coefficients
-    const static double coefficients0[3][3] = \
-    {{0.471404520791032, -0.288675134594813, -0.166666666666667},
-    {0.471404520791032, 0.288675134594813, -0.166666666666667},
-    {0.471404520791032, 0, 0.333333333333333}};
-    
-    // Extract relevant coefficients
-    const double coeff0_0 = coefficients0[dof][0];
-    const double coeff0_1 = coefficients0[dof][1];
-    const double coeff0_2 = coefficients0[dof][2];
-    
-    // Compute value(s)
-    *values = coeff0_0*basisvalue0 + coeff0_1*basisvalue1 + coeff0_2*basisvalue2;
-  }
-
-  /// Evaluate all basis functions at given point in cell
-  virtual void evaluate_basis_all(double* values,
-                                  const double* coordinates,
-                                  const ufc::cell& c) const
-  {
-    throw std::runtime_error("The vectorised version of evaluate_basis() is not yet implemented.");
-  }
-
-  /// Evaluate order n derivatives of basis function i at given point in cell
-  virtual void evaluate_basis_derivatives(unsigned int i,
-                                          unsigned int n,
-                                          double* values,
-                                          const double* coordinates,
-                                          const ufc::cell& c) const
-  {
-    // Extract vertex coordinates
-    const double * const * element_coordinates = c.coordinates;
-    
-    // Compute Jacobian of affine map from reference cell
-    const double J_00 = element_coordinates[1][0] - element_coordinates[0][0];
-    const double J_01 = element_coordinates[2][0] - element_coordinates[0][0];
-    const double J_10 = element_coordinates[1][1] - element_coordinates[0][1];
-    const double J_11 = element_coordinates[2][1] - element_coordinates[0][1];
-      
-    // Compute determinant of Jacobian
-    const double detJ = J_00*J_11 - J_01*J_10;
-    
-    // Compute inverse of Jacobian
-    
-    // Get coordinates and map to the reference (UFC) element
-    double x = (element_coordinates[0][1]*element_coordinates[2][0] -\
-                element_coordinates[0][0]*element_coordinates[2][1] +\
-                J_11*coordinates[0] - J_01*coordinates[1]) / detJ;
-    double y = (element_coordinates[1][1]*element_coordinates[0][0] -\
-                element_coordinates[1][0]*element_coordinates[0][1] -\
-                J_10*coordinates[0] + J_00*coordinates[1]) / detJ;
-    
-    // Map coordinates to the reference square
-    if (std::abs(y - 1.0) < 1e-14)
-      x = -1.0;
-    else
-      x = 2.0 *x/(1.0 - y) - 1.0;
-    y = 2.0*y - 1.0;
-    
-    // Compute number of derivatives
-    unsigned int num_derivatives = 1;
-    
-    for (unsigned int j = 0; j < n; j++)
-      num_derivatives *= 2;
-    
-    
-    // Declare pointer to two dimensional array that holds combinations of derivatives and initialise
-    unsigned int **combinations = new unsigned int *[num_derivatives];
-        
-    for (unsigned int j = 0; j < num_derivatives; j++)
-    {
-      combinations[j] = new unsigned int [n];
-      for (unsigned int k = 0; k < n; k++)
-        combinations[j][k] = 0;
-    }
-        
-    // Generate combinations of derivatives
-    for (unsigned int row = 1; row < num_derivatives; row++)
-    {
-      for (unsigned int num = 0; num < row; num++)
-      {
-        for (unsigned int col = n-1; col+1 > 0; col--)
-        {
-          if (combinations[row][col] + 1 > 1)
-            combinations[row][col] = 0;
-          else
-          {
-            combinations[row][col] += 1;
-            break;
-          }
-        }
-      }
-    }
-    
-    // Compute inverse of Jacobian
-    const double Jinv[2][2] =  {{J_11 / detJ, -J_01 / detJ}, {-J_10 / detJ, J_00 / detJ}};
-    
-    // Declare transformation matrix
-    // Declare pointer to two dimensional array and initialise
-    double **transform = new double *[num_derivatives];
-        
-    for (unsigned int j = 0; j < num_derivatives; j++)
-    {
-      transform[j] = new double [num_derivatives];
-      for (unsigned int k = 0; k < num_derivatives; k++)
-        transform[j][k] = 1;
-    }
-    
-    // Construct transformation matrix
-    for (unsigned int row = 0; row < num_derivatives; row++)
-    {
-      for (unsigned int col = 0; col < num_derivatives; col++)
-      {
-        for (unsigned int k = 0; k < n; k++)
-          transform[row][col] *= Jinv[combinations[col][k]][combinations[row][k]];
-      }
-    }
-    
-    // Reset values
-    for (unsigned int j = 0; j < 1*num_derivatives; j++)
-      values[j] = 0;
-    
-    // Map degree of freedom to element degree of freedom
-    const unsigned int dof = i;
-    
-    // Generate scalings
-    const double scalings_y_0 = 1;
-    const double scalings_y_1 = scalings_y_0*(0.5 - 0.5*y);
-    
-    // Compute psitilde_a
-    const double psitilde_a_0 = 1;
-    const double psitilde_a_1 = x;
-    
-    // Compute psitilde_bs
-    const double psitilde_bs_0_0 = 1;
-    const double psitilde_bs_0_1 = 1.5*y + 0.5;
-    const double psitilde_bs_1_0 = 1;
-    
-    // Compute basisvalues
-    const double basisvalue0 = 0.707106781186548*psitilde_a_0*scalings_y_0*psitilde_bs_0_0;
-    const double basisvalue1 = 1.73205080756888*psitilde_a_1*scalings_y_1*psitilde_bs_1_0;
-    const double basisvalue2 = psitilde_a_0*scalings_y_0*psitilde_bs_0_1;
-    
-    // Table(s) of coefficients
-    const static double coefficients0[3][3] = \
-    {{0.471404520791032, -0.288675134594813, -0.166666666666667},
-    {0.471404520791032, 0.288675134594813, -0.166666666666667},
-    {0.471404520791032, 0, 0.333333333333333}};
-    
-    // Interesting (new) part
-    // Tables of derivatives of the polynomial base (transpose)
-    const static double dmats0[3][3] = \
-    {{0, 0, 0},
-    {4.89897948556636, 0, 0},
-    {0, 0, 0}};
-    
-    const static double dmats1[3][3] = \
-    {{0, 0, 0},
-    {2.44948974278318, 0, 0},
-    {4.24264068711928, 0, 0}};
-    
-    // Compute reference derivatives
-    // Declare pointer to array of derivatives on FIAT element
-    double *derivatives = new double [num_derivatives];
-    
-    // Declare coefficients
-    double coeff0_0 = 0;
-    double coeff0_1 = 0;
-    double coeff0_2 = 0;
-    
-    // Declare new coefficients
-    double new_coeff0_0 = 0;
-    double new_coeff0_1 = 0;
-    double new_coeff0_2 = 0;
-    
-    // Loop possible derivatives
-    for (unsigned int deriv_num = 0; deriv_num < num_derivatives; deriv_num++)
-    {
-      // Get values from coefficients array
-      new_coeff0_0 = coefficients0[dof][0];
-      new_coeff0_1 = coefficients0[dof][1];
-      new_coeff0_2 = coefficients0[dof][2];
-    
-      // Loop derivative order
-      for (unsigned int j = 0; j < n; j++)
-      {
-        // Update old coefficients
-        coeff0_0 = new_coeff0_0;
-        coeff0_1 = new_coeff0_1;
-        coeff0_2 = new_coeff0_2;
-    
-        if(combinations[deriv_num][j] == 0)
-        {
-          new_coeff0_0 = coeff0_0*dmats0[0][0] + coeff0_1*dmats0[1][0] + coeff0_2*dmats0[2][0];
-          new_coeff0_1 = coeff0_0*dmats0[0][1] + coeff0_1*dmats0[1][1] + coeff0_2*dmats0[2][1];
-          new_coeff0_2 = coeff0_0*dmats0[0][2] + coeff0_1*dmats0[1][2] + coeff0_2*dmats0[2][2];
-        }
-        if(combinations[deriv_num][j] == 1)
-        {
-          new_coeff0_0 = coeff0_0*dmats1[0][0] + coeff0_1*dmats1[1][0] + coeff0_2*dmats1[2][0];
-          new_coeff0_1 = coeff0_0*dmats1[0][1] + coeff0_1*dmats1[1][1] + coeff0_2*dmats1[2][1];
-          new_coeff0_2 = coeff0_0*dmats1[0][2] + coeff0_1*dmats1[1][2] + coeff0_2*dmats1[2][2];
-        }
-    
-      }
-      // Compute derivatives on reference element as dot product of coefficients and basisvalues
-      derivatives[deriv_num] = new_coeff0_0*basisvalue0 + new_coeff0_1*basisvalue1 + new_coeff0_2*basisvalue2;
-    }
-    
-    // Transform derivatives back to physical element
-    for (unsigned int row = 0; row < num_derivatives; row++)
-    {
-      for (unsigned int col = 0; col < num_derivatives; col++)
-      {
-        values[row] += transform[row][col]*derivatives[col];
-      }
-    }
-    // Delete pointer to array of derivatives on FIAT element
-    delete [] derivatives;
-    
-    // Delete pointer to array of combinations of derivatives and transform
-    for (unsigned int row = 0; row < num_derivatives; row++)
-    {
-      delete [] combinations[row];
-      delete [] transform[row];
-    }
-    
-    delete [] combinations;
-    delete [] transform;
-  }
-
-  /// Evaluate order n derivatives of all basis functions at given point in cell
-  virtual void evaluate_basis_derivatives_all(unsigned int n,
-                                              double* values,
-                                              const double* coordinates,
-                                              const ufc::cell& c) const
-  {
-    throw std::runtime_error("The vectorised version of evaluate_basis_derivatives() is not yet implemented.");
-  }
-
-  /// Evaluate linear functional for dof i on the function f
-  virtual double evaluate_dof(unsigned int i,
-                              const ufc::function& f,
-                              const ufc::cell& c) const
-  {
-    // The reference points, direction and weights:
-    const static double X[3][1][2] = {{{0, 0}}, {{1, 0}}, {{0, 1}}};
-    const static double W[3][1] = {{1}, {1}, {1}};
-    const static double D[3][1][1] = {{{1}}, {{1}}, {{1}}};
-    
-    const double * const * x = c.coordinates;
-    double result = 0.0;
-    // Iterate over the points:
-    // Evaluate basis functions for affine mapping
-    const double w0 = 1.0 - X[i][0][0] - X[i][0][1];
-    const double w1 = X[i][0][0];
-    const double w2 = X[i][0][1];
-    
-    // Compute affine mapping y = F(X)
-    double y[2];
-    y[0] = w0*x[0][0] + w1*x[1][0] + w2*x[2][0];
-    y[1] = w0*x[0][1] + w1*x[1][1] + w2*x[2][1];
-    
-    // Evaluate function at physical points
-    double values[1];
-    f.evaluate(values, y, c);
-    
-    // Map function values using appropriate mapping
-    // Affine map: Do nothing
-    
-    // Note that we do not map the weights (yet).
-    
-    // Take directional components
-    for(int k = 0; k < 1; k++)
-      result += values[k]*D[i][0][k];
-    // Multiply by weights 
-    result *= W[i][0];
-    
-    return result;
-  }
-
-  /// Evaluate linear functionals for all dofs on the function f
-  virtual void evaluate_dofs(double* values,
-                             const ufc::function& f,
-                             const ufc::cell& c) const
-  {
-    throw std::runtime_error("Not implemented (introduced in UFC v1.1).");
-  }
-
-  /// Interpolate vertex values from dof values
-  virtual void interpolate_vertex_values(double* vertex_values,
-                                         const double* dof_values,
-                                         const ufc::cell& c) const
-  {
-    // Evaluate at vertices and use affine mapping
-    vertex_values[0] = dof_values[0];
-    vertex_values[1] = dof_values[1];
-    vertex_values[2] = dof_values[2];
-  }
-
-  /// Return the number of sub elements (for a mixed element)
-  virtual unsigned int num_sub_elements() const
-  {
-    return 1;
-  }
-
-  /// Create a new finite element for sub element i (for a mixed element)
-  virtual ufc::finite_element* create_sub_element(unsigned int i) const
-  {
-    return new PoissonBilinearForm_finite_element_0();
-  }
-
-};
-
-/// This class defines the interface for a finite element.
-
-class PoissonBilinearForm_finite_element_1: public ufc::finite_element
-{
-public:
-
-  /// Constructor
-  PoissonBilinearForm_finite_element_1() : ufc::finite_element()
-  {
-    // Do nothing
-  }
-
-  /// Destructor
-  virtual ~PoissonBilinearForm_finite_element_1()
-  {
-    // Do nothing
-  }
-
-  /// Return a string identifying the finite element
-  virtual const char* signature() const
-  {
-    return "FiniteElement('Lagrange', 'triangle', 1)";
-  }
-
-  /// Return the cell shape
-  virtual ufc::shape cell_shape() const
-  {
-    return ufc::triangle;
-  }
-
-  /// Return the dimension of the finite element function space
-  virtual unsigned int space_dimension() const
-  {
-    return 3;
-  }
-
-  /// Return the rank of the value space
-  virtual unsigned int value_rank() const
-  {
-    return 0;
-  }
-
-  /// Return the dimension of the value space for axis i
-  virtual unsigned int value_dimension(unsigned int i) const
-  {
-    return 1;
-  }
-
-  /// Evaluate basis function i at given point in cell
-  virtual void evaluate_basis(unsigned int i,
-                              double* values,
-                              const double* coordinates,
-                              const ufc::cell& c) const
-  {
-    // Extract vertex coordinates
-    const double * const * element_coordinates = c.coordinates;
-    
-    // Compute Jacobian of affine map from reference cell
-    const double J_00 = element_coordinates[1][0] - element_coordinates[0][0];
-    const double J_01 = element_coordinates[2][0] - element_coordinates[0][0];
-    const double J_10 = element_coordinates[1][1] - element_coordinates[0][1];
-    const double J_11 = element_coordinates[2][1] - element_coordinates[0][1];
-      
-    // Compute determinant of Jacobian
-    const double detJ = J_00*J_11 - J_01*J_10;
-    
-    // Compute inverse of Jacobian
-    
-    // Get coordinates and map to the reference (UFC) element
-    double x = (element_coordinates[0][1]*element_coordinates[2][0] -\
-                element_coordinates[0][0]*element_coordinates[2][1] +\
-                J_11*coordinates[0] - J_01*coordinates[1]) / detJ;
-    double y = (element_coordinates[1][1]*element_coordinates[0][0] -\
-                element_coordinates[1][0]*element_coordinates[0][1] -\
-                J_10*coordinates[0] + J_00*coordinates[1]) / detJ;
-    
-    // Map coordinates to the reference square
-    if (std::abs(y - 1.0) < 1e-14)
-      x = -1.0;
-    else
-      x = 2.0 *x/(1.0 - y) - 1.0;
-    y = 2.0*y - 1.0;
-    
-    // Reset values
-    *values = 0;
-    
-    // Map degree of freedom to element degree of freedom
-    const unsigned int dof = i;
-    
-    // Generate scalings
-    const double scalings_y_0 = 1;
-    const double scalings_y_1 = scalings_y_0*(0.5 - 0.5*y);
-    
-    // Compute psitilde_a
-    const double psitilde_a_0 = 1;
-    const double psitilde_a_1 = x;
-    
-    // Compute psitilde_bs
-    const double psitilde_bs_0_0 = 1;
-    const double psitilde_bs_0_1 = 1.5*y + 0.5;
-    const double psitilde_bs_1_0 = 1;
-    
-    // Compute basisvalues
-    const double basisvalue0 = 0.707106781186548*psitilde_a_0*scalings_y_0*psitilde_bs_0_0;
-    const double basisvalue1 = 1.73205080756888*psitilde_a_1*scalings_y_1*psitilde_bs_1_0;
-    const double basisvalue2 = psitilde_a_0*scalings_y_0*psitilde_bs_0_1;
-    
-    // Table(s) of coefficients
-    const static double coefficients0[3][3] = \
-    {{0.471404520791032, -0.288675134594813, -0.166666666666667},
-    {0.471404520791032, 0.288675134594813, -0.166666666666667},
-    {0.471404520791032, 0, 0.333333333333333}};
-    
-    // Extract relevant coefficients
-    const double coeff0_0 = coefficients0[dof][0];
-    const double coeff0_1 = coefficients0[dof][1];
-    const double coeff0_2 = coefficients0[dof][2];
-    
-    // Compute value(s)
-    *values = coeff0_0*basisvalue0 + coeff0_1*basisvalue1 + coeff0_2*basisvalue2;
-  }
-
-  /// Evaluate all basis functions at given point in cell
-  virtual void evaluate_basis_all(double* values,
-                                  const double* coordinates,
-                                  const ufc::cell& c) const
-  {
-    throw std::runtime_error("The vectorised version of evaluate_basis() is not yet implemented.");
-  }
-
-  /// Evaluate order n derivatives of basis function i at given point in cell
-  virtual void evaluate_basis_derivatives(unsigned int i,
-                                          unsigned int n,
-                                          double* values,
-                                          const double* coordinates,
-                                          const ufc::cell& c) const
-  {
-    // Extract vertex coordinates
-    const double * const * element_coordinates = c.coordinates;
-    
-    // Compute Jacobian of affine map from reference cell
-    const double J_00 = element_coordinates[1][0] - element_coordinates[0][0];
-    const double J_01 = element_coordinates[2][0] - element_coordinates[0][0];
-    const double J_10 = element_coordinates[1][1] - element_coordinates[0][1];
-    const double J_11 = element_coordinates[2][1] - element_coordinates[0][1];
-      
-    // Compute determinant of Jacobian
-    const double detJ = J_00*J_11 - J_01*J_10;
-    
-    // Compute inverse of Jacobian
-    
-    // Get coordinates and map to the reference (UFC) element
-    double x = (element_coordinates[0][1]*element_coordinates[2][0] -\
-                element_coordinates[0][0]*element_coordinates[2][1] +\
-                J_11*coordinates[0] - J_01*coordinates[1]) / detJ;
-    double y = (element_coordinates[1][1]*element_coordinates[0][0] -\
-                element_coordinates[1][0]*element_coordinates[0][1] -\
-                J_10*coordinates[0] + J_00*coordinates[1]) / detJ;
-    
-    // Map coordinates to the reference square
-    if (std::abs(y - 1.0) < 1e-14)
-      x = -1.0;
-    else
-      x = 2.0 *x/(1.0 - y) - 1.0;
-    y = 2.0*y - 1.0;
-    
-    // Compute number of derivatives
-    unsigned int num_derivatives = 1;
-    
-    for (unsigned int j = 0; j < n; j++)
-      num_derivatives *= 2;
-    
-    
-    // Declare pointer to two dimensional array that holds combinations of derivatives and initialise
-    unsigned int **combinations = new unsigned int *[num_derivatives];
-        
-    for (unsigned int j = 0; j < num_derivatives; j++)
-    {
-      combinations[j] = new unsigned int [n];
-      for (unsigned int k = 0; k < n; k++)
-        combinations[j][k] = 0;
-    }
-        
-    // Generate combinations of derivatives
-    for (unsigned int row = 1; row < num_derivatives; row++)
-    {
-      for (unsigned int num = 0; num < row; num++)
-      {
-        for (unsigned int col = n-1; col+1 > 0; col--)
-        {
-          if (combinations[row][col] + 1 > 1)
-            combinations[row][col] = 0;
-          else
-          {
-            combinations[row][col] += 1;
-            break;
-          }
-        }
-      }
-    }
-    
-    // Compute inverse of Jacobian
-    const double Jinv[2][2] =  {{J_11 / detJ, -J_01 / detJ}, {-J_10 / detJ, J_00 / detJ}};
-    
-    // Declare transformation matrix
-    // Declare pointer to two dimensional array and initialise
-    double **transform = new double *[num_derivatives];
-        
-    for (unsigned int j = 0; j < num_derivatives; j++)
-    {
-      transform[j] = new double [num_derivatives];
-      for (unsigned int k = 0; k < num_derivatives; k++)
-        transform[j][k] = 1;
-    }
-    
-    // Construct transformation matrix
-    for (unsigned int row = 0; row < num_derivatives; row++)
-    {
-      for (unsigned int col = 0; col < num_derivatives; col++)
-      {
-        for (unsigned int k = 0; k < n; k++)
-          transform[row][col] *= Jinv[combinations[col][k]][combinations[row][k]];
-      }
-    }
-    
-    // Reset values
-    for (unsigned int j = 0; j < 1*num_derivatives; j++)
-      values[j] = 0;
-    
-    // Map degree of freedom to element degree of freedom
-    const unsigned int dof = i;
-    
-    // Generate scalings
-    const double scalings_y_0 = 1;
-    const double scalings_y_1 = scalings_y_0*(0.5 - 0.5*y);
-    
-    // Compute psitilde_a
-    const double psitilde_a_0 = 1;
-    const double psitilde_a_1 = x;
-    
-    // Compute psitilde_bs
-    const double psitilde_bs_0_0 = 1;
-    const double psitilde_bs_0_1 = 1.5*y + 0.5;
-    const double psitilde_bs_1_0 = 1;
-    
-    // Compute basisvalues
-    const double basisvalue0 = 0.707106781186548*psitilde_a_0*scalings_y_0*psitilde_bs_0_0;
-    const double basisvalue1 = 1.73205080756888*psitilde_a_1*scalings_y_1*psitilde_bs_1_0;
-    const double basisvalue2 = psitilde_a_0*scalings_y_0*psitilde_bs_0_1;
-    
-    // Table(s) of coefficients
-    const static double coefficients0[3][3] = \
-    {{0.471404520791032, -0.288675134594813, -0.166666666666667},
-    {0.471404520791032, 0.288675134594813, -0.166666666666667},
-    {0.471404520791032, 0, 0.333333333333333}};
-    
-    // Interesting (new) part
-    // Tables of derivatives of the polynomial base (transpose)
-    const static double dmats0[3][3] = \
-    {{0, 0, 0},
-    {4.89897948556636, 0, 0},
-    {0, 0, 0}};
-    
-    const static double dmats1[3][3] = \
-    {{0, 0, 0},
-    {2.44948974278318, 0, 0},
-    {4.24264068711928, 0, 0}};
-    
-    // Compute reference derivatives
-    // Declare pointer to array of derivatives on FIAT element
-    double *derivatives = new double [num_derivatives];
-    
-    // Declare coefficients
-    double coeff0_0 = 0;
-    double coeff0_1 = 0;
-    double coeff0_2 = 0;
-    
-    // Declare new coefficients
-    double new_coeff0_0 = 0;
-    double new_coeff0_1 = 0;
-    double new_coeff0_2 = 0;
-    
-    // Loop possible derivatives
-    for (unsigned int deriv_num = 0; deriv_num < num_derivatives; deriv_num++)
-    {
-      // Get values from coefficients array
-      new_coeff0_0 = coefficients0[dof][0];
-      new_coeff0_1 = coefficients0[dof][1];
-      new_coeff0_2 = coefficients0[dof][2];
-    
-      // Loop derivative order
-      for (unsigned int j = 0; j < n; j++)
-      {
-        // Update old coefficients
-        coeff0_0 = new_coeff0_0;
-        coeff0_1 = new_coeff0_1;
-        coeff0_2 = new_coeff0_2;
-    
-        if(combinations[deriv_num][j] == 0)
-        {
-          new_coeff0_0 = coeff0_0*dmats0[0][0] + coeff0_1*dmats0[1][0] + coeff0_2*dmats0[2][0];
-          new_coeff0_1 = coeff0_0*dmats0[0][1] + coeff0_1*dmats0[1][1] + coeff0_2*dmats0[2][1];
-          new_coeff0_2 = coeff0_0*dmats0[0][2] + coeff0_1*dmats0[1][2] + coeff0_2*dmats0[2][2];
-        }
-        if(combinations[deriv_num][j] == 1)
-        {
-          new_coeff0_0 = coeff0_0*dmats1[0][0] + coeff0_1*dmats1[1][0] + coeff0_2*dmats1[2][0];
-          new_coeff0_1 = coeff0_0*dmats1[0][1] + coeff0_1*dmats1[1][1] + coeff0_2*dmats1[2][1];
-          new_coeff0_2 = coeff0_0*dmats1[0][2] + coeff0_1*dmats1[1][2] + coeff0_2*dmats1[2][2];
-        }
-    
-      }
-      // Compute derivatives on reference element as dot product of coefficients and basisvalues
-      derivatives[deriv_num] = new_coeff0_0*basisvalue0 + new_coeff0_1*basisvalue1 + new_coeff0_2*basisvalue2;
-    }
-    
-    // Transform derivatives back to physical element
-    for (unsigned int row = 0; row < num_derivatives; row++)
-    {
-      for (unsigned int col = 0; col < num_derivatives; col++)
-      {
-        values[row] += transform[row][col]*derivatives[col];
-      }
-    }
-    // Delete pointer to array of derivatives on FIAT element
-    delete [] derivatives;
-    
-    // Delete pointer to array of combinations of derivatives and transform
-    for (unsigned int row = 0; row < num_derivatives; row++)
-    {
-      delete [] combinations[row];
-      delete [] transform[row];
-    }
-    
-    delete [] combinations;
-    delete [] transform;
-  }
-
-  /// Evaluate order n derivatives of all basis functions at given point in cell
-  virtual void evaluate_basis_derivatives_all(unsigned int n,
-                                              double* values,
-                                              const double* coordinates,
-                                              const ufc::cell& c) const
-  {
-    throw std::runtime_error("The vectorised version of evaluate_basis_derivatives() is not yet implemented.");
-  }
-
-  /// Evaluate linear functional for dof i on the function f
-  virtual double evaluate_dof(unsigned int i,
-                              const ufc::function& f,
-                              const ufc::cell& c) const
-  {
-    // The reference points, direction and weights:
-    const static double X[3][1][2] = {{{0, 0}}, {{1, 0}}, {{0, 1}}};
-    const static double W[3][1] = {{1}, {1}, {1}};
-    const static double D[3][1][1] = {{{1}}, {{1}}, {{1}}};
-    
-    const double * const * x = c.coordinates;
-    double result = 0.0;
-    // Iterate over the points:
-    // Evaluate basis functions for affine mapping
-    const double w0 = 1.0 - X[i][0][0] - X[i][0][1];
-    const double w1 = X[i][0][0];
-    const double w2 = X[i][0][1];
-    
-    // Compute affine mapping y = F(X)
-    double y[2];
-    y[0] = w0*x[0][0] + w1*x[1][0] + w2*x[2][0];
-    y[1] = w0*x[0][1] + w1*x[1][1] + w2*x[2][1];
-    
-    // Evaluate function at physical points
-    double values[1];
-    f.evaluate(values, y, c);
-    
-    // Map function values using appropriate mapping
-    // Affine map: Do nothing
-    
-    // Note that we do not map the weights (yet).
-    
-    // Take directional components
-    for(int k = 0; k < 1; k++)
-      result += values[k]*D[i][0][k];
-    // Multiply by weights 
-    result *= W[i][0];
-    
-    return result;
-  }
-
-  /// Evaluate linear functionals for all dofs on the function f
-  virtual void evaluate_dofs(double* values,
-                             const ufc::function& f,
-                             const ufc::cell& c) const
-  {
-    throw std::runtime_error("Not implemented (introduced in UFC v1.1).");
-  }
-
-  /// Interpolate vertex values from dof values
-  virtual void interpolate_vertex_values(double* vertex_values,
-                                         const double* dof_values,
-                                         const ufc::cell& c) const
-  {
-    // Evaluate at vertices and use affine mapping
-    vertex_values[0] = dof_values[0];
-    vertex_values[1] = dof_values[1];
-    vertex_values[2] = dof_values[2];
-  }
-
-  /// Return the number of sub elements (for a mixed element)
-  virtual unsigned int num_sub_elements() const
-  {
-    return 1;
-  }
-
-  /// Create a new finite element for sub element i (for a mixed element)
-  virtual ufc::finite_element* create_sub_element(unsigned int i) const
-  {
-    return new PoissonBilinearForm_finite_element_1();
-  }
-
-};
-
-/// This class defines the interface for a local-to-global mapping of
-/// degrees of freedom (dofs).
-
-class PoissonBilinearForm_dof_map_0: public ufc::dof_map
-{
-private:
-
-  unsigned int __global_dimension;
-
-public:
-
-  /// Constructor
-  PoissonBilinearForm_dof_map_0() : ufc::dof_map()
-  {
-    __global_dimension = 0;
-  }
-
-  /// Destructor
-  virtual ~PoissonBilinearForm_dof_map_0()
-  {
-    // Do nothing
-  }
-
-  /// Return a string identifying the dof map
-  virtual const char* signature() const
-  {
-    return "FFC dof map for FiniteElement('Lagrange', 'triangle', 1)";
-  }
-
-  /// Return true iff mesh entities of topological dimension d are needed
-  virtual bool needs_mesh_entities(unsigned int d) const
-  {
-    switch (d)
-    {
-    case 0:
-      return true;
-      break;
-    case 1:
-      return false;
-      break;
-    case 2:
-      return false;
-      break;
-    }
-    return false;
-  }
-
-  /// Initialize dof map for mesh (return true iff init_cell() is needed)
-  virtual bool init_mesh(const ufc::mesh& m)
-  {
-    __global_dimension = m.num_entities[0];
-    return false;
-  }
-
-  /// Initialize dof map for given cell
-  virtual void init_cell(const ufc::mesh& m,
-                         const ufc::cell& c)
-  {
-    // Do nothing
-  }
-
-  /// Finish initialization of dof map for cells
-  virtual void init_cell_finalize()
-  {
-    // Do nothing
-  }
-
-  /// Return the dimension of the global finite element function space
-  virtual unsigned int global_dimension() const
-  {
-    return __global_dimension;
-  }
-
-  /// Return the dimension of the local finite element function space
-  virtual unsigned int local_dimension() const
-  {
-    return 3;
-  }
-
-  // Return the geometric dimension of the coordinates this dof map provides
-  virtual unsigned int geometric_dimension() const
-  {
-    return 2;
-  }
-
-  /// Return the number of dofs on each cell facet
-  virtual unsigned int num_facet_dofs() const
-  {
-    return 2;
-  }
-
-  /// Return the number of dofs associated with each cell entity of dimension d
-  virtual unsigned int num_entity_dofs(unsigned int d) const
-  {
-    throw std::runtime_error("Not implemented (introduced in UFC v1.1).");
-  }
-
-  /// Tabulate the local-to-global mapping of dofs on a cell
-  virtual void tabulate_dofs(unsigned int* dofs,
-                             const ufc::mesh& m,
-                             const ufc::cell& c) const
-  {
-    dofs[0] = c.entity_indices[0][0];
-    dofs[1] = c.entity_indices[0][1];
-    dofs[2] = c.entity_indices[0][2];
-  }
-
-  /// Tabulate the local-to-local mapping from facet dofs to cell dofs
-  virtual void tabulate_facet_dofs(unsigned int* dofs,
-                                   unsigned int facet) const
-  {
-    switch (facet)
-    {
-    case 0:
-      dofs[0] = 1;
-      dofs[1] = 2;
-      break;
-    case 1:
-      dofs[0] = 0;
-      dofs[1] = 2;
-      break;
-    case 2:
-      dofs[0] = 0;
-      dofs[1] = 1;
-      break;
-    }
-  }
-
-  /// Tabulate the local-to-local mapping of dofs on entity (d, i)
-  virtual void tabulate_entity_dofs(unsigned int* dofs,
-                                    unsigned int d, unsigned int i) const
-  {
-    throw std::runtime_error("Not implemented (introduced in UFC v1.1).");
-  }
-
-  /// Tabulate the coordinates of all dofs on a cell
-  virtual void tabulate_coordinates(double** coordinates,
-                                    const ufc::cell& c) const
-  {
-    const double * const * x = c.coordinates;
-    coordinates[0][0] = x[0][0];
-    coordinates[0][1] = x[0][1];
-    coordinates[1][0] = x[1][0];
-    coordinates[1][1] = x[1][1];
-    coordinates[2][0] = x[2][0];
-    coordinates[2][1] = x[2][1];
-  }
-
-  /// Return the number of sub dof maps (for a mixed element)
-  virtual unsigned int num_sub_dof_maps() const
-  {
-    return 1;
-  }
-
-  /// Create a new dof_map for sub dof map i (for a mixed element)
-  virtual ufc::dof_map* create_sub_dof_map(unsigned int i) const
-  {
-    return new PoissonBilinearForm_dof_map_0();
-  }
-
-};
-
-/// This class defines the interface for a local-to-global mapping of
-/// degrees of freedom (dofs).
-
-class PoissonBilinearForm_dof_map_1: public ufc::dof_map
-{
-private:
-
-  unsigned int __global_dimension;
-
-public:
-
-  /// Constructor
-  PoissonBilinearForm_dof_map_1() : ufc::dof_map()
-  {
-    __global_dimension = 0;
-  }
-
-  /// Destructor
-  virtual ~PoissonBilinearForm_dof_map_1()
-  {
-    // Do nothing
-  }
-
-  /// Return a string identifying the dof map
-  virtual const char* signature() const
-  {
-    return "FFC dof map for FiniteElement('Lagrange', 'triangle', 1)";
-  }
-
-  /// Return true iff mesh entities of topological dimension d are needed
-  virtual bool needs_mesh_entities(unsigned int d) const
-  {
-    switch (d)
-    {
-    case 0:
-      return true;
-      break;
-    case 1:
-      return false;
-      break;
-    case 2:
-      return false;
-      break;
-    }
-    return false;
-  }
-
-  /// Initialize dof map for mesh (return true iff init_cell() is needed)
-  virtual bool init_mesh(const ufc::mesh& m)
-  {
-    __global_dimension = m.num_entities[0];
-    return false;
-  }
-
-  /// Initialize dof map for given cell
-  virtual void init_cell(const ufc::mesh& m,
-                         const ufc::cell& c)
-  {
-    // Do nothing
-  }
-
-  /// Finish initialization of dof map for cells
-  virtual void init_cell_finalize()
-  {
-    // Do nothing
-  }
-
-  /// Return the dimension of the global finite element function space
-  virtual unsigned int global_dimension() const
-  {
-    return __global_dimension;
-  }
-
-  /// Return the dimension of the local finite element function space
-  virtual unsigned int local_dimension() const
-  {
-    return 3;
-  }
-
-  // Return the geometric dimension of the coordinates this dof map provides
-  virtual unsigned int geometric_dimension() const
-  {
-    return 2;
-  }
-
-  /// Return the number of dofs on each cell facet
-  virtual unsigned int num_facet_dofs() const
-  {
-    return 2;
-  }
-
-  /// Return the number of dofs associated with each cell entity of dimension d
-  virtual unsigned int num_entity_dofs(unsigned int d) const
-  {
-    throw std::runtime_error("Not implemented (introduced in UFC v1.1).");
-  }
-
-  /// Tabulate the local-to-global mapping of dofs on a cell
-  virtual void tabulate_dofs(unsigned int* dofs,
-                             const ufc::mesh& m,
-                             const ufc::cell& c) const
-  {
-    dofs[0] = c.entity_indices[0][0];
-    dofs[1] = c.entity_indices[0][1];
-    dofs[2] = c.entity_indices[0][2];
-  }
-
-  /// Tabulate the local-to-local mapping from facet dofs to cell dofs
-  virtual void tabulate_facet_dofs(unsigned int* dofs,
-                                   unsigned int facet) const
-  {
-    switch (facet)
-    {
-    case 0:
-      dofs[0] = 1;
-      dofs[1] = 2;
-      break;
-    case 1:
-      dofs[0] = 0;
-      dofs[1] = 2;
-      break;
-    case 2:
-      dofs[0] = 0;
-      dofs[1] = 1;
-      break;
-    }
-  }
-
-  /// Tabulate the local-to-local mapping of dofs on entity (d, i)
-  virtual void tabulate_entity_dofs(unsigned int* dofs,
-                                    unsigned int d, unsigned int i) const
-  {
-    throw std::runtime_error("Not implemented (introduced in UFC v1.1).");
-  }
-
-  /// Tabulate the coordinates of all dofs on a cell
-  virtual void tabulate_coordinates(double** coordinates,
-                                    const ufc::cell& c) const
-  {
-    const double * const * x = c.coordinates;
-    coordinates[0][0] = x[0][0];
-    coordinates[0][1] = x[0][1];
-    coordinates[1][0] = x[1][0];
-    coordinates[1][1] = x[1][1];
-    coordinates[2][0] = x[2][0];
-    coordinates[2][1] = x[2][1];
-  }
-
-  /// Return the number of sub dof maps (for a mixed element)
-  virtual unsigned int num_sub_dof_maps() const
-  {
-    return 1;
-  }
-
-  /// Create a new dof_map for sub dof map i (for a mixed element)
-  virtual ufc::dof_map* create_sub_dof_map(unsigned int i) const
-  {
-    return new PoissonBilinearForm_dof_map_1();
-  }
-
-};
-
-/// This class defines the interface for the tabulation of the cell
-/// tensor corresponding to the local contribution to a form from
-/// the integral over a cell.
-
-class PoissonBilinearForm_cell_integral_0_quadrature: public ufc::cell_integral
-{
-public:
-
-  /// Constructor
-  PoissonBilinearForm_cell_integral_0_quadrature() : ufc::cell_integral()
-  {
-    // Do nothing
-  }
-
-  /// Destructor
-  virtual ~PoissonBilinearForm_cell_integral_0_quadrature()
-  {
-    // Do nothing
-  }
-
-  /// Tabulate the tensor for the contribution from a local cell
-  virtual void tabulate_tensor(double* A,
-                               const double * const * w,
-                               const ufc::cell& c) const
-  {
-    // Extract vertex coordinates
-    const double * const * x = c.coordinates;
-    
-    // Compute Jacobian of affine map from reference cell
-    const double J_00 = x[1][0] - x[0][0];
-    const double J_01 = x[2][0] - x[0][0];
-    const double J_10 = x[1][1] - x[0][1];
-    const double J_11 = x[2][1] - x[0][1];
-      
-    // Compute determinant of Jacobian
-    double detJ = J_00*J_11 - J_01*J_10;
-      
-    // Compute inverse of Jacobian
-    const double Jinv_00 =  J_11 / detJ;
-    const double Jinv_01 = -J_01 / detJ;
-    const double Jinv_10 = -J_10 / detJ;
-    const double Jinv_11 =  J_00 / detJ;
-    
-    // Set scale factor
-    const double det = std::abs(detJ);
-    
-    
-    // Array of quadrature weights
-    const static double W1 = 0.5;
-    
-    
-    const static double FE0_D10[1][3] = \
-    {{-1, 1, 0}};
-    
-    const static double FE0_D01[1][3] = \
-    {{-1, 0, 1}};
-    
-    // Compute element tensor using UFL quadrature representation
-    // Optimisations: ('simplify expressions', False), ('ignore zero tables', False), ('non zero columns', False), ('remove zero terms', False), ('ignore ones', False)
-    // Total number of operations to compute element tensor: 162
-    
-    // Loop quadrature points for integral
-    // Number of operations to compute element tensor for following IP loop = 162
-    // Only 1 integration point, omitting IP loop.
-    
-    // Number of operations for primary indices = 162
-    for (unsigned int j = 0; j < 3; j++)
-    {
-      for (unsigned int k = 0; k < 3; k++)
-      {
-        // Number of operations to compute entry = 18
-        A[j*3 + k] += ((Jinv_00*FE0_D10[0][j] + Jinv_10*FE0_D01[0][j])*(Jinv_00*FE0_D10[0][k] + Jinv_10*FE0_D01[0][k]) + (Jinv_01*FE0_D10[0][j] + Jinv_11*FE0_D01[0][j])*(Jinv_01*FE0_D10[0][k] + Jinv_11*FE0_D01[0][k]))*W1*det;
-      }// end loop over 'k'
-    }// end loop over 'j'
-  }
-
-};
-
-/// This class defines the interface for the tabulation of the cell
-/// tensor corresponding to the local contribution to a form from
-/// the integral over a cell.
-
-class PoissonBilinearForm_cell_integral_0: public ufc::cell_integral
-{
-private:
-
-  PoissonBilinearForm_cell_integral_0_quadrature integral_0_quadrature;
-
-public:
-
-  /// Constructor
-  PoissonBilinearForm_cell_integral_0() : ufc::cell_integral()
-  {
-    // Do nothing
-  }
-
-  /// Destructor
-  virtual ~PoissonBilinearForm_cell_integral_0()
-  {
-    // Do nothing
-  }
-
-  /// Tabulate the tensor for the contribution from a local cell
-  virtual void tabulate_tensor(double* A,
-                               const double * const * w,
-                               const ufc::cell& c) const
-  {
-    // Reset values of the element tensor block
-    A[0] = 0;
-    A[1] = 0;
-    A[2] = 0;
-    A[3] = 0;
-    A[4] = 0;
-    A[5] = 0;
-    A[6] = 0;
-    A[7] = 0;
-    A[8] = 0;
-    
-    // Add all contributions to element tensor
-    integral_0_quadrature.tabulate_tensor(A, w, c);
-  }
-
-};
-
-/// This class defines the interface for the assembly of the global
-/// tensor corresponding to a form with r + n arguments, that is, a
-/// mapping
-///
-///     a : V1 x V2 x ... Vr x W1 x W2 x ... x Wn -> R
-///
-/// with arguments v1, v2, ..., vr, w1, w2, ..., wn. The rank r
-/// global tensor A is defined by
-///
-///     A = a(V1, V2, ..., Vr, w1, w2, ..., wn),
-///
-/// where each argument Vj represents the application to the
-/// sequence of basis functions of Vj and w1, w2, ..., wn are given
-/// fixed functions (coefficients).
-
-class PoissonBilinearForm: public ufc::form
-{
-public:
-
-  /// Constructor
-  PoissonBilinearForm() : ufc::form()
-  {
-    // Do nothing
-  }
-
-  /// Destructor
-  virtual ~PoissonBilinearForm()
-  {
-    // Do nothing
-  }
-
-  /// Return a string identifying the form
-  virtual const char* signature() const
-  {
-    return "Form([Integral(IndexSum(Product(Indexed(ComponentTensor(SpatialDerivative(BasisFunction(FiniteElement('Lagrange', Cell('triangle', 1), 1), 0), MultiIndex((Index(0),), {Index(0): 2})), MultiIndex((Index(0),), {Index(0): 2})), MultiIndex((Index(1),), {Index(1): 2})), Indexed(ComponentTensor(SpatialDerivative(BasisFunction(FiniteElement('Lagrange', Cell('triangle', 1), 1), 1), MultiIndex((Index(2),), {Index(2): 2})), MultiIndex((Index(2),), {Index(2): 2})), MultiIndex((Index(1), [...]
-  }
-
-  /// Return the rank of the global tensor (r)
-  virtual unsigned int rank() const
-  {
-    return 2;
-  }
-
-  /// Return the number of coefficients (n)
-  virtual unsigned int num_coefficients() const
-  {
-    return 0;
-  }
-
-  /// Return the number of cell integrals
-  virtual unsigned int num_cell_integrals() const
-  {
-    return 1;
-  }
-  
-  /// Return the number of exterior facet integrals
-  virtual unsigned int num_exterior_facet_integrals() const
-  {
-    return 0;
-  }
-  
-  /// Return the number of interior facet integrals
-  virtual unsigned int num_interior_facet_integrals() const
-  {
-    return 0;
-  }
-    
-  /// Create a new finite element for argument function i
-  virtual ufc::finite_element* create_finite_element(unsigned int i) const
-  {
-    switch (i)
-    {
-    case 0:
-      return new PoissonBilinearForm_finite_element_0();
-      break;
-    case 1:
-      return new PoissonBilinearForm_finite_element_1();
-      break;
-    }
-    return 0;
-  }
-  
-  /// Create a new dof map for argument function i
-  virtual ufc::dof_map* create_dof_map(unsigned int i) const
-  {
-    switch (i)
-    {
-    case 0:
-      return new PoissonBilinearForm_dof_map_0();
-      break;
-    case 1:
-      return new PoissonBilinearForm_dof_map_1();
-      break;
-    }
-    return 0;
-  }
-
-  /// Create a new cell integral on sub domain i
-  virtual ufc::cell_integral* create_cell_integral(unsigned int i) const
-  {
-    return new PoissonBilinearForm_cell_integral_0();
-  }
-
-  /// Create a new exterior facet integral on sub domain i
-  virtual ufc::exterior_facet_integral* create_exterior_facet_integral(unsigned int i) const
-  {
-    return 0;
-  }
-
-  /// Create a new interior facet integral on sub domain i
-  virtual ufc::interior_facet_integral* create_interior_facet_integral(unsigned int i) const
-  {
-    return 0;
-  }
-
-};
-
-#endif
diff --git a/ufc-merge-into-ffc/doc/manual/code/poisson_syfi.h b/ufc-merge-into-ffc/doc/manual/code/poisson_syfi.h
deleted file mode 100644
index a25ab8e..0000000
--- a/ufc-merge-into-ffc/doc/manual/code/poisson_syfi.h
+++ /dev/null
@@ -1 +0,0 @@
-// Add example here
diff --git a/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/dof_map_Lagrange_1_2D.cpp b/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/dof_map_Lagrange_1_2D.cpp
deleted file mode 100644
index ecf2aae..0000000
--- a/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/dof_map_Lagrange_1_2D.cpp
+++ /dev/null
@@ -1,189 +0,0 @@
-//
-// This code complies with UFC version 1.0, and is generated with SyFi version 0.4.0.
-//
-// http://www.fenics.org/syfi/
-// http://www.fenics.org/ufc/
-//
-
-
-#include <stdexcept>
-#include <math.h>
-#include <ufc.h>
-#include <pycc/Functions/Ptv.h>
-#include <pycc/Functions/Ptv_tools.h>
-#include <pycc/Functions/Dof_Ptv.h>
-#include <pycc/Functions/OrderedPtvSet.h>
-#include <pycc/Functions/Dof_OrderedPtvSet.h>
-#include "dof_map_Lagrange_1_2D.h"
-
-
-namespace pycc
-{
-
-/// Constructor
-dof_map_Lagrange_1_2D::dof_map_Lagrange_1_2D() : ufc::dof_map()
-{
-  loc2glob = 0;
-}
-
-/// Destructor
-dof_map_Lagrange_1_2D::~dof_map_Lagrange_1_2D()
-{
-  if(loc2glob) delete [] loc2glob;
-}
-
-/// Return a string identifying the dof map
-const char* dof_map_Lagrange_1_2D::signature() const
-{
-  return "dof_map_Lagrange_1_2D // generated by SyFi";
-}
-
-/// Return true iff mesh entities of topological dimension d are needed
-bool dof_map_Lagrange_1_2D::needs_mesh_entities(unsigned int d) const
-{
-  switch(d)
-  {
-  case 0: return true; // vertices
-  case 1: return true; // edges
-  case 2: return true; // faces
-  case 3: return false; // volumes
-  }
-  return false; // strange unsupported case or error
-}
-
-/// Initialize dof map for mesh (return true iff init_cell() is needed)
-bool dof_map_Lagrange_1_2D::init_mesh(const ufc::mesh& m)
-{
-  int top_dim = 2;
-  num_elements = m.num_entities[top_dim];
-  return true;
-}
-
-/// Initialize dof map for given cell
-void dof_map_Lagrange_1_2D::init_cell(const ufc::mesh& m,
-                              const ufc::cell& c)
-{
-  // coordinates
-  double x0 = c.coordinates[0][0]; double y0 = c.coordinates[0][1];
-  double x1 = c.coordinates[1][0]; double y1 = c.coordinates[1][1];
-  double x2 = c.coordinates[2][0]; double y2 = c.coordinates[2][1];
-  
-  // affine map
-  double G00 = x1 - x0;
-  double G01 = x2 - x0;
-  
-  double G10 = y1 - y0;
-  double G11 = y2 - y0;
-  
-  int element = c.entity_indices[2][0];
-  
-  double dof0[2] = { x0, y0 };
-  Ptv pdof0(2, dof0);
-  dof.insert_dof(element, 0, pdof0);
-  
-  double dof1[2] = { G00+x0, y0+G10 };
-  Ptv pdof1(2, dof1);
-  dof.insert_dof(element, 1, pdof1);
-  
-  double dof2[2] = { x0+G01, G11+y0 };
-  Ptv pdof2(2, dof2);
-  dof.insert_dof(element, 2, pdof2);
-}
-
-/// Finish initialization of dof map for cells
-void dof_map_Lagrange_1_2D::init_cell_finalize()
-{
-  loc2glob = new unsigned int[num_elements * local_dimension()];
-  dof.build_loc2dof(num_elements, local_dimension(), reinterpret_cast<int*>(loc2glob));
-}
-
-/// Return the dimension of the global finite element function space
-unsigned int dof_map_Lagrange_1_2D::global_dimension() const
-{
-  return dof.size();
-}
-
-/// Return the dimension of the local finite element function space
-unsigned int dof_map_Lagrange_1_2D::local_dimension() const
-{
-  return 3;
-}
-
-/// Return the number of dofs on each cell facet
-unsigned int dof_map_Lagrange_1_2D::num_facet_dofs() const
-{
-  return 2;
-}
-
-/// Tabulate the local-to-global mapping of dofs on a cell
-void dof_map_Lagrange_1_2D::tabulate_dofs(unsigned int* dofs,
-                                  const ufc::mesh& m,
-                                  const ufc::cell& c) const
-{
-  const unsigned int *from_dofs = loc2glob + (3 * c.entity_indices[2][0]);
-  memcpy(dofs, from_dofs, sizeof(unsigned int)*3);
-}
-
-/// Tabulate the local-to-local mapping from facet dofs to cell dofs
-void dof_map_Lagrange_1_2D::tabulate_facet_dofs(unsigned int* dofs,
-                                        unsigned int facet) const
-{
-  switch(facet)
-  {
-  case 0:
-    dofs[0] = 1;
-    dofs[1] = 2;
-    break;
-  case 1:
-    dofs[0] = 0;
-    dofs[1] = 2;
-    break;
-  case 2:
-    dofs[0] = 0;
-    dofs[1] = 1;
-    break;
-  default:
-    throw std::runtime_error("Invalid facet number.");
-  }
-}
-
-/// Tabulate the coordinates of all dofs on a cell
-void dof_map_Lagrange_1_2D::tabulate_coordinates(double** coordinates,
-                                         const ufc::cell& c) const
-{
-  // coordinates
-  double x0 = c.coordinates[0][0]; double y0 = c.coordinates[0][1];
-  double x1 = c.coordinates[1][0]; double y1 = c.coordinates[1][1];
-  double x2 = c.coordinates[2][0]; double y2 = c.coordinates[2][1];
-  
-  // affine map
-  double G00 = x1 - x0;
-  double G01 = x2 - x0;
-  
-  double G10 = y1 - y0;
-  double G11 = y2 - y0;
-  
-  
-  coordinates[0][0] = x0;
-  coordinates[0][1] = y0;
-  coordinates[1][0] = G00+x0;
-  coordinates[1][1] = y0+G10;
-  coordinates[2][0] = x0+G01;
-  coordinates[2][1] = G11+y0;
-}
-
-/// Return the number of sub dof maps (for a mixed element)
-unsigned int dof_map_Lagrange_1_2D::num_sub_dof_maps() const
-{
-  return 1;
-}
-
-/// Create a new dof_map for sub dof map i (for a mixed element)
-ufc::dof_map* dof_map_Lagrange_1_2D::create_sub_dof_map(unsigned int i) const
-{
-  return new dof_map_Lagrange_1_2D();
-}
-
-
-
-} // namespace
diff --git a/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/dof_map_Lagrange_1_2D.h b/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/dof_map_Lagrange_1_2D.h
deleted file mode 100644
index bae3b58..0000000
--- a/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/dof_map_Lagrange_1_2D.h
+++ /dev/null
@@ -1,92 +0,0 @@
-//
-// This code complies with UFC version 1.0, and is generated with SyFi version 0.4.0.
-//
-// http://www.fenics.org/syfi/
-// http://www.fenics.org/ufc/
-//
-
-
-#ifndef __dof_map_Lagrange_1_2D_H
-#define __dof_map_Lagrange_1_2D_H
-
-#include <stdexcept>
-#include <math.h>
-#include <ufc.h>
-#include <pycc/Functions/Ptv.h>
-#include <pycc/Functions/Ptv_tools.h>
-#include <pycc/Functions/Dof_Ptv.h>
-#include <pycc/Functions/OrderedPtvSet.h>
-#include <pycc/Functions/Dof_OrderedPtvSet.h>
-
-
-
-namespace pycc
-{
-
-/// This class defines the interface for a local-to-global mapping of
-/// degrees of freedom (dofs).
-
-class dof_map_Lagrange_1_2D: public ufc::dof_map
-{  public:
-    pycc::Dof_Ptv dof;
-    unsigned int num_elements;
-    unsigned int * loc2glob;
-
-public:
-
-  /// Constructor
-  dof_map_Lagrange_1_2D();
-
-  /// Destructor
-  virtual ~dof_map_Lagrange_1_2D();
-
-  /// Return a string identifying the dof map
-  virtual const char* signature() const;
-
-  /// Return true iff mesh entities of topological dimension d are needed
-  virtual bool needs_mesh_entities(unsigned int d) const;
-
-  /// Initialize dof map for mesh (return true iff init_cell() is needed)
-  virtual bool init_mesh(const ufc::mesh& m);
-
-  /// Initialize dof map for given cell
-  virtual void init_cell(const ufc::mesh& m,
-                         const ufc::cell& c);
-
-  /// Finish initialization of dof map for cells
-  virtual void init_cell_finalize();
-
-  /// Return the dimension of the global finite element function space
-  virtual unsigned int global_dimension() const;
-
-  /// Return the dimension of the local finite element function space
-  virtual unsigned int local_dimension() const;
-
-  /// Return the number of dofs on each cell facet
-  virtual unsigned int num_facet_dofs() const;
-
-  /// Tabulate the local-to-global mapping of dofs on a cell
-  virtual void tabulate_dofs(unsigned int* dofs,
-                             const ufc::mesh& m,
-                             const ufc::cell& c) const;
-
-  /// Tabulate the local-to-local mapping from facet dofs to cell dofs
-  virtual void tabulate_facet_dofs(unsigned int* dofs,
-                                   unsigned int facet) const;
-
-  /// Tabulate the coordinates of all dofs on a cell
-  virtual void tabulate_coordinates(double** coordinates,
-                                    const ufc::cell& c) const;
-
-  /// Return the number of sub dof maps (for a mixed element)
-  virtual unsigned int num_sub_dof_maps() const;
-
-  /// Create a new dof_map for sub dof map i (for a mixed element)
-  virtual ufc::dof_map* create_sub_dof_map(unsigned int i) const;
-
-};
-
-
-} // namespace
-
-#endif
diff --git a/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/fe_Lagrange_1_2D.cpp b/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/fe_Lagrange_1_2D.cpp
deleted file mode 100644
index fb7ace7..0000000
--- a/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/fe_Lagrange_1_2D.cpp
+++ /dev/null
@@ -1,160 +0,0 @@
-//
-// This code complies with UFC version 1.0, and is generated with SyFi version 0.4.0.
-//
-// http://www.fenics.org/syfi/
-// http://www.fenics.org/ufc/
-//
-
-
-#include <stdexcept>
-#include <math.h>
-#include <ufc.h>
-#include <pycc/Functions/Ptv.h>
-#include <pycc/Functions/Ptv_tools.h>
-#include <pycc/Functions/Dof_Ptv.h>
-#include <pycc/Functions/OrderedPtvSet.h>
-#include <pycc/Functions/Dof_OrderedPtvSet.h>
-#include "fe_Lagrange_1_2D.h"
-
-
-namespace pycc
-{
-
-
-/// Constructor
-fe_Lagrange_1_2D::fe_Lagrange_1_2D() : ufc::finite_element()
-{
-  
-}
-
-/// Destructor
-fe_Lagrange_1_2D::~fe_Lagrange_1_2D()
-{
-  
-}
-
-/// Return a string identifying the finite element
-const char* fe_Lagrange_1_2D::signature() const
-{
-  return "fe_Lagrange_1_2D // generated by SyFi";
-}
-
-/// Return the cell shape
-ufc::shape fe_Lagrange_1_2D::cell_shape() const
-{
-  return ufc::triangle;
-}
-
-/// Return the dimension of the finite element function space
-unsigned int fe_Lagrange_1_2D::space_dimension() const
-{
-  return 3;
-}
-
-/// Return the rank of the value space
-unsigned int fe_Lagrange_1_2D::value_rank() const
-{
-  return 0;
-}
-
-/// Return the dimension of the value space for axis i
-unsigned int fe_Lagrange_1_2D::value_dimension(unsigned int i) const
-{
-  return 1;
-}
-
-/// Evaluate basis function i at given point in cell
-void fe_Lagrange_1_2D::evaluate_basis(unsigned int i,
-                                   double* values,
-                                   const double* coordinates,
-                                   const ufc::cell& c) const
-{
-  const double x = coordinates[0];
-  const double y = coordinates[1];
-  switch(i)
-  {
-  case 0:
-    values[0] = -x-y+1.0;
-    break;
-  case 1:
-    values[0] = x;
-    break;
-  case 2:
-    values[0] = y;
-    break;
-  }
-}
-
-/// Evaluate order n derivatives of basis function i at given point in cell
-void fe_Lagrange_1_2D::evaluate_basis_derivatives(unsigned int i,
-                                               unsigned int n,
-                                               double* values,
-                                               const double* coordinates,
-                                               const ufc::cell& c) const
-{
-    throw std::runtime_error("gen_evaluate_basis_derivatives not implemented yet.");
-}
-
-/// Evaluate linear functional for dof i on the function f
-double fe_Lagrange_1_2D::evaluate_dof(unsigned int i,
-                                   const ufc::function& f,
-                                   const ufc::cell& c) const
-{
-  // coordinates
-  double x0 = c.coordinates[0][0]; double y0 = c.coordinates[0][1];
-  double x1 = c.coordinates[1][0]; double y1 = c.coordinates[1][1];
-  double x2 = c.coordinates[2][0]; double y2 = c.coordinates[2][1];
-  
-  // affine map
-  double G00 = x1 - x0;
-  double G01 = x2 - x0;
-  
-  double G10 = y1 - y0;
-  double G11 = y2 - y0;
-  
-  double v[1];
-  double x[2];
-  switch(i)
-    {
-  case 0:
-    x[0] = x0;
-    x[1] = y0;
-    break;
-  case 1:
-    x[0] = x0+G00;
-    x[1] = G10+y0;
-    break;
-  case 2:
-    x[0] = G01+x0;
-    x[1] = y0+G11;
-    break;
-  }
-  f.evaluate(v, x, c);
-  return v[i % 1];
-
-}
-
-/// Interpolate vertex values from dof values
-void fe_Lagrange_1_2D::interpolate_vertex_values(double* vertex_values,
-                                              const double* dof_values,
-                                              const ufc::cell& c) const
-{
-  vertex_values[0] = dof_values[0];
-  vertex_values[1] = dof_values[1];
-  vertex_values[2] = dof_values[2];
-}
-
-/// Return the number of sub elements (for a mixed element)
-unsigned int fe_Lagrange_1_2D::num_sub_elements() const
-{
-  return 1;
-}
-
-/// Create a new finite element for sub element i (for a mixed element)
-ufc::finite_element* fe_Lagrange_1_2D::create_sub_element(unsigned int i) const
-{
-  return new fe_Lagrange_1_2D();
-}
-
-
-} // namespace
diff --git a/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/fe_Lagrange_1_2D.h b/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/fe_Lagrange_1_2D.h
deleted file mode 100644
index 5be5598..0000000
--- a/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/fe_Lagrange_1_2D.h
+++ /dev/null
@@ -1,87 +0,0 @@
-//
-// This code complies with UFC version 1.0, and is generated with SyFi version 0.4.0.
-//
-// http://www.fenics.org/syfi/
-// http://www.fenics.org/ufc/
-//
-
-
-#ifndef __fe_Lagrange_1_2D_H
-#define __fe_Lagrange_1_2D_H
-
-#include <stdexcept>
-#include <math.h>
-#include <ufc.h>
-#include <pycc/Functions/Ptv.h>
-#include <pycc/Functions/Ptv_tools.h>
-#include <pycc/Functions/Dof_Ptv.h>
-#include <pycc/Functions/OrderedPtvSet.h>
-#include <pycc/Functions/Dof_OrderedPtvSet.h>
-
-
-
-namespace pycc
-{
-
-/// This class defines the interface for a finite element.
-
-class fe_Lagrange_1_2D: public ufc::finite_element
-{  
-public:
-
-  /// Constructor
-  fe_Lagrange_1_2D();
-
-  /// Destructor
-  virtual ~fe_Lagrange_1_2D();
-
-  /// Return a string identifying the finite element
-  virtual const char* signature() const;
-
-  /// Return the cell shape
-  virtual ufc::shape cell_shape() const;
-
-  /// Return the dimension of the finite element function space
-  virtual unsigned int space_dimension() const;
-
-  /// Return the rank of the value space
-  virtual unsigned int value_rank() const;
-
-  /// Return the dimension of the value space for axis i
-  virtual unsigned int value_dimension(unsigned int i) const;
-
-  /// Evaluate basis function i at given point in cell
-  virtual void evaluate_basis(unsigned int i,
-                              double* values,
-                              const double* coordinates,
-                              const ufc::cell& c) const;
-
-  /// Evaluate order n derivatives of basis function i at given point in cell
-  virtual void evaluate_basis_derivatives(unsigned int i,
-                                          unsigned int n,
-                                          double* values,
-                                          const double* coordinates,
-                                          const ufc::cell& c) const;
-
-  /// Evaluate linear functional for dof i on the function f
-  virtual double evaluate_dof(unsigned int i,
-                              const ufc::function& f,
-                              const ufc::cell& c) const;
-
-  /// Interpolate vertex values from dof values
-  virtual void interpolate_vertex_values(double* vertex_values,
-                                         const double* dof_values,
-                                         const ufc::cell& c) const;
-
-  /// Return the number of sub elements (for a mixed element)
-  virtual unsigned int num_sub_elements() const;
-
-  /// Create a new finite element for sub element i (for a mixed element)
-  virtual ufc::finite_element* create_sub_element(unsigned int i) const;
-
-};
-
-
-} // namespace
-
-#endif
diff --git a/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/form__stiffness_form__Lagrange_1_2D.cpp b/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/form__stiffness_form__Lagrange_1_2D.cpp
deleted file mode 100644
index 23aef86..0000000
--- a/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/form__stiffness_form__Lagrange_1_2D.cpp
+++ /dev/null
@@ -1,203 +0,0 @@
-//
-// This code complies with UFC version 1.0, and is generated with SyFi version 0.4.0.
-//
-// http://www.fenics.org/syfi/
-// http://www.fenics.org/ufc/
-//
-
-
-#include <stdexcept>
-#include <math.h>
-#include <ufc.h>
-#include <pycc/Functions/Ptv.h>
-#include <pycc/Functions/Ptv_tools.h>
-#include <pycc/Functions/Dof_Ptv.h>
-#include <pycc/Functions/OrderedPtvSet.h>
-#include <pycc/Functions/Dof_OrderedPtvSet.h>
-#include "dof_map_Lagrange_1_2D.h"
-#include "fe_Lagrange_1_2D.h"
-#include "form__stiffness_form__Lagrange_1_2D.h"
-
-
-namespace pycc
-{
-
-/// This class defines the interface for the tabulation of the cell
-/// tensor corresponding to the local contribution to a form from
-/// the integral over a cell.
-
-class cell_itg__stiffness_form__Lagrange_1_2D: public ufc::cell_integral
-{
-public:
-
-  /// Constructor
-  cell_itg__stiffness_form__Lagrange_1_2D();
-
-  /// Destructor
-  virtual ~cell_itg__stiffness_form__Lagrange_1_2D();
-
-  /// Tabulate the tensor for the contribution from a local cell
-  virtual void tabulate_tensor(double* A,
-                               const double * const * w,
-                               const ufc::cell& c) const;
-
-};
-
-
-/// Constructor
-cell_itg__stiffness_form__Lagrange_1_2D::cell_itg__stiffness_form__Lagrange_1_2D() 
-: ufc::cell_integral()
-{
-
-}
-
-/// Destructor
-cell_itg__stiffness_form__Lagrange_1_2D::~cell_itg__stiffness_form__Lagrange_1_2D()
-{
-
-}
-
-/// Tabulate the tensor for the contribution from a local cell
-void cell_itg__stiffness_form__Lagrange_1_2D::tabulate_tensor(double* A,
-                                    const double * const * w,
-                                    const ufc::cell& c) const
-{
-  // coordinates
-  double x0 = c.coordinates[0][0]; double y0 = c.coordinates[0][1];
-  double x1 = c.coordinates[1][0]; double y1 = c.coordinates[1][1];
-  double x2 = c.coordinates[2][0]; double y2 = c.coordinates[2][1];
-  
-  // affine map
-  double G00 = x1 - x0;
-  double G01 = x2 - x0;
-  
-  double G10 = y1 - y0;
-  double G11 = y2 - y0;
-  
-  double detG_tmp = G00*G11-G01*G10;
-  double detG = fabs(detG_tmp);
-  
-  double GinvT00 =  G11 / detG_tmp;
-  double GinvT01 = -G10 / detG_tmp;
-  double GinvT10 = -G01 / detG_tmp;
-  double GinvT11 =  G00 / detG_tmp;
-  
-  
-  memset(A, 0, sizeof(double)*9);
-  
-  A[3*0 + 0] = detG*((GinvT01*GinvT01)/2.0+(GinvT11*GinvT11)/2.0+GinvT10*GinvT00
-      +GinvT01*GinvT11+(GinvT00*GinvT00)/2.0+(GinvT10*GinvT10)/2.0);
-  A[3*0 + 1] = detG*(-(GinvT01*GinvT01)/2.0-GinvT10*GinvT00/2.0
-      -GinvT01*GinvT11/2.0-(GinvT00*GinvT00)/2.0);
-  A[3*0 + 2] = detG*(-(GinvT11*GinvT11)/2.0-GinvT10*GinvT00/2.0
-      -GinvT01*GinvT11/2.0-(GinvT10*GinvT10)/2.0);
-  A[3*1 + 0] = detG*(-(GinvT01*GinvT01)/2.0-GinvT10*GinvT00/2.0
-      -GinvT01*GinvT11/2.0-(GinvT00*GinvT00)/2.0);
-  A[3*1 + 1] = detG*((GinvT01*GinvT01)/2.0+(GinvT00*GinvT00)/2.0);
-  A[3*1 + 2] = detG*(GinvT10*GinvT00/2.0+GinvT01*GinvT11/2.0);
-  A[3*2 + 0] = detG*(-(GinvT11*GinvT11)/2.0-GinvT10*GinvT00/2.0
-      -GinvT01*GinvT11/2.0-(GinvT10*GinvT10)/2.0);
-  A[3*2 + 1] = detG*(GinvT10*GinvT00/2.0+GinvT01*GinvT11/2.0);
-  A[3*2 + 2] = detG*((GinvT11*GinvT11)/2.0+(GinvT10*GinvT10)/2.0);
-}
-
-
-
-/// Constructor
-form__stiffness_form__Lagrange_1_2D::form__stiffness_form__Lagrange_1_2D() : ufc::form()
-{
-  
-}
-
-/// Destructor
-form__stiffness_form__Lagrange_1_2D::~form__stiffness_form__Lagrange_1_2D()
-{
-  
-}
-
-/// Return a string identifying the form
-const char* form__stiffness_form__Lagrange_1_2D::signature() const
-{
-  return "form__stiffness_form__Lagrange_1_2D // generated by SyFi";
-}
-
-/// Return the rank of the global tensor (r)
-unsigned int form__stiffness_form__Lagrange_1_2D::rank() const
-{
-  return 2;
-}
-
-/// Return the number of coefficients (n)
-unsigned int form__stiffness_form__Lagrange_1_2D::num_coefficients() const
-{
-  return 0;
-}
-
-/// Return the number of cell integrals
-unsigned int form__stiffness_form__Lagrange_1_2D::num_cell_integrals() const
-{
-  return 1;
-}
-  
-/// Return the number of exterior facet integrals
-unsigned int form__stiffness_form__Lagrange_1_2D::num_exterior_facet_integrals() const
-{
-  return 0;
-}
-  
-/// Return the number of interior facet integrals
-unsigned int form__stiffness_form__Lagrange_1_2D::num_interior_facet_integrals() const
-{
-  return 0;
-}
-
-/// Create a new finite element for argument function i
-ufc::finite_element* form__stiffness_form__Lagrange_1_2D::
-     create_finite_element(unsigned int i) const
-{
-  switch(i)
-  {
-  case 0:
-    return new fe_Lagrange_1_2D();
-  case 1:
-    return new fe_Lagrange_1_2D();
-  }
-  throw std::runtime_error("Invalid index in create_finite_element()");
-}
-
-/// Create a new dof map for argument function i
-ufc::dof_map* form__stiffness_form__Lagrange_1_2D::create_dof_map(unsigned int i) const
-{
-  switch(i)
-  {
-  case 0:
-    return new dof_map_Lagrange_1_2D();
-  case 1:
-    return new dof_map_Lagrange_1_2D();
-  }
-  throw std::runtime_error("Invalid index in create_dof_map()");
-}
-
-/// Create a new cell integral on sub domain i
-ufc::cell_integral* form__stiffness_form__Lagrange_1_2D::
-     create_cell_integral(unsigned int i) const
-{
-  return new cell_itg__stiffness_form__Lagrange_1_2D();
-}
-
-/// Create a new exterior facet integral on sub domain i
-ufc::exterior_facet_integral* form__stiffness_form__Lagrange_1_2D::
-     create_exterior_facet_integral(unsigned int i) const
-{
-  return 0;
-}
-
-/// Create a new interior facet integral on sub domain i
-ufc::interior_facet_integral* form__stiffness_form__Lagrange_1_2D::
-     create_interior_facet_integral(unsigned int i) const
-{
-  return 0;
-}
-
-
-} // namespace
diff --git a/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/form__stiffness_form__Lagrange_1_2D.h b/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/form__stiffness_form__Lagrange_1_2D.h
deleted file mode 100644
index edd5a37..0000000
--- a/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/form__stiffness_form__Lagrange_1_2D.h
+++ /dev/null
@@ -1,92 +0,0 @@
-//
-// This code complies with UFC version 1.0, and is generated with SyFi version 0.4.0.
-//
-// http://www.fenics.org/syfi/
-// http://www.fenics.org/ufc/
-//
-
-
-#ifndef __form__stiffness_form__Lagrange_1_2D_H
-#define __form__stiffness_form__Lagrange_1_2D_H
-
-#include <stdexcept>
-#include <math.h>
-#include <ufc.h>
-#include <pycc/Functions/Ptv.h>
-#include <pycc/Functions/Ptv_tools.h>
-#include <pycc/Functions/Dof_Ptv.h>
-#include <pycc/Functions/OrderedPtvSet.h>
-#include <pycc/Functions/Dof_OrderedPtvSet.h>
-#include "dof_map_Lagrange_1_2D.h"
-#include "fe_Lagrange_1_2D.h"
-
-
-namespace pycc
-{
-
-/// This class defines the interface for the assembly of the global
-/// tensor corresponding to a form with r + n arguments, that is, a
-/// mapping
-///
-///     a : V1 x V2 x ... Vr x W1 x W2 x ... x Wn -> R
-///
-/// with arguments v1, v2, ..., vr, w1, w2, ..., wn. The rank r
-/// global tensor A is defined by
-///
-///     A = a(V1, V2, ..., Vr, w1, w2, ..., wn),
-///
-/// where each argument Vj represents the application to the
-/// sequence of basis functions of Vj and w1, w2, ..., wn are given
-/// fixed functions (coefficients).
-
-class form__stiffness_form__Lagrange_1_2D: public ufc::form
-{  
-public:
-
-  /// Constructor
-  form__stiffness_form__Lagrange_1_2D();
-
-  /// Destructor
-  virtual ~form__stiffness_form__Lagrange_1_2D();
-
-  /// Return a string identifying the form
-  virtual const char* signature() const;
-
-  /// Return the rank of the global tensor (r)
-  virtual unsigned int rank() const;
-
-  /// Return the number of coefficients (n)
-  virtual unsigned int num_coefficients() const;
-
-  /// Return the number of cell integrals
-  virtual unsigned int num_cell_integrals() const;
-  
-  /// Return the number of exterior facet integrals
-  virtual unsigned int num_exterior_facet_integrals() const;
-  
-  /// Return the number of interior facet integrals
-  virtual unsigned int num_interior_facet_integrals() const;
-
-  /// Create a new finite element for argument function i
-  virtual ufc::finite_element* create_finite_element(unsigned int i) const;
-
-  /// Create a new dof map for argument function i
-  virtual ufc::dof_map* create_dof_map(unsigned int i) const;
-
-  /// Create a new cell integral on sub domain i
-  virtual ufc::cell_integral* create_cell_integral(unsigned int i) const;
-
-  /// Create a new exterior facet integral on sub domain i
-  virtual ufc::exterior_facet_integral* 
-    create_exterior_facet_integral(unsigned int i) const;
-
-  /// Create a new interior facet integral on sub domain i
-  virtual ufc::interior_facet_integral* 
-    create_interior_facet_integral(unsigned int i) const;
-
-};
-
-
-} // namespace
-
-#endif
diff --git a/ufc-merge-into-ffc/doc/manual/eps/hexahedron.eps b/ufc-merge-into-ffc/doc/manual/eps/hexahedron.eps
deleted file mode 100644
index d87024f..0000000
--- a/ufc-merge-into-ffc/doc/manual/eps/hexahedron.eps
+++ /dev/null
@@ -1,215 +0,0 @@
-%!PS-Adobe-3.0 EPSF-3.0
-%%Creator: inkscape 0.44
-%%Pages: 1
-%%Orientation: Portrait
-%%BoundingBox: 32 56 543 470
-%%HiResBoundingBox: 32.571875 56 542.36875 469.99688
-%%EndComments
-%%Page: 1 1
-0 842 translate
-0.8 -0.8 scale
-gsave [1 0 0 1 0 0] concat
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-140 852.36218 moveto
-492.81509 958.50976 lineto
-stroke
-gsave [-1.7236793 -0.51858435 0.51858435 -1.7236793 492.81509 958.50976] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-8.7185878 4.0337352 moveto
--2.2072895 0.016013256 lineto
-8.7185884 -4.0017078 lineto
-6.97309 -1.6296469 6.9831476 1.6157441 8.7185878 4.0337352 curveto
-closepath
-eofill
-grestore
-grestore
-gsave [1 0 0 -1 40 872.36218] concat
-gsave
-/newlatin1font {findfont dup length dict copy dup /Encoding ISOLatin1Encoding put definefont} def
-/BitstreamVeraSans-Roman-ISOLatin1 /BitstreamVeraSans-Roman newlatin1font
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v0) show
-grestore
-grestore
-gsave [1 0 0 -1 380 982.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v1) show
-grestore
-grestore
-gsave [1 0 0 -1 650 842.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v2) show
-grestore
-grestore
-gsave [1 0 0 -1 40 522.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v4) show
-grestore
-grestore
-gsave
-0.38431373 0.52549022 0.92941177 setrgbcolor
-newpath
-140 852.36218 moveto
-440 942.36218 lineto
-440 552.36218 lineto
-140 522.36218 lineto
-140 852.36218 lineto
-closepath
-eofill
-grestore
-gsave
-0.27450982 0.41960785 0.83529413 setrgbcolor
-newpath
-440 942.36218 moveto
-640 832.36218 lineto
-640 522.36218 lineto
-440 552.36218 lineto
-440 942.36218 lineto
-closepath
-eofill
-grestore
-gsave
-0.21568628 0.39607844 0.9137255 setrgbcolor
-newpath
-140 522.36218 moveto
-440 552.36218 lineto
-640 522.36218 lineto
-370 502.36218 lineto
-140 522.36218 lineto
-closepath
-eofill
-grestore
-0.29411766 0.29411766 0.58823532 setrgbcolor
-[18 6] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-370 502.36218 moveto
-370 772.36218 lineto
-stroke
-0.29411766 0.29411766 0.58823532 setrgbcolor
-[18 6] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-140 852.36218 moveto
-370 772.36218 lineto
-640 832.36218 lineto
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-140 852.36218 moveto
-140 482.36218 lineto
-stroke
-gsave [-1.1021457e-16 1.8 -1.8 -1.1021457e-16 140 482.36218] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-8.7185878 4.0337352 moveto
--2.2072895 0.016013256 lineto
-8.7185884 -4.0017078 lineto
-6.97309 -1.6296469 6.9831476 1.6157441 8.7185878 4.0337352 curveto
-closepath
-eofill
-grestore
-grestore
-gsave [1 0 0 -1 650 512.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v6) show
-grestore
-grestore
-gsave [1 0 0 -1 330 482.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v7) show
-grestore
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-140 852.36218 moveto
-440 942.36218 lineto
-440 552.36218 lineto
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-140 522.36218 moveto
-440 552.36218 lineto
-640 522.36218 lineto
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-440 942.36218 moveto
-640 832.36218 lineto
-640 522.36218 lineto
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-2.8 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-140 522.36218 moveto
-370 502.36218 lineto
-640 522.36218 lineto
-stroke
-grestore
-showpage
-%%EOF
diff --git a/ufc-merge-into-ffc/doc/manual/eps/insertion.eps b/ufc-merge-into-ffc/doc/manual/eps/insertion.eps
deleted file mode 100644
index 2ea4ced..0000000
--- a/ufc-merge-into-ffc/doc/manual/eps/insertion.eps
+++ /dev/null
@@ -1,472 +0,0 @@
-%!PS-Adobe-3.0 EPSF-3.0
-%%BoundingBox: 60 258 582 592
-%%HiResBoundingBox: 60.809375 258.244922 581.239844 591.487500
-%...................................
-%%Creator: ESP Ghostscript 81504 (epswrite)
-%%CreationDate: 2007/09/08 00:52:13
-%%DocumentData: Clean7Bit
-%%LanguageLevel: 2
-%%EndComments
-%%BeginProlog
-% This copyright applies to everything between here and the %%EndProlog:
-% Copyright (C) 2004 artofcode LLC, Benicia, CA.  All rights reserved.
-%%BeginResource: procset GS_epswrite_2_0_1001
-/GS_epswrite_2_0_1001 80 dict dup begin
-/PageSize 2 array def/setpagesize{ PageSize aload pop 3 index eq exch
-4 index eq and{ pop pop pop}{ PageSize dup  1
-5 -1 roll put 0 4 -1 roll put dup null eq {false} {dup where} ifelse{ exch get exec}
-{ pop/setpagedevice where
-{ pop 1 dict dup /PageSize PageSize put setpagedevice}
-{ /setpage where{ pop PageSize aload pop pageparams 3 {exch pop} repeat
-setpage}if}ifelse}ifelse}ifelse} bind def
-/!{bind def}bind def/#{load def}!/N/counttomark #
-/rG{3{3 -1 roll 255 div}repeat setrgbcolor}!/G{255 div setgray}!/K{0 G}!
-/r6{dup 3 -1 roll rG}!/r5{dup 3 1 roll rG}!/r3{dup rG}!
-/w/setlinewidth #/J/setlinecap #
-/j/setlinejoin #/M/setmiterlimit #/d/setdash #/i/setflat #
-/m/moveto #/l/lineto #/c/rcurveto #
-/p{N 2 idiv{N -2 roll rlineto}repeat}!
-/P{N 0 gt{N -2 roll moveto p}if}!
-/h{p closepath}!/H{P closepath}!
-/lx{0 rlineto}!/ly{0 exch rlineto}!/v{0 0 6 2 roll c}!/y{2 copy c}!
-/re{4 -2 roll m exch dup lx exch ly neg lx h}!
-/^{3 index neg 3 index neg}!
-/f{P fill}!/f*{P eofill}!/s{H stroke}!/S{P stroke}!
-/q/gsave #/Q/grestore #/rf{re fill}!
-/Y{P clip newpath}!/Y*{P eoclip newpath}!/rY{re Y}!
-/|={pop exch 4 1 roll 1 array astore cvx 3 array astore cvx exch 1 index def exec}!
-/|{exch string readstring |=}!
-/+{dup type/nametype eq{2 index 7 add -3 bitshift 2 index mul}if}!
-/@/currentfile #/${+ @ |}!
-/B{{2 copy string{readstring pop}aload pop 4 array astore cvx
-3 1 roll}repeat pop pop true}!
-/Ix{[1 0 0 1 11 -2 roll exch neg exch neg]exch}!
-/,{true exch Ix imagemask}!/If{false exch Ix imagemask}!/I{exch Ix image}!
-/Ic{exch Ix false 3 colorimage}!
-/F{/Columns counttomark 3 add -2 roll/Rows exch/K -1/BlackIs1 true>>
-/CCITTFaxDecode filter}!/FX{<</EndOfBlock false F}!
-/X{/ASCII85Decode filter}!/@X{@ X}!/&2{2 index 2 index}!
-/@F{@ &2<<F}!/@C{@X &2 FX}!
-/$X{+ @X |}!/&4{4 index 4 index}!/$F{+ @ &4<<F |}!/$C{+ @X &4 FX |}!
-/IC{3 1 roll 10 dict begin 1{/ImageType/Interpolate/Decode/DataSource
-/ImageMatrix/BitsPerComponent/Height/Width}{exch def}forall
-currentdict end image}!
-/~{@ read {pop} if}!
-end def
-%%EndResource
-/pagesave null def
-%%EndProlog
-%%Page: 1 1
-%%BeginPageSetup
-GS_epswrite_2_0_1001 begin
-/pagesave save store 197 dict begin
-0.1 0.1 scale
-%%EndPageSetup
-gsave mark
-Q q
-0 0 250000 250000 re
-Y
-24.8 w
-K
-1012.4 2594.85 3000 3000 re
-S
-0 128 176 rG
-1012.4 4794.85 3000 200 re
-f
-K
-1012.4 4794.85 3000 200 re
-S
-0 128 176 rG
-1012.4 3219.64 3000 200 re
-f
-K
-1012.4 3219.64 3000 200 re
-S
-0 128 176 rG
-1012.4 4194.85 3000 200 re
-f
-K
-1012.4 4194.85 3000 200 re
-S
-0 215 32 rG
-1412.4 2594.85 200 3000 re
-f
-K
-1412.4 2594.85 200 3000 re
-S
-0 197 75 rG
-2412.4 2594.85 200 3000 re
-f
-K
-2412.4 2594.85 200 3000 re
-S
-0 180 67 rG
-2987.6 2594.85 200 3000 re
-f
-K
-2987.6 2594.85 200 3000 re
-S
-0 128 176 rG
-4600 5207.25 400 400 re
-f
-K
-4600 5207.25 400 400 re
-S
-0 128 176 rG
-5000 5207.25 400 400 re
-f
-K
-5000 5207.25 400 400 re
-S
-0 128 176 rG
-5400 5207.25 400 400 re
-f
-K
-5400 5207.25 400 400 re
-S
-0 128 176 rG
-4600 4807.25 400 400 re
-f
-K
-4600 4807.25 400 400 re
-S
-0 128 176 rG
-5000 4807.25 400 400 re
-f
-K
-5000 4807.25 400 400 re
-S
-0 128 176 rG
-5400 4807.25 400 400 re
-f
-K
-5400 4807.25 400 400 re
-S
-0 128 176 rG
-4600 4407.25 400 400 re
-f
-K
-4600 4407.25 400 400 re
-S
-0 128 176 rG
-5000 4407.25 400 400 re
-f
-K
-5000 4407.25 400 400 re
-S
-0 128 176 rG
-5400 4407.25 400 400 re
-f
-K
-5400 4407.25 400 400 re
-S
-0.2 i
-608.09 4832 0 105 17.25 0 ^ ^ H
-608.09 4956.03 0 21.84 17.25 0 ^ ^ f
-704.03 4959.5 m
--9.75 0 -17.08 -4.8 -21.98 -14.39 c
--4.91 -9.6 -7.36 -24.02 -7.36 -43.27 c
-0 -19.19 2.45 -33.58 7.36 -43.17 c
-4.9 -9.6 12.23 -14.39 21.98 -14.39 c
-9.81 0 17.17 4.79 22.08 14.39 c
-4.9 9.59 7.36 23.98 7.36 43.17 c
-0 19.25 -2.46 33.67 -7.36 43.27 c
--4.91 9.59 -12.27 14.39 -22.08 14.39 c
-h
-704.03 4974.5 m
-15.68 0 27.67 -6.21 35.95 -18.61 c
-8.28 -12.41 12.42 -30.42 12.42 -54.05 c
-0 -23.57 -4.14 -41.55 -12.42 -53.95 c
--8.29 -12.41 -20.27 -18.61 -35.95 -18.61 c
--15.69 0 -27.68 6.2 -35.95 18.61 c
--8.29 12.4 -12.42 30.39 -12.42 53.95 c
-0 23.63 4.14 41.64 12.42 54.05 c
-8.28 12.4 20.26 18.61 35.95 18.61 c
-f
-608.09 4209 0 105 17.25 0 ^ ^ H
-608.09 4333.03 0 21.84 17.25 0 ^ ^ f
-666.81 4209 0 15.94 30.94 0 0 106.78 -33.66 -6.75 0 17.25 33.47 6.75 18.94 0 
-0 -124.03 30.94 0 0 -15.94 -80.62 0 f
-608.09 3215 0 105 17.25 0 ^ ^ H
-608.09 3339.03 0 21.84 17.25 0 ^ ^ f
-679.84 3230.94 66.09 0 0 -15.94 -88.87 0 ^ P
-7.18 7.43 16.98 17.42 29.39 29.95 c
-12.4 12.53 20.2 20.61 23.39 24.23 c
-6.06 6.81 10.29 12.57 12.7 17.3 c
-2.4 4.71 3.61 9.36 3.61 13.92 c
-0 7.43 -2.61 13.5 -7.83 18.19 c
--5.22 4.69 -12.02 7.03 -20.39 7.03 c
--5.94 0 -12.21 -1.03 -18.8 -3.09 c
--6.6 -2.06 -13.64 -5.19 -21.14 -9.37 c
-0 19.13 p
-7.62 3.06 14.75 5.37 21.38 6.94 c
-6.62 1.56 12.68 2.34 18.19 2.34 c
-14.5 0 26.06 -3.63 34.69 -10.87 c
-8.63 -7.25 12.94 -16.94 12.94 -29.06 c
-0 -5.75 -1.08 -11.2 -3.23 -16.36 c
--2.16 -5.16 -6.08 -11.24 -11.77 -18.23 c
--1.57 -1.82 -6.54 -7.05 -14.91 -15.7 c
--8.38 -8.66 -20.19 -20.77 -35.44 -36.33 c
-f
-1442.09 5765.13 0 106.88 17.25 0 ^ P
-0 -13.37 -2.55 -23.06 -7.64 -29.06 c
--5.1 -6 -13.3 -9 -24.61 -9 c
--6.56 0 0 14.63 4.59 0 p
-6.56 0 11.03 1.52 13.41 4.55 c
-2.37 3.03 3.56 9.33 3.56 18.89 c
-h
-1442.09 5891.03 0 21.84 17.25 0 ^ ^ f
-1539.03 5894.5 m
--9.75 0 -17.08 -4.8 -21.98 -14.39 c
--4.91 -9.6 -7.36 -24.02 -7.36 -43.27 c
-0 -19.19 2.45 -33.58 7.36 -43.17 c
-4.9 -9.6 12.23 -14.39 21.98 -14.39 c
-9.81 0 17.17 4.79 22.08 14.39 c
-4.9 9.59 7.36 23.98 7.36 43.17 c
-0 19.25 -2.46 33.67 -7.36 43.27 c
--4.91 9.59 -12.27 14.39 -22.08 14.39 c
-h
-1539.03 5909.5 m
-15.68 0 27.67 -6.21 35.95 -18.61 c
-8.28 -12.41 12.42 -30.42 12.42 -54.05 c
-0 -23.57 -4.14 -41.55 -12.42 -53.95 c
--8.29 -12.41 -20.27 -18.61 -35.95 -18.61 c
--15.69 0 -27.68 6.2 -35.95 18.61 c
--8.29 12.4 -12.42 30.39 -12.42 53.95 c
-0 23.63 4.14 41.64 12.42 54.05 c
-8.28 12.4 20.26 18.61 35.95 18.61 c
-f
-2426.09 5767.13 0 106.88 17.25 0 ^ P
-0 -13.37 -2.55 -23.06 -7.64 -29.06 c
--5.1 -6 -13.3 -9 -24.61 -9 c
--6.56 0 0 14.63 4.59 0 p
-6.56 0 11.03 1.52 13.41 4.55 c
-2.37 3.03 3.56 9.33 3.56 18.89 c
-h
-2426.09 5893.03 0 21.84 17.25 0 ^ ^ f
-2485.81 5769 0 15.94 30.94 0 0 106.78 -33.66 -6.75 0 17.25 33.47 6.75 18.94 0 
-0 -124.03 30.94 0 0 -15.94 -80.62 0 f
-3008.09 5759.13 0 106.88 17.25 0 ^ P
-0 -13.37 -2.55 -23.06 -7.64 -29.06 c
--5.1 -6 -13.3 -9 -24.61 -9 c
--6.56 0 0 14.63 4.59 0 p
-6.56 0 11.03 1.52 13.41 4.55 c
-2.37 3.03 3.56 9.33 3.56 18.89 c
-h
-3008.09 5885.03 0 21.84 17.25 0 ^ ^ f
-3079.84 5776.94 66.09 0 0 -15.94 -88.87 0 ^ P
-7.18 7.43 16.98 17.42 29.39 29.95 c
-12.4 12.53 20.2 20.61 23.39 24.23 c
-6.06 6.81 10.29 12.57 12.7 17.3 c
-2.4 4.71 3.61 9.36 3.61 13.92 c
-0 7.43 -2.61 13.5 -7.83 18.19 c
--5.22 4.69 -12.02 7.03 -20.39 7.03 c
--5.94 0 -12.21 -1.03 -18.8 -3.09 c
--6.6 -2.06 -13.64 -5.19 -21.14 -9.37 c
-0 19.13 p
-7.62 3.06 14.75 5.37 21.38 6.94 c
-6.62 1.56 12.68 2.34 18.19 2.34 c
-14.5 0 26.06 -3.63 34.69 -10.87 c
-8.63 -7.25 12.94 -16.94 12.94 -29.06 c
-0 -5.75 -1.08 -11.2 -3.23 -16.36 c
--2.16 -5.16 -6.08 -11.24 -11.77 -18.23 c
--1.57 -1.82 -6.54 -7.05 -14.91 -15.7 c
--8.38 -8.66 -20.19 -20.77 -35.44 -36.33 c
-f
-4319.81 5351 0 15.94 30.94 0 0 106.78 -33.66 -6.75 0 17.25 33.47 6.75 18.94 0 
-0 -124.03 30.94 0 0 -15.94 -80.62 0 f
-4333.84 4982.94 66.09 0 0 -15.94 -88.87 0 ^ P
-7.18 7.43 16.98 17.42 29.39 29.95 c
-12.4 12.53 20.2 20.61 23.39 24.23 c
-6.06 6.81 10.29 12.57 12.7 17.3 c
-2.4 4.71 3.61 9.36 3.61 13.92 c
-0 7.43 -2.61 13.5 -7.83 18.19 c
--5.22 4.69 -12.02 7.03 -20.39 7.03 c
--5.94 0 -12.21 -1.03 -18.8 -3.09 c
--6.6 -2.06 -13.64 -5.19 -21.14 -9.37 c
-0 19.13 p
-7.62 3.06 14.75 5.37 21.38 6.94 c
-6.62 1.56 12.68 2.34 18.19 2.34 c
-14.5 0 26.06 -3.63 34.69 -10.87 c
-8.63 -7.25 12.94 -16.94 12.94 -29.06 c
-0 -5.75 -1.08 -11.2 -3.23 -16.36 c
--2.16 -5.16 -6.08 -11.24 -11.77 -18.23 c
--1.57 -1.82 -6.54 -7.05 -14.91 -15.7 c
--8.38 -8.66 -20.19 -20.77 -35.44 -36.33 c
-f
-4370.91 4634.47 m
-9.06 -1.94 16.14 -5.97 21.23 -12.09 c
-5.09 -6.13 7.64 -13.69 7.64 -22.69 c
-0 -13.82 -4.75 -24.5 -14.25 -32.06 c
--9.5 -7.57 -23 -11.34 -40.5 -11.34 c
--5.88 0 -11.93 0.58 -18.14 1.73 c
--6.22 1.15 -12.64 2.89 -19.27 5.2 c
-0 18.28 p
-5.25 -3.07 11 -5.38 17.25 -6.94 c
-6.25 -1.57 12.78 -2.34 19.59 -2.34 c
-11.87 0 20.92 2.34 27.14 7.03 c
-6.21 4.69 9.33 11.5 9.33 20.44 c
-0 8.25 -2.89 14.7 -8.67 19.36 c
--5.79 4.65 -13.83 6.98 -24.14 6.98 c
--16.31 0 0 15.56 17.06 0 p
-9.31 0 16.43 1.86 21.38 5.58 c
-4.93 3.71 7.41 9.07 7.41 16.08 c
-0 7.18 -2.55 12.7 -7.64 16.55 c
--5.1 3.84 -12.39 5.77 -21.89 5.77 c
--5.19 0 -10.75 -0.56 -16.69 -1.69 c
--5.94 -1.12 -12.47 -2.88 -19.59 -5.25 c
-0 16.88 p
-7.18 2 13.92 3.5 20.2 4.5 c
-6.28 1 12.2 1.5 17.77 1.5 c
-14.37 0 25.75 -3.27 34.13 -9.8 c
-8.37 -6.54 12.56 -15.36 12.56 -26.48 c
-0 -7.75 -2.22 -14.3 -6.66 -19.64 c
--4.44 -5.34 -10.75 -9.05 -18.94 -11.11 c
-f
-4767.81 5708 0 15.94 30.94 0 0 106.78 -33.66 -6.75 0 17.25 33.47 6.75 18.94 0 
-0 -124.03 30.94 0 0 -15.94 -80.62 0 f
-5173.84 5720.94 66.09 0 0 -15.94 -88.87 0 ^ P
-7.18 7.43 16.98 17.42 29.39 29.95 c
-12.4 12.53 20.2 20.61 23.39 24.23 c
-6.06 6.81 10.29 12.57 12.7 17.3 c
-2.4 4.71 3.61 9.36 3.61 13.92 c
-0 7.43 -2.61 13.5 -7.83 18.19 c
--5.22 4.69 -12.02 7.03 -20.39 7.03 c
--5.94 0 -12.21 -1.03 -18.8 -3.09 c
--6.6 -2.06 -13.64 -5.19 -21.14 -9.37 c
-0 19.13 p
-7.62 3.06 14.75 5.37 21.38 6.94 c
-6.62 1.56 12.68 2.34 18.19 2.34 c
-14.5 0 26.06 -3.63 34.69 -10.87 c
-8.63 -7.25 12.94 -16.94 12.94 -29.06 c
-0 -5.75 -1.08 -11.2 -3.23 -16.36 c
--2.16 -5.16 -6.08 -11.24 -11.77 -18.23 c
--1.57 -1.82 -6.54 -7.05 -14.91 -15.7 c
--8.38 -8.66 -20.19 -20.77 -35.44 -36.33 c
-f
-5603.91 5780.47 m
-9.06 -1.94 16.14 -5.97 21.23 -12.09 c
-5.09 -6.13 7.64 -13.69 7.64 -22.69 c
-0 -13.82 -4.75 -24.5 -14.25 -32.06 c
--9.5 -7.57 -23 -11.34 -40.5 -11.34 c
--5.88 0 -11.93 0.58 -18.14 1.73 c
--6.22 1.15 -12.64 2.89 -19.27 5.2 c
-0 18.28 p
-5.25 -3.07 11 -5.38 17.25 -6.94 c
-6.25 -1.57 12.78 -2.34 19.59 -2.34 c
-11.87 0 20.92 2.34 27.14 7.03 c
-6.21 4.69 9.33 11.5 9.33 20.44 c
-0 8.25 -2.89 14.7 -8.67 19.36 c
--5.79 4.65 -13.83 6.98 -24.14 6.98 c
--16.31 0 0 15.56 17.06 0 p
-9.31 0 16.43 1.86 21.38 5.58 c
-4.93 3.71 7.41 9.07 7.41 16.08 c
-0 7.18 -2.55 12.7 -7.64 16.55 c
--5.1 3.84 -12.39 5.77 -21.89 5.77 c
--5.19 0 -10.75 -0.56 -16.69 -1.69 c
--5.94 -1.12 -12.47 -2.88 -19.59 -5.25 c
-0 16.88 p
-7.18 2 13.92 3.5 20.2 4.5 c
-6.28 1 12.2 1.5 17.77 1.5 c
-14.37 0 25.75 -3.27 34.13 -9.8 c
-8.37 -6.54 12.56 -15.36 12.56 -26.48 c
-0 -7.75 -2.22 -14.3 -6.66 -19.64 c
--4.44 -5.34 -10.75 -9.05 -18.94 -11.11 c
-f
-4867.41 3525.66 -25.78 69.66 -25.69 -69.66 51.47 0 H
-4777.5 3474 53.44 139.97 21.47 0 53.34 -139.97 -19.69 0 -12.75 35.91 -63.09 0 -12.75 -35.91 
--19.97 0 f
-4943.84 3489.94 66.09 0 0 -15.94 -88.87 0 ^ P
-7.18 7.43 16.98 17.42 29.39 29.95 c
-12.4 12.53 20.2 20.61 23.39 24.23 c
-6.06 6.81 10.29 12.57 12.7 17.3 c
-2.4 4.71 3.61 9.36 3.61 13.92 c
-0 7.43 -2.61 13.5 -7.83 18.19 c
--5.22 4.69 -12.02 7.03 -20.39 7.03 c
--5.94 0 -12.21 -1.03 -18.8 -3.09 c
--6.6 -2.06 -13.64 -5.19 -21.14 -9.37 c
-0 19.13 p
-7.62 3.06 14.75 5.37 21.38 6.94 c
-6.62 1.56 12.68 2.34 18.19 2.34 c
-14.5 0 26.06 -3.63 34.69 -10.87 c
-8.63 -7.25 12.94 -16.94 12.94 -29.06 c
-0 -5.75 -1.08 -11.2 -3.23 -16.36 c
--2.16 -5.16 -6.08 -11.24 -11.77 -18.23 c
--1.57 -1.82 -6.54 -7.05 -14.91 -15.7 c
--8.38 -8.66 -20.19 -20.77 -35.44 -36.33 c
-f
-5053.81 3474 0 15.94 30.94 0 0 106.78 -33.66 -6.75 0 17.25 33.47 6.75 18.94 0 
-0 -124.03 30.94 0 0 -15.94 -80.62 0 f
-1 i
-255 0 r6
-1412.04 4795.21 200 200 re
-f*
-16 w
-K
-1412.04 4795.21 200 200 re
-S
-255 0 r6
-2410.07 4795.21 200 200 re
-f*
-K
-2410.07 4795.21 200 200 re
-S
-255 0 r6
-2987.88 4795.21 200 200 re
-f*
-K
-2987.88 4795.21 200 200 re
-S
-255 0 r6
-1412.04 4197.2 200 200 re
-f*
-K
-1412.04 4197.2 200 200 re
-S
-255 0 r6
-2986.69 4197.2 200 200 re
-f*
-K
-2986.69 4197.2 200 200 re
-S
-255 0 r6
-1412.04 3223.41 200 200 re
-f*
-K
-1412.04 3223.41 200 200 re
-S
-255 0 r6
-2412.41 3220.05 200 200 re
-f*
-K
-2412.41 3220.05 200 200 re
-S
-255 0 r6
-2986.69 3220.05 200 200 re
-f*
-K
-2986.69 3220.05 200 200 re
-S
-255 0 r6
-2986.69 3220.05 200 200 re
-f*
-K
-2986.69 3220.05 200 200 re
-S
-255 0 r6
-2409.55 4194.34 200 200 re
-f*
-K
-2409.55 4194.34 200 200 re
-S
-24 w
-5200 4567.84 m
--1204.11 -2788.02 -2561.75 -1406.13 y
-S
-2631.09 3017.05 -126.48 280.09 277.8 -131.43 P
--76.97 -11.02 -137.89 -71.24 -151.32 -148.66 c
-f*
-cleartomark end end pagesave restore showpage
-%%PageTrailer
-%%Trailer
-%%Pages: 1
diff --git a/ufc-merge-into-ffc/doc/manual/eps/interval.eps b/ufc-merge-into-ffc/doc/manual/eps/interval.eps
deleted file mode 100644
index bf3eca6..0000000
--- a/ufc-merge-into-ffc/doc/manual/eps/interval.eps
+++ /dev/null
@@ -1,76 +0,0 @@
-%!PS-Adobe-3.0 EPSF-3.0
-%%Creator: inkscape 0.44
-%%Pages: 1
-%%Orientation: Portrait
-%%BoundingBox: 93 71 540 118
-%%HiResBoundingBox: 93.669684 71.728125 539.1785 117.80858
-%%EndComments
-%%Page: 1 1
-0 842 translate
-0.8 -0.8 scale
-gsave [1 0 0 1 0 0] concat
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-120 912.36218 moveto
-670 912.36218 lineto
-stroke
-gsave [-1.8 0 0 -1.8 670 912.36218] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-8.7185878 4.0337352 moveto
--2.2072895 0.016013256 lineto
-8.7185884 -4.0017078 lineto
-6.97309 -1.6296469 6.9831476 1.6157441 8.7185878 4.0337352 curveto
-closepath
-eofill
-grestore
-grestore
-gsave [1 0 0 -1 115.50507 962.36218] concat
-gsave
-/newlatin1font {findfont dup length dict copy dup /Encoding ISOLatin1Encoding put definefont} def
-/BitstreamVeraSans-Roman-ISOLatin1 /BitstreamVeraSans-Roman newlatin1font
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(0) show
-grestore
-grestore
-gsave [1 0 0 -1 611.96954 962.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(1) show
-grestore
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-121.51523 911.8571 moveto
-121.51523 931.8571 lineto
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-620 912.36218 moveto
-620 932.36218 lineto
-stroke
-grestore
-showpage
-%%EOF
diff --git a/ufc-merge-into-ffc/doc/manual/eps/numbering_example_quadrilaterals.eps b/ufc-merge-into-ffc/doc/manual/eps/numbering_example_quadrilaterals.eps
deleted file mode 100644
index 2f2ed40..0000000
--- a/ufc-merge-into-ffc/doc/manual/eps/numbering_example_quadrilaterals.eps
+++ /dev/null
@@ -1,339 +0,0 @@
-%!PS-Adobe-3.0 EPSF-3.0
-%%Creator: inkscape 0.44
-%%Pages: 1
-%%Orientation: Portrait
-%%BoundingBox: 24 79 456 528
-%%HiResBoundingBox: 24 79.728125 455.31426 527.24741
-%%EndComments
-%%Page: 1 1
-0 842 translate
-0.8 -0.8 scale
-gsave [1 0 0 1 0 0] concat
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-80 912.36218 moveto
-320 912.36218 lineto
-320 672.36218 lineto
-80 672.36218 lineto
-80 912.36218 lineto
-closepath
-stroke
-gsave [1 0 0 -1 68.417969 652.02234] concat
-gsave
-/newlatin1font {findfont dup length dict copy dup /Encoding ISOLatin1Encoding put definefont} def
-/BitstreamVeraSans-Roman-ISOLatin1 /BitstreamVeraSans-Roman newlatin1font
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(5) show
-grestore
-grestore
-gsave [1 0 0 -1 88.813477 902.1073] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v0) show
-grestore
-grestore
-gsave [1 0 0 1 14.57143 185.4286] concat
-gsave
-0 0 0.99215686 setrgbcolor
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-fill
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-2 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-stroke
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-320 912.36218 moveto
-560 912.36218 lineto
-560 672.36218 lineto
-320 672.36218 lineto
-320 912.36218 lineto
-closepath
-stroke
-gsave [1 0 0 1 15.42857 424.5714] concat
-gsave
-0 0 0.99215686 setrgbcolor
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-fill
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-2 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-stroke
-grestore
-gsave [1 0 0 1 254.5714 185.4286] concat
-gsave
-0 0 0.99215686 setrgbcolor
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-fill
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-2 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-stroke
-grestore
-gsave [1 0 0 1 254.5714 425.4286] concat
-gsave
-0 0 0.99215686 setrgbcolor
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-fill
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-2 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-stroke
-grestore
-gsave [1 0 0 1 494.5714 425.4286] concat
-gsave
-0 0 0.99215686 setrgbcolor
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-fill
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-2 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-stroke
-grestore
-gsave [1 0 0 1 494.5714 185.4286] concat
-gsave
-0 0 0.99215686 setrgbcolor
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-fill
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-2 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-stroke
-grestore
-gsave [1 0 0 -1 67.363281 952.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(0) show
-grestore
-grestore
-gsave [1 0 0 -1 307.36328 952.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(1) show
-grestore
-grestore
-gsave [1 0 0 -1 547.36328 952.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(2) show
-grestore
-grestore
-gsave [1 0 0 -1 547.36328 652.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(3) show
-grestore
-grestore
-gsave [1 0 0 -1 307.36328 652.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(4) show
-grestore
-grestore
-gsave [1 0 0 -1 288.81348 902.1073] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v1) show
-grestore
-grestore
-gsave [1 0 0 -1 288.81348 702.1073] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v2) show
-grestore
-grestore
-gsave [1 0 0 -1 90 702.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v3) show
-grestore
-grestore
-gsave [1 0 0 -1 330 902.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v0) show
-grestore
-grestore
-gsave [1 0 0 -1 528.81348 902.1073] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v3) show
-grestore
-grestore
-gsave [1 0 0 -1 528.81348 702.1073] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v2) show
-grestore
-grestore
-gsave [1 0 0 -1 328.81348 702.1073] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v1) show
-grestore
-grestore
-grestore
-showpage
-%%EOF
diff --git a/ufc-merge-into-ffc/doc/manual/eps/numbering_example_triangles.eps b/ufc-merge-into-ffc/doc/manual/eps/numbering_example_triangles.eps
deleted file mode 100644
index dcad7aa..0000000
--- a/ufc-merge-into-ffc/doc/manual/eps/numbering_example_triangles.eps
+++ /dev/null
@@ -1,245 +0,0 @@
-%!PS-Adobe-3.0 EPSF-3.0
-%%Creator: inkscape 0.44
-%%Pages: 1
-%%Orientation: Portrait
-%%BoundingBox: 24 80 568 528
-%%HiResBoundingBox: 24 80.271875 568 527.24741
-%%EndComments
-%%Page: 1 1
-0 842 translate
-0.8 -0.8 scale
-gsave [1 0 0 1 0 0] concat
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-58.5 693.86218 moveto
-378.5 913.86218 lineto
-378.5 473.86218 lineto
-58.5 693.86218 lineto
-closepath
-stroke
-gsave [1 0 0 -1 28.417969 672.02234] concat
-gsave
-/newlatin1font {findfont dup length dict copy dup /Encoding ISOLatin1Encoding put definefont} def
-/BitstreamVeraSans-Roman-ISOLatin1 /BitstreamVeraSans-Roman newlatin1font
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(0) show
-grestore
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-2 setlinejoin
-0 setlinecap
-newpath
-698.5 693.86218 moveto
-378.5 913.86218 lineto
-378.5 473.86218 lineto
-698.5 693.86218 lineto
-closepath
-stroke
-gsave [1 0 0 -1 358.41797 952.02234] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(1) show
-grestore
-grestore
-gsave [1 0 0 -1 358.17188 452.02234] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(3) show
-grestore
-grestore
-gsave [1 0 0 -1 697.13281 670.17468] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(2) show
-grestore
-grestore
-gsave [1 0 0 -1 88.813477 702.1073] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v0) show
-grestore
-grestore
-gsave [1 0 0 -1 330 862.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v1) show
-grestore
-grestore
-gsave [1 0 0 -1 329.46387 532.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v2) show
-grestore
-grestore
-gsave [1 0 0 -1 390 862.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v0) show
-grestore
-grestore
-gsave [1 0 0 -1 640 702.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v1) show
-grestore
-grestore
-gsave [1 0 0 -1 389.46387 532.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v2) show
-grestore
-grestore
-gsave [1 0 0 1 -5 205] concat
-gsave
-0 0 0.99215686 setrgbcolor
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-fill
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-2 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-stroke
-grestore
-gsave [1 0 0 1 313.5613 -13.0562] concat
-gsave
-0 0 0.99215686 setrgbcolor
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-fill
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-2 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-stroke
-grestore
-gsave [1 0 0 1 313.5612 425.4286] concat
-gsave
-0 0 0.99215686 setrgbcolor
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-fill
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-2 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-stroke
-grestore
-gsave [1 0 0 1 633.4848 205] concat
-gsave
-0 0 0.99215686 setrgbcolor
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-fill
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-2 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-stroke
-grestore
-grestore
-showpage
-%%EOF
diff --git a/ufc-merge-into-ffc/doc/manual/eps/ordering_example_tetrahedron.eps b/ufc-merge-into-ffc/doc/manual/eps/ordering_example_tetrahedron.eps
deleted file mode 100644
index e4a0816..0000000
--- a/ufc-merge-into-ffc/doc/manual/eps/ordering_example_tetrahedron.eps
+++ /dev/null
@@ -1,110 +0,0 @@
-%!PS-Adobe-3.0 EPSF-3.0
-%%Creator: inkscape 0.44
-%%Pages: 1
-%%Orientation: Portrait
-%%BoundingBox: 80 64 374 455
-%%HiResBoundingBox: 80.571875 64 373.80625 454.25
-%%EndComments
-%%Page: 1 1
-0 842 translate
-0.8 -0.8 scale
-gsave [1 0 0 1 0 0] concat
-gsave [1 0 0 -1 100 872.36218] concat
-gsave
-/newlatin1font {findfont dup length dict copy dup /Encoding ISOLatin1Encoding put definefont} def
-/BitstreamVeraSans-Roman-ISOLatin1 /BitstreamVeraSans-Roman newlatin1font
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v0) show
-grestore
-grestore
-gsave [1 0 0 -1 440 972.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v1) show
-grestore
-grestore
-gsave [1 0 0 -1 380 762.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v2) show
-grestore
-grestore
-gsave [1 0 0 -1 120 502.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v3) show
-grestore
-grestore
-0 0 0 setrgbcolor
-[18 6] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-140 852.36218 moveto
-370 772.36218 lineto
-370 772.36218 lineto
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-140 852.36218 moveto
-140 522.36218 lineto
-440 942.36218 lineto
-140 852.36218 lineto
-closepath
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-2.8 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-140 522.36218 moveto
-370 772.36218 lineto
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-2.8 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-370 772.36218 moveto
-440 942.36218 lineto
-stroke
-gsave [1 0 0 -1 290 662.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(e0) show
-grestore
-grestore
-grestore
-showpage
-%%EOF
diff --git a/ufc-merge-into-ffc/doc/manual/eps/ordering_example_triangle.eps b/ufc-merge-into-ffc/doc/manual/eps/ordering_example_triangle.eps
deleted file mode 100644
index b87c2e7..0000000
--- a/ufc-merge-into-ffc/doc/manual/eps/ordering_example_triangle.eps
+++ /dev/null
@@ -1,71 +0,0 @@
-%!PS-Adobe-3.0 EPSF-3.0
-%%Creator: inkscape 0.44
-%%Pages: 1
-%%Orientation: Portrait
-%%BoundingBox: 64 96 496 512
-%%HiResBoundingBox: 64 96 496 512
-%%EndComments
-%%Page: 1 1
-0 842 translate
-0.8 -0.8 scale
-gsave [1 0 0 1 0 0] concat
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-120 912.36218 moveto
-580 912.36218 lineto
-120 452.36218 lineto
-120 912.36218 lineto
-closepath
-stroke
-gsave [1 0 0 -1 79.285156 932.02234] concat
-gsave
-/newlatin1font {findfont dup length dict copy dup /Encoding ISOLatin1Encoding put definefont} def
-/BitstreamVeraSans-Roman-ISOLatin1 /BitstreamVeraSans-Roman newlatin1font
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v0) show
-grestore
-grestore
-gsave [1 0 0 -1 592.74219 929.85828] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v1) show
-grestore
-grestore
-gsave [1 0 0 -1 92.929688 430.17468] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v2) show
-grestore
-grestore
-gsave [1 0 0 -1 360 652.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(e0) show
-grestore
-grestore
-grestore
-showpage
-%%EOF
diff --git a/ufc-merge-into-ffc/doc/manual/eps/orientation_example_triangles.eps b/ufc-merge-into-ffc/doc/manual/eps/orientation_example_triangles.eps
deleted file mode 100644
index c481f3d..0000000
--- a/ufc-merge-into-ffc/doc/manual/eps/orientation_example_triangles.eps
+++ /dev/null
@@ -1,375 +0,0 @@
-%!PS-Adobe-3.0 EPSF-3.0
-%%Creator: inkscape 0.44
-%%Pages: 1
-%%Orientation: Portrait
-%%BoundingBox: 8 104 567 528
-%%HiResBoundingBox: 8.6857134 104.68574 566.44498 527.24741
-%%EndComments
-%%Page: 1 1
-0 842 translate
-0.8 -0.8 scale
-gsave [1 0 0 1 0 0] concat
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-21.5 690.86218 moveto
-341.5 910.86218 lineto
-341.5 470.86218 lineto
-21.5 690.86218 lineto
-closepath
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-2 setlinejoin
-0 setlinecap
-newpath
-701.5 690.86218 moveto
-381.5 910.86218 lineto
-381.5 470.86218 lineto
-701.5 690.86218 lineto
-closepath
-stroke
-gsave [1 0 0 -1 49.463867 692.1073] concat
-gsave
-/newlatin1font {findfont dup length dict copy dup /Encoding ISOLatin1Encoding put definefont} def
-/BitstreamVeraSans-Roman-ISOLatin1 /BitstreamVeraSans-Roman newlatin1font
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v0) show
-grestore
-grestore
-gsave [1 0 0 -1 310 882.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v1) show
-grestore
-grestore
-gsave [1 0 0 -1 310 512.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v2) show
-grestore
-grestore
-gsave [1 0 0 -1 390 882.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v0) show
-grestore
-grestore
-gsave [1 0 0 -1 649.46387 692.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v1) show
-grestore
-grestore
-gsave [1 0 0 -1 388.68164 512.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-18 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v2) show
-grestore
-grestore
-gsave [1 0 0 1 -44.57143 204.5714] concat
-gsave
-0 0 0.99215686 setrgbcolor
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-fill
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-2 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-stroke
-grestore
-gsave [1 0 0 1 315.4286 -15.42857] concat
-gsave
-0 0 0.99215686 setrgbcolor
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-fill
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-2 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-stroke
-grestore
-gsave [1 0 0 1 315.4286 424.5714] concat
-gsave
-0 0 0.99215686 setrgbcolor
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-fill
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-2 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-stroke
-grestore
-gsave [1 0 0 1 633.4848 205] concat
-gsave
-0 0 0.99215686 setrgbcolor
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-fill
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-2 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-stroke
-grestore
-gsave [1 0 0 1 275.4286 -15.42857] concat
-gsave
-0 0 0.99215686 setrgbcolor
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-fill
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-2 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-stroke
-grestore
-gsave [1 0 0 1 275.4286 424.5714] concat
-gsave
-0 0 0.99215686 setrgbcolor
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-fill
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-2 setlinewidth
-1 setlinejoin
-0 setlinecap
-newpath
-73.571428 487.36218 moveto
-73.571428 492.09361 69.731428 495.93361 65 495.93361 curveto
-60.268572 495.93361 56.428572 492.09361 56.428572 487.36218 curveto
-56.428572 482.63075 60.268572 478.79075 65 478.79075 curveto
-69.54042 478.79075 73.26647 482.28771 73.5542 486.819 curveto
-stroke
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-320 822.36218 moveto
-320 545.28126 lineto
-stroke
-gsave [-2.0206005e-16 3.3 -3.3 -2.0206005e-16 320 548.58126] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-8.7185878 4.0337352 moveto
--2.2072895 0.016013256 lineto
-8.7185884 -4.0017078 lineto
-6.97309 -1.6296469 6.9831476 1.6157441 8.7185878 4.0337352 curveto
-closepath
-eofill
-grestore
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-399.38393 822.6459 moveto
-399.38393 545.56498 lineto
-stroke
-gsave [-2.0206005e-16 3.3 -3.3 -2.0206005e-16 399.38393 548.86498] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-8.7185878 4.0337352 moveto
--2.2072895 0.016013256 lineto
-8.7185884 -4.0017078 lineto
-6.97309 -1.6296469 6.9831476 1.6157441 8.7185878 4.0337352 curveto
-closepath
-eofill
-grestore
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-641.64291 671.00917 moveto
-432.98922 527.69729 lineto
-stroke
-gsave [2.7201751 1.8683274 -1.8683274 2.7201751 435.7094 529.56562] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-8.7185878 4.0337352 moveto
--2.2072895 0.016013256 lineto
-8.7185884 -4.0017078 lineto
-6.97309 -1.6296469 6.9831476 1.6157441 8.7185878 4.0337352 curveto
-closepath
-eofill
-grestore
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-81.5 670.86218 moveto
-290.15369 527.5503 lineto
-stroke
-gsave [-2.7201751 1.8683274 -1.8683274 -2.7201751 287.43351 529.41863] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-8.7185878 4.0337352 moveto
--2.2072895 0.016013256 lineto
-8.7185884 -4.0017078 lineto
-6.97309 -1.6296469 6.9831476 1.6157441 8.7185878 4.0337352 curveto
-closepath
-eofill
-grestore
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-2.8647974 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-90.017323 712.34476 moveto
-290.10879 851.15558 lineto
-stroke
-gsave [-2.5892224 -1.796239 1.796239 -2.5892224 287.51957 849.35934] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-8.7185878 4.0337352 moveto
--2.2072895 0.016013256 lineto
-8.7185884 -4.0017078 lineto
-6.97309 -1.6296469 6.9831476 1.6157441 8.7185878 4.0337352 curveto
-closepath
-eofill
-grestore
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-2.8647974 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-429.90853 851.173 moveto
-630 712.36218 lineto
-stroke
-gsave [-2.5892225 1.796239 -1.796239 -2.5892225 627.41078 714.15842] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-8.7185878 4.0337352 moveto
--2.2072895 0.016013256 lineto
-8.7185884 -4.0017078 lineto
-6.97309 -1.6296469 6.9831476 1.6157441 8.7185878 4.0337352 curveto
-closepath
-eofill
-grestore
-grestore
-grestore
-showpage
-%%EOF
diff --git a/ufc-merge-into-ffc/doc/manual/eps/quadrilateral.eps b/ufc-merge-into-ffc/doc/manual/eps/quadrilateral.eps
deleted file mode 100644
index c458e7d..0000000
--- a/ufc-merge-into-ffc/doc/manual/eps/quadrilateral.eps
+++ /dev/null
@@ -1,125 +0,0 @@
-%!PS-Adobe-3.0 EPSF-3.0
-%%Creator: inkscape 0.44
-%%Pages: 1
-%%Orientation: Portrait
-%%BoundingBox: 32 79 516 532
-%%HiResBoundingBox: 32.571875 79.728125 515.1785 531.1785
-%%EndComments
-%%Page: 1 1
-0 842 translate
-0.8 -0.8 scale
-gsave [1 0 0 1 0 0] concat
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-120 912.36218 moveto
-640 912.36218 lineto
-stroke
-gsave [-1.8 0 0 -1.8 640 912.36218] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-8.7185878 4.0337352 moveto
--2.2072895 0.016013256 lineto
-8.7185884 -4.0017078 lineto
-6.97309 -1.6296469 6.9831476 1.6157441 8.7185878 4.0337352 curveto
-closepath
-eofill
-grestore
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-120 912.36218 moveto
-120 392.36218 lineto
-stroke
-gsave [-1.1021457e-16 1.8 -1.8 -1.1021457e-16 120 392.36218] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-8.7185878 4.0337352 moveto
--2.2072895 0.016013256 lineto
-8.7185884 -4.0017078 lineto
-6.97309 -1.6296469 6.9831476 1.6157441 8.7185878 4.0337352 curveto
-closepath
-eofill
-grestore
-grestore
-gsave
-0.38431373 0.52549022 0.92941177 setrgbcolor
-newpath
-120 912.36218 moveto
-580 912.36218 lineto
-579.99999 452.36218 lineto
-120 452.36218 lineto
-120 912.36218 lineto
-closepath
-eofill
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-120 912.36218 moveto
-580 912.36218 lineto
-579.99999 452.36218 lineto
-120 452.36218 lineto
-120 912.36218 lineto
-closepath
-stroke
-gsave [1 0 0 -1 40 952.36218] concat
-gsave
-/newlatin1font {findfont dup length dict copy dup /Encoding ISOLatin1Encoding put definefont} def
-/BitstreamVeraSans-Roman-ISOLatin1 /BitstreamVeraSans-Roman newlatin1font
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v0) show
-grestore
-grestore
-gsave [1 0 0 -1 540 952.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v1) show
-grestore
-grestore
-gsave [1 0 0 -1 540 432.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v2) show
-grestore
-grestore
-gsave [1 0 0 -1 40 432.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v3) show
-grestore
-grestore
-grestore
-showpage
-%%EOF
diff --git a/ufc-merge-into-ffc/doc/manual/eps/tetrahedron.eps b/ufc-merge-into-ffc/doc/manual/eps/tetrahedron.eps
deleted file mode 100644
index 466f570..0000000
--- a/ufc-merge-into-ffc/doc/manual/eps/tetrahedron.eps
+++ /dev/null
@@ -1,184 +0,0 @@
-%!PS-Adobe-3.0 EPSF-3.0
-%%Creator: inkscape 0.44
-%%Pages: 1
-%%Orientation: Portrait
-%%BoundingBox: 24 64 398 460
-%%HiResBoundingBox: 24.571875 64 397.30244 459.1785
-%%EndComments
-%%Page: 1 1
-0 842 translate
-0.8 -0.8 scale
-gsave [1 0 0 1 0 0] concat
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-140 852.36218 moveto
-492.81509 958.50976 lineto
-stroke
-gsave [-1.7236793 -0.51858435 0.51858435 -1.7236793 492.81509 958.50976] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-8.7185878 4.0337352 moveto
--2.2072895 0.016013256 lineto
-8.7185884 -4.0017078 lineto
-6.97309 -1.6296469 6.9831476 1.6157441 8.7185878 4.0337352 curveto
-closepath
-eofill
-grestore
-grestore
-gsave [1 0 0 -1 30 872.36218] concat
-gsave
-/newlatin1font {findfont dup length dict copy dup /Encoding ISOLatin1Encoding put definefont} def
-/BitstreamVeraSans-Roman-ISOLatin1 /BitstreamVeraSans-Roman newlatin1font
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v0) show
-grestore
-grestore
-gsave [1 0 0 -1 350 972.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v1) show
-grestore
-grestore
-gsave [1 0 0 -1 360 742.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v2) show
-grestore
-grestore
-gsave [1 0 0 -1 30 522.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v3) show
-grestore
-grestore
-gsave
-0.38431373 0.52549022 0.92941177 setrgbcolor
-newpath
-140 852.36218 moveto
-440 942.36218 lineto
-140 522.36218 lineto
-140 522.36218 lineto
-140 852.36218 lineto
-closepath
-eofill
-grestore
-gsave
-0.27450982 0.41960785 0.83529413 setrgbcolor
-newpath
-440 942.36218 moveto
-440 942.36218 lineto
-370 772.36218 lineto
-140 522.36218 lineto
-440 942.36218 lineto
-closepath
-eofill
-grestore
-0.29411766 0.29411766 0.58823532 setrgbcolor
-[18 6] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-140 852.36218 moveto
-370 772.36218 lineto
-370 772.36218 lineto
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-140 852.36218 moveto
-140 482.36218 lineto
-stroke
-gsave [-1.1021457e-16 1.8 -1.8 -1.1021457e-16 140 482.36218] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-8.7185878 4.0337352 moveto
--2.2072895 0.016013256 lineto
-8.7185884 -4.0017078 lineto
-6.97309 -1.6296469 6.9831476 1.6157441 8.7185878 4.0337352 curveto
-closepath
-eofill
-grestore
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-370 772.36218 moveto
-430 752.36218 lineto
-stroke
-gsave [-1.7076299 0.56920998 -0.56920998 -1.7076299 430 752.36218] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-8.7185878 4.0337352 moveto
--2.2072895 0.016013256 lineto
-8.7185884 -4.0017078 lineto
-6.97309 -1.6296469 6.9831476 1.6157441 8.7185878 4.0337352 curveto
-closepath
-eofill
-grestore
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-140 852.36218 moveto
-140 522.36218 lineto
-440 942.36218 lineto
-140 852.36218 lineto
-closepath
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-2.8 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-140 522.36218 moveto
-370 772.36218 lineto
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-2.8 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-370 772.36218 moveto
-440 942.36218 lineto
-stroke
-grestore
-showpage
-%%EOF
diff --git a/ufc-merge-into-ffc/doc/manual/eps/triangle.eps b/ufc-merge-into-ffc/doc/manual/eps/triangle.eps
deleted file mode 100644
index 94ac471..0000000
--- a/ufc-merge-into-ffc/doc/manual/eps/triangle.eps
+++ /dev/null
@@ -1,112 +0,0 @@
-%!PS-Adobe-3.0 EPSF-3.0
-%%Creator: inkscape 0.44
-%%Pages: 1
-%%Orientation: Portrait
-%%BoundingBox: 32 79 516 532
-%%HiResBoundingBox: 32.571875 79.728125 515.1785 531.1785
-%%EndComments
-%%Page: 1 1
-0 842 translate
-0.8 -0.8 scale
-gsave [1 0 0 1 0 0] concat
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-120 912.36218 moveto
-640 912.36218 lineto
-stroke
-gsave [-1.8 0 0 -1.8 640 912.36218] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-8.7185878 4.0337352 moveto
--2.2072895 0.016013256 lineto
-8.7185884 -4.0017078 lineto
-6.97309 -1.6296469 6.9831476 1.6157441 8.7185878 4.0337352 curveto
-closepath
-eofill
-grestore
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-120 912.36218 moveto
-120 392.36218 lineto
-stroke
-gsave [-1.1021457e-16 1.8 -1.8 -1.1021457e-16 120 392.36218] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-8.7185878 4.0337352 moveto
--2.2072895 0.016013256 lineto
-8.7185884 -4.0017078 lineto
-6.97309 -1.6296469 6.9831476 1.6157441 8.7185878 4.0337352 curveto
-closepath
-eofill
-grestore
-grestore
-gsave
-0.38431373 0.52549022 0.92941177 setrgbcolor
-newpath
-120 912.36218 moveto
-580 912.36218 lineto
-120 452.36218 lineto
-120 912.36218 lineto
-closepath
-eofill
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-3 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-120 912.36218 moveto
-580 912.36218 lineto
-120 452.36218 lineto
-120 912.36218 lineto
-closepath
-stroke
-gsave [1 0 0 -1 40 952.36218] concat
-gsave
-/newlatin1font {findfont dup length dict copy dup /Encoding ISOLatin1Encoding put definefont} def
-/BitstreamVeraSans-Roman-ISOLatin1 /BitstreamVeraSans-Roman newlatin1font
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v0) show
-grestore
-grestore
-gsave [1 0 0 -1 540 952.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v1) show
-grestore
-grestore
-gsave [1 0 0 -1 40 432.36218] concat
-gsave
-/BitstreamVeraSans-Roman-ISOLatin1 findfont
-24 scalefont
-setfont
-0 0 0 setrgbcolor
-newpath
-0 0 moveto
-(v2) show
-grestore
-grestore
-grestore
-showpage
-%%EOF
diff --git a/ufc-merge-into-ffc/doc/manual/eps/ufcfig.eps b/ufc-merge-into-ffc/doc/manual/eps/ufcfig.eps
deleted file mode 100644
index 0af655c..0000000
--- a/ufc-merge-into-ffc/doc/manual/eps/ufcfig.eps
+++ /dev/null
@@ -1,643 +0,0 @@
-%!PS-Adobe-3.0 EPSF-3.0
-%%Creator: inkscape 0.45
-%%Pages: 1
-%%Orientation: Portrait
-%%BoundingBox: 138 286 410 530
-%%HiResBoundingBox: 138.58268 286.58268 409.41732 529.41732
-%%EndComments
-%%Page: 1 1
-0 842 translate
-0.8 -0.8 scale
-0 0 0 setrgbcolor
-[] 0 setdash
-1 setlinewidth
-0 setlinejoin
-0 setlinecap
-gsave [1 0 0 1 0 0] concat
-gsave [1 0 0 1 88.640653 5.8411812] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-131.65625 406.94285 moveto
-134.82812 406.94285 lineto
-134.82812 421.11472 lineto
-134.82812 423.61473 135.28124 425.41681 136.1875 426.52097 curveto
-137.09374 427.61473 138.56249 428.1616 140.59375 428.1616 curveto
-142.61457 428.1616 144.07811 427.61473 144.98438 426.52097 curveto
-145.89061 425.41681 146.34373 423.61473 146.34375 421.11472 curveto
-146.34375 406.94285 lineto
-149.51562 406.94285 lineto
-149.51562 421.50535 lineto
-149.5156 424.54702 148.7604 426.84389 147.25 428.39597 curveto
-145.74998 429.94806 143.53124 430.7241 140.59375 430.7241 curveto
-137.64582 430.7241 135.41666 429.94806 133.90625 428.39597 curveto
-132.40625 426.84389 131.65625 424.54702 131.65625 421.50535 curveto
-131.65625 406.94285 lineto
-fill
-grestore
-gsave
-0 0 0 setrgbcolor
-newpath
-155.45312 406.94285 moveto
-168.85938 406.94285 lineto
-168.85938 409.5991 lineto
-158.60938 409.5991 lineto
-158.60938 416.4741 lineto
-167.85938 416.4741 lineto
-167.85938 419.13035 lineto
-158.60938 419.13035 lineto
-158.60938 430.27097 lineto
-155.45312 430.27097 lineto
-155.45312 406.94285 lineto
-fill
-grestore
-gsave
-0 0 0 setrgbcolor
-newpath
-191.35938 408.73972 moveto
-191.35938 412.06785 lineto
-190.29686 411.07828 189.16144 410.3387 187.95312 409.8491 curveto
-186.75519 409.35954 185.47915 409.11474 184.125 409.11472 curveto
-181.45832 409.11474 179.41666 409.93245 178 411.56785 curveto
-176.58333 413.19287 175.87499 415.54703 175.875 418.63035 curveto
-175.87499 421.70327 176.58333 424.05744 178 425.69285 curveto
-179.41666 427.31785 181.45832 428.13035 184.125 428.13035 curveto
-185.47915 428.13035 186.75519 427.88556 187.95312 427.39597 curveto
-189.16144 426.90639 190.29686 426.16681 191.35938 425.17722 curveto
-191.35938 428.4741 lineto
-190.25519 429.2241 189.08331 429.7866 187.84375 430.1616 curveto
-186.61457 430.5366 185.31249 430.7241 183.9375 430.7241 curveto
-180.40624 430.7241 177.62499 429.64597 175.59375 427.48972 curveto
-173.5625 425.32306 172.54687 422.36994 172.54688 418.63035 curveto
-172.54687 414.88036 173.5625 411.92724 175.59375 409.77097 curveto
-177.62499 407.60433 180.40624 406.521 183.9375 406.52097 curveto
-185.33332 406.521 186.64582 406.7085 187.875 407.08347 curveto
-189.11456 407.44808 190.27602 408.00016 191.35938 408.73972 curveto
-fill
-grestore
-grestore
-gsave [1 0 0 1 -64.406222 25.403681] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-465.98438 387.94285 moveto
-470.6875 387.94285 lineto
-476.64062 403.81785 lineto
-482.625 387.94285 lineto
-487.32812 387.94285 lineto
-487.32812 411.27097 lineto
-484.25 411.27097 lineto
-484.25 390.7866 lineto
-478.23438 406.7866 lineto
-475.0625 406.7866 lineto
-469.04688 390.7866 lineto
-469.04688 411.27097 lineto
-465.98438 411.27097 lineto
-465.98438 387.94285 lineto
-fill
-grestore
-gsave
-0 0 0 setrgbcolor
-newpath
-508.45312 401.80222 moveto
-508.45312 403.20847 lineto
-495.23438 403.20847 lineto
-495.35937 405.18765 495.95312 406.69806 497.01562 407.73972 curveto
-498.08853 408.77098 499.57812 409.2866 501.48438 409.2866 curveto
-502.58853 409.2866 503.65624 409.15118 504.6875 408.88035 curveto
-505.72915 408.60952 506.7604 408.20327 507.78125 407.6616 curveto
-507.78125 410.38035 lineto
-506.74998 410.81785 505.69269 411.15118 504.60938 411.38035 curveto
-503.52603 411.60951 502.42707 411.7241 501.3125 411.7241 curveto
-498.52083 411.7241 496.30729 410.9116 494.67188 409.2866 curveto
-493.04687 407.6616 492.23437 405.46369 492.23438 402.69285 curveto
-492.23437 399.82828 493.00521 397.55745 494.54688 395.88035 curveto
-496.09895 394.19287 498.18749 393.34912 500.8125 393.3491 curveto
-503.16665 393.34912 505.02603 394.10953 506.39062 395.63035 curveto
-507.76561 397.14078 508.45311 399.19807 508.45312 401.80222 curveto
-505.57812 400.95847 moveto
-505.55728 399.38557 505.11457 398.13036 504.25 397.19285 curveto
-503.39582 396.25536 502.2604 395.78661 500.84375 395.7866 curveto
-499.23957 395.78661 497.95312 396.23974 496.98438 397.14597 curveto
-496.02604 398.05224 495.47395 399.32828 495.32812 400.9741 curveto
-505.57812 400.95847 lineto
-fill
-grestore
-gsave
-0 0 0 setrgbcolor
-newpath
-524.32812 394.2866 moveto
-524.32812 397.00535 lineto
-523.51561 396.5887 522.67186 396.2762 521.79688 396.06785 curveto
-520.92186 395.85953 520.01562 395.75536 519.07812 395.75535 curveto
-517.65103 395.75536 516.57812 395.97411 515.85938 396.4116 curveto
-515.15104 396.84911 514.79687 397.50536 514.79688 398.38035 curveto
-514.79687 399.04703 515.05208 399.57307 515.5625 399.95847 curveto
-516.07291 400.33348 517.09895 400.69286 518.64062 401.0366 curveto
-519.625 401.25535 lineto
-521.66666 401.69286 523.11457 402.31265 523.96875 403.11472 curveto
-524.83332 403.9064 525.26561 405.01577 525.26562 406.44285 curveto
-525.26561 408.06785 524.61978 409.35431 523.32812 410.30222 curveto
-522.04686 411.25014 520.28124 411.7241 518.03125 411.7241 curveto
-517.09374 411.7241 516.11458 411.63035 515.09375 411.44285 curveto
-514.08333 411.26576 513.01562 410.99493 511.89062 410.63035 curveto
-511.89062 407.6616 lineto
-512.95312 408.21368 514 408.63035 515.03125 408.9116 curveto
-516.06249 409.18243 517.08333 409.31785 518.09375 409.31785 curveto
-519.44791 409.31785 520.48957 409.08868 521.21875 408.63035 curveto
-521.9479 408.1616 522.31249 407.50535 522.3125 406.6616 curveto
-522.31249 405.88035 522.04686 405.2814 521.51562 404.86472 curveto
-520.99478 404.44806 519.84374 404.04702 518.0625 403.6616 curveto
-517.0625 403.42722 lineto
-515.28124 403.05223 513.99479 402.47932 513.20312 401.70847 curveto
-512.41146 400.92723 512.01562 399.85953 512.01562 398.50535 curveto
-512.01562 396.85953 512.59896 395.5887 513.76562 394.69285 curveto
-514.93229 393.79703 516.58854 393.34912 518.73438 393.3491 curveto
-519.79687 393.34912 520.79686 393.42724 521.73438 393.58347 curveto
-522.67186 393.73974 523.53644 393.97412 524.32812 394.2866 curveto
-fill
-grestore
-gsave
-0 0 0 setrgbcolor
-newpath
-544.40625 400.70847 moveto
-544.40625 411.27097 lineto
-541.53125 411.27097 lineto
-541.53125 400.80222 lineto
-541.53124 399.14599 541.20832 397.9064 540.5625 397.08347 curveto
-539.91665 396.26057 538.9479 395.84911 537.65625 395.8491 curveto
-536.10416 395.84911 534.8802 396.3439 533.98438 397.33347 curveto
-533.08854 398.32307 532.64062 399.67203 532.64062 401.38035 curveto
-532.64062 411.27097 lineto
-529.75 411.27097 lineto
-529.75 386.95847 lineto
-532.64062 386.95847 lineto
-532.64062 396.48972 lineto
-533.32812 395.43766 534.13541 394.6512 535.0625 394.13035 curveto
-535.99999 393.60953 537.07811 393.34912 538.29688 393.3491 curveto
-540.30728 393.34912 541.82811 393.97412 542.85938 395.2241 curveto
-543.89061 396.4637 544.40623 398.29182 544.40625 400.70847 curveto
-fill
-grestore
-grestore
-gsave [1 0 0 1 27.265597 37.638112] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-268.73438 610.94285 moveto
-273.4375 610.94285 lineto
-279.39062 626.81785 lineto
-285.375 610.94285 lineto
-290.07812 610.94285 lineto
-290.07812 634.27097 lineto
-287 634.27097 lineto
-287 613.7866 lineto
-280.98438 629.7866 lineto
-277.8125 629.7866 lineto
-271.79688 613.7866 lineto
-271.79688 634.27097 lineto
-268.73438 634.27097 lineto
-268.73438 610.94285 lineto
-fill
-grestore
-gsave
-0 0 0 setrgbcolor
-newpath
-304.1875 625.4741 moveto
-301.86457 625.47411 300.2552 625.73973 299.35938 626.27097 curveto
-298.46354 626.80223 298.01562 627.70848 298.01562 628.98972 curveto
-298.01562 630.01056 298.34895 630.82306 299.01562 631.42722 curveto
-299.6927 632.02098 300.60937 632.31785 301.76562 632.31785 curveto
-303.35936 632.31785 304.63541 631.75535 305.59375 630.63035 curveto
-306.56249 629.49494 307.04686 627.98973 307.04688 626.11472 curveto
-307.04688 625.4741 lineto
-304.1875 625.4741 lineto
-309.92188 624.2866 moveto
-309.92188 634.27097 lineto
-307.04688 634.27097 lineto
-307.04688 631.61472 lineto
-306.39061 632.67722 305.5729 633.46368 304.59375 633.9741 curveto
-303.61457 634.4741 302.41666 634.7241 301 634.7241 curveto
-299.20833 634.7241 297.78125 634.2241 296.71875 633.2241 curveto
-295.66666 632.21368 295.14062 630.86473 295.14062 629.17722 curveto
-295.14062 627.20848 295.79687 625.72411 297.10938 624.7241 curveto
-298.43229 623.72411 300.40103 623.22411 303.01562 623.2241 curveto
-307.04688 623.2241 lineto
-307.04688 622.94285 lineto
-307.04686 621.61994 306.60936 620.59911 305.73438 619.88035 curveto
-304.86978 619.1512 303.65103 618.78661 302.07812 618.7866 curveto
-301.07812 618.78661 300.10416 618.90641 299.15625 619.14597 curveto
-298.20833 619.38557 297.29687 619.74495 296.42188 620.2241 curveto
-296.42188 617.56785 lineto
-297.47395 617.16162 298.49479 616.85953 299.48438 616.6616 curveto
-300.47395 616.45328 301.43749 616.34912 302.375 616.3491 curveto
-304.90624 616.34912 306.79686 617.00537 308.04688 618.31785 curveto
-309.29686 619.63036 309.92186 621.61994 309.92188 624.2866 curveto
-fill
-grestore
-gsave
-0 0 0 setrgbcolor
-newpath
-318.70312 611.80222 moveto
-318.70312 616.77097 lineto
-324.625 616.77097 lineto
-324.625 619.00535 lineto
-318.70312 619.00535 lineto
-318.70312 628.50535 lineto
-318.70312 629.93244 318.89583 630.8491 319.28125 631.25535 curveto
-319.67708 631.6616 320.47395 631.86473 321.67188 631.86472 curveto
-324.625 631.86472 lineto
-324.625 634.27097 lineto
-321.67188 634.27097 lineto
-319.45312 634.27097 317.92187 633.85952 317.07812 633.0366 curveto
-316.23437 632.20327 315.8125 630.69285 315.8125 628.50535 curveto
-315.8125 619.00535 lineto
-313.70312 619.00535 lineto
-313.70312 616.77097 lineto
-315.8125 616.77097 lineto
-315.8125 611.80222 lineto
-318.70312 611.80222 lineto
-fill
-grestore
-gsave
-0 0 0 setrgbcolor
-newpath
-338.5625 619.45847 moveto
-338.23957 619.27099 337.8854 619.13557 337.5 619.05222 curveto
-337.12499 618.95849 336.70832 618.91161 336.25 618.9116 curveto
-334.62499 618.91161 333.37499 619.44286 332.5 620.50535 curveto
-331.63541 621.55744 331.20312 623.07307 331.20312 625.05222 curveto
-331.20312 634.27097 lineto
-328.3125 634.27097 lineto
-328.3125 616.77097 lineto
-331.20312 616.77097 lineto
-331.20312 619.48972 lineto
-331.80729 618.42724 332.59374 617.64078 333.5625 617.13035 curveto
-334.53124 616.60953 335.70832 616.34912 337.09375 616.3491 curveto
-337.29165 616.34912 337.5104 616.36474 337.75 616.39597 curveto
-337.98957 616.41682 338.2552 616.45328 338.54688 616.50535 curveto
-338.5625 619.45847 lineto
-fill
-grestore
-gsave
-0 0 0 setrgbcolor
-newpath
-341.60938 616.77097 moveto
-344.48438 616.77097 lineto
-344.48438 634.27097 lineto
-341.60938 634.27097 lineto
-341.60938 616.77097 lineto
-341.60938 609.95847 moveto
-344.48438 609.95847 lineto
-344.48438 613.5991 lineto
-341.60938 613.5991 lineto
-341.60938 609.95847 lineto
-fill
-grestore
-gsave
-0 0 0 setrgbcolor
-newpath
-365.03125 616.77097 moveto
-358.70312 625.2866 lineto
-365.35938 634.27097 lineto
-361.96875 634.27097 lineto
-356.875 627.39597 lineto
-351.78125 634.27097 lineto
-348.39062 634.27097 lineto
-355.1875 625.11472 lineto
-348.96875 616.77097 lineto
-352.35938 616.77097 lineto
-357 623.00535 lineto
-361.64062 616.77097 lineto
-365.03125 616.77097 lineto
-fill
-grestore
-grestore
-gsave [1 0 0 1 14.343744 76.18141] concat
-gsave
-0 0 0 setrgbcolor
-newpath
-246.34375 459.05222 moveto
-242.0625 470.6616 lineto
-250.64062 470.6616 lineto
-246.34375 459.05222 lineto
-244.5625 455.94285 moveto
-248.14062 455.94285 lineto
-257.03125 479.27097 lineto
-253.75 479.27097 lineto
-251.625 473.2866 lineto
-241.10938 473.2866 lineto
-238.98438 479.27097 lineto
-235.65625 479.27097 lineto
-244.5625 455.94285 lineto
-fill
-grestore
-gsave
-0 0 0 setrgbcolor
-newpath
-271.45312 462.2866 moveto
-271.45312 465.00535 lineto
-270.64061 464.5887 269.79686 464.2762 268.92188 464.06785 curveto
-268.04686 463.85953 267.14062 463.75536 266.20312 463.75535 curveto
-264.77603 463.75536 263.70312 463.97411 262.98438 464.4116 curveto
-262.27604 464.84911 261.92187 465.50536 261.92188 466.38035 curveto
-261.92187 467.04703 262.17708 467.57307 262.6875 467.95847 curveto
-263.19791 468.33348 264.22395 468.69286 265.76562 469.0366 curveto
-266.75 469.25535 lineto
-268.79166 469.69286 270.23957 470.31265 271.09375 471.11472 curveto
-271.95832 471.9064 272.39061 473.01577 272.39062 474.44285 curveto
-272.39061 476.06785 271.74478 477.35431 270.45312 478.30222 curveto
-269.17186 479.25014 267.40624 479.7241 265.15625 479.7241 curveto
-264.21874 479.7241 263.23958 479.63035 262.21875 479.44285 curveto
-261.20833 479.26576 260.14062 478.99493 259.01562 478.63035 curveto
-259.01562 475.6616 lineto
-260.07812 476.21368 261.125 476.63035 262.15625 476.9116 curveto
-263.18749 477.18243 264.20833 477.31785 265.21875 477.31785 curveto
-266.57291 477.31785 267.61457 477.08868 268.34375 476.63035 curveto
-269.0729 476.1616 269.43749 475.50535 269.4375 474.6616 curveto
-269.43749 473.88035 269.17186 473.2814 268.64062 472.86472 curveto
-268.11978 472.44806 266.96874 472.04702 265.1875 471.6616 curveto
-264.1875 471.42722 lineto
-262.40624 471.05223 261.11979 470.47932 260.32812 469.70847 curveto
-259.53646 468.92723 259.14062 467.85953 259.14062 466.50535 curveto
-259.14062 464.85953 259.72396 463.5887 260.89062 462.69285 curveto
-262.05729 461.79703 263.71354 461.34912 265.85938 461.3491 curveto
-266.92187 461.34912 267.92186 461.42724 268.85938 461.58347 curveto
-269.79686 461.73974 270.66144 461.97412 271.45312 462.2866 curveto
-fill
-grestore
-gsave
-0 0 0 setrgbcolor
-newpath
-288.14062 462.2866 moveto
-288.14062 465.00535 lineto
-287.32811 464.5887 286.48436 464.2762 285.60938 464.06785 curveto
-284.73436 463.85953 283.82812 463.75536 282.89062 463.75535 curveto
-281.46353 463.75536 280.39062 463.97411 279.67188 464.4116 curveto
-278.96354 464.84911 278.60937 465.50536 278.60938 466.38035 curveto
-278.60937 467.04703 278.86458 467.57307 279.375 467.95847 curveto
-279.88541 468.33348 280.91145 468.69286 282.45312 469.0366 curveto
-283.4375 469.25535 lineto
-285.47916 469.69286 286.92707 470.31265 287.78125 471.11472 curveto
-288.64582 471.9064 289.07811 473.01577 289.07812 474.44285 curveto
-289.07811 476.06785 288.43228 477.35431 287.14062 478.30222 curveto
-285.85936 479.25014 284.09374 479.7241 281.84375 479.7241 curveto
-280.90624 479.7241 279.92708 479.63035 278.90625 479.44285 curveto
-277.89583 479.26576 276.82812 478.99493 275.70312 478.63035 curveto
-275.70312 475.6616 lineto
-276.76562 476.21368 277.8125 476.63035 278.84375 476.9116 curveto
-279.87499 477.18243 280.89583 477.31785 281.90625 477.31785 curveto
-283.26041 477.31785 284.30207 477.08868 285.03125 476.63035 curveto
-285.7604 476.1616 286.12499 475.50535 286.125 474.6616 curveto
-286.12499 473.88035 285.85936 473.2814 285.32812 472.86472 curveto
-284.80728 472.44806 283.65624 472.04702 281.875 471.6616 curveto
-280.875 471.42722 lineto
-279.09374 471.05223 277.80729 470.47932 277.01562 469.70847 curveto
-276.22396 468.92723 275.82812 467.85953 275.82812 466.50535 curveto
-275.82812 464.85953 276.41146 463.5887 277.57812 462.69285 curveto
-278.74479 461.79703 280.40104 461.34912 282.54688 461.3491 curveto
-283.60937 461.34912 284.60936 461.42724 285.54688 461.58347 curveto
-286.48436 461.73974 287.34894 461.97412 288.14062 462.2866 curveto
-fill
-grestore
-gsave
-0 0 0 setrgbcolor
-newpath
-308.64062 469.80222 moveto
-308.64062 471.20847 lineto
-295.42188 471.20847 lineto
-295.54687 473.18765 296.14062 474.69806 297.20312 475.73972 curveto
-298.27603 476.77098 299.76562 477.2866 301.67188 477.2866 curveto
-302.77603 477.2866 303.84374 477.15118 304.875 476.88035 curveto
-305.91665 476.60952 306.9479 476.20327 307.96875 475.6616 curveto
-307.96875 478.38035 lineto
-306.93748 478.81785 305.88019 479.15118 304.79688 479.38035 curveto
-303.71353 479.60951 302.61457 479.7241 301.5 479.7241 curveto
-298.70833 479.7241 296.49479 478.9116 294.85938 477.2866 curveto
-293.23437 475.6616 292.42187 473.46369 292.42188 470.69285 curveto
-292.42187 467.82828 293.19271 465.55745 294.73438 463.88035 curveto
-296.28645 462.19287 298.37499 461.34912 301 461.3491 curveto
-303.35415 461.34912 305.21353 462.10953 306.57812 463.63035 curveto
-307.95311 465.14078 308.64061 467.19807 308.64062 469.80222 curveto
-305.76562 468.95847 moveto
-305.74478 467.38557 305.30207 466.13036 304.4375 465.19285 curveto
-303.58332 464.25536 302.4479 463.78661 301.03125 463.7866 curveto
-299.42707 463.78661 298.14062 464.23974 297.17188 465.14597 curveto
-296.21354 466.05224 295.66145 467.32828 295.51562 468.9741 curveto
-305.76562 468.95847 lineto
-fill
-grestore
-gsave
-0 0 0 setrgbcolor
-newpath
-326.98438 465.13035 moveto
-327.70311 463.8387 328.56248 462.88557 329.5625 462.27097 curveto
-330.56248 461.65641 331.73956 461.34912 333.09375 461.3491 curveto
-334.91664 461.34912 336.32289 461.98974 337.3125 463.27097 curveto
-338.30206 464.54182 338.79685 466.35432 338.79688 468.70847 curveto
-338.79688 479.27097 lineto
-335.90625 479.27097 lineto
-335.90625 468.80222 lineto
-335.90622 467.12515 335.60935 465.88036 335.01562 465.06785 curveto
-334.42185 464.25536 333.5156 463.84911 332.29688 463.8491 curveto
-330.80727 463.84911 329.63019 464.3439 328.76562 465.33347 curveto
-327.90102 466.32307 327.46873 467.67203 327.46875 469.38035 curveto
-327.46875 479.27097 lineto
-324.57812 479.27097 lineto
-324.57812 468.80222 lineto
-324.57811 467.11474 324.28124 465.86994 323.6875 465.06785 curveto
-323.09374 464.25536 322.17707 463.84911 320.9375 463.8491 curveto
-319.46874 463.84911 318.30208 464.34911 317.4375 465.3491 curveto
-316.57291 466.33869 316.14062 467.68244 316.14062 469.38035 curveto
-316.14062 479.27097 lineto
-313.25 479.27097 lineto
-313.25 461.77097 lineto
-316.14062 461.77097 lineto
-316.14062 464.48972 lineto
-316.79687 463.41682 317.58333 462.62516 318.5 462.11472 curveto
-319.41666 461.60432 320.5052 461.34912 321.76562 461.3491 curveto
-323.03645 461.34912 324.11457 461.67203 325 462.31785 curveto
-325.89582 462.9637 326.55728 463.9012 326.98438 465.13035 curveto
-fill
-grestore
-gsave
-0 0 0 setrgbcolor
-newpath
-357.10938 470.5366 moveto
-357.10936 468.42203 356.67186 466.76578 355.79688 465.56785 curveto
-354.93228 464.35953 353.73957 463.75536 352.21875 463.75535 curveto
-350.69791 463.75536 349.49999 464.35953 348.625 465.56785 curveto
-347.76041 466.76578 347.32812 468.42203 347.32812 470.5366 curveto
-347.32812 472.65119 347.76041 474.31264 348.625 475.52097 curveto
-349.49999 476.71889 350.69791 477.31785 352.21875 477.31785 curveto
-353.73957 477.31785 354.93228 476.71889 355.79688 475.52097 curveto
-356.67186 474.31264 357.10936 472.65119 357.10938 470.5366 curveto
-347.32812 464.42722 moveto
-347.93229 463.38557 348.6927 462.61474 349.60938 462.11472 curveto
-350.53645 461.60432 351.64061 461.34912 352.92188 461.3491 curveto
-355.04686 461.34912 356.77082 462.19287 358.09375 463.88035 curveto
-359.42707 465.56786 360.09373 467.78661 360.09375 470.5366 curveto
-360.09373 473.2866 359.42707 475.50535 358.09375 477.19285 curveto
-356.77082 478.88035 355.04686 479.7241 352.92188 479.7241 curveto
-351.64061 479.7241 350.53645 479.4741 349.60938 478.9741 curveto
-348.6927 478.46368 347.93229 477.68764 347.32812 476.64597 curveto
-347.32812 479.27097 lineto
-344.4375 479.27097 lineto
-344.4375 454.95847 lineto
-347.32812 454.95847 lineto
-347.32812 464.42722 lineto
-fill
-grestore
-gsave
-0 0 0 setrgbcolor
-newpath
-364.85938 454.95847 moveto
-367.73438 454.95847 lineto
-367.73438 479.27097 lineto
-364.85938 479.27097 lineto
-364.85938 454.95847 lineto
-fill
-grestore
-gsave
-0 0 0 setrgbcolor
-newpath
-388.70312 469.80222 moveto
-388.70312 471.20847 lineto
-375.48438 471.20847 lineto
-375.60937 473.18765 376.20312 474.69806 377.26562 475.73972 curveto
-378.33853 476.77098 379.82812 477.2866 381.73438 477.2866 curveto
-382.83853 477.2866 383.90624 477.15118 384.9375 476.88035 curveto
-385.97915 476.60952 387.0104 476.20327 388.03125 475.6616 curveto
-388.03125 478.38035 lineto
-386.99998 478.81785 385.94269 479.15118 384.85938 479.38035 curveto
-383.77603 479.60951 382.67707 479.7241 381.5625 479.7241 curveto
-378.77083 479.7241 376.55729 478.9116 374.92188 477.2866 curveto
-373.29687 475.6616 372.48437 473.46369 372.48438 470.69285 curveto
-372.48437 467.82828 373.25521 465.55745 374.79688 463.88035 curveto
-376.34895 462.19287 378.43749 461.34912 381.0625 461.3491 curveto
-383.41665 461.34912 385.27603 462.10953 386.64062 463.63035 curveto
-388.01561 465.14078 388.70311 467.19807 388.70312 469.80222 curveto
-385.82812 468.95847 moveto
-385.80728 467.38557 385.36457 466.13036 384.5 465.19285 curveto
-383.64582 464.25536 382.5104 463.78661 381.09375 463.7866 curveto
-379.48957 463.78661 378.20312 464.23974 377.23438 465.14597 curveto
-376.27604 466.05224 375.72395 467.32828 375.57812 468.9741 curveto
-385.82812 468.95847 lineto
-fill
-grestore
-gsave
-0 0 0 setrgbcolor
-newpath
-403.5625 464.45847 moveto
-403.23957 464.27099 402.8854 464.13557 402.5 464.05222 curveto
-402.12499 463.95849 401.70832 463.91161 401.25 463.9116 curveto
-399.62499 463.91161 398.37499 464.44286 397.5 465.50535 curveto
-396.63541 466.55744 396.20312 468.07307 396.20312 470.05222 curveto
-396.20312 479.27097 lineto
-393.3125 479.27097 lineto
-393.3125 461.77097 lineto
-396.20312 461.77097 lineto
-396.20312 464.48972 lineto
-396.80729 463.42724 397.59374 462.64078 398.5625 462.13035 curveto
-399.53124 461.60953 400.70832 461.34912 402.09375 461.3491 curveto
-402.29165 461.34912 402.5104 461.36474 402.75 461.39597 curveto
-402.98957 461.41682 403.2552 461.45328 403.54688 461.50535 curveto
-403.5625 464.45847 lineto
-fill
-grestore
-grestore
-0 0 0 setrgbcolor
-[] 0 setdash
-3.5433071 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-175 392.36218 moveto
-320 392.36218 lineto
-320 457.36218 lineto
-175 457.36218 lineto
-175 392.36218 lineto
-closepath
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-3.5433071 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-365 392.36218 moveto
-510 392.36218 lineto
-510 457.36218 lineto
-365 457.36218 lineto
-365 392.36218 lineto
-closepath
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-3.5433071 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-270 627.36218 moveto
-415 627.36218 lineto
-415 692.36218 lineto
-270 692.36218 lineto
-270 627.36218 lineto
-closepath
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-4.3671684 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-230 512.36218 moveto
-450 512.36218 lineto
-450 577.36218 lineto
-230 577.36218 lineto
-230 512.36218 lineto
-closepath
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-1 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-300 512.36218 moveto
-300 512.36218 lineto
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-1.7716535 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-440 457.36218 moveto
-437.87868 477.31193 410.33408 480.63754 398.08148 489.98781 curveto
-387.10025 498.36786 385 512.36218 385 512.36218 curveto
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-1.7716535 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-240.5 456.86218 moveto
-242.62132 476.81193 270.16592 480.13754 282.41852 489.48781 curveto
-293.39975 497.86786 295.5 511.86218 295.5 511.86218 curveto
-stroke
-0 0 0 setrgbcolor
-[] 0 setdash
-1.7716535 setlinewidth
-0 setlinejoin
-0 setlinecap
-newpath
-340 577.36218 moveto
-340 627.36218 340 627.36218 340 627.36218 curveto
-340 627.36218 lineto
-340 627.36218 lineto
-stroke
-grestore
-showpage
-%%EOF
diff --git a/ufc-merge-into-ffc/doc/manual/fenicsmanual.cls b/ufc-merge-into-ffc/doc/manual/fenicsmanual.cls
deleted file mode 100644
index 18d49c2..0000000
--- a/ufc-merge-into-ffc/doc/manual/fenicsmanual.cls
+++ /dev/null
@@ -1,110 +0,0 @@
-% Copyright (C) 2005-2007 Anders Logg.
-% Licensed under the GNU GPL Version 2.
-%
-% First added:  2004-09-03
-% Last changed: 2009-08-18
-%
-% LaTeX document class for FEniCS manuals.
-
-%--- Set up class ----
-\ProvidesClass{fenicsmanual}[2005/09/03 FEniCS manual]
-\NeedsTeXFormat{LaTeX2e}
-\LoadClass[12pt,twoside]{book}
-
-%--- Load packages ---
-\RequirePackage{graphicx}
-\RequirePackage{psfrag}
-\RequirePackage{fancyhdr}
-\RequirePackage{fancybox}
-\RequirePackage{fancyvrb}
-\RequirePackage{sectsty}
-\RequirePackage{amsmath}
-\RequirePackage{amssymb}
-\RequirePackage{stmaryrd}
-\RequirePackage{makeidx}
-\RequirePackage{url}
-\RequirePackage[latin1]{inputenc}
-\RequirePackage[colorlinks]{hyperref}
-\RequirePackage[small]{caption}
-\RequirePackage{algorithm}
-
-%--- Misc options ---
-\setlength{\parindent}{0pt}
-\setlength{\parskip}{12pt}
-\allsectionsfont{\sffamily}
-\newcommand{\tab}{\hspace*{2em}}
-\makeindex
-
-%--- Remove header and footer from blank pages  ---
-\let\origdoublepage\cleardoublepage
-\newcommand{\clearemptydoublepage}{%
-  \clearpage
-  {\pagestyle{empty}\origdoublepage}%
-}
-\let\cleardoublepage\clearemptydoublepage
-
-%--- Print index at end of document ---
-\AtEndDocument{\cleardoublepage\printindex}
-
-%--- Variables ---
-\newcommand{\@fenicstitle}{}
-\newcommand{\fenicstitle}[1]{\renewcommand{\@fenicstitle}{#1}}
-\newcommand{\@fenicsauthor}{}
-\newcommand{\fenicsauthor}[1]{\renewcommand{\@fenicsauthor}{#1}}
-\newcommand{\@fenicsimage}{\vspace{8cm}}
-\newcommand{\fenicsimage}[1]{\renewcommand{\@fenicsimage}{
-    \begin{center}
-      \includegraphics[height=8cm]{#1}
-    \end{center}}}
-\newcommand{\@fenicspackage}{<package unspecified>}
-\newcommand{\@fenicspackagett}{<package unspecified>}
-\newcommand{\fenicspackage}[2]{\renewcommand{\@fenicspackage}{#1}\renewcommand{\@fenicspackagett}{#2}}
-\newcommand{\package}{\@fenicspackage}
-\newcommand{\packagett}{\@fenicspackagett}
-
-%--- Commands ---
-\renewcommand{\maketitle}{
-  \lhead{\textsf{\textbf{\@fenicstitle}}}
-  \rhead{\textsf{\@fenicsauthor}}
-  \pagestyle{fancy}
-  \renewcommand{\footrulewidth}{2pt}
-  \renewcommand{\headrulewidth}{2pt}
-  \thispagestyle{empty}
-  \Large\textsf{\textbf{\@fenicstitle}} \\
-  \vspace{-0.5cm}
-  \hrule height 2pt
-  \hfill\large\textsf{\today}
-  \vspace{3cm}
-  \@fenicsimage
-  \vfill\large\textsf{\textbf{\@fenicsauthor}} \\
-  \hrule height 2pt
-  \hfill\large\texttt{www.fenics.org}
-  \newpage
-  \null\vfill
-  \normalsize
-  Visit \texttt{http://www.fenics.org/} for the latest version of this manual. \\
-  Send comments and suggestions to \texttt{\@fenicspackagett{}-dev at fenics.org}.
-  \pagestyle{empty}
-  \cleardoublepage
-  \tableofcontents
-  \cleardoublepage
-  \pagestyle{fancy}}
-
-\newcommand{\fenics}{\textbf{\textsf{\normalsize{FE}\Large{ni}\normalsize{CS}}}}
-\newcommand{\dolfin}{\textbf{\textsf{DOLFIN}}}
-\newcommand{\pydolfin}{\textbf{\textsf{PyDOLFIN}}}
-\newcommand{\ffc}{\textbf{\textsf{FFC}}}
-\newcommand{\fiat}{\textbf{\textsf{FIAT}}}
-\newcommand{\fixme}[1]{\ \\ \begin{tabular}{||p{\textwidth}||}\hline\rm\textbf{FIXME:}\rm #1 \\ \hline\end{tabular} \\}
-\newcommand{\devnote}[1]{$\blacktriangleright$ \emph{Developer's note:} #1}
-
-%--- Environments ---
-\DefineVerbatimEnvironment{code}{Verbatim}{frame=single,rulecolor=\color{blue}}
-\DefineVerbatimEnvironment{macrocode}{Verbatim}{commandchars=\\\{\},frame=single,rulecolor=\color{blue}}
-
-%--- Macros ---
-\newcommand{\dx}{\, \mathrm{d}x}
-\newcommand{\dX}{\, \mathrm{d}X}
-\newcommand{\ds}{\, \mathrm{d}s}
-\newcommand{\dS}{\, \mathrm{d}S}
-\newcommand{\R}{\mathbb{R}}
diff --git a/ufc-merge-into-ffc/doc/manual/svg/hexahedron.svg b/ufc-merge-into-ffc/doc/manual/svg/hexahedron.svg
deleted file mode 100644
index 025b9e2..0000000
--- a/ufc-merge-into-ffc/doc/manual/svg/hexahedron.svg
+++ /dev/null
@@ -1,249 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-<svg
-   xmlns:dc="http://purl.org/dc/elements/1.1/"
-   xmlns:cc="http://web.resource.org/cc/"
-   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-   xmlns:svg="http://www.w3.org/2000/svg"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="744.09448819"
-   height="1052.3622047"
-   id="svg2"
-   sodipodi:version="0.32"
-   inkscape:version="0.44"
-   sodipodi:docbase="/home/logg/work/src/fenics/ufc/ufc-dev/doc/manual/svg"
-   sodipodi:docname="hexahedron.svg">
-  <defs
-     id="defs4">
-    <linearGradient
-       id="linearGradient3008">
-      <stop
-         style="stop-color:#557cec;stop-opacity:1;"
-         offset="0"
-         id="stop3010" />
-      <stop
-         id="stop3016"
-         offset="1"
-         style="stop-color:#5b81ec;stop-opacity:0.49803922;" />
-      <stop
-         style="stop-color:#6286ed;stop-opacity:0;"
-         offset="1"
-         id="stop3012" />
-    </linearGradient>
-    <linearGradient
-       id="linearGradient2976">
-      <stop
-         style="stop-color:black;stop-opacity:1;"
-         offset="0"
-         id="stop2978" />
-      <stop
-         id="stop3004"
-         offset="1"
-         style="stop-color:#6685f0;stop-opacity:0.16470589;" />
-      <stop
-         style="stop-color:#76b9c5;stop-opacity:0.08235294;"
-         offset="1"
-         id="stop3006" />
-      <stop
-         style="stop-color:black;stop-opacity:0;"
-         offset="1"
-         id="stop2980" />
-    </linearGradient>
-    <marker
-       inkscape:stockid="Arrow2Mend"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="Arrow2Mend"
-       style="overflow:visible;">
-      <path
-         id="path2906"
-         style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
-         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
-         transform="scale(0.6) rotate(180) translate(0,0)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutL"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutL"
-       style="overflow:visible">
-      <path
-         id="path2835"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.8)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutM"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutM"
-       style="overflow:visible">
-      <path
-         id="path2832"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.4)" />
-    </marker>
-  </defs>
-  <sodipodi:namedview
-     id="base"
-     pagecolor="#ffffff"
-     bordercolor="#666666"
-     borderopacity="1.0"
-     gridtolerance="10000"
-     guidetolerance="50"
-     objecttolerance="50"
-     inkscape:pageopacity="0.0"
-     inkscape:pageshadow="2"
-     inkscape:zoom="1.4"
-     inkscape:cx="467.24294"
-     inkscape:cy="339.50488"
-     inkscape:document-units="px"
-     inkscape:current-layer="layer1"
-     showgrid="true"
-     inkscape:window-width="1600"
-     inkscape:window-height="1157"
-     inkscape:window-x="0"
-     inkscape:window-y="43"
-     gridspacingx="1px"
-     gridspacingy="1px"
-     inkscape:object-bbox="false"
-     inkscape:object-points="false"
-     inkscape:grid-points="true"
-     inkscape:object-nodes="false"
-     inkscape:object-paths="false"
-     inkscape:grid-bbox="false"
-     inkscape:guide-bbox="false" />
-  <metadata
-     id="metadata7">
-    <rdf:RDF>
-      <cc:Work
-         rdf:about="">
-        <dc:format>image/svg+xml</dc:format>
-        <dc:type
-           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
-      </cc:Work>
-    </rdf:RDF>
-  </metadata>
-  <g
-     inkscape:label="Layer 1"
-     inkscape:groupmode="layer"
-     id="layer1">
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow2Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
-       d="M 140,852.36218 L 492.81509,958.50976"
-       id="path2942"
-       sodipodi:nodetypes="cc" />
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="40"
-       y="872.36218"
-       id="text3929"><tspan
-         sodipodi:role="line"
-         id="tspan3931"
-         x="40"
-         y="872.36218">v0</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="380"
-       y="982.36218"
-       id="text2785"
-       inkscape:transform-center-x="97.765625"><tspan
-         sodipodi:role="line"
-         id="tspan2787"
-         x="380"
-         y="982.36218">v1</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="650"
-       y="842.36218"
-       id="text2789"><tspan
-         sodipodi:role="line"
-         id="tspan2791"
-         x="650"
-         y="842.36218">v2</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="40"
-       y="522.36218"
-       id="text2793"><tspan
-         sodipodi:role="line"
-         id="tspan2795"
-         x="40"
-         y="522.36218">v4</tspan></text>
-    <path
-       style="fill:#6286ed;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
-       d="M 140,852.36218 L 440,942.36218 L 440,552.36218 L 140,522.36218 L 140,852.36218 z "
-       id="path2000"
-       sodipodi:nodetypes="ccccc" />
-    <path
-       style="fill:#466bd5;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
-       d="M 440,942.36218 L 640,832.36218 L 640,522.36218 L 440,552.36218 L 440,942.36218 z "
-       id="path2887"
-       sodipodi:nodetypes="ccccc" />
-    <path
-       style="fill:#3765e9;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
-       d="M 140,522.36218 L 440,552.36218 L 640,522.36218 L 370,502.36218 L 140,522.36218 z "
-       id="path2889" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#4b4b96;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:18,6;stroke-dashoffset:0"
-       d="M 370,502.36218 L 370,772.36218"
-       id="path2891"
-       sodipodi:nodetypes="cc" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#4b4b96;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:18,6;stroke-dashoffset:0"
-       d="M 140,852.36218 L 370,772.36218 L 640,832.36218"
-       id="path2893" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow2Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
-       d="M 140,852.36218 L 140,482.36218"
-       id="path2913"
-       sodipodi:nodetypes="cc" />
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="650"
-       y="512.36218"
-       id="text2917"><tspan
-         sodipodi:role="line"
-         id="tspan2919"
-         x="650"
-         y="512.36218">v6</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="330"
-       y="482.36218"
-       id="text2921"><tspan
-         sodipodi:role="line"
-         id="tspan2923"
-         x="330"
-         y="482.36218">v7</tspan></text>
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
-       d="M 140,852.36218 L 440,942.36218 L 440,552.36218"
-       id="path2798" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
-       d="M 140,522.36218 L 440,552.36218 L 640,522.36218"
-       id="path2800" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
-       d="M 440,942.36218 L 640,832.36218 L 640,522.36218"
-       id="path2802" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2.8;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
-       d="M 140,522.36218 L 370,502.36218 L 640,522.36218"
-       id="path2804" />
-  </g>
-</svg>
diff --git a/ufc-merge-into-ffc/doc/manual/svg/insertion.svg b/ufc-merge-into-ffc/doc/manual/svg/insertion.svg
deleted file mode 100644
index d637ecf..0000000
--- a/ufc-merge-into-ffc/doc/manual/svg/insertion.svg
+++ /dev/null
@@ -1,429 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-<svg
-   xmlns:dc="http://purl.org/dc/elements/1.1/"
-   xmlns:cc="http://web.resource.org/cc/"
-   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-   xmlns:svg="http://www.w3.org/2000/svg"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:sodipodi="http://inkscape.sourceforge.net/DTD/sodipodi-0.dtd"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="841.88975pt"
-   height="595.27559pt"
-   id="svg2795"
-   sodipodi:version="0.32"
-   inkscape:version="0.42"
-   sodipodi:docbase="/home/anders/hg/arcme-2006/svg"
-   sodipodi:docname="insertion.svg">
-  <defs
-     id="defs2797">
-    <marker
-       inkscape:stockid="Arrow2Lend"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="Arrow2Lend"
-       style="overflow:visible;">
-      <path
-         sodipodi:nodetypes="cccc"
-         id="path3050"
-         style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
-         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
-         transform="scale(1.1) rotate(180) translate(-5,0)" />
-    </marker>
-  </defs>
-  <sodipodi:namedview
-     id="base"
-     pagecolor="#ffffff"
-     bordercolor="#666666"
-     borderopacity="1.0"
-     inkscape:pageopacity="0.0"
-     inkscape:pageshadow="2"
-     inkscape:zoom="0.98994949"
-     inkscape:cx="375.00000"
-     inkscape:cy="478.15811"
-     inkscape:document-units="px"
-     inkscape:current-layer="layer1"
-     showgrid="false"
-     inkscape:grid-points="true"
-     gridspacingx="25.000000px"
-     gridspacingy="25.000000px"
-     gridtolerance="10.000000px"
-     inkscape:guide-points="false"
-     inkscape:window-width="1158"
-     inkscape:window-height="878"
-     inkscape:window-x="3"
-     inkscape:window-y="25" />
-  <metadata
-     id="metadata2800">
-    <rdf:RDF>
-      <cc:Work
-         rdf:about="">
-        <dc:format>image/svg+xml</dc:format>
-        <dc:type
-           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
-      </cc:Work>
-    </rdf:RDF>
-  </metadata>
-  <g
-     inkscape:label="Layer 1"
-     inkscape:groupmode="layer"
-     id="layer1">
-    <rect
-       style="fill:none;fill-opacity:1.0000000;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2803"
-       width="375.00000"
-       height="375.00000"
-       x="126.55000"
-       y="45.644482" />
-    <rect
-       style="opacity:0.36813185;fill:#0080b0;fill-opacity:1.0000000;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2805"
-       width="375.00000"
-       height="25.000000"
-       x="126.55000"
-       y="120.64449" />
-    <rect
-       style="opacity:0.36813185;fill:#0080b0;fill-opacity:1.0000000;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2809"
-       width="375.00000"
-       height="25.000000"
-       x="126.55000"
-       y="317.54449" />
-    <rect
-       style="opacity:0.36813185;fill:#0080b0;fill-opacity:1.0000000;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2811"
-       width="0.0000000"
-       height="25.000000"
-       x="150.00000"
-       y="302.36218" />
-    <rect
-       style="opacity:0.36813185;fill:#0080b0;fill-opacity:1.0000000;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2813"
-       width="375.00000"
-       height="25.000000"
-       x="126.55000"
-       y="195.64449" />
-    <rect
-       style="opacity:0.36813185;fill:#00d720;fill-opacity:1.0000000;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2815"
-       width="25.000000"
-       height="375.00000"
-       x="176.55000"
-       y="45.644482" />
-    <rect
-       style="opacity:0.36813185;fill:#00c54b;fill-opacity:1.0000000;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2817"
-       width="25.000000"
-       height="375.00000"
-       x="301.54999"
-       y="45.644482" />
-    <rect
-       style="opacity:0.36813185;fill:#00b443;fill-opacity:1.0000000;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2819"
-       width="25.000000"
-       height="375.00000"
-       x="373.45001"
-       y="45.644482" />
-    <rect
-       style="opacity:0.36813185;fill:#0080b0;fill-opacity:1.0000000;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2823"
-       width="0.0000000"
-       height="25.000000"
-       x="150.00000"
-       y="144.09448" />
-    <rect
-       style="opacity:1.0000000;fill:#0080b0;fill-opacity:0.35519126;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2831"
-       width="50.000000"
-       height="50.000000"
-       x="575.00000"
-       y="44.094482" />
-    <rect
-       style="opacity:1.0000000;fill:#0080b0;fill-opacity:0.35519126;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2833"
-       width="50.000000"
-       height="50.000000"
-       x="625.00000"
-       y="44.094482" />
-    <rect
-       style="opacity:1.0000000;fill:#0080b0;fill-opacity:0.35519126;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2835"
-       width="50.000000"
-       height="50.000000"
-       x="675.00000"
-       y="44.094482" />
-    <rect
-       style="opacity:1.0000000;fill:#0080b0;fill-opacity:0.35519126;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2837"
-       width="13.918934"
-       height="0.0000000"
-       x="575.00000"
-       y="144.09448" />
-    <rect
-       style="opacity:1.0000000;fill:#0080b0;fill-opacity:0.35519126;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2839"
-       width="50.000000"
-       height="50.000000"
-       x="575.00000"
-       y="94.094482" />
-    <rect
-       style="opacity:1.0000000;fill:#0080b0;fill-opacity:0.35519126;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2841"
-       width="50.000000"
-       height="50.000000"
-       x="625.00000"
-       y="94.094482" />
-    <rect
-       style="opacity:1.0000000;fill:#0080b0;fill-opacity:0.35519126;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2845"
-       width="50.000000"
-       height="50.000000"
-       x="675.00000"
-       y="94.094482" />
-    <rect
-       style="opacity:1.0000000;fill:#0080b0;fill-opacity:0.35519126;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2847"
-       width="50.000000"
-       height="50.000000"
-       x="575.00000"
-       y="144.09448" />
-    <rect
-       style="opacity:1.0000000;fill:#0080b0;fill-opacity:0.35519126;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2851"
-       width="50.000000"
-       height="50.000000"
-       x="625.00000"
-       y="144.09448" />
-    <rect
-       style="opacity:1.0000000;fill:#0080b0;fill-opacity:0.35519126;stroke:#000000;stroke-width:3.0999999;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2853"
-       width="50.000000"
-       height="50.000000"
-       x="675.00000"
-       y="144.09448" />
-    <text
-       xml:space="preserve"
-       style="font-size:24.000000px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125.00000%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1.0000000;stroke:none;stroke-width:1.0000000px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1.0000000;font-family:Bitstream Vera Sans"
-       x="73.741135"
-       y="141.03342"
-       id="text2875"
-       sodipodi:linespacing="125.00000%"><tspan
-         sodipodi:role="line"
-         id="tspan2877"
-         x="73.741135"
-         y="141.03342">i0</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24.000000px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125.00000%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1.0000000;stroke:none;stroke-width:1.0000000px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1.0000000;font-family:Bitstream Vera Sans"
-       x="73.741135"
-       y="218.81516"
-       id="text2879"
-       sodipodi:linespacing="125.00000%"><tspan
-         sodipodi:role="line"
-         id="tspan2881"
-         x="73.741135"
-         y="218.81516">i1</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24.000000px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125.00000%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1.0000000;stroke:none;stroke-width:1.0000000px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1.0000000;font-family:Bitstream Vera Sans"
-       x="73.741135"
-       y="343.06393"
-       id="text2883"
-       sodipodi:linespacing="125.00000%"><tspan
-         sodipodi:role="line"
-         id="tspan2885"
-         x="73.741135"
-         y="343.06393">i2</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24.000000px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125.00000%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1.0000000;stroke:none;stroke-width:1.0000000px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1.0000000;font-family:Bitstream Vera Sans"
-       x="178.03046"
-       y="24.145245"
-       id="text2891"
-       sodipodi:linespacing="125.00000%"><tspan
-         sodipodi:role="line"
-         id="tspan2893"
-         x="178.03046"
-         y="24.145245">j0</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24.000000px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125.00000%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1.0000000;stroke:none;stroke-width:1.0000000px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1.0000000;font-family:Bitstream Vera Sans"
-       x="301.02545"
-       y="23.855717"
-       id="text2895"
-       sodipodi:linespacing="125.00000%"><tspan
-         sodipodi:role="line"
-         id="tspan2897"
-         x="301.02545"
-         y="23.855717">j1</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24.000000px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125.00000%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1.0000000;stroke:none;stroke-width:1.0000000px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1.0000000;font-family:Bitstream Vera Sans"
-       x="373.75644"
-       y="24.865871"
-       id="text2899"
-       sodipodi:linespacing="125.00000%"><tspan
-         sodipodi:role="line"
-         id="tspan2901"
-         x="373.75644"
-         y="24.865871">j2</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24.000000px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125.00000%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1.0000000;stroke:none;stroke-width:1.0000000px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1.0000000;font-family:Bitstream Vera Sans"
-       x="536.94531"
-       y="76.16555"
-       id="text2907"
-       sodipodi:linespacing="125.00000%"><tspan
-         sodipodi:role="line"
-         id="tspan2909"
-         x="536.94531"
-         y="76.165550">1</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24.000000px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125.00000%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1.0000000;stroke:none;stroke-width:1.0000000px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1.0000000;font-family:Bitstream Vera Sans"
-       x="537.13281"
-       y="124.14525"
-       id="text2911"
-       sodipodi:linespacing="125.00000%"><tspan
-         sodipodi:role="line"
-         id="tspan2913"
-         x="537.13281"
-         y="124.14525">2</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24.000000px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125.00000%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1.0000000;stroke:none;stroke-width:1.0000000px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1.0000000;font-family:Bitstream Vera Sans"
-       x="536.65234"
-       y="175.1554"
-       id="text2915"
-       sodipodi:linespacing="125.00000%"><tspan
-         sodipodi:role="line"
-         id="tspan2917"
-         x="536.65234"
-         y="175.15540">3</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24.000000px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125.00000%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1.0000000;stroke:none;stroke-width:1.0000000px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1.0000000;font-family:Bitstream Vera Sans"
-       x="593.00623"
-       y="31.539814"
-       id="text2919"
-       sodipodi:linespacing="125.00000%"><tspan
-         sodipodi:role="line"
-         id="tspan2921"
-         x="593.00623"
-         y="31.539814">1</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24.000000px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125.00000%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1.0000000;stroke:none;stroke-width:1.0000000px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1.0000000;font-family:Bitstream Vera Sans"
-       x="642.18353"
-       y="31.856218"
-       id="text2923"
-       sodipodi:linespacing="125.00000%"><tspan
-         sodipodi:role="line"
-         id="tspan2925"
-         x="642.18353"
-         y="31.856218">2</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24.000000px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125.00000%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1.0000000;stroke:none;stroke-width:1.0000000px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1.0000000;font-family:Bitstream Vera Sans"
-       x="690.69293"
-       y="31.856218"
-       id="text2927"
-       sodipodi:linespacing="125.00000%"><tspan
-         sodipodi:role="line"
-         id="tspan2929"
-         x="690.69293"
-         y="31.856218">3</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24.000000px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125.00000%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1.0000000;stroke:none;stroke-width:1.0000000px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1.0000000;font-family:Bitstream Vera Sans"
-       x="597.00012"
-       y="310.73904"
-       id="text3074"
-       sodipodi:linespacing="125.00000%"><tspan
-         sodipodi:role="line"
-         id="tspan3076"
-         x="597.00012"
-         y="310.73904">A21</tspan></text>
-    <rect
-       style="fill:#0000ff;fill-opacity:0.75000000;fill-rule:evenodd;stroke:#000000;stroke-width:2.0000000;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect1351"
-       width="25.000000"
-       height="25.000000"
-       x="176.50508"
-       y="120.59956" />
-    <rect
-       y="120.59956"
-       x="301.25891"
-       height="25.000000"
-       width="25.000000"
-       id="rect2105"
-       style="fill:#0000ff;fill-opacity:0.75000000;fill-rule:evenodd;stroke:#000000;stroke-width:2.0000000;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000" />
-    <rect
-       style="fill:#0000ff;fill-opacity:0.75000000;fill-rule:evenodd;stroke:#000000;stroke-width:2.0000000;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2107"
-       width="25.000000"
-       height="25.000000"
-       x="373.48483"
-       y="120.59956" />
-    <rect
-       y="195.35085"
-       x="176.50508"
-       height="25.000000"
-       width="25.000000"
-       id="rect2109"
-       style="fill:#0000ff;fill-opacity:0.75000000;fill-rule:evenodd;stroke:#000000;stroke-width:2.0000000;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000" />
-    <rect
-       y="195.35085"
-       x="373.33688"
-       height="25.000000"
-       width="25.000000"
-       id="rect2113"
-       style="fill:#0000ff;fill-opacity:0.75000000;fill-rule:evenodd;stroke:#000000;stroke-width:2.0000000;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000" />
-    <rect
-       style="fill:#0000ff;fill-opacity:0.75000000;fill-rule:evenodd;stroke:#000000;stroke-width:2.0000000;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2115"
-       width="25.000000"
-       height="25.000000"
-       x="176.50507"
-       y="317.07422" />
-    <rect
-       style="fill:#0000ff;fill-opacity:0.75000000;fill-rule:evenodd;stroke:#000000;stroke-width:2.0000000;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2117"
-       width="25.000000"
-       height="25.000000"
-       x="301.55118"
-       y="317.49371" />
-    <rect
-       y="317.49371"
-       x="373.33688"
-       height="25.000000"
-       width="25.000000"
-       id="rect2119"
-       style="fill:#0000ff;fill-opacity:0.75000000;fill-rule:evenodd;stroke:#000000;stroke-width:2.0000000;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000" />
-    <rect
-       style="fill:#0000ff;fill-opacity:0.75000000;fill-rule:evenodd;stroke:#000000;stroke-width:2.0000000;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       id="rect2121"
-       width="25.000000"
-       height="25.000000"
-       x="373.33688"
-       y="317.49371" />
-    <rect
-       y="195.70799"
-       x="301.19403"
-       height="25.000000"
-       width="25.000000"
-       id="rect2123"
-       style="fill:#0000ff;fill-opacity:0.75000000;fill-rule:evenodd;stroke:#000000;stroke-width:2.0000000;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000" />
-  </g>
-  <g
-     inkscape:groupmode="layer"
-     id="layer2"
-     inkscape:label="top">
-    <path
-       style="fill:none;fill-opacity:0.75000000;fill-rule:evenodd;stroke:#000000;stroke-width:3.0000000;stroke-linecap:butt;stroke-linejoin:miter;marker-start:none;marker-end:url(#Arrow2Lend);stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-opacity:1.0000000"
-       d="M 650.00000,174.02025 C 499.48727,522.52287 329.78165,349.78679 329.78165,349.78679"
-       id="path2935"
-       sodipodi:nodetypes="cc" />
-  </g>
-</svg>
diff --git a/ufc-merge-into-ffc/doc/manual/svg/interval.svg b/ufc-merge-into-ffc/doc/manual/svg/interval.svg
deleted file mode 100644
index 217514a..0000000
--- a/ufc-merge-into-ffc/doc/manual/svg/interval.svg
+++ /dev/null
@@ -1,172 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-<svg
-   xmlns:dc="http://purl.org/dc/elements/1.1/"
-   xmlns:cc="http://web.resource.org/cc/"
-   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-   xmlns:svg="http://www.w3.org/2000/svg"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="744.09448819"
-   height="1052.3622047"
-   id="svg2"
-   sodipodi:version="0.32"
-   inkscape:version="0.44"
-   sodipodi:docbase="/home/logg/work/src/fenics/ufc/ufc-dev/doc/manual/svg"
-   sodipodi:docname="interval.svg">
-  <defs
-     id="defs4">
-    <linearGradient
-       id="linearGradient3008">
-      <stop
-         style="stop-color:#557cec;stop-opacity:1;"
-         offset="0"
-         id="stop3010" />
-      <stop
-         id="stop3016"
-         offset="1"
-         style="stop-color:#5b81ec;stop-opacity:0.49803922;" />
-      <stop
-         style="stop-color:#6286ed;stop-opacity:0;"
-         offset="1"
-         id="stop3012" />
-    </linearGradient>
-    <linearGradient
-       id="linearGradient2976">
-      <stop
-         style="stop-color:black;stop-opacity:1;"
-         offset="0"
-         id="stop2978" />
-      <stop
-         id="stop3004"
-         offset="1"
-         style="stop-color:#6685f0;stop-opacity:0.16470589;" />
-      <stop
-         style="stop-color:#76b9c5;stop-opacity:0.08235294;"
-         offset="1"
-         id="stop3006" />
-      <stop
-         style="stop-color:black;stop-opacity:0;"
-         offset="1"
-         id="stop2980" />
-    </linearGradient>
-    <marker
-       inkscape:stockid="Arrow2Mend"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="Arrow2Mend"
-       style="overflow:visible;">
-      <path
-         id="path2906"
-         style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
-         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
-         transform="scale(0.6) rotate(180) translate(0,0)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutL"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutL"
-       style="overflow:visible">
-      <path
-         id="path2835"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.8)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutM"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutM"
-       style="overflow:visible">
-      <path
-         id="path2832"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.4)" />
-    </marker>
-  </defs>
-  <sodipodi:namedview
-     id="base"
-     pagecolor="#ffffff"
-     bordercolor="#666666"
-     borderopacity="1.0"
-     gridtolerance="10000"
-     guidetolerance="50"
-     objecttolerance="50"
-     inkscape:pageopacity="0.0"
-     inkscape:pageshadow="2"
-     inkscape:zoom="1.979899"
-     inkscape:cx="396.17568"
-     inkscape:cy="99.202224"
-     inkscape:document-units="px"
-     inkscape:current-layer="layer1"
-     showgrid="false"
-     inkscape:window-width="1600"
-     inkscape:window-height="1157"
-     inkscape:window-x="0"
-     inkscape:window-y="43"
-     inkscape:object-points="false"
-     inkscape:object-bbox="false"
-     inkscape:object-nodes="false"
-     inkscape:grid-points="true"
-     inkscape:guide-points="false"
-     inkscape:object-paths="false"
-     inkscape:grid-bbox="false"
-     inkscape:guide-bbox="false" />
-  <metadata
-     id="metadata7">
-    <rdf:RDF>
-      <cc:Work
-         rdf:about="">
-        <dc:format>image/svg+xml</dc:format>
-        <dc:type
-           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
-      </cc:Work>
-    </rdf:RDF>
-  </metadata>
-  <g
-     inkscape:label="Layer 1"
-     inkscape:groupmode="layer"
-     id="layer1">
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow2Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
-       d="M 120,912.36218 L 670,912.36218"
-       id="path2942"
-       sodipodi:nodetypes="cc" />
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="115.50507"
-       y="962.36218"
-       id="text3929"><tspan
-         sodipodi:role="line"
-         id="tspan3931"
-         x="115.50507"
-         y="962.36218">0</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="611.96954"
-       y="962.36218"
-       id="text3933"><tspan
-         sodipodi:role="line"
-         id="tspan3935"
-         x="611.96954"
-         y="962.36218">1</tspan></text>
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
-       d="M 121.51523,911.8571 L 121.51523,931.8571"
-       id="path5236" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
-       d="M 620,912.36218 L 620,932.36218"
-       id="path5240"
-       sodipodi:nodetypes="cc" />
-  </g>
-</svg>
diff --git a/ufc-merge-into-ffc/doc/manual/svg/numbering_example_quadrilaterals.svg b/ufc-merge-into-ffc/doc/manual/svg/numbering_example_quadrilaterals.svg
deleted file mode 100644
index 19efdab..0000000
--- a/ufc-merge-into-ffc/doc/manual/svg/numbering_example_quadrilaterals.svg
+++ /dev/null
@@ -1,412 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-<svg
-   xmlns:dc="http://purl.org/dc/elements/1.1/"
-   xmlns:cc="http://web.resource.org/cc/"
-   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-   xmlns:svg="http://www.w3.org/2000/svg"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="744.09448819"
-   height="1052.3622047"
-   id="svg2"
-   sodipodi:version="0.32"
-   inkscape:version="0.44"
-   sodipodi:docbase="/home/logg/work/src/fenics/ufc/ufc-dev/doc/manual/svg"
-   sodipodi:docname="numbering_example_quadrilaterals.svg">
-  <defs
-     id="defs4">
-    <linearGradient
-       id="linearGradient3008">
-      <stop
-         style="stop-color:#557cec;stop-opacity:1;"
-         offset="0"
-         id="stop3010" />
-      <stop
-         id="stop3016"
-         offset="1"
-         style="stop-color:#5b81ec;stop-opacity:0.49803922;" />
-      <stop
-         style="stop-color:#6286ed;stop-opacity:0;"
-         offset="1"
-         id="stop3012" />
-    </linearGradient>
-    <linearGradient
-       id="linearGradient2976">
-      <stop
-         style="stop-color:black;stop-opacity:1;"
-         offset="0"
-         id="stop2978" />
-      <stop
-         id="stop3004"
-         offset="1"
-         style="stop-color:#6685f0;stop-opacity:0.16470589;" />
-      <stop
-         style="stop-color:#76b9c5;stop-opacity:0.08235294;"
-         offset="1"
-         id="stop3006" />
-      <stop
-         style="stop-color:black;stop-opacity:0;"
-         offset="1"
-         id="stop2980" />
-    </linearGradient>
-    <marker
-       inkscape:stockid="Arrow2Mend"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="Arrow2Mend"
-       style="overflow:visible;">
-      <path
-         id="path2906"
-         style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
-         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
-         transform="scale(0.6) rotate(180) translate(0,0)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutL"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutL"
-       style="overflow:visible">
-      <path
-         id="path2835"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.8)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutM"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutM"
-       style="overflow:visible">
-      <path
-         id="path2832"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.4)" />
-    </marker>
-  </defs>
-  <sodipodi:namedview
-     id="base"
-     pagecolor="#ffffff"
-     bordercolor="#666666"
-     borderopacity="1.0"
-     gridtolerance="10000"
-     guidetolerance="10"
-     objecttolerance="10"
-     inkscape:pageopacity="0.0"
-     inkscape:pageshadow="2"
-     inkscape:zoom="1.4"
-     inkscape:cx="368.19011"
-     inkscape:cy="342.51351"
-     inkscape:document-units="px"
-     inkscape:current-layer="layer1"
-     showgrid="true"
-     inkscape:window-width="1590"
-     inkscape:window-height="1118"
-     inkscape:window-x="0"
-     inkscape:window-y="29"
-     inkscape:grid-points="true"
-     gridempspacing="1" />
-  <metadata
-     id="metadata7">
-    <rdf:RDF>
-      <cc:Work
-         rdf:about="">
-        <dc:format>image/svg+xml</dc:format>
-        <dc:type
-           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
-      </cc:Work>
-    </rdf:RDF>
-  </metadata>
-  <g
-     inkscape:label="Layer 1"
-     inkscape:groupmode="layer"
-     id="layer1">
-    <path
-       style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1"
-       d="M 80,912.36218 L 320,912.36218 L 320,672.36218 L 80,672.36218 L 80,912.36218 z "
-       id="path3018"
-       sodipodi:nodetypes="ccccc" />
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="68.417969"
-       y="652.02234"
-       id="text3929"><tspan
-         sodipodi:role="line"
-         id="tspan3931"
-         x="68.417969"
-         y="652.02234">5</tspan></text>
-    <path
-       sodipodi:type="arc"
-       style="fill:none;fill-opacity:1;stroke:none;stroke-width:1.30170989;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:2.60341978, 1.30170989;stroke-dashoffset:0;stroke-opacity:1"
-       id="path3681"
-       sodipodi:cx="85"
-       sodipodi:cy="517.36218"
-       sodipodi:rx="15"
-       sodipodi:ry="15"
-       d="M 100,517.36218 A 15,15 0 1 1 81.545637,502.76536 L 85,517.36218 z"
-       sodipodi:start="0"
-       sodipodi:end="4.4800124" />
-    <path
-       sodipodi:type="arc"
-       style="fill:none;fill-opacity:1;stroke:none;stroke-width:1.30170989;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:2.60341978, 1.30170989;stroke-dashoffset:0;stroke-opacity:1"
-       id="path3683"
-       sodipodi:cx="75"
-       sodipodi:cy="447.36218"
-       sodipodi:rx="45"
-       sodipodi:ry="55"
-       d="M 120,447.36218 A 45,55 0 1 1 64.636911,393.84048 L 75,447.36218 z"
-       sodipodi:start="0"
-       sodipodi:end="4.4800124" />
-    <path
-       sodipodi:type="arc"
-       style="fill:none;fill-opacity:1;stroke:none;stroke-width:1.30170989;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:2.60341978, 1.30170989;stroke-dashoffset:0;stroke-opacity:1"
-       id="path3685"
-       sodipodi:cx="85"
-       sodipodi:cy="427.36218"
-       sodipodi:rx="35"
-       sodipodi:ry="35"
-       d="M 120,427.36218 A 35,35 0 1 1 76.939819,393.30292 L 85,427.36218 z"
-       sodipodi:start="0"
-       sodipodi:end="4.4800124" />
-    <path
-       sodipodi:type="arc"
-       style="fill:none;fill-opacity:1;stroke:none;stroke-width:1.30170989;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:2.60341978, 1.30170989;stroke-dashoffset:0;stroke-opacity:1"
-       id="path3687"
-       sodipodi:cx="160"
-       sodipodi:cy="437.36218"
-       sodipodi:rx="50"
-       sodipodi:ry="35"
-       d="M 210,437.36218 A 50,35 0 1 1 150.96766,402.938 L 160,437.36218 z"
-       sodipodi:start="0"
-       sodipodi:end="4.5307449" />
-    <rect
-       style="fill:none;fill-opacity:1;stroke:none;stroke-width:1.30170989;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:2.60341978, 1.30170989;stroke-dashoffset:0;stroke-opacity:1"
-       id="rect4574"
-       width="70"
-       height="80"
-       x="80"
-       y="422.36218"
-       ry="5.5558391" />
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="88.813477"
-       y="902.1073"
-       id="text4576"><tspan
-         sodipodi:role="line"
-         id="tspan4578"
-         x="88.813477"
-         y="902.1073">v0</tspan></text>
-    <path
-       sodipodi:type="arc"
-       style="fill:#0000fd;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path4604"
-       sodipodi:cx="65"
-       sodipodi:cy="487.36218"
-       sodipodi:rx="8.5714283"
-       sodipodi:ry="8.5714283"
-       d="M 73.571428,487.36218 A 8.5714283,8.5714283 0 1 1 73.5542,486.819"
-       sodipodi:start="0"
-       sodipodi:end="6.219772"
-       sodipodi:open="true"
-       transform="translate(14.57143,185.4286)" />
-    <path
-       style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1"
-       d="M 320,912.36218 L 560,912.36218 L 560,672.36218 L 320,672.36218 L 320,912.36218 z "
-       id="path1917"
-       sodipodi:nodetypes="ccccc" />
-    <path
-       sodipodi:type="arc"
-       style="fill:#0000fd;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path1919"
-       sodipodi:cx="65"
-       sodipodi:cy="487.36218"
-       sodipodi:rx="8.5714283"
-       sodipodi:ry="8.5714283"
-       d="M 73.571428,487.36218 A 8.5714283,8.5714283 0 1 1 73.5542,486.819"
-       sodipodi:start="0"
-       sodipodi:end="6.219772"
-       sodipodi:open="true"
-       transform="translate(15.42857,424.5714)" />
-    <path
-       sodipodi:type="arc"
-       style="fill:#0000fd;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path1921"
-       sodipodi:cx="65"
-       sodipodi:cy="487.36218"
-       sodipodi:rx="8.5714283"
-       sodipodi:ry="8.5714283"
-       d="M 73.571428,487.36218 A 8.5714283,8.5714283 0 1 1 73.5542,486.819"
-       sodipodi:start="0"
-       sodipodi:end="6.219772"
-       sodipodi:open="true"
-       transform="translate(254.5714,185.4286)" />
-    <path
-       sodipodi:type="arc"
-       style="fill:#0000fd;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path1925"
-       sodipodi:cx="65"
-       sodipodi:cy="487.36218"
-       sodipodi:rx="8.5714283"
-       sodipodi:ry="8.5714283"
-       d="M 73.571428,487.36218 A 8.5714283,8.5714283 0 1 1 73.5542,486.819"
-       sodipodi:start="0"
-       sodipodi:end="6.219772"
-       sodipodi:open="true"
-       transform="translate(254.5714,425.4286)" />
-    <path
-       sodipodi:type="arc"
-       style="fill:#0000fd;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path1927"
-       sodipodi:cx="65"
-       sodipodi:cy="487.36218"
-       sodipodi:rx="8.5714283"
-       sodipodi:ry="8.5714283"
-       d="M 73.571428,487.36218 A 8.5714283,8.5714283 0 1 1 73.5542,486.819"
-       sodipodi:start="0"
-       sodipodi:end="6.219772"
-       sodipodi:open="true"
-       transform="translate(494.5714,425.4286)" />
-    <path
-       sodipodi:type="arc"
-       style="fill:#0000fd;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path1929"
-       sodipodi:cx="65"
-       sodipodi:cy="487.36218"
-       sodipodi:rx="8.5714283"
-       sodipodi:ry="8.5714283"
-       d="M 73.571428,487.36218 A 8.5714283,8.5714283 0 1 1 73.5542,486.819"
-       sodipodi:start="0"
-       sodipodi:end="6.219772"
-       sodipodi:open="true"
-       transform="translate(494.5714,185.4286)" />
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="67.363281"
-       y="952.36218"
-       id="text1895"><tspan
-         sodipodi:role="line"
-         id="tspan1897"
-         x="67.363281"
-         y="952.36218">0</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="307.36328"
-       y="952.36218"
-       id="text1935"><tspan
-         sodipodi:role="line"
-         id="tspan1937"
-         x="307.36328"
-         y="952.36218">1</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="547.36328"
-       y="952.36218"
-       id="text1939"><tspan
-         sodipodi:role="line"
-         id="tspan1941"
-         x="547.36328"
-         y="952.36218">2</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="547.36328"
-       y="652.36218"
-       id="text1943"><tspan
-         sodipodi:role="line"
-         id="tspan1945"
-         x="547.36328"
-         y="652.36218">3</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="307.36328"
-       y="652.36218"
-       id="text1949"><tspan
-         sodipodi:role="line"
-         id="tspan1951"
-         x="307.36328"
-         y="652.36218">4</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="288.81348"
-       y="902.1073"
-       id="text1957"><tspan
-         sodipodi:role="line"
-         id="tspan1959"
-         x="288.81348"
-         y="902.1073">v1</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="288.81348"
-       y="702.1073"
-       id="text1961"><tspan
-         sodipodi:role="line"
-         id="tspan1963"
-         x="288.81348"
-         y="702.1073">v2</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="90"
-       y="702.36218"
-       id="text1965"><tspan
-         sodipodi:role="line"
-         id="tspan1967"
-         x="90"
-         y="702.36218">v3</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="330"
-       y="902.36218"
-       id="text1969"><tspan
-         sodipodi:role="line"
-         id="tspan1971"
-         x="330"
-         y="902.36218">v0</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="528.81348"
-       y="902.1073"
-       id="text1973"><tspan
-         sodipodi:role="line"
-         id="tspan1975"
-         x="528.81348"
-         y="902.1073">v3</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="528.81348"
-       y="702.1073"
-       id="text1977"><tspan
-         sodipodi:role="line"
-         id="tspan1979"
-         x="528.81348"
-         y="702.1073">v2</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="328.81348"
-       y="702.1073"
-       id="text1981"><tspan
-         sodipodi:role="line"
-         id="tspan1983"
-         x="328.81348"
-         y="702.1073">v1</tspan></text>
-  </g>
-</svg>
diff --git a/ufc-merge-into-ffc/doc/manual/svg/numbering_example_triangles.svg b/ufc-merge-into-ffc/doc/manual/svg/numbering_example_triangles.svg
deleted file mode 100644
index 8383013..0000000
--- a/ufc-merge-into-ffc/doc/manual/svg/numbering_example_triangles.svg
+++ /dev/null
@@ -1,348 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-<svg
-   xmlns:dc="http://purl.org/dc/elements/1.1/"
-   xmlns:cc="http://web.resource.org/cc/"
-   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-   xmlns:svg="http://www.w3.org/2000/svg"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="744.09448819"
-   height="1052.3622047"
-   id="svg2"
-   sodipodi:version="0.32"
-   inkscape:version="0.44"
-   sodipodi:docbase="/home/logg/work/src/fenics/ufc/ufc-dev/doc/manual/svg"
-   sodipodi:docname="numbering_example_triangles.svg">
-  <defs
-     id="defs4">
-    <linearGradient
-       id="linearGradient3008">
-      <stop
-         style="stop-color:#557cec;stop-opacity:1;"
-         offset="0"
-         id="stop3010" />
-      <stop
-         id="stop3016"
-         offset="1"
-         style="stop-color:#5b81ec;stop-opacity:0.49803922;" />
-      <stop
-         style="stop-color:#6286ed;stop-opacity:0;"
-         offset="1"
-         id="stop3012" />
-    </linearGradient>
-    <linearGradient
-       id="linearGradient2976">
-      <stop
-         style="stop-color:black;stop-opacity:1;"
-         offset="0"
-         id="stop2978" />
-      <stop
-         id="stop3004"
-         offset="1"
-         style="stop-color:#6685f0;stop-opacity:0.16470589;" />
-      <stop
-         style="stop-color:#76b9c5;stop-opacity:0.08235294;"
-         offset="1"
-         id="stop3006" />
-      <stop
-         style="stop-color:black;stop-opacity:0;"
-         offset="1"
-         id="stop2980" />
-    </linearGradient>
-    <marker
-       inkscape:stockid="Arrow2Mend"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="Arrow2Mend"
-       style="overflow:visible;">
-      <path
-         id="path2906"
-         style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
-         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
-         transform="scale(0.6) rotate(180) translate(0,0)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutL"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutL"
-       style="overflow:visible">
-      <path
-         id="path2835"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.8)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutM"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutM"
-       style="overflow:visible">
-      <path
-         id="path2832"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.4)" />
-    </marker>
-  </defs>
-  <sodipodi:namedview
-     id="base"
-     pagecolor="#ffffff"
-     bordercolor="#666666"
-     borderopacity="1.0"
-     gridtolerance="10000"
-     guidetolerance="10"
-     objecttolerance="10"
-     inkscape:pageopacity="0.0"
-     inkscape:pageshadow="2"
-     inkscape:zoom="1.4"
-     inkscape:cx="368.19011"
-     inkscape:cy="342.87065"
-     inkscape:document-units="px"
-     inkscape:current-layer="layer1"
-     showgrid="true"
-     inkscape:window-width="1590"
-     inkscape:window-height="1118"
-     inkscape:window-x="0"
-     inkscape:window-y="29"
-     inkscape:grid-points="true"
-     gridempspacing="1" />
-  <metadata
-     id="metadata7">
-    <rdf:RDF>
-      <cc:Work
-         rdf:about="">
-        <dc:format>image/svg+xml</dc:format>
-        <dc:type
-           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
-      </cc:Work>
-    </rdf:RDF>
-  </metadata>
-  <g
-     inkscape:label="Layer 1"
-     inkscape:groupmode="layer"
-     id="layer1">
-    <path
-       style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1"
-       d="M 58.5,693.86218 L 378.5,913.86218 L 378.5,473.86218 L 58.5,693.86218 z "
-       id="path3018"
-       sodipodi:nodetypes="cccc" />
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="28.417969"
-       y="672.02234"
-       id="text3929"><tspan
-         sodipodi:role="line"
-         id="tspan3931"
-         x="28.417969"
-         y="672.02234">0</tspan></text>
-    <path
-       style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1"
-       d="M 698.5,693.86218 L 378.5,913.86218 L 378.5,473.86218 L 698.5,693.86218 z "
-       id="path1893"
-       sodipodi:nodetypes="cccc"
-       inkscape:transform-center-x="-120"
-       inkscape:transform-center-y="-40.000003" />
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="358.41797"
-       y="952.02234"
-       id="text1895"><tspan
-         sodipodi:role="line"
-         id="tspan1897"
-         x="358.41797"
-         y="952.02234">1</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="358.17188"
-       y="452.02234"
-       id="text1899"><tspan
-         sodipodi:role="line"
-         id="tspan1901"
-         x="358.17188"
-         y="452.02234">3</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="697.13281"
-       y="670.17468"
-       id="text1903"><tspan
-         sodipodi:role="line"
-         id="tspan1905"
-         x="697.13281"
-         y="670.17468">2</tspan></text>
-    <path
-       sodipodi:type="arc"
-       style="fill:none;fill-opacity:1;stroke:none;stroke-width:1.30170989;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:2.60341978, 1.30170989;stroke-dashoffset:0;stroke-opacity:1"
-       id="path3681"
-       sodipodi:cx="85"
-       sodipodi:cy="517.36218"
-       sodipodi:rx="15"
-       sodipodi:ry="15"
-       d="M 100,517.36218 A 15,15 0 1 1 81.545637,502.76536 L 85,517.36218 z"
-       sodipodi:start="0"
-       sodipodi:end="4.4800124" />
-    <path
-       sodipodi:type="arc"
-       style="fill:none;fill-opacity:1;stroke:none;stroke-width:1.30170989;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:2.60341978, 1.30170989;stroke-dashoffset:0;stroke-opacity:1"
-       id="path3683"
-       sodipodi:cx="75"
-       sodipodi:cy="447.36218"
-       sodipodi:rx="45"
-       sodipodi:ry="55"
-       d="M 120,447.36218 A 45,55 0 1 1 64.636911,393.84048 L 75,447.36218 z"
-       sodipodi:start="0"
-       sodipodi:end="4.4800124" />
-    <path
-       sodipodi:type="arc"
-       style="fill:none;fill-opacity:1;stroke:none;stroke-width:1.30170989;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:2.60341978, 1.30170989;stroke-dashoffset:0;stroke-opacity:1"
-       id="path3685"
-       sodipodi:cx="85"
-       sodipodi:cy="427.36218"
-       sodipodi:rx="35"
-       sodipodi:ry="35"
-       d="M 120,427.36218 A 35,35 0 1 1 76.939819,393.30292 L 85,427.36218 z"
-       sodipodi:start="0"
-       sodipodi:end="4.4800124" />
-    <path
-       sodipodi:type="arc"
-       style="fill:none;fill-opacity:1;stroke:none;stroke-width:1.30170989;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:2.60341978, 1.30170989;stroke-dashoffset:0;stroke-opacity:1"
-       id="path3687"
-       sodipodi:cx="160"
-       sodipodi:cy="437.36218"
-       sodipodi:rx="50"
-       sodipodi:ry="35"
-       d="M 210,437.36218 A 50,35 0 1 1 150.96766,402.938 L 160,437.36218 z"
-       sodipodi:start="0"
-       sodipodi:end="4.5307449" />
-    <rect
-       style="fill:none;fill-opacity:1;stroke:none;stroke-width:1.30170989;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:2.60341978, 1.30170989;stroke-dashoffset:0;stroke-opacity:1"
-       id="rect4574"
-       width="70"
-       height="80"
-       x="80"
-       y="422.36218"
-       ry="5.5558391" />
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="88.813477"
-       y="702.1073"
-       id="text4576"><tspan
-         sodipodi:role="line"
-         id="tspan4578"
-         x="88.813477"
-         y="702.1073">v0</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="330"
-       y="862.36218"
-       id="text4580"><tspan
-         sodipodi:role="line"
-         id="tspan4582"
-         x="330"
-         y="862.36218">v1</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="329.46387"
-       y="532.36218"
-       id="text4584"><tspan
-         sodipodi:role="line"
-         id="tspan4586"
-         x="329.46387"
-         y="532.36218">v2</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="390"
-       y="862.36218"
-       id="text4588"><tspan
-         sodipodi:role="line"
-         id="tspan4590"
-         x="390"
-         y="862.36218">v0</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="640"
-       y="702.36218"
-       id="text4596"><tspan
-         sodipodi:role="line"
-         id="tspan4598"
-         x="640"
-         y="702.36218">v1</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="389.46387"
-       y="532.36218"
-       id="text4600"><tspan
-         sodipodi:role="line"
-         id="tspan4602"
-         x="389.46387"
-         y="532.36218">v2</tspan></text>
-    <path
-       sodipodi:type="arc"
-       style="fill:#0000fd;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path4604"
-       sodipodi:cx="65"
-       sodipodi:cy="487.36218"
-       sodipodi:rx="8.5714283"
-       sodipodi:ry="8.5714283"
-       d="M 73.571428,487.36218 A 8.5714283,8.5714283 0 1 1 73.5542,486.819"
-       sodipodi:start="0"
-       sodipodi:end="6.219772"
-       sodipodi:open="true"
-       transform="translate(-5,205)" />
-    <path
-       sodipodi:type="arc"
-       style="fill:#0000fd;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path6376"
-       sodipodi:cx="65"
-       sodipodi:cy="487.36218"
-       sodipodi:rx="8.5714283"
-       sodipodi:ry="8.5714283"
-       d="M 73.571428,487.36218 A 8.5714283,8.5714283 0 1 1 73.5542,486.819"
-       sodipodi:start="0"
-       sodipodi:end="6.219772"
-       sodipodi:open="true"
-       transform="translate(313.5613,-13.0562)" />
-    <path
-       sodipodi:type="arc"
-       style="fill:#0000fd;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path6378"
-       sodipodi:cx="65"
-       sodipodi:cy="487.36218"
-       sodipodi:rx="8.5714283"
-       sodipodi:ry="8.5714283"
-       d="M 73.571428,487.36218 A 8.5714283,8.5714283 0 1 1 73.5542,486.819"
-       sodipodi:start="0"
-       sodipodi:end="6.219772"
-       sodipodi:open="true"
-       transform="translate(313.5612,425.4286)" />
-    <path
-       sodipodi:type="arc"
-       style="fill:#0000fd;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path6380"
-       sodipodi:cx="65"
-       sodipodi:cy="487.36218"
-       sodipodi:rx="8.5714283"
-       sodipodi:ry="8.5714283"
-       d="M 73.571428,487.36218 A 8.5714283,8.5714283 0 1 1 73.5542,486.819"
-       sodipodi:start="0"
-       sodipodi:end="6.219772"
-       sodipodi:open="true"
-       transform="translate(633.4848,205)" />
-  </g>
-</svg>
diff --git a/ufc-merge-into-ffc/doc/manual/svg/ordering_example_tetrahedron.svg b/ufc-merge-into-ffc/doc/manual/svg/ordering_example_tetrahedron.svg
deleted file mode 100644
index 22f9296..0000000
--- a/ufc-merge-into-ffc/doc/manual/svg/ordering_example_tetrahedron.svg
+++ /dev/null
@@ -1,217 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-<svg
-   xmlns:dc="http://purl.org/dc/elements/1.1/"
-   xmlns:cc="http://web.resource.org/cc/"
-   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-   xmlns:svg="http://www.w3.org/2000/svg"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="744.09448819"
-   height="1052.3622047"
-   id="svg2"
-   sodipodi:version="0.32"
-   inkscape:version="0.44"
-   sodipodi:docbase="/home/logg/work/src/fenics/ufc/ufc-dev/doc/manual/svg"
-   sodipodi:docname="ordering_example_tetrahedron.svg">
-  <defs
-     id="defs4">
-    <linearGradient
-       id="linearGradient3008">
-      <stop
-         style="stop-color:#557cec;stop-opacity:1;"
-         offset="0"
-         id="stop3010" />
-      <stop
-         id="stop3016"
-         offset="1"
-         style="stop-color:#5b81ec;stop-opacity:0.49803922;" />
-      <stop
-         style="stop-color:#6286ed;stop-opacity:0;"
-         offset="1"
-         id="stop3012" />
-    </linearGradient>
-    <linearGradient
-       id="linearGradient2976">
-      <stop
-         style="stop-color:black;stop-opacity:1;"
-         offset="0"
-         id="stop2978" />
-      <stop
-         id="stop3004"
-         offset="1"
-         style="stop-color:#6685f0;stop-opacity:0.16470589;" />
-      <stop
-         style="stop-color:#76b9c5;stop-opacity:0.08235294;"
-         offset="1"
-         id="stop3006" />
-      <stop
-         style="stop-color:black;stop-opacity:0;"
-         offset="1"
-         id="stop2980" />
-    </linearGradient>
-    <marker
-       inkscape:stockid="Arrow2Mend"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="Arrow2Mend"
-       style="overflow:visible;">
-      <path
-         id="path2906"
-         style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
-         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
-         transform="scale(0.6) rotate(180) translate(0,0)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutL"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutL"
-       style="overflow:visible">
-      <path
-         id="path2835"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.8)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutM"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutM"
-       style="overflow:visible">
-      <path
-         id="path2832"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.4)" />
-    </marker>
-  </defs>
-  <sodipodi:namedview
-     id="base"
-     pagecolor="#ffffff"
-     bordercolor="#666666"
-     borderopacity="1.0"
-     gridtolerance="10000"
-     guidetolerance="50"
-     objecttolerance="50"
-     inkscape:pageopacity="0.0"
-     inkscape:pageshadow="2"
-     inkscape:zoom="1.4"
-     inkscape:cx="467.24294"
-     inkscape:cy="368.07631"
-     inkscape:document-units="px"
-     inkscape:current-layer="layer1"
-     showgrid="true"
-     inkscape:window-width="1600"
-     inkscape:window-height="1157"
-     inkscape:window-x="0"
-     inkscape:window-y="43"
-     gridspacingx="1px"
-     gridspacingy="1px"
-     inkscape:object-bbox="false"
-     inkscape:object-points="false"
-     inkscape:grid-points="true"
-     inkscape:object-nodes="false"
-     inkscape:object-paths="false"
-     inkscape:grid-bbox="false"
-     inkscape:guide-bbox="false" />
-  <metadata
-     id="metadata7">
-    <rdf:RDF>
-      <cc:Work
-         rdf:about="">
-        <dc:format>image/svg+xml</dc:format>
-        <dc:type
-           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
-      </cc:Work>
-    </rdf:RDF>
-  </metadata>
-  <g
-     inkscape:label="Layer 1"
-     inkscape:groupmode="layer"
-     id="layer1">
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="100"
-       y="872.36218"
-       id="text3929"><tspan
-         sodipodi:role="line"
-         id="tspan3931"
-         x="100"
-         y="872.36218">v0</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="440"
-       y="972.36218"
-       id="text2785"
-       inkscape:transform-center-x="97.765625"><tspan
-         sodipodi:role="line"
-         id="tspan2787"
-         x="440"
-         y="972.36218">v1</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="380"
-       y="762.36218"
-       id="text2789"><tspan
-         sodipodi:role="line"
-         id="tspan2791"
-         x="380"
-         y="762.36218">v2</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="120"
-       y="502.36218"
-       id="text2793"><tspan
-         sodipodi:role="line"
-         id="tspan2795"
-         x="120"
-         y="502.36218">v3</tspan></text>
-    <path
-       style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
-       d="M 140,852.36218 L 440,942.36218 L 140,522.36218 L 140,522.36218 L 140,852.36218 z "
-       id="path2000"
-       sodipodi:nodetypes="ccccc" />
-    <path
-       style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
-       d="M 440,942.36218 L 440,942.36218 L 370,772.36218 L 140,522.36218 L 440,942.36218 z "
-       id="path2887"
-       sodipodi:nodetypes="ccccc" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:18,6;stroke-dashoffset:0"
-       d="M 140,852.36218 L 370,772.36218 L 370,772.36218"
-       id="path2893"
-       sodipodi:nodetypes="ccc" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
-       d="M 140,852.36218 L 140,522.36218 L 440,942.36218 L 140,852.36218 z "
-       id="path3835" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2.8;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
-       d="M 140,522.36218 L 370,772.36218"
-       id="path3837" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2.8;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
-       d="M 370,772.36218 L 440,942.36218"
-       id="path3839" />
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="290"
-       y="662.36218"
-       id="text1905"><tspan
-         sodipodi:role="line"
-         id="tspan1907"
-         x="290"
-         y="662.36218">e0</tspan></text>
-  </g>
-</svg>
diff --git a/ufc-merge-into-ffc/doc/manual/svg/ordering_example_triangle.svg b/ufc-merge-into-ffc/doc/manual/svg/ordering_example_triangle.svg
deleted file mode 100644
index a9ce0a7..0000000
--- a/ufc-merge-into-ffc/doc/manual/svg/ordering_example_triangle.svg
+++ /dev/null
@@ -1,176 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-<svg
-   xmlns:dc="http://purl.org/dc/elements/1.1/"
-   xmlns:cc="http://web.resource.org/cc/"
-   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-   xmlns:svg="http://www.w3.org/2000/svg"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="744.09448819"
-   height="1052.3622047"
-   id="svg2"
-   sodipodi:version="0.32"
-   inkscape:version="0.44"
-   sodipodi:docbase="/home/logg/work/src/fenics/ufc/ufc-dev/doc/manual/svg"
-   sodipodi:docname="ordering_example_triangle.svg">
-  <defs
-     id="defs4">
-    <linearGradient
-       id="linearGradient3008">
-      <stop
-         style="stop-color:#557cec;stop-opacity:1;"
-         offset="0"
-         id="stop3010" />
-      <stop
-         id="stop3016"
-         offset="1"
-         style="stop-color:#5b81ec;stop-opacity:0.49803922;" />
-      <stop
-         style="stop-color:#6286ed;stop-opacity:0;"
-         offset="1"
-         id="stop3012" />
-    </linearGradient>
-    <linearGradient
-       id="linearGradient2976">
-      <stop
-         style="stop-color:black;stop-opacity:1;"
-         offset="0"
-         id="stop2978" />
-      <stop
-         id="stop3004"
-         offset="1"
-         style="stop-color:#6685f0;stop-opacity:0.16470589;" />
-      <stop
-         style="stop-color:#76b9c5;stop-opacity:0.08235294;"
-         offset="1"
-         id="stop3006" />
-      <stop
-         style="stop-color:black;stop-opacity:0;"
-         offset="1"
-         id="stop2980" />
-    </linearGradient>
-    <marker
-       inkscape:stockid="Arrow2Mend"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="Arrow2Mend"
-       style="overflow:visible;">
-      <path
-         id="path2906"
-         style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
-         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
-         transform="scale(0.6) rotate(180) translate(0,0)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutL"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutL"
-       style="overflow:visible">
-      <path
-         id="path2835"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.8)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutM"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutM"
-       style="overflow:visible">
-      <path
-         id="path2832"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.4)" />
-    </marker>
-  </defs>
-  <sodipodi:namedview
-     id="base"
-     pagecolor="#ffffff"
-     bordercolor="#666666"
-     borderopacity="1.0"
-     gridtolerance="10000"
-     guidetolerance="10"
-     objecttolerance="10"
-     inkscape:pageopacity="0.0"
-     inkscape:pageshadow="2"
-     inkscape:zoom="0.7"
-     inkscape:cx="375"
-     inkscape:cy="318.4798"
-     inkscape:document-units="px"
-     inkscape:current-layer="layer1"
-     showgrid="true"
-     inkscape:window-width="976"
-     inkscape:window-height="802"
-     inkscape:window-x="354"
-     inkscape:window-y="184"
-     inkscape:grid-points="true"
-     gridempspacing="1" />
-  <metadata
-     id="metadata7">
-    <rdf:RDF>
-      <cc:Work
-         rdf:about="">
-        <dc:format>image/svg+xml</dc:format>
-        <dc:type
-           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
-      </cc:Work>
-    </rdf:RDF>
-  </metadata>
-  <g
-     inkscape:label="Layer 1"
-     inkscape:groupmode="layer"
-     id="layer1">
-    <path
-       style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1"
-       d="M 120,912.36218 L 580,912.36218 L 120,452.36218 L 120,912.36218 z "
-       id="path3018" />
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="79.285156"
-       y="932.02234"
-       id="text3929"><tspan
-         sodipodi:role="line"
-         id="tspan3931"
-         x="79.285156"
-         y="932.02234">v0</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="592.74219"
-       y="929.85828"
-       id="text4019"><tspan
-         sodipodi:role="line"
-         id="tspan4021"
-         x="592.74219"
-         y="929.85828">v1</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="92.929688"
-       y="430.17468"
-       id="text4023"><tspan
-         sodipodi:role="line"
-         id="tspan4025"
-         x="92.929688"
-         y="430.17468">v2</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="360"
-       y="652.36218"
-       id="text2779"><tspan
-         sodipodi:role="line"
-         id="tspan2781"
-         x="360"
-         y="652.36218">e0</tspan></text>
-  </g>
-</svg>
diff --git a/ufc-merge-into-ffc/doc/manual/svg/orientation_example_triangles.svg b/ufc-merge-into-ffc/doc/manual/svg/orientation_example_triangles.svg
deleted file mode 100644
index 470138a..0000000
--- a/ufc-merge-into-ffc/doc/manual/svg/orientation_example_triangles.svg
+++ /dev/null
@@ -1,374 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-<svg
-   xmlns:dc="http://purl.org/dc/elements/1.1/"
-   xmlns:cc="http://web.resource.org/cc/"
-   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-   xmlns:svg="http://www.w3.org/2000/svg"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="744.09448819"
-   height="1052.3622047"
-   id="svg2"
-   sodipodi:version="0.32"
-   inkscape:version="0.44"
-   sodipodi:docbase="/home/logg/work/src/fenics/ufc/ufc-dev/doc/manual/svg"
-   sodipodi:docname="numbering_example_triangles.eps"
-   inkscape:output_extension="org.inkscape.output.eps"
-   inkscape:dataloss="true"
-   sodipodi:modified="true">
-  <defs
-     id="defs4">
-    <marker
-       inkscape:stockid="Arrow2Lend"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="Arrow2Lend"
-       style="overflow:visible;">
-      <path
-         id="path3469"
-         style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
-         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
-         transform="scale(1.1) rotate(180) translate(1,0)" />
-    </marker>
-    <linearGradient
-       id="linearGradient3008">
-      <stop
-         style="stop-color:#557cec;stop-opacity:1;"
-         offset="0"
-         id="stop3010" />
-      <stop
-         id="stop3016"
-         offset="1"
-         style="stop-color:#5b81ec;stop-opacity:0.49803922;" />
-      <stop
-         style="stop-color:#6286ed;stop-opacity:0;"
-         offset="1"
-         id="stop3012" />
-    </linearGradient>
-    <linearGradient
-       id="linearGradient2976">
-      <stop
-         style="stop-color:black;stop-opacity:1;"
-         offset="0"
-         id="stop2978" />
-      <stop
-         id="stop3004"
-         offset="1"
-         style="stop-color:#6685f0;stop-opacity:0.16470589;" />
-      <stop
-         style="stop-color:#76b9c5;stop-opacity:0.08235294;"
-         offset="1"
-         id="stop3006" />
-      <stop
-         style="stop-color:black;stop-opacity:0;"
-         offset="1"
-         id="stop2980" />
-    </linearGradient>
-    <marker
-       inkscape:stockid="Arrow2Mend"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="Arrow2Mend"
-       style="overflow:visible;">
-      <path
-         id="path2906"
-         style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
-         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
-         transform="scale(0.6) rotate(180) translate(0,0)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutL"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutL"
-       style="overflow:visible">
-      <path
-         id="path2835"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.8)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutM"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutM"
-       style="overflow:visible">
-      <path
-         id="path2832"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.4)" />
-    </marker>
-  </defs>
-  <sodipodi:namedview
-     id="base"
-     pagecolor="#ffffff"
-     bordercolor="#666666"
-     borderopacity="1.0"
-     gridtolerance="10000"
-     guidetolerance="10"
-     objecttolerance="10"
-     inkscape:pageopacity="0.0"
-     inkscape:pageshadow="2"
-     inkscape:zoom="1.4"
-     inkscape:cx="368.19011"
-     inkscape:cy="342.87065"
-     inkscape:document-units="px"
-     inkscape:current-layer="layer1"
-     showgrid="true"
-     inkscape:window-width="1590"
-     inkscape:window-height="1118"
-     inkscape:window-x="0"
-     inkscape:window-y="29"
-     inkscape:grid-points="true"
-     gridempspacing="1" />
-  <metadata
-     id="metadata7">
-    <rdf:RDF>
-      <cc:Work
-         rdf:about="">
-        <dc:format>image/svg+xml</dc:format>
-        <dc:type
-           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
-      </cc:Work>
-    </rdf:RDF>
-  </metadata>
-  <g
-     inkscape:label="Layer 1"
-     inkscape:groupmode="layer"
-     id="layer1">
-    <path
-       style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1"
-       d="M 21.5,690.86218 L 341.5,910.86218 L 341.5,470.86218 L 21.5,690.86218 z "
-       id="path3018"
-       sodipodi:nodetypes="cccc" />
-    <path
-       style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-opacity:1"
-       d="M 701.5,690.86218 L 381.5,910.86218 L 381.5,470.86218 L 701.5,690.86218 z "
-       id="path1893"
-       sodipodi:nodetypes="cccc"
-       inkscape:transform-center-x="-120"
-       inkscape:transform-center-y="-40.000003" />
-    <path
-       sodipodi:type="arc"
-       style="fill:none;fill-opacity:1;stroke:none;stroke-width:1.30170989;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:2.60341978, 1.30170989;stroke-dashoffset:0;stroke-opacity:1"
-       id="path3681"
-       sodipodi:cx="85"
-       sodipodi:cy="517.36218"
-       sodipodi:rx="15"
-       sodipodi:ry="15"
-       d="M 100,517.36218 A 15,15 0 1 1 81.545637,502.76536 L 85,517.36218 z"
-       sodipodi:start="0"
-       sodipodi:end="4.4800124" />
-    <path
-       sodipodi:type="arc"
-       style="fill:none;fill-opacity:1;stroke:none;stroke-width:1.30170989;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:2.60341978, 1.30170989;stroke-dashoffset:0;stroke-opacity:1"
-       id="path3683"
-       sodipodi:cx="75"
-       sodipodi:cy="447.36218"
-       sodipodi:rx="45"
-       sodipodi:ry="55"
-       d="M 120,447.36218 A 45,55 0 1 1 64.636911,393.84048 L 75,447.36218 z"
-       sodipodi:start="0"
-       sodipodi:end="4.4800124" />
-    <path
-       sodipodi:type="arc"
-       style="fill:none;fill-opacity:1;stroke:none;stroke-width:1.30170989;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:2.60341978, 1.30170989;stroke-dashoffset:0;stroke-opacity:1"
-       id="path3685"
-       sodipodi:cx="85"
-       sodipodi:cy="427.36218"
-       sodipodi:rx="35"
-       sodipodi:ry="35"
-       d="M 120,427.36218 A 35,35 0 1 1 76.939819,393.30292 L 85,427.36218 z"
-       sodipodi:start="0"
-       sodipodi:end="4.4800124" />
-    <path
-       sodipodi:type="arc"
-       style="fill:none;fill-opacity:1;stroke:none;stroke-width:1.30170989;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:2.60341978, 1.30170989;stroke-dashoffset:0;stroke-opacity:1"
-       id="path3687"
-       sodipodi:cx="160"
-       sodipodi:cy="437.36218"
-       sodipodi:rx="50"
-       sodipodi:ry="35"
-       d="M 210,437.36218 A 50,35 0 1 1 150.96766,402.938 L 160,437.36218 z"
-       sodipodi:start="0"
-       sodipodi:end="4.5307449" />
-    <rect
-       style="fill:none;fill-opacity:1;stroke:none;stroke-width:1.30170989;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:2.60341978, 1.30170989;stroke-dashoffset:0;stroke-opacity:1"
-       id="rect4574"
-       width="70"
-       height="80"
-       x="80"
-       y="422.36218"
-       ry="5.5558391" />
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="49.463867"
-       y="692.1073"
-       id="text4576"><tspan
-         sodipodi:role="line"
-         id="tspan4578"
-         x="49.463867"
-         y="692.1073">v0</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="310"
-       y="882.36218"
-       id="text4580"><tspan
-         sodipodi:role="line"
-         id="tspan4582"
-         x="310"
-         y="882.36218">v1</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="310"
-       y="512.36218"
-       id="text4584"><tspan
-         sodipodi:role="line"
-         id="tspan4586"
-         x="310"
-         y="512.36218">v2</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="390"
-       y="882.36218"
-       id="text4588"><tspan
-         sodipodi:role="line"
-         id="tspan4590"
-         x="390"
-         y="882.36218">v0</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="649.46387"
-       y="692.36218"
-       id="text4596"><tspan
-         sodipodi:role="line"
-         id="tspan4598"
-         x="649.46387"
-         y="692.36218">v1</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:18px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="388.68164"
-       y="512.36218"
-       id="text4600"><tspan
-         sodipodi:role="line"
-         id="tspan4602"
-         x="388.68164"
-         y="512.36218">v2</tspan></text>
-    <path
-       sodipodi:type="arc"
-       style="fill:#0000fd;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path4604"
-       sodipodi:cx="65"
-       sodipodi:cy="487.36218"
-       sodipodi:rx="8.5714283"
-       sodipodi:ry="8.5714283"
-       d="M 73.571428,487.36218 A 8.5714283,8.5714283 0 1 1 73.5542,486.819"
-       sodipodi:start="0"
-       sodipodi:end="6.219772"
-       sodipodi:open="true"
-       transform="translate(-44.57143,204.5714)" />
-    <path
-       sodipodi:type="arc"
-       style="fill:#0000fd;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path6376"
-       sodipodi:cx="65"
-       sodipodi:cy="487.36218"
-       sodipodi:rx="8.5714283"
-       sodipodi:ry="8.5714283"
-       d="M 73.571428,487.36218 A 8.5714283,8.5714283 0 1 1 73.5542,486.819"
-       sodipodi:start="0"
-       sodipodi:end="6.219772"
-       sodipodi:open="true"
-       transform="translate(315.4286,-15.42857)" />
-    <path
-       sodipodi:type="arc"
-       style="fill:#0000fd;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path6378"
-       sodipodi:cx="65"
-       sodipodi:cy="487.36218"
-       sodipodi:rx="8.5714283"
-       sodipodi:ry="8.5714283"
-       d="M 73.571428,487.36218 A 8.5714283,8.5714283 0 1 1 73.5542,486.819"
-       sodipodi:start="0"
-       sodipodi:end="6.219772"
-       sodipodi:open="true"
-       transform="translate(315.4286,424.5714)" />
-    <path
-       sodipodi:type="arc"
-       style="fill:#0000fd;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path6380"
-       sodipodi:cx="65"
-       sodipodi:cy="487.36218"
-       sodipodi:rx="8.5714283"
-       sodipodi:ry="8.5714283"
-       d="M 73.571428,487.36218 A 8.5714283,8.5714283 0 1 1 73.5542,486.819"
-       sodipodi:start="0"
-       sodipodi:end="6.219772"
-       sodipodi:open="true"
-       transform="translate(633.4848,205)" />
-    <path
-       sodipodi:type="arc"
-       style="fill:#0000fd;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path2426"
-       sodipodi:cx="65"
-       sodipodi:cy="487.36218"
-       sodipodi:rx="8.5714283"
-       sodipodi:ry="8.5714283"
-       d="M 73.571428,487.36218 A 8.5714283,8.5714283 0 1 1 73.5542,486.819"
-       sodipodi:start="0"
-       sodipodi:end="6.219772"
-       sodipodi:open="true"
-       transform="translate(275.4286,-15.42857)" />
-    <path
-       sodipodi:type="arc"
-       style="fill:#0000fd;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
-       id="path2428"
-       sodipodi:cx="65"
-       sodipodi:cy="487.36218"
-       sodipodi:rx="8.5714283"
-       sodipodi:ry="8.5714283"
-       d="M 73.571428,487.36218 A 8.5714283,8.5714283 0 1 1 73.5542,486.819"
-       sodipodi:start="0"
-       sodipodi:end="6.219772"
-       sodipodi:open="true"
-       transform="translate(275.4286,424.5714)" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow2Lend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
-       d="M 320,822.36218 L 320,545.28126"
-       id="path2430" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow2Lend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
-       d="M 399.38393,822.6459 L 399.38393,545.56498"
-       id="path3499" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow2Lend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
-       d="M 641.64291,671.00917 L 432.98922,527.69729"
-       id="path3501" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow2Lend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
-       d="M 81.5,670.86218 L 290.15369,527.5503"
-       id="path3503" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2.86479735;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow2Lend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
-       d="M 90.017323,712.34476 L 290.10879,851.15558"
-       id="path3505" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2.86479735;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow2Lend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
-       d="M 429.90853,851.173 L 630,712.36218"
-       id="path3509" />
-  </g>
-</svg>
diff --git a/ufc-merge-into-ffc/doc/manual/svg/quadrilateral.svg b/ufc-merge-into-ffc/doc/manual/svg/quadrilateral.svg
deleted file mode 100644
index 2de221e..0000000
--- a/ufc-merge-into-ffc/doc/manual/svg/quadrilateral.svg
+++ /dev/null
@@ -1,193 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-<svg
-   xmlns:dc="http://purl.org/dc/elements/1.1/"
-   xmlns:cc="http://web.resource.org/cc/"
-   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-   xmlns:svg="http://www.w3.org/2000/svg"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="744.09448819"
-   height="1052.3622047"
-   id="svg2"
-   sodipodi:version="0.32"
-   inkscape:version="0.44"
-   sodipodi:docbase="/home/logg/work/src/fenics/ufc/ufc-dev/doc/manual/svg"
-   sodipodi:docname="quadrilateral.svg">
-  <defs
-     id="defs4">
-    <linearGradient
-       id="linearGradient3008">
-      <stop
-         style="stop-color:#557cec;stop-opacity:1;"
-         offset="0"
-         id="stop3010" />
-      <stop
-         id="stop3016"
-         offset="1"
-         style="stop-color:#5b81ec;stop-opacity:0.49803922;" />
-      <stop
-         style="stop-color:#6286ed;stop-opacity:0;"
-         offset="1"
-         id="stop3012" />
-    </linearGradient>
-    <linearGradient
-       id="linearGradient2976">
-      <stop
-         style="stop-color:black;stop-opacity:1;"
-         offset="0"
-         id="stop2978" />
-      <stop
-         id="stop3004"
-         offset="1"
-         style="stop-color:#6685f0;stop-opacity:0.16470589;" />
-      <stop
-         style="stop-color:#76b9c5;stop-opacity:0.08235294;"
-         offset="1"
-         id="stop3006" />
-      <stop
-         style="stop-color:black;stop-opacity:0;"
-         offset="1"
-         id="stop2980" />
-    </linearGradient>
-    <marker
-       inkscape:stockid="Arrow2Mend"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="Arrow2Mend"
-       style="overflow:visible;">
-      <path
-         id="path2906"
-         style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
-         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
-         transform="scale(0.6) rotate(180) translate(0,0)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutL"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutL"
-       style="overflow:visible">
-      <path
-         id="path2835"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.8)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutM"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutM"
-       style="overflow:visible">
-      <path
-         id="path2832"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.4)" />
-    </marker>
-  </defs>
-  <sodipodi:namedview
-     id="base"
-     pagecolor="#ffffff"
-     bordercolor="#666666"
-     borderopacity="1.0"
-     gridtolerance="10000"
-     guidetolerance="50"
-     objecttolerance="50"
-     inkscape:pageopacity="0.0"
-     inkscape:pageshadow="2"
-     inkscape:zoom="0.7"
-     inkscape:cx="486.87935"
-     inkscape:cy="386.84357"
-     inkscape:document-units="px"
-     inkscape:current-layer="layer1"
-     showgrid="true"
-     inkscape:window-width="977"
-     inkscape:window-height="802"
-     inkscape:window-x="613"
-     inkscape:window-y="185"
-     gridspacingx="1px"
-     gridspacingy="1px"
-     inkscape:object-bbox="false"
-     inkscape:object-points="false"
-     inkscape:grid-points="true"
-     inkscape:object-nodes="false"
-     inkscape:object-paths="false"
-     inkscape:grid-bbox="false"
-     inkscape:guide-bbox="false" />
-  <metadata
-     id="metadata7">
-    <rdf:RDF>
-      <cc:Work
-         rdf:about="">
-        <dc:format>image/svg+xml</dc:format>
-        <dc:type
-           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
-      </cc:Work>
-    </rdf:RDF>
-  </metadata>
-  <g
-     inkscape:label="Layer 1"
-     inkscape:groupmode="layer"
-     id="layer1">
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Mend)"
-       d="M 120,912.36218 L 640,912.36218"
-       id="path2942" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Mend)"
-       d="M 120,912.36218 L 120,392.36218"
-       id="path2946" />
-    <path
-       style="fill:#6286ed;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
-       d="M 120,912.36218 L 580,912.36218 L 579.99999,452.36218 L 120,452.36218 L 120,912.36218 z "
-       id="path3018"
-       sodipodi:nodetypes="ccccc" />
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="40"
-       y="952.36218"
-       id="text3929"><tspan
-         sodipodi:role="line"
-         id="tspan3931"
-         x="40"
-         y="952.36218">v0</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="540"
-       y="952.36218"
-       id="text2785"
-       inkscape:transform-center-x="97.765625"><tspan
-         sodipodi:role="line"
-         id="tspan2787"
-         x="540"
-         y="952.36218">v1</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="540"
-       y="432.36218"
-       id="text2789"><tspan
-         sodipodi:role="line"
-         id="tspan2791"
-         x="540"
-         y="432.36218">v2</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="40"
-       y="432.36218"
-       id="text2793"><tspan
-         sodipodi:role="line"
-         id="tspan2795"
-         x="40"
-         y="432.36218">v3</tspan></text>
-  </g>
-</svg>
diff --git a/ufc-merge-into-ffc/doc/manual/svg/tetrahedron.svg b/ufc-merge-into-ffc/doc/manual/svg/tetrahedron.svg
deleted file mode 100644
index cdaf8b6..0000000
--- a/ufc-merge-into-ffc/doc/manual/svg/tetrahedron.svg
+++ /dev/null
@@ -1,222 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-<svg
-   xmlns:dc="http://purl.org/dc/elements/1.1/"
-   xmlns:cc="http://web.resource.org/cc/"
-   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-   xmlns:svg="http://www.w3.org/2000/svg"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="744.09448819"
-   height="1052.3622047"
-   id="svg2"
-   sodipodi:version="0.32"
-   inkscape:version="0.44"
-   sodipodi:docbase="/home/logg/work/src/fenics/ufc/ufc-dev/doc/manual/svg"
-   sodipodi:docname="tetrahedron.svg">
-  <defs
-     id="defs4">
-    <linearGradient
-       id="linearGradient3008">
-      <stop
-         style="stop-color:#557cec;stop-opacity:1;"
-         offset="0"
-         id="stop3010" />
-      <stop
-         id="stop3016"
-         offset="1"
-         style="stop-color:#5b81ec;stop-opacity:0.49803922;" />
-      <stop
-         style="stop-color:#6286ed;stop-opacity:0;"
-         offset="1"
-         id="stop3012" />
-    </linearGradient>
-    <linearGradient
-       id="linearGradient2976">
-      <stop
-         style="stop-color:black;stop-opacity:1;"
-         offset="0"
-         id="stop2978" />
-      <stop
-         id="stop3004"
-         offset="1"
-         style="stop-color:#6685f0;stop-opacity:0.16470589;" />
-      <stop
-         style="stop-color:#76b9c5;stop-opacity:0.08235294;"
-         offset="1"
-         id="stop3006" />
-      <stop
-         style="stop-color:black;stop-opacity:0;"
-         offset="1"
-         id="stop2980" />
-    </linearGradient>
-    <marker
-       inkscape:stockid="Arrow2Mend"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="Arrow2Mend"
-       style="overflow:visible;">
-      <path
-         id="path2906"
-         style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
-         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
-         transform="scale(0.6) rotate(180) translate(0,0)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutL"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutL"
-       style="overflow:visible">
-      <path
-         id="path2835"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.8)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutM"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutM"
-       style="overflow:visible">
-      <path
-         id="path2832"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.4)" />
-    </marker>
-  </defs>
-  <sodipodi:namedview
-     id="base"
-     pagecolor="#ffffff"
-     bordercolor="#666666"
-     borderopacity="1.0"
-     gridtolerance="10000"
-     guidetolerance="50"
-     objecttolerance="50"
-     inkscape:pageopacity="0.0"
-     inkscape:pageshadow="2"
-     inkscape:zoom="1.4"
-     inkscape:cx="467.24294"
-     inkscape:cy="368.07631"
-     inkscape:document-units="px"
-     inkscape:current-layer="layer1"
-     showgrid="true"
-     inkscape:window-width="1590"
-     inkscape:window-height="1118"
-     inkscape:window-x="0"
-     inkscape:window-y="29"
-     gridspacingx="1px"
-     gridspacingy="1px"
-     inkscape:object-bbox="false"
-     inkscape:object-points="false"
-     inkscape:grid-points="true"
-     inkscape:object-nodes="false"
-     inkscape:object-paths="false"
-     inkscape:grid-bbox="false"
-     inkscape:guide-bbox="false" />
-  <metadata
-     id="metadata7">
-    <rdf:RDF>
-      <cc:Work
-         rdf:about="">
-        <dc:format>image/svg+xml</dc:format>
-        <dc:type
-           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
-      </cc:Work>
-    </rdf:RDF>
-  </metadata>
-  <g
-     inkscape:label="Layer 1"
-     inkscape:groupmode="layer"
-     id="layer1">
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow2Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
-       d="M 140,852.36218 L 492.81509,958.50976"
-       id="path2942"
-       sodipodi:nodetypes="cc" />
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="30"
-       y="872.36218"
-       id="text3929"><tspan
-         sodipodi:role="line"
-         id="tspan3931"
-         x="30"
-         y="872.36218">v0</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="350"
-       y="972.36218"
-       id="text2785"
-       inkscape:transform-center-x="97.765625"><tspan
-         sodipodi:role="line"
-         id="tspan2787"
-         x="350"
-         y="972.36218">v1</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="360"
-       y="742.36218"
-       id="text2789"><tspan
-         sodipodi:role="line"
-         id="tspan2791"
-         x="360"
-         y="742.36218">v2</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="30"
-       y="522.36218"
-       id="text2793"><tspan
-         sodipodi:role="line"
-         id="tspan2795"
-         x="30"
-         y="522.36218">v3</tspan></text>
-    <path
-       style="fill:#6286ed;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
-       d="M 140,852.36218 L 440,942.36218 L 140,522.36218 L 140,522.36218 L 140,852.36218 z "
-       id="path2000"
-       sodipodi:nodetypes="ccccc" />
-    <path
-       style="fill:#466bd5;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
-       d="M 440,942.36218 L 440,942.36218 L 370,772.36218 L 140,522.36218 L 440,942.36218 z "
-       id="path2887"
-       sodipodi:nodetypes="ccccc" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#4b4b96;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:18,6;stroke-dashoffset:0"
-       d="M 140,852.36218 L 370,772.36218 L 370,772.36218"
-       id="path2893"
-       sodipodi:nodetypes="ccc" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow2Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
-       d="M 140,852.36218 L 140,482.36218"
-       id="path2913"
-       sodipodi:nodetypes="cc" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow2Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
-       d="M 370,772.36218 L 430,752.36218"
-       id="path2945"
-       sodipodi:nodetypes="cc" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
-       d="M 140,852.36218 L 140,522.36218 L 440,942.36218 L 140,852.36218 z "
-       id="path3835" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2.8;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
-       d="M 140,522.36218 L 370,772.36218"
-       id="path3837" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2.8;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
-       d="M 370,772.36218 L 440,942.36218"
-       id="path3839" />
-  </g>
-</svg>
diff --git a/ufc-merge-into-ffc/doc/manual/svg/triangle.svg b/ufc-merge-into-ffc/doc/manual/svg/triangle.svg
deleted file mode 100644
index 7bbd87d..0000000
--- a/ufc-merge-into-ffc/doc/manual/svg/triangle.svg
+++ /dev/null
@@ -1,174 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-<svg
-   xmlns:dc="http://purl.org/dc/elements/1.1/"
-   xmlns:cc="http://web.resource.org/cc/"
-   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-   xmlns:svg="http://www.w3.org/2000/svg"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="744.09448819"
-   height="1052.3622047"
-   id="svg2"
-   sodipodi:version="0.32"
-   inkscape:version="0.44"
-   sodipodi:docbase="/home/logg/work/src/fenics/ufc/ufc-dev/doc/manual/svg"
-   sodipodi:docname="triangle.svg">
-  <defs
-     id="defs4">
-    <linearGradient
-       id="linearGradient3008">
-      <stop
-         style="stop-color:#557cec;stop-opacity:1;"
-         offset="0"
-         id="stop3010" />
-      <stop
-         id="stop3016"
-         offset="1"
-         style="stop-color:#5b81ec;stop-opacity:0.49803922;" />
-      <stop
-         style="stop-color:#6286ed;stop-opacity:0;"
-         offset="1"
-         id="stop3012" />
-    </linearGradient>
-    <linearGradient
-       id="linearGradient2976">
-      <stop
-         style="stop-color:black;stop-opacity:1;"
-         offset="0"
-         id="stop2978" />
-      <stop
-         id="stop3004"
-         offset="1"
-         style="stop-color:#6685f0;stop-opacity:0.16470589;" />
-      <stop
-         style="stop-color:#76b9c5;stop-opacity:0.08235294;"
-         offset="1"
-         id="stop3006" />
-      <stop
-         style="stop-color:black;stop-opacity:0;"
-         offset="1"
-         id="stop2980" />
-    </linearGradient>
-    <marker
-       inkscape:stockid="Arrow2Mend"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="Arrow2Mend"
-       style="overflow:visible;">
-      <path
-         id="path2906"
-         style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
-         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
-         transform="scale(0.6) rotate(180) translate(0,0)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutL"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutL"
-       style="overflow:visible">
-      <path
-         id="path2835"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.8)" />
-    </marker>
-    <marker
-       inkscape:stockid="TriangleOutM"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="TriangleOutM"
-       style="overflow:visible">
-      <path
-         id="path2832"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
-         transform="scale(0.4)" />
-    </marker>
-  </defs>
-  <sodipodi:namedview
-     id="base"
-     pagecolor="#ffffff"
-     bordercolor="#666666"
-     borderopacity="1.0"
-     gridtolerance="10000"
-     guidetolerance="10"
-     objecttolerance="10"
-     inkscape:pageopacity="0.0"
-     inkscape:pageshadow="2"
-     inkscape:zoom="0.7"
-     inkscape:cx="375"
-     inkscape:cy="318.4798"
-     inkscape:document-units="px"
-     inkscape:current-layer="layer1"
-     showgrid="true"
-     inkscape:window-width="977"
-     inkscape:window-height="802"
-     inkscape:window-x="166"
-     inkscape:window-y="216"
-     inkscape:grid-points="true"
-     gridempspacing="1" />
-  <metadata
-     id="metadata7">
-    <rdf:RDF>
-      <cc:Work
-         rdf:about="">
-        <dc:format>image/svg+xml</dc:format>
-        <dc:type
-           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
-      </cc:Work>
-    </rdf:RDF>
-  </metadata>
-  <g
-     inkscape:label="Layer 1"
-     inkscape:groupmode="layer"
-     id="layer1">
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Mend)"
-       d="M 120,912.36218 L 640,912.36218"
-       id="path2942" />
-    <path
-       style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Mend)"
-       d="M 120,912.36218 L 120,392.36218"
-       id="path2946" />
-    <path
-       style="fill:#6286ed;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1"
-       d="M 120,912.36218 L 580,912.36218 L 120,452.36218 L 120,912.36218 z "
-       id="path3018" />
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="40"
-       y="952.36218"
-       id="text3929"><tspan
-         sodipodi:role="line"
-         id="tspan3931"
-         x="40"
-         y="952.36218">v0</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="540"
-       y="952.36218"
-       id="text4019"><tspan
-         sodipodi:role="line"
-         id="tspan4021"
-         x="540"
-         y="952.36218">v1</tspan></text>
-    <text
-       xml:space="preserve"
-       style="font-size:24px;font-style:normal;font-weight:normal;fill:black;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
-       x="40"
-       y="432.36218"
-       id="text4023"><tspan
-         sodipodi:role="line"
-         id="tspan4025"
-         x="40"
-         y="432.36218">v2</tspan></text>
-  </g>
-</svg>
diff --git a/ufc-merge-into-ffc/doc/manual/ufc-user-manual.tex b/ufc-merge-into-ffc/doc/manual/ufc-user-manual.tex
deleted file mode 100644
index b4f97e2..0000000
--- a/ufc-merge-into-ffc/doc/manual/ufc-user-manual.tex
+++ /dev/null
@@ -1,35 +0,0 @@
-\documentclass{fenicsmanual}
-
-\begin{document}
-
-\fenicstitle{UFC Specification and User Manual 1.1}
-\fenicsauthor{Martin Sandve Aln\ae{}s, Anders Logg, Kent-Andre Mardal, Ola Skavhaug, and Hans Petter Langtangen}
-\fenicspackage{\textbf{\textsf{UFC}}}{ufc}
-
-\maketitle
-
-\rhead{}
-
-\newcommand{\ufc}{UFC}
-
-\input{chapters/introduction.tex}
-\input{chapters/assembly.tex}
-\input{chapters/interface.tex}
-\input{chapters/referencecells.tex}
-\input{chapters/numbering.tex}
-
-\newpage
-\bibliographystyle{siam}
-\bibliography{bibliography}
-
-\appendix
-
-\input{chapters/interface_cpp.tex}
-\input{chapters/assembly_cpp.tex}
-\input{chapters/examples.tex}
-\input{chapters/pythonutils.tex}
-\input{chapters/installation.tex}
-\input{chapters/versions.tex}
-\input{chapters/license.tex}
-
-\end{document}
diff --git a/ufc-merge-into-ffc/doc/sphinx/README b/ufc-merge-into-ffc/doc/sphinx/README
deleted file mode 100644
index 9447e0d..0000000
--- a/ufc-merge-into-ffc/doc/sphinx/README
+++ /dev/null
@@ -1,3 +0,0 @@
-This file has been moved here from the fenics-web (old fenics-doc)
-repository. It needs to be checked and possible expanded, then integrated
-with the online documentation system.
diff --git a/ufc-merge-into-ffc/doc/sphinx/index.rst b/ufc-merge-into-ffc/doc/sphinx/index.rst
deleted file mode 100644
index 06a6ee5..0000000
--- a/ufc-merge-into-ffc/doc/sphinx/index.rst
+++ /dev/null
@@ -1,104 +0,0 @@
-.. UFC documentation
-
-.. _ufc_introduction:
-
-#################
-UFC documentation
-#################
-
-============
-Introduction
-============
-
-Large parts of a finite element program are similar from problem to
-problem and can therefore be coded as a general, reusable library.
-Mesh data structures, linear algebra and finite element assembly are
-examples of operations that are naturally coded in a
-problem-independent way and made available in reusable
-libraries~\cite{www:fenics,www:petsc,www:sundance,www:deal.II,www:trilinos,www:diffpack}.
-However, some parts of a finite element program are difficult to code
-in a problem-independent way. In particular, this includes the
-evaluation of the \emph{element tensor} (the `element stiffness
-matrix'), that is, the evaluation of the local contribution from a
-finite element to a global sparse tensor (the ``stiffness matrix'')
-representing a discretized differential operator. These parts must
-thus be implemented by the application programmer for each specific
-combination of differential equation and discretization (finite
-element spaces).
-
-\index{form compilers} \index{FFC} \index{SyFi} However,
-domain-specific compilers such as
-FFC~\cite{www:ffc,logg:article:07,logg:article:09,logg:article:10,logg:article:11}
-and SyFi~\cite{www:syfi} make it possible to automatically generate
-the code for the evaluation of the element tensor. These \emph{form
-compilers} accept as input a high-level description of a finite
-element variational form and generate low-level code code for
-efficient evaluation of the element tensor and associated quantities.
-It thus becomes important to specify the \emph{interface} between form
-compilers and finite element assemblers such that the code generated
-by FFC, SyFi and other form compilers can be used to assemble finite
-element matrices and vectors (and in general tensors).
-
-
-Unified Form-assembly Code
-==========================
-
-UFC (Unified Form-assembly Code) is a unified framework for finite
-element assembly. More precisely, it defines a fixed interface for
-communicating low level routines (functions) for evaluating and
-assembling finite element variational forms. The UFC interface
-consists of a single header file \texttt{ufc.h} that specifies a C++
-interface that must be implemented by code that complies with the UFC
-specification.
-
-Both FFC (since version 0.4.0) and SyFi (since version 0.3.4)
-generate code that complies with the UFC specification. Thus, code
-generated by FFC and SyFi may be used interchangeably by any UFC-based
-finite element assembler, such as DOLFIN~\cite{www:dolfin}.
-
-
-Aim and scope
-=============
-
-The UFC interface has been designed to make a minimal amount of
-assumptions on the form compilers generating the UFC code and the
-assemblers built on top of the UFC specification. Thus, the UFC
-specification provides a minimal amount of abstractions and data
-structures. Programmers wishing to implement the UFC specification
-will typically want to create system-specific (but simple) wrappers
-for the generated code.
-
-Few assumptions have also been made on the underlying finite element
-methodology. The current specification is limited to affinely mapped
-cells, but does not restrict the mapping of finite element function
-spaces. Thus, UFC code may be generated for elements where basis
-functions are transformed from the reference cell by the affine
-mapping, as well as for elements where the basis functions must be
-transformed by the Piola mapping. UFC code has been successfully
-generated and used in finite element codes for standard continuous
-Galerkin methods (Lagrange finite elements of arbitrary order),
-discontinuous Galerkin methods (including integrals of jumps and
-averages over interior facets) and mixed methods (including
-Brezzi--Douglas--Marini and Raviart--Thomas elements).
-
-Outline
-=======
-
-In the next section, we give an overview of finite element assembly
-and explain how the code generated by form compilers may be used as
-the basic building blocks in the assembly algorithm. We then present
-the UFC interface in detail in Section~\ref{sec:interface}. In
-Section~\ref{sec:referencecells} and Section~\ref{sec:numbering}, we
-define the reference cells and numbering conventions that must be
-followed by UFC-based form compilers and assemblers.
-
-
-
-=======================
-Finite element assembly
-=======================
-
-In this section, we present a general algorithm for assembly of finite
-element variational forms and define the concepts that the UFC interface
-is based on.
-
diff --git a/ufc/__init__.py b/ufc/__init__.py
deleted file mode 100644
index dd79317..0000000
--- a/ufc/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-__author__ = "Johan Hake (hake.dev at gmail.com)"
-__copyright__ = "Copyright (C) 2010-2015 Johan Hake"
-__date__ = "2010-08-19 -- 2015-02-26"
-__license__  = "Released to the public domain"
-
-# Modified by Anders Logg 2015
diff --git a/ufc_benchmark/Makefile b/ufc_benchmark/Makefile
deleted file mode 100644
index 5e287c5..0000000
--- a/ufc_benchmark/Makefile
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# An overly complicated makefile for compiling a swig module.
-#
-
-MODULENAME=ufc_benchmark
-
-CXX=g++
-
-ufcinclude=-I../../../ufc
-
-# Python location and version
-PYTHONROOT:=$(shell python -c 'import sys; print sys.prefix')
-PYTHONVER:=$(shell python -c 'import sys; print sys.version[:3]')
-
-
-all: _$(MODULENAME).so
-	echo Done.
-
-# link module
-_$(MODULENAME).so: $(MODULENAME)_wrap.o $(MODULENAME).o
-	$(CXX) -shared -o _$(MODULENAME).so $(MODULENAME)_wrap.o $(MODULENAME).o
-
-# compile wrapper
-$(MODULENAME)_wrap.o: $(MODULENAME)_wrap.cxx
-	$(CXX) -I$(PYTHONROOT)/include/python$(PYTHONVER) -o $@ -c $<
-
-# generate wrapper
-$(MODULENAME)_wrap.cxx: $(MODULENAME).i $(MODULENAME).h
-	swig -c++ -python $(ufcinclude) $(MODULENAME).i
-
-# compile module code
-$(MODULENAME).o: *.h *.cpp
-	$(CXX) -c -o $(MODULENAME).o $(MODULENAME).cpp
-
-clean:
-	rm -f $(MODULENAME).o
-	rm -f $(MODULENAME).py
-	rm -f $(MODULENAME).pyc
-	rm -f _$(MODULENAME).so
-	rm -f $(MODULENAME)_wrap.cxx
-	rm -f $(MODULENAME)_wrap.o
-	rm -rf build
diff --git a/ufc_benchmark/setup.py b/ufc_benchmark/setup.py
deleted file mode 100644
index 8cf7fcd..0000000
--- a/ufc_benchmark/setup.py
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python
-from distutils.core import setup
-from distutils.core import Extension
-import os
-
-# the buggy swig-support in distutils doesn't manage to invoke g++, uses gcc...
-os.system("make ufc_benchmark_wrap.cxx")
-extension = Extension('_ufc_benchmark', ['ufc_benchmark.cpp', 'ufc_benchmark_wrap.cxx'], language="c++", include_dirs=["../../../ufc"])
-
-setup(### metadata:
-      name              = 'ufc_benchmark',
-      version           = '1.1.2',
-      author            = 'Martin Sandve Alnes',
-      author_email      = 'martinal at simula.no',
-      maintainer        = 'Martin Sandve Alnes',
-      maintainer_email  = 'martinal at simula.no',
-      url               = 'http://www.fenicsproject.org',
-      description       = 'Benchmark utility for UFC implementations.',
-      download_url      = 'https://bitbucket/fenics-project/ufc',
-      ### contents:
-      py_modules   = ['ufc_benchmark'],
-      ext_modules  = [extension],
-      )
diff --git a/ufc_benchmark/ufc_benchmark.cpp b/ufc_benchmark/ufc_benchmark.cpp
deleted file mode 100644
index ac56712..0000000
--- a/ufc_benchmark/ufc_benchmark.cpp
+++ /dev/null
@@ -1,372 +0,0 @@
-// This is utility code for UFC (Unified Form-assembly Code).
-// This code is released into the public domain.
-//
-// The FEniCS Project (http://www.fenicsproject.org/) 2006-2015.
-
-#include <iostream>
-#include <vector>
-using std::cout;
-using std::endl;
-using std::vector;
-
-#include <ctime>
-#define TMIN 3.0
-#define MMIN 1000
-
-#include "ufc_data.h"
-#include "ufc_reference_cell.h"
-#include "ufc_benchmark.h"
-
-clock_t __tic_time;
-
-void tic()
-{
-  __tic_time = clock();
-}
-
-double toc()
-{
-  clock_t __toc_time = clock();
-  double elapsed_time = ((double) (__toc_time - __tic_time)) / CLOCKS_PER_SEC;
-  return elapsed_time;
-}
-
-// Adaptive timing: make sure we run for at least TMIN to get reliable results
-double time_tabulate_tensor(ufc::cell_integral& integral, double *A, const double * const * w, const ufc::cell & c)
-{
-  std::size_t M = MMIN;
-  while ( true )
-  {
-    tic();
-    for (std::size_t i = 0; i < M; i++)
-    {
-      integral.tabulate_tensor(A, w, c);
-    }
-    double t = toc();
-    if ( t >= TMIN )
-      return t / static_cast<double>(M);
-    M *= 10;
-    cout << "Elapsed time too short, increasing number of iterations to" << M << endl;
-  }
-
-  return 0.0;
-}
-
-// Adaptive timing: make sure we run for at least TMIN to get reliable results
-double time_tabulate_tensor(ufc::exterior_facet_integral& integral, double *A, const double * const * w, const ufc::cell & c, std::size_t facet)
-{
-  std::size_t M = MMIN;
-  while ( true )
-  {
-    tic();
-    for (std::size_t i = 0; i < M; i++)
-    {
-      integral.tabulate_tensor(A, w, c, facet);
-    }
-    double t = toc();
-    if ( t >= TMIN )
-      return t / static_cast<double>(M);
-    M *= 10;
-    cout << "Elapsed time too short, increasing number of iterations to" << M << endl;
-  }
-
-  return 0.0;
-}
-
-// Benchmark all integrals of a form.
-vector< vector<double> > benchmark(const ufc::form & form, bool print_tensor)
-{
-    // construct and allocate some stuff
-    ufc::ufc_data data(form);
-
-    // create a reference cell geometry
-    ufc::reference_cell c(data.elements[0]->cell_shape());
-
-    // data structures for times
-    vector<double> cell_times(form.num_cell_domains());
-    vector<double> exterior_facet_times(form.num_exterior_facet_domains());
-    vector<double> interior_facet_times(form.num_interior_facet_domains());
-
-    // benchmark all cell integrals
-    for(unsigned i = 0; i < form.num_cell_domains(); i++)
-    {
-        cell_times[i] = time_tabulate_tensor(*data.cell_domains[i], data.A, data.w, c);
-
-        if(print_tensor)
-        {
-          cout << "Cell element tensor " << i << ":" << endl;
-          data.print_tensor();
-          cout << endl;
-        }
-    }
-
-    // benchmark all exterior facet integrals
-    for(unsigned i = 0; i < form.num_exterior_facet_domains(); i++)
-    {
-        std::size_t facet = 0; // TODO: would it be interesting to time all facets?
-        exterior_facet_times[i] = time_tabulate_tensor(*data.exterior_facet_domains[i], data.A, data.w, c, facet);
-
-        if(print_tensor)
-        {
-          cout << "Exterior facet element tensor " << i << ":" << endl;
-          data.print_tensor();
-          cout << endl;
-        }
-    }
-
-    // benchmark all interior facet integrals
-    /* // TODO: If somebody needs this, please implement it! Need two cells, and larger A.
-    for(unsigned i = 0; i < form.num_interior_facet_domains(); i++)
-    {
-        std::size_t facet = 0; // TODO: would it be interesting to time all facets?
-        interior_facet_times[i] = time_tabulate_tensor(*data.interior_facet_domains[i], data.A, data.w, c, facet);
-
-        if(print_tensor)
-        {
-          cout << "Interior facet element tensor " << i << ":" << endl;
-          data.print_tensor();
-          cout << endl;
-        }
-    }
-    */
-
-    vector< vector<double> > result(3);
-    result[0] = cell_times;
-    result[1] = exterior_facet_times;
-    result[2] = interior_facet_times;
-
-    return result;
-}
-
-
-vector< vector<double> > tabulate_cell_tensor(const ufc::form & form, vector< vector<double> > w, int domain)
-{
-  ufc::ufc_data data(form);
-
-  // copy w to the appropriate array
-  if(data.num_coefficients != w.size())
-      throw std::runtime_error("Wrong number of coefficients");
-  for(unsigned i=0; i<data.num_coefficients; i++)
-  {
-    if(data.dimensions[data.rank+i] != w[i].size())
-        throw std::runtime_error("Wrong coefficient dimension.");
-    for(unsigned j=0; j<data.dimensions[data.rank+i]; j++)
-    {
-      data.w[i][j] = w[i][j];
-    }
-  }
-
-  // create a reference cell geometry
-  ufc::reference_cell c(data.elements[0]->cell_shape());
-
-  // tabulate the tensor
-  data.cell_integrals[domain]->tabulate_tensor(data.A, data.w, c);
-
-  // copy element tensor to stl-structure for easy returning to python (should perhaps rather use numpy and some typemaps, but I'm lazy)
-  vector< vector<double> > A;
-  if(data.rank == 2)
-  {
-    A.resize(data.dimensions[0]);
-    for(unsigned i=0; i<data.dimensions[0]; i++)
-    {
-      A[i].resize(data.dimensions[1]);
-      for(unsigned j=0; j<data.dimensions[1]; j++)
-      {
-        A[i][j] = data.A[i*data.dimensions[1] + j];
-      }
-    }
-  }
-  else if(data.rank == 1)
-  {
-    A.resize(data.dimensions[0]);
-    for(unsigned i=0; i<data.dimensions[0]; i++)
-    {
-      A[i].resize(1);
-      A[i][0] = data.A[i];
-    }
-  }
-  else if(data.rank == 0)
-  {
-    A.resize(1);
-    A[0].resize(1);
-    A[0][0] = data.A[0];
-  }
-  else
-  {
-    throw std::runtime_error("rank != 0,1,2 not implemented");
-  }
-
-  return A;
-}
-
-std::vector< std::vector<double> > tabulate_cell_integral(const std::shared_ptr<ufc::form> form, std::vector< std::vector<double> > w, ufc::cell cell, int domain)
-{
-  ufc::ufc_data data(*form);
-
-  // copy w to the appropriate array
-  if(data.num_coefficients != w.size())
-      throw std::runtime_error("Wrong number of coefficients");
-  for(unsigned i=0; i<data.num_coefficients; i++)
-  {
-    if(data.dimensions[data.rank+i] != w[i].size())
-        throw std::runtime_error("Wrong coefficient dimension.");
-    for(unsigned j=0; j<data.dimensions[data.rank+i]; j++)
-    {
-      data.w[i][j] = w[i][j];
-    }
-  }
-
-  // tabulate the tensor
-  data.cell_integrals[domain]->tabulate_tensor(data.A, data.w, cell);
-
-  // copy element tensor to stl-structure for easy returning to python (should perhaps rather use numpy and some typemaps, but I'm lazy)
-  vector< vector<double> > A;
-  if(data.rank == 2)
-  {
-    A.resize(data.dimensions[0]);
-    for(unsigned i=0; i<data.dimensions[0]; i++)
-    {
-      A[i].resize(data.dimensions[1]);
-      for(unsigned j=0; j<data.dimensions[1]; j++)
-      {
-        A[i][j] = data.A[i*data.dimensions[1] + j];
-      }
-    }
-  }
-  else if(data.rank == 1)
-  {
-    A.resize(data.dimensions[0]);
-    for(unsigned i=0; i<data.dimensions[0]; i++)
-    {
-      A[i].resize(1);
-      A[i][0] = data.A[i];
-    }
-  }
-  else if(data.rank == 0)
-  {
-    A.resize(1);
-    A[0].resize(1);
-    A[0][0] = data.A[0];
-  }
-  else
-  {
-    throw std::runtime_error("rank != 0,1,2 not implemented");
-  }
-
-  return A;
-}
-
-std::vector< std::vector<double> > tabulate_exterior_facet_integral(const std::shared_ptr<ufc::form> form, std::vector< std::vector<double> > w, ufc::cell& cell, int facet, int domain)
-{
-  ufc::ufc_data data(*form);
-
-  // copy w to the appropriate array
-  if(data.num_coefficients != w.size())
-      throw std::runtime_error("Wrong number of coefficients");
-  for(unsigned i=0; i<data.num_coefficients; i++)
-  {
-    if(data.dimensions[data.rank+i] != w[i].size())
-        throw std::runtime_error("Wrong coefficient dimension.");
-    for(unsigned j=0; j<data.dimensions[data.rank+i]; j++)
-    {
-      data.w[i][j] = w[i][j];
-    }
-  }
-
-  // tabulate the tensor
-  data.exterior_facet_integrals[domain]->tabulate_tensor(data.A, data.w, cell, facet);
-
-  // copy element tensor to stl-structure for easy returning to python (should perhaps rather use numpy and some typemaps, but I'm lazy)
-  vector< vector<double> > A;
-  if(data.rank == 2)
-  {
-    A.resize(data.dimensions[0]);
-    for(unsigned i=0; i<data.dimensions[0]; i++)
-    {
-      A[i].resize(data.dimensions[1]);
-      for(unsigned j=0; j<data.dimensions[1]; j++)
-      {
-        A[i][j] = data.A[i*data.dimensions[1] + j];
-      }
-    }
-  }
-  else if(data.rank == 1)
-  {
-    A.resize(data.dimensions[0]);
-    for(unsigned i=0; i<data.dimensions[0]; i++)
-    {
-      A[i].resize(1);
-      A[i][0] = data.A[i];
-    }
-  }
-  else if(data.rank == 0)
-  {
-    A.resize(1);
-    A[0].resize(1);
-    A[0][0] = data.A[0];
-  }
-  else
-  {
-    throw std::runtime_error("rank != 0,1,2 not implemented");
-  }
-
-  return A;
-}
-
-std::vector< std::vector<double> > tabulate_interior_facet_integral(const std::shared_ptr<ufc::form> form, std::vector< std::vector<double> > macro_w,\
-                                                                    ufc::cell& cell0, ufc::cell& cell1, int facet_0, int facet_1, int domain)
-{
-  ufc::ufc_data data(*form);
-
-  // copy w to the appropriate array
-  if(data.num_coefficients != macro_w.size())
-      throw std::runtime_error("Wrong number of coefficients");
-  for(unsigned i=0; i<data.num_coefficients; i++)
-  {
-    if(2*data.dimensions[data.rank+i] != macro_w[i].size())
-        throw std::runtime_error("Wrong coefficient dimension.");
-    for(unsigned j=0; j<2*data.dimensions[data.rank+i]; j++)
-    {
-      data.macro_w[i][j] = macro_w[i][j];
-    }
-  }
-
-  // tabulate the tensor
-  data.interior_facet_integrals[domain]->tabulate_tensor(data.macro_A, data.macro_w, cell0, cell1, facet_0, facet_1);
-
-  // copy element tensor to stl-structure for easy returning to python (should perhaps rather use numpy and some typemaps, but I'm lazy)
-  vector< vector<double> > A;
-  if(data.rank == 2)
-  {
-    A.resize(2*data.dimensions[0]);
-    for(unsigned i=0; i<2*data.dimensions[0]; i++)
-    {
-      A[i].resize(2*data.dimensions[1]);
-      for(unsigned j=0; j<2*data.dimensions[1]; j++)
-      {
-        A[i][j] = data.macro_A[i*2*data.dimensions[1] + j];
-      }
-    }
-  }
-  else if(data.rank == 1)
-  {
-    A.resize(2*data.dimensions[0]);
-    for(unsigned i=0; i<2*data.dimensions[0]; i++)
-    {
-      A[i].resize(1);
-      A[i][0] = data.macro_A[i];
-    }
-  }
-  else if(data.rank == 0)
-  {
-    A.resize(1);
-    A[0].resize(1);
-    A[0][0] = data.macro_A[0];
-  }
-  else
-  {
-    throw std::runtime_error("rank != 0,1,2 not implemented");
-  }
-
-  return A;
-}
diff --git a/ufc_benchmark/ufc_benchmark.h b/ufc_benchmark/ufc_benchmark.h
deleted file mode 100644
index 9990c5e..0000000
--- a/ufc_benchmark/ufc_benchmark.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// This is utility code for UFC (Unified Form-assembly Code).
-// This code is released into the public domain.
-//
-// The FEniCS Project (http://www.fenicsproject.org/) 2006-2015.
-
-#ifndef __UFC_BENCHMARK_H__
-#define __UFC_BENCHMARK_H__
-
-#include "ufc.h"
-#include <memory>
-#include <vector>
-
-/* Benchmark time to run tabulate_tensor for all integrals in a form. *
- * Uses a reference cell and one-cell mesh, and sets all w_ij = 1.0.  */
-std::vector< std::vector<double> > benchmark(const ufc::form & form,
-                                             bool print_tensors);
-
-/* Compute one element tensor on the reference cell with the given coefficients. */
-std::vector< std::vector<double> > tabulate_cell_tensor(const ufc::form & form,
-                                                        std::vector< std::vector<double> > w,
-                                                        int domain);
-
-/* Compute one cell integral. */
-std::vector< std::vector<double> > tabulate_cell_integral(const std::shared_ptr<ufc::form> form,
-                                                          std::vector< std::vector<double> > w,
-                                                          ufc::cell cell,
-                                                          int domain);
-
-/* Compute one exterior facet integral. */
-std::vector< std::vector<double> > tabulate_exterior_facet_integral(const std::shared_ptr<ufc::form> form,
-                                                                    std::vector< std::vector<double> > w,
-                                                                    ufc::cell& cell,
-                                                                    int facet,
-                                                                    int domain);
-
-/* Compute one interior facet integral. */
-std::vector< std::vector<double> > tabulate_interior_facet_integral(const std::shared_ptr<ufc::form> form,
-                                                                    std::vector< std::vector<double> > macro_w,
-                                                                    ufc::cell& cell0,
-                                                                    ufc::cell& cell1,
-                                                                    int facet_0,
-                                                                    int facet_1,
-                                                                    int domain);
-
-#endif
diff --git a/ufc_benchmark/ufc_benchmark.i b/ufc_benchmark/ufc_benchmark.i
deleted file mode 100644
index 2891744..0000000
--- a/ufc_benchmark/ufc_benchmark.i
+++ /dev/null
@@ -1,62 +0,0 @@
-%module ufc_benchmark
-
-// ------------------------ STL stuff
-
-%{
-#include <vector>
-%}
-
-%include stl.i
-%include std_vector.i
-%include std_carray.i
-
-%template(vector_double)     std::vector<double>;
-%typedef std::vector<double> vector_double;
-
-%template(vector_vector_double)             std::vector< std::vector<double> >;
-%typedef std::vector< std::vector<double> > vector_vector_double;
-
-%template(vector_std_t) std::vector< std::size_t >;
-%typedef std::vector< std::size_t > vector_size_t;
-
-%template(vector_vector_size_t) std::vector< std::vector< std::size_t > >;
-%typedef std::vector< std::vector< std::size_t > > vector_vector_size_t;
-
-// ------------------------ UFC stuff
-
-%import ufc.i
-
-%{
-#include "ufc.h"
-#include "ufc_benchmark.h"
-#include "ufc_reference_cell.h"
-%}
-
-%include "ufc.h"
-%include "ufc_benchmark.h"
-%include "ufc_reference_cell.h"
-
-// ----------------------- Reference to shared pointer utility
-
-%{
-class NoDeleter { public: void operator()(ufc::form *) {} };
-std::shared_ptr<ufc::form> form_ptr(ufc::form * form) { return std::shared_ptr<ufc::form>(form, NoDeleter()); }
-%}
-class NoDeleter { public: void operator()(ufc::form *) {} };
-std::shared_ptr<ufc::form> form_ptr(ufc::form * form) { return std::shared_ptr<ufc::form>(form, NoDeleter()); }
-
-// ----------------------- Python wrapper for benchmark
-
-%pythoncode{
-
-def benchmark_forms(forms, print_tensors):
-    import gc
-    gc.collect()
-
-    times = []
-    for f in forms:
-        res = benchmark(f, print_tensors)
-        times.append(tuple(res))
-    return times
-
-}
diff --git a/ufc_benchmark/ufc_data.h b/ufc_benchmark/ufc_data.h
deleted file mode 100644
index a004f4f..0000000
--- a/ufc_benchmark/ufc_data.h
+++ /dev/null
@@ -1,184 +0,0 @@
-// This is utility code for UFC (Unified Form-assembly Code).
-// This code is released into the public domain.
-//
-// The FEniCS Project (http://www.fenicsproject.org/) 2006-2015.
-
-#ifndef __UFC_DATA_H__
-#define __UFC_DATA_H__
-
-#include <ufc.h>
-#include <vector>
-#include <stdexcept>
-#include <cstring>
-
-namespace ufc
-{
-
-  class ufc_data
-  {
-  public:
-
-    ufc_data(const ufc::form & form):
-      form(form)
-    {
-      // short forms of dimensions
-      rank             = form.rank();
-      num_coefficients = form.num_coefficients();
-      num_arguments    = rank + num_coefficients;
-
-      // construct all dofmaps and elements
-      dofmaps.resize(num_arguments);
-      elements.resize(num_arguments);
-      dimensions = new unsigned[num_arguments];
-
-      for(unsigned i=0; i<num_arguments; i++)
-      {
-        dofmaps[i]    = form.create_dofmap(i);
-        elements[i]   = form.create_finite_element(i);
-        dimensions[i] = dofmaps[i]->max_local_dimension();
-
-        if(dimensions[i] != elements[i]->space_dimension())
-          throw std::runtime_error("Mismatching dimensions between finite_elements and dofmaps!");
-
-        if(elements[0]->cell_shape() != elements[i]->cell_shape())
-          throw std::runtime_error("Mismatching cell shapes in elements!");
-      }
-
-      // construct all integral objects
-      cell_integrals.resize(form.num_cell_domains());
-      for(unsigned i=0; i<form.num_cell_domains(); i++)
-      {
-        cell_integrals[i] = form.create_cell_integral(i);
-      }
-      exterior_facet_integrals.resize(form.num_exterior_facet_domains());
-      for(unsigned i=0; i<form.num_exterior_facet_domains(); i++)
-      {
-        exterior_facet_integrals[i] = form.create_exterior_facet_integral(i);
-      }
-      interior_facet_integrals.resize(form.num_interior_facet_domains());
-      for(unsigned i=0; i<form.num_interior_facet_domains(); i++)
-      {
-        interior_facet_integrals[i] = form.create_interior_facet_integral(i);
-      }
-
-      // compute size of element tensor A
-      A_size = 1;
-      for(unsigned i=0; i<rank; i++)
-      {
-        A_size *= dimensions[i];
-      }
-
-      // allocate space for element tensor A
-      A = new double[A_size];
-
-      // Initialize local tensor for macro element
-      A_size = 1;
-      for (unsigned i = 0; i < form.rank(); i++)
-        A_size *= 2*dimensions[i];
-      macro_A = new double[A_size];
-
-      // allocate space for local coefficient data
-      w = new double*[num_coefficients];
-      for(unsigned i=0; i<num_coefficients; i++)
-      {
-        unsigned dim = dimensions[i+rank];
-        w[i] = new double[dim];
-        memset(w[i], 0, sizeof(double)*dim);
-      }
-
-      // allocate space for local macro coefficient data
-      macro_w = new double*[num_coefficients];
-      for(unsigned i=0; i<num_coefficients; i++)
-      {
-        unsigned dim = 2*dimensions[i+rank];
-        macro_w[i] = new double[dim];
-        memset(macro_w[i], 0, sizeof(double)*dim);
-      }
-    }
-
-    ~ufc_data()
-    {
-      for(unsigned i=0; i<num_arguments; i++)
-        delete dofmaps[i];
-
-      for(unsigned i=0; i<num_arguments; i++)
-        delete elements[i];
-
-      delete [] dimensions;
-
-      for(unsigned i=0; i<form.num_cell_domains(); i++)
-        delete cell_integrals[i];
-
-      for(unsigned i=0; i<form.num_exterior_facet_domains(); i++)
-        delete exterior_facet_integrals[i];
-
-      for(unsigned i=0; i<form.num_interior_facet_domains(); i++)
-        delete interior_facet_integrals[i];
-
-      for(unsigned i=0; i<num_coefficients; i++)
-        delete [] w[i];
-      delete [] w;
-
-      for(unsigned i=0; i<num_coefficients; i++)
-        delete [] macro_w[i];
-      delete [] macro_w;
-
-      delete [] A;
-      delete [] macro_A;
-    }
-
-    const ufc::form & form;
-
-    vector< ufc::dofmap * > dofmaps;
-    vector< ufc::finite_element * > elements;
-
-    vector< ufc::cell_integral *> cell_integrals;
-    vector< ufc::exterior_facet_integral *> exterior_facet_integrals;
-    vector< ufc::interior_facet_integral *> interior_facet_integrals;
-
-    unsigned rank;
-    unsigned num_coefficients;
-    unsigned num_arguments;
-    unsigned A_size;
-
-    unsigned * dimensions;
-    double *  A;
-    double *  macro_A;
-    double ** w;
-    double ** macro_w;
-
-
-    void print_tensor()
-    {
-      int dim0 = 1;
-      int dim1 = 1;
-
-      if(rank == 1)
-      {
-          dim1 = dimensions[0];
-      }
-      if(rank == 2)
-      {
-          dim0 = dimensions[0];
-          dim1 = dimensions[1];
-      }
-
-      cout << "[" << endl;
-      int k=0;
-      for(int ii=0; ii<dim0; ii++)
-      {
-        for(int jj=0; jj<dim1; jj++)
-        {
-          cout << A[k++] << ", ";
-        }
-        cout << endl;
-      }
-      cout << "]" << endl;
-      cout << endl;
-    }
-  };
-
-}
-
-#endif
-
diff --git a/ufc_benchmark/ufc_reference_cell.h b/ufc_benchmark/ufc_reference_cell.h
deleted file mode 100644
index 9ea650b..0000000
--- a/ufc_benchmark/ufc_reference_cell.h
+++ /dev/null
@@ -1,294 +0,0 @@
-// This is utility code for UFC (Unified Form-assembly Code).
-// This code is released into the public domain.
-//
-// The FEniCS Project (http://www.fenicsproject.org/) 2006-2015.
-
-#ifndef __UFC_REFERENCE_CELL_H
-#define __UFC_REFERENCE_CELL_H
-
-#include "ufc.h"
-#include <cstddef>
-#include <stdexcept>
-
-namespace ufc
-{
-
-    /// Description of a reference cell, for debugging and testing UFC code.
-    class reference_cell: public ufc::cell
-    {
-    public:
-
-        /// Constructor
-        reference_cell(ufc::shape s)
-        {
-            cell_shape = s;
-
-            num_entities[0] = 0;
-            num_entities[1] = 0;
-            num_entities[2] = 0;
-            num_entities[3] = 0;
-
-            // Get topological dimension and number of entities in a cell of this type.
-            switch(s)
-            {
-            case interval:      topological_dimension = 1; num_entities[0] = 2; num_entities[1] = 1;  break;
-            case triangle:      topological_dimension = 2; num_entities[0] = 3; num_entities[1] = 3;  num_entities[2] = 1; break;
-            case quadrilateral: topological_dimension = 2; num_entities[0] = 4; num_entities[1] = 4;  num_entities[2] = 1; break;
-            case tetrahedron:   topological_dimension = 3; num_entities[0] = 4; num_entities[1] = 6;  num_entities[2] = 4; num_entities[3] = 1; break;
-            case hexahedron:    topological_dimension = 3; num_entities[0] = 8; num_entities[1] = 12; num_entities[2] = 6; num_entities[3] = 1; break;
-            default: throw std::runtime_error("Invalid shape.");
-            }
-
-            // Assume same geometric dimension.
-            geometric_dimension = topological_dimension;
-
-            // Fill global indices like we had a single-cell mesh.
-            entity_indices = new std::size_t*[topological_dimension+1];
-            for(std::size_t i = 0; i <= topological_dimension; i++)
-            {
-                entity_indices[i] = new std::size_t[num_entities[i]];
-                for(std::size_t j = 0; j < num_entities[i]; j++)
-                {
-                    entity_indices[i][j] = j;
-                }
-            }
-
-            // Allocate an empty array of vertex coordinates.
-            coordinates = new double*[num_entities[0]];
-            for(std::size_t i = 0; i < num_entities[0]; i++)
-            {
-                coordinates[i] = new double[geometric_dimension];
-                for(std::size_t j = 0; j < geometric_dimension; j++)
-                {
-                    coordinates[i][j] = 0.0;
-                }
-            }
-
-            // Fill coordinates with reference cell definition.
-            switch(s)
-            {
-            case interval:
-                coordinates[0][0] = 0.0;
-                coordinates[1][0] = 1.0;
-                break;
-
-            case triangle:
-                coordinates[0][0] = 0.0;
-                coordinates[0][1] = 0.0;
-
-                coordinates[1][0] = 1.0;
-                coordinates[1][1] = 0.0;
-
-                coordinates[2][0] = 0.0;
-                coordinates[2][1] = 1.0;
-                break;
-
-            case quadrilateral:
-                coordinates[0][0] = 0.0;
-                coordinates[0][1] = 0.0;
-
-                coordinates[1][0] = 1.0;
-                coordinates[1][1] = 0.0;
-
-                coordinates[2][0] = 1.0;
-                coordinates[2][1] = 1.0;
-
-                coordinates[3][0] = 0.0;
-                coordinates[3][1] = 1.0;
-                break;
-
-            case tetrahedron:
-                coordinates[0][0] = 0.0;
-                coordinates[0][1] = 0.0;
-                coordinates[0][2] = 0.0;
-
-                coordinates[1][0] = 1.0;
-                coordinates[1][1] = 0.0;
-                coordinates[1][2] = 0.0;
-
-                coordinates[2][0] = 0.0;
-                coordinates[2][1] = 1.0;
-                coordinates[2][2] = 0.0;
-
-                coordinates[3][0] = 0.0;
-                coordinates[3][1] = 0.0;
-                coordinates[3][2] = 1.0;
-                break;
-
-            case hexahedron:
-                coordinates[0][0] = 0.0;
-                coordinates[0][1] = 0.0;
-                coordinates[0][2] = 0.0;
-
-                coordinates[1][0] = 1.0;
-                coordinates[1][1] = 0.0;
-                coordinates[1][2] = 0.0;
-
-                coordinates[2][0] = 1.0;
-                coordinates[2][1] = 1.0;
-                coordinates[2][2] = 0.0;
-
-                coordinates[3][0] = 0.0;
-                coordinates[3][1] = 1.0;
-                coordinates[3][2] = 0.0;
-
-                coordinates[4][0] = 0.0;
-                coordinates[4][1] = 0.0;
-                coordinates[4][2] = 1.0;
-
-                coordinates[5][0] = 1.0;
-                coordinates[5][1] = 0.0;
-                coordinates[5][2] = 1.0;
-
-                coordinates[6][0] = 1.0;
-                coordinates[6][1] = 1.0;
-                coordinates[6][2] = 1.0;
-
-                coordinates[7][0] = 0.0;
-                coordinates[7][1] = 1.0;
-                coordinates[7][2] = 1.0;
-                break;
-            }
-        }
-
-        /// Destructor
-        virtual ~reference_cell()
-        {
-            for(std::size_t i = 0; i <= topological_dimension; i++)
-            {
-                delete [] entity_indices[i];
-            }
-            delete [] entity_indices;
-
-            for(std::size_t i = 0; i < num_entities[0]; i++)
-            {
-                delete [] coordinates[i];
-            }
-            delete [] coordinates;
-        }
-
-        /// The number of entities of a particular dimension
-        std::size_t num_entities[4];
-
-    };
-
-    /// Description of a reference cell, for debugging and testing UFC code.
-    class Cell: public ufc::cell
-    {
-    public:
-
-        /// Constructor
-        Cell(std::size_t top, std::size_t geo, std::vector< std::vector<double> > coords, std::vector< std::size_t> num_ents): ufc::cell(), num_entities(num_ents)
-        {
-            topological_dimension = top;
-            geometric_dimension   = geo;
-            num_entities[0] = coords.size();
-
-            // Fill global indices
-//            entity_indices = new std::size_t*[topological_dimension+1];
-//            for(std::size_t i = 0; i <= topological_dimension; i++)
-//            {
-//                entity_indices[i] = new std::size_t[num_entities[i]];
-//                for(std::size_t j = 0; j < num_entities[i]; j++)
-//                {
-//                    entity_indices[i][j] = j;
-//                }
-//            }
-
-            for(std::size_t i = 0; i < num_ents.size(); i++)
-              num_entities[i] = num_ents[i];
-
-            // Allocate an empty array of vertex coordinates.
-            coordinates = new double*[coords.size()];
-            for(std::size_t i = 0; i < coords.size(); i++)
-            {
-                coordinates[i] = new double[geometric_dimension];
-                for(std::size_t j = 0; j < geometric_dimension; j++)
-                {
-                    coordinates[i][j] = coords[i][j];
-                }
-            }
-
-        }
-
-        /// Destructor
-        virtual ~Cell()
-        {
-//            for(std::size_t i = 0; i <= topological_dimension; i++)
-//            {
-//                delete [] entity_indices[i];
-//            }
-//            delete [] entity_indices;
-
-            for(std::size_t i = 0; i < num_entities[0]; i++)
-            {
-                delete [] coordinates[i];
-            }
-            delete [] coordinates;
-        }
-
-        /// The number of entities of a particular dimension
-        std::vector<std::size_t> num_entities;
-    };
-
-
-    /// Consistent data for a mesh consisting of a single reference cell, for debugging and testing UFC code.
-    class reference_mesh: public ufc::mesh
-    {
-    public:
-
-        /// Constructor
-        reference_mesh(ufc::shape s):
-            c(s)
-        {
-            topological_dimension = c.topological_dimension;
-            geometric_dimension   = c.geometric_dimension;
-
-            // Set global number of entities of each topological dimension to that of a single cell.
-            num_entities = new std::size_t[topological_dimension+1];
-            for(std::size_t i = 0; i <= topological_dimension; i++)
-            {
-                num_entities[i] = c.num_entities[i];
-            }
-        }
-
-        /// Destructor
-        virtual ~reference_mesh()
-        {
-            delete [] num_entities;
-        }
-
-        /// A reference cell, the only cell in this mesh.
-        reference_cell c;
-
-    };
-
-    /// Consistent data for a mesh consisting of a single reference cell, for debugging and testing UFC code.
-    class Mesh: public ufc::mesh
-    {
-    public:
-
-        /// Constructor
-        Mesh(std::size_t top, std::size_t geo, std::vector<std::size_t> ents)//: ufc::mesh()
-        {
-            topological_dimension = top;
-            geometric_dimension   = geo;
-
-            // Set global number of entities of each topological dimension to that of a single cell.
-            num_entities = new std::size_t[topological_dimension+1];
-            for(std::size_t i = 0; i <= topological_dimension; i++)
-            {
-                num_entities[i] = ents[i];
-            }
-        }
-
-        /// Destructor
-        virtual ~Mesh()
-        {
-            delete [] num_entities;
-        }
-    };
-
-}
-
-#endif
diff --git a/uflacs-merge-into-ffc/COPYING b/uflacs-merge-into-ffc/COPYING
deleted file mode 100644
index 94a9ed0..0000000
--- a/uflacs-merge-into-ffc/COPYING
+++ /dev/null
@@ -1,674 +0,0 @@
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-                            Preamble
-
-  The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
-  The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works.  By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users.  We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors.  You can apply it to
-your programs, too.
-
-  When we speak of free software, we are referring to freedom, not
-price.  Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
-  To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights.  Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received.  You must make sure that they, too, receive
-or can get the source code.  And you must show them these terms so they
-know their rights.
-
-  Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
-  For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software.  For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
-  Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so.  This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software.  The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable.  Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products.  If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
-  Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary.  To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.
-
-                       TERMS AND CONDITIONS
-
-  0. Definitions.
-
-  "This License" refers to version 3 of the GNU General Public License.
-
-  "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
-  "The Program" refers to any copyrightable work licensed under this
-License.  Each licensee is addressed as "you".  "Licensees" and
-"recipients" may be individuals or organizations.
-
-  To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy.  The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
-  A "covered work" means either the unmodified Program or a work based
-on the Program.
-
-  To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy.  Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
-  To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies.  Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
-  An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License.  If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
-  1. Source Code.
-
-  The "source code" for a work means the preferred form of the work
-for making modifications to it.  "Object code" means any non-source
-form of a work.
-
-  A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
-  The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form.  A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
-  The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities.  However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work.  For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
-  The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
-  The Corresponding Source for a work in source code form is that
-same work.
-
-  2. Basic Permissions.
-
-  All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met.  This License explicitly affirms your unlimited
-permission to run the unmodified Program.  The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work.  This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
-  You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force.  You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright.  Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
-  Conveying under any other circumstances is permitted solely under
-the conditions stated below.  Sublicensing is not allowed; section 10
-makes it unnecessary.
-
-  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
-  No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
-  When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
-  4. Conveying Verbatim Copies.
-
-  You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
-  You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
-  5. Conveying Modified Source Versions.
-
-  You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
-    a) The work must carry prominent notices stating that you modified
-    it, and giving a relevant date.
-
-    b) The work must carry prominent notices stating that it is
-    released under this License and any conditions added under section
-    7.  This requirement modifies the requirement in section 4 to
-    "keep intact all notices".
-
-    c) You must license the entire work, as a whole, under this
-    License to anyone who comes into possession of a copy.  This
-    License will therefore apply, along with any applicable section 7
-    additional terms, to the whole of the work, and all its parts,
-    regardless of how they are packaged.  This License gives no
-    permission to license the work in any other way, but it does not
-    invalidate such permission if you have separately received it.
-
-    d) If the work has interactive user interfaces, each must display
-    Appropriate Legal Notices; however, if the Program has interactive
-    interfaces that do not display Appropriate Legal Notices, your
-    work need not make them do so.
-
-  A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit.  Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
-  6. Conveying Non-Source Forms.
-
-  You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
-    a) Convey the object code in, or embodied in, a physical product
-    (including a physical distribution medium), accompanied by the
-    Corresponding Source fixed on a durable physical medium
-    customarily used for software interchange.
-
-    b) Convey the object code in, or embodied in, a physical product
-    (including a physical distribution medium), accompanied by a
-    written offer, valid for at least three years and valid for as
-    long as you offer spare parts or customer support for that product
-    model, to give anyone who possesses the object code either (1) a
-    copy of the Corresponding Source for all the software in the
-    product that is covered by this License, on a durable physical
-    medium customarily used for software interchange, for a price no
-    more than your reasonable cost of physically performing this
-    conveying of source, or (2) access to copy the
-    Corresponding Source from a network server at no charge.
-
-    c) Convey individual copies of the object code with a copy of the
-    written offer to provide the Corresponding Source.  This
-    alternative is allowed only occasionally and noncommercially, and
-    only if you received the object code with such an offer, in accord
-    with subsection 6b.
-
-    d) Convey the object code by offering access from a designated
-    place (gratis or for a charge), and offer equivalent access to the
-    Corresponding Source in the same way through the same place at no
-    further charge.  You need not require recipients to copy the
-    Corresponding Source along with the object code.  If the place to
-    copy the object code is a network server, the Corresponding Source
-    may be on a different server (operated by you or a third party)
-    that supports equivalent copying facilities, provided you maintain
-    clear directions next to the object code saying where to find the
-    Corresponding Source.  Regardless of what server hosts the
-    Corresponding Source, you remain obligated to ensure that it is
-    available for as long as needed to satisfy these requirements.
-
-    e) Convey the object code using peer-to-peer transmission, provided
-    you inform other peers where the object code and Corresponding
-    Source of the work are being offered to the general public at no
-    charge under subsection 6d.
-
-  A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
-  A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling.  In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage.  For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product.  A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
-  "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source.  The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
-  If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information.  But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
-  The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed.  Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
-  Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
-  7. Additional Terms.
-
-  "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law.  If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
-  When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it.  (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.)  You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
-  Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
-    a) Disclaiming warranty or limiting liability differently from the
-    terms of sections 15 and 16 of this License; or
-
-    b) Requiring preservation of specified reasonable legal notices or
-    author attributions in that material or in the Appropriate Legal
-    Notices displayed by works containing it; or
-
-    c) Prohibiting misrepresentation of the origin of that material, or
-    requiring that modified versions of such material be marked in
-    reasonable ways as different from the original version; or
-
-    d) Limiting the use for publicity purposes of names of licensors or
-    authors of the material; or
-
-    e) Declining to grant rights under trademark law for use of some
-    trade names, trademarks, or service marks; or
-
-    f) Requiring indemnification of licensors and authors of that
-    material by anyone who conveys the material (or modified versions of
-    it) with contractual assumptions of liability to the recipient, for
-    any liability that these contractual assumptions directly impose on
-    those licensors and authors.
-
-  All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10.  If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term.  If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
-  If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
-  Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
-  8. Termination.
-
-  You may not propagate or modify a covered work except as expressly
-provided under this License.  Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
-  However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
-  Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
-  Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License.  If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
-  9. Acceptance Not Required for Having Copies.
-
-  You are not required to accept this License in order to receive or
-run a copy of the Program.  Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance.  However,
-nothing other than this License grants you permission to propagate or
-modify any covered work.  These actions infringe copyright if you do
-not accept this License.  Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
-  10. Automatic Licensing of Downstream Recipients.
-
-  Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License.  You are not responsible
-for enforcing compliance by third parties with this License.
-
-  An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations.  If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
-  You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License.  For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
-  11. Patents.
-
-  A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based.  The
-work thus licensed is called the contributor's "contributor version".
-
-  A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version.  For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
-  Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
-  In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement).  To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
-  If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients.  "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
-  If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
-  A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License.  You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
-  Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
-  12. No Surrender of Others' Freedom.
-
-  If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all.  For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
-  13. Use with the GNU Affero General Public License.
-
-  Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work.  The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
-  14. Revised Versions of this License.
-
-  The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-  Each version is given a distinguishing version number.  If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation.  If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
-  If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
-  Later license versions may give you additional or different
-permissions.  However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
-  15. Disclaimer of Warranty.
-
-  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-  16. Limitation of Liability.
-
-  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
-  17. Interpretation of Sections 15 and 16.
-
-  If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-    <program>  Copyright (C) <year>  <name of author>
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<http://www.gnu.org/licenses/>.
-
-  The GNU General Public License does not permit incorporating your program
-into proprietary programs.  If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.  But first, please read
-<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/uflacs-merge-into-ffc/COPYING.LESSER b/uflacs-merge-into-ffc/COPYING.LESSER
deleted file mode 100644
index fc8a5de..0000000
--- a/uflacs-merge-into-ffc/COPYING.LESSER
+++ /dev/null
@@ -1,165 +0,0 @@
-		   GNU LESSER GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
-  This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
-  0. Additional Definitions. 
-
-  As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
-  "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
-  An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
-  A "Combined Work" is a work produced by combining or linking an
-Application with the Library.  The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
-  The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
-  The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
-  1. Exception to Section 3 of the GNU GPL.
-
-  You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
-  2. Conveying Modified Versions.
-
-  If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
-   a) under this License, provided that you make a good faith effort to
-   ensure that, in the event an Application does not supply the
-   function or data, the facility still operates, and performs
-   whatever part of its purpose remains meaningful, or
-
-   b) under the GNU GPL, with none of the additional permissions of
-   this License applicable to that copy.
-
-  3. Object Code Incorporating Material from Library Header Files.
-
-  The object code form of an Application may incorporate material from
-a header file that is part of the Library.  You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
-   a) Give prominent notice with each copy of the object code that the
-   Library is used in it and that the Library and its use are
-   covered by this License.
-
-   b) Accompany the object code with a copy of the GNU GPL and this license
-   document.
-
-  4. Combined Works.
-
-  You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
-   a) Give prominent notice with each copy of the Combined Work that
-   the Library is used in it and that the Library and its use are
-   covered by this License.
-
-   b) Accompany the Combined Work with a copy of the GNU GPL and this license
-   document.
-
-   c) For a Combined Work that displays copyright notices during
-   execution, include the copyright notice for the Library among
-   these notices, as well as a reference directing the user to the
-   copies of the GNU GPL and this license document.
-
-   d) Do one of the following:
-
-       0) Convey the Minimal Corresponding Source under the terms of this
-       License, and the Corresponding Application Code in a form
-       suitable for, and under terms that permit, the user to
-       recombine or relink the Application with a modified version of
-       the Linked Version to produce a modified Combined Work, in the
-       manner specified by section 6 of the GNU GPL for conveying
-       Corresponding Source.
-
-       1) Use a suitable shared library mechanism for linking with the
-       Library.  A suitable mechanism is one that (a) uses at run time
-       a copy of the Library already present on the user's computer
-       system, and (b) will operate properly with a modified version
-       of the Library that is interface-compatible with the Linked
-       Version. 
-
-   e) Provide Installation Information, but only if you would otherwise
-   be required to provide such information under section 6 of the
-   GNU GPL, and only to the extent that such information is
-   necessary to install and execute a modified version of the
-   Combined Work produced by recombining or relinking the
-   Application with a modified version of the Linked Version. (If
-   you use option 4d0, the Installation Information must accompany
-   the Minimal Corresponding Source and Corresponding Application
-   Code. If you use option 4d1, you must provide the Installation
-   Information in the manner specified by section 6 of the GNU GPL
-   for conveying Corresponding Source.)
-
-  5. Combined Libraries.
-
-  You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
-   a) Accompany the combined library with a copy of the same work based
-   on the Library, uncombined with any other library facilities,
-   conveyed under the terms of this License.
-
-   b) Give prominent notice with the combined library that part of it
-   is a work based on the Library, and explaining where to find the
-   accompanying uncombined form of the same work.
-
-  6. Revised Versions of the GNU Lesser General Public License.
-
-  The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
-  Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
-  If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/uflacs-merge-into-ffc/README.rst b/uflacs-merge-into-ffc/README.rst
deleted file mode 100644
index 2d0c2e7..0000000
--- a/uflacs-merge-into-ffc/README.rst
+++ /dev/null
@@ -1,60 +0,0 @@
-UFLACS - UFL Analyser and Compiler System
-=========================================
-
-Description
------------
-
-Uflacs, the UFL Analyser and Compiler System, is a collection of
-algorithms for processing symbolic UFL forms and expressions.
-The main feature is efficient translation of tensor intensive
-symbolic expressions into a low level expression representation and C++ code.
-
-
-Licencing
----------
-
-Uflacs is Copyright (2011-2014) by Martin Sandve Alnæs.
-
-This version of uflacs is released under the LGPL v3 licence.
-
-
-Installing
-----------
-
-Either install to default python location as root::
-
-    sudo python setup.py install
-
-Or install to your own python path directory::
-
-    python setup.py install --prefix=/path/to/my/own/site-packages
-
-
-Testing
--------
-
-To run the Python tests you need to install the py.test framework.
-Then run all tests simply by executing
-
-    cd test && py.test
-
-To run unittests of generated C++ code you also need the Google C++ Testing Framework:
-
-    https://code.google.com/p/googletest/
-
-
-Contact
--------
-
-Bitbucket site:
-
-    http://www.bitbucket.org/fenics-project/uflacs/
-
-FEniCS Project site:
-
-    http://www.fenicsproject.org/
-
-Author:
-
-    Martin Sandve Alnæs (martinal at simula.no)
-
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/print/paper.css b/uflacs-merge-into-ffc/doc/roadmap/css/print/paper.css
deleted file mode 100644
index 893184d..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/print/paper.css
+++ /dev/null
@@ -1,176 +0,0 @@
-/* Default Print Stylesheet Template
-   by Rob Glazebrook of CSSnewbie.com
-   Last Updated: June 4, 2008
-
-   Feel free (nay, compelled) to edit, append, and
-   manipulate this file as you see fit. */
-
-
-/* SECTION 1: Set default width, margin, float, and
-   background. This prevents elements from extending
-   beyond the edge of the printed page, and prevents
-   unnecessary background images from printing */
-body {
-	background: #fff;
-	font-size: 13pt;
-	width: auto;
-	height: auto;
-	border: 0;
-	margin: 0 5%;
-	padding: 0;
-	float: none !important;
-	overflow: visible;
-}
-html {
-	background: #fff;
-	width: auto;
-	height: auto;
-	overflow: visible;
-}
-
-/* SECTION 2: Remove any elements not needed in print.
-   This would include navigation, ads, sidebars, etc. */
-.nestedarrow,
-.controls,
-.reveal .progress,
-.reveal.overview,
-.fork-reveal,
-.share-reveal,
-.state-background {
-	display: none !important;
-}
-
-/* SECTION 3: Set body font face, size, and color.
-   Consider using a serif font for readability. */
-body, p, td, li, div, a {
-	font-size: 16pt!important;
-	font-family: Georgia, "Times New Roman", Times, serif !important;
-	color: #000;
-}
-
-/* SECTION 4: Set heading font face, sizes, and color.
-   Differentiate your headings from your body text.
-   Perhaps use a large sans-serif for distinction. */
-h1,h2,h3,h4,h5,h6 {
-	color: #000!important;
-	height: auto;
-	line-height: normal;
-	font-family: Georgia, "Times New Roman", Times, serif !important;
-	text-shadow: 0 0 0 #000 !important;
-	text-align: left;
-	letter-spacing: normal;
-}
-/* Need to reduce the size of the fonts for printing */
-h1 { font-size: 26pt !important;  }
-h2 { font-size: 22pt !important; }
-h3 { font-size: 20pt !important; }
-h4 { font-size: 20pt !important; font-variant: small-caps; }
-h5 { font-size: 19pt !important; }
-h6 { font-size: 18pt !important; font-style: italic; }
-
-/* SECTION 5: Make hyperlinks more usable.
-   Ensure links are underlined, and consider appending
-   the URL to the end of the link for usability. */
-a:link,
-a:visited {
-	color: #000 !important;
-	font-weight: bold;
-	text-decoration: underline;
-}
-/*
-.reveal a:link:after,
-.reveal a:visited:after {
-	content: " (" attr(href) ") ";
-	color: #222 !important;
-	font-size: 90%;
-}
-*/
-
-
-/* SECTION 6: more reveal.js specific additions by @skypanther */
-ul, ol, div, p {
-	visibility: visible;
-	position: static;
-	width: auto;
-	height: auto;
-	display: block;
-	overflow: visible;
-	margin: auto;
-	text-align: left !important;
-}
-.reveal .slides {
-	position: static;
-	width: auto;
-	height: auto;
-
-	left: auto;
-	top: auto;
-	margin-left: auto;
-	margin-top: auto;
-	padding: auto;
-
-	overflow: visible;
-	display: block;
-
-	text-align: center;
-	-webkit-perspective: none;
-	   -moz-perspective: none;
-	    -ms-perspective: none;
-	        perspective: none;
-
-	-webkit-perspective-origin: 50% 50%; /* there isn't a none/auto value but 50-50 is the default */
-	   -moz-perspective-origin: 50% 50%;
-	    -ms-perspective-origin: 50% 50%;
-	        perspective-origin: 50% 50%;
-}
-.reveal .slides>section,
-.reveal .slides>section>section {
-
-	visibility: visible !important;
-	position: static !important;
-	width: 90% !important;
-	height: auto !important;
-	display: block !important;
-	overflow: visible !important;
-
-	left: 0% !important;
-	top: 0% !important;
-	margin-left: 0px !important;
-	margin-top: 0px !important;
-	padding: 20px 0px !important;
-
-	opacity: 1 !important;
-
-	-webkit-transform-style: flat !important;
-	   -moz-transform-style: flat !important;
-	    -ms-transform-style: flat !important;
-	        transform-style: flat !important;
-
-	-webkit-transform: none !important;
-	   -moz-transform: none !important;
-	    -ms-transform: none !important;
-	        transform: none !important;
-}
-.reveal section {
-	page-break-after: always !important;
-	display: block !important;
-}
-.reveal section .fragment {
-	opacity: 1 !important;
-	visibility: visible !important;
-
-	-webkit-transform: none !important;
-	   -moz-transform: none !important;
-	    -ms-transform: none !important;
-	        transform: none !important;
-}
-.reveal section:last-of-type {
-	page-break-after: avoid !important;
-}
-.reveal section img {
-	display: block;
-	margin: 15px 0px;
-	background: rgba(255,255,255,1);
-	border: 1px solid #666;
-	box-shadow: none;
-}
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/print/pdf.css b/uflacs-merge-into-ffc/doc/roadmap/css/print/pdf.css
deleted file mode 100644
index 41f70c6..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/print/pdf.css
+++ /dev/null
@@ -1,190 +0,0 @@
-/* Default Print Stylesheet Template
-   by Rob Glazebrook of CSSnewbie.com
-   Last Updated: June 4, 2008
-
-   Feel free (nay, compelled) to edit, append, and
-   manipulate this file as you see fit. */
-
-
-/* SECTION 1: Set default width, margin, float, and
-   background. This prevents elements from extending
-   beyond the edge of the printed page, and prevents
-   unnecessary background images from printing */
-
-* {
-	-webkit-print-color-adjust: exact;
-}
-
-body {
-	font-size: 18pt;
-	width: 297mm;
-	height: 229mm;
-	margin: 0 auto !important;
-	border: 0;
-	padding: 0;
-	float: none !important;
-	overflow: visible;
-}
-
-html {
-	width: 100%;
-	height: 100%;
-	overflow: visible;
-}
-
- at page {
-	size: letter landscape;
-	margin: 0;
-}
-
-/* SECTION 2: Remove any elements not needed in print.
-   This would include navigation, ads, sidebars, etc. */
-.nestedarrow,
-.controls,
-.reveal .progress,
-.reveal.overview,
-.fork-reveal,
-.share-reveal,
-.state-background {
-	display: none !important;
-}
-
-/* SECTION 3: Set body font face, size, and color.
-   Consider using a serif font for readability. */
-body, p, td, li, div {
-	font-size: 18pt;
-}
-
-/* SECTION 4: Set heading font face, sizes, and color.
-   Differentiate your headings from your body text.
-   Perhaps use a large sans-serif for distinction. */
-h1,h2,h3,h4,h5,h6 {
-	text-shadow: 0 0 0 #000 !important;
-}
-
-/* SECTION 5: Make hyperlinks more usable.
-   Ensure links are underlined, and consider appending
-   the URL to the end of the link for usability. */
-a:link,
-a:visited {
-	font-weight: normal;
-	text-decoration: underline;
-}
-
-.reveal pre code {
-	overflow: hidden !important;
-	font-family: monospace !important;
-}
-
-
-/* SECTION 6: more reveal.js specific additions by @skypanther */
-ul, ol, div, p {
-	visibility: visible;
-	position: static;
-	width: auto;
-	height: auto;
-	display: block;
-	overflow: visible;
-	margin: auto;
-}
-.reveal {
-	width: auto !important;
-	height: auto !important;
-	overflow: hidden !important;
-}
-.reveal .slides {
-	position: static;
-	width: 100%;
-	height: auto;
-
-	left: auto;
-	top: auto;
-	margin: 0 !important;
-	padding: 0 !important;
-
-	overflow: visible;
-	display: block;
-
-	text-align: center;
-
-	-webkit-perspective: none;
-	   -moz-perspective: none;
-	    -ms-perspective: none;
-	        perspective: none;
-
-	-webkit-perspective-origin: 50% 50%; /* there isn't a none/auto value but 50-50 is the default */
-	   -moz-perspective-origin: 50% 50%;
-	    -ms-perspective-origin: 50% 50%;
-	        perspective-origin: 50% 50%;
-}
-.reveal .slides section {
-
-	page-break-after: always !important;
-
-	visibility: visible !important;
-	position: relative !important;
-	width: 100% !important;
-	height: 229mm !important;
-	min-height: 229mm !important;
-	display: block !important;
-	overflow: hidden !important;
-
-	left: 0 !important;
-	top: 0 !important;
-	margin: 0 !important;
-	padding: 2cm 2cm 0 2cm !important;
-	box-sizing: border-box !important;
-
-	opacity: 1 !important;
-
-	-webkit-transform-style: flat !important;
-	   -moz-transform-style: flat !important;
-	    -ms-transform-style: flat !important;
-	        transform-style: flat !important;
-
-	-webkit-transform: none !important;
-	   -moz-transform: none !important;
-	    -ms-transform: none !important;
-	        transform: none !important;
-}
-.reveal section.stack {
-	margin: 0 !important;
-	padding: 0 !important;
-	page-break-after: avoid !important;
-	height: auto !important;
-	min-height: auto !important;
-}
-.reveal .absolute-element {
-	margin-left: 2.2cm;
-	margin-top: 1.8cm;
-}
-.reveal section .fragment {
-	opacity: 1 !important;
-	visibility: visible !important;
-
-	-webkit-transform: none !important;
-	   -moz-transform: none !important;
-	    -ms-transform: none !important;
-	        transform: none !important;
-}
-.reveal section .slide-background {
-	position: absolute;
-	top: 0;
-	left: 0;
-	width: 100%;
-	z-index: 0;
-}
-.reveal section>* {
-	position: relative;
-	z-index: 1;
-}
-.reveal img {
-	box-shadow: none;
-}
-.reveal .roll {
-	overflow: visible;
-	line-height: 1em;
-}
-.reveal small a {
-	font-size: 16pt !important;
-}
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/reveal.css b/uflacs-merge-into-ffc/doc/roadmap/css/reveal.css
deleted file mode 100644
index 842f67c..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/reveal.css
+++ /dev/null
@@ -1,1880 +0,0 @@
- at charset "UTF-8";
-
-/*!
- * reveal.js
- * http://lab.hakim.se/reveal-js
- * MIT licensed
- *
- * Copyright (C) 2014 Hakim El Hattab, http://hakim.se
- */
-
-
-/*********************************************
- * RESET STYLES
- *********************************************/
-
-html, body, .reveal div, .reveal span, .reveal applet, .reveal object, .reveal iframe,
-.reveal h1, .reveal h2, .reveal h3, .reveal h4, .reveal h5, .reveal h6, .reveal p, .reveal blockquote, .reveal pre,
-.reveal a, .reveal abbr, .reveal acronym, .reveal address, .reveal big, .reveal cite, .reveal code,
-.reveal del, .reveal dfn, .reveal em, .reveal img, .reveal ins, .reveal kbd, .reveal q, .reveal s, .reveal samp,
-.reveal small, .reveal strike, .reveal strong, .reveal sub, .reveal sup, .reveal tt, .reveal var,
-.reveal b, .reveal u, .reveal i, .reveal center,
-.reveal dl, .reveal dt, .reveal dd, .reveal ol, .reveal ul, .reveal li,
-.reveal fieldset, .reveal form, .reveal label, .reveal legend,
-.reveal table, .reveal caption, .reveal tbody, .reveal tfoot, .reveal thead, .reveal tr, .reveal th, .reveal td,
-.reveal article, .reveal aside, .reveal canvas, .reveal details, .reveal embed,
-.reveal figure, .reveal figcaption, .reveal footer, .reveal header, .reveal hgroup,
-.reveal menu, .reveal nav, .reveal output, .reveal ruby, .reveal section, .reveal summary,
-.reveal time, .reveal mark, .reveal audio, video {
-	margin: 0;
-	padding: 0;
-	border: 0;
-	font-size: 100%;
-	font: inherit;
-	vertical-align: baseline;
-}
-
-.reveal article, .reveal aside, .reveal details, .reveal figcaption, .reveal figure,
-.reveal footer, .reveal header, .reveal hgroup, .reveal menu, .reveal nav, .reveal section {
-	display: block;
-}
-
-
-/*********************************************
- * GLOBAL STYLES
- *********************************************/
-
-html,
-body {
-	width: 100%;
-	height: 100%;
-	overflow: hidden;
-}
-
-body {
-	position: relative;
-	line-height: 1;
-}
-
-::selection {
-	background: #FF5E99;
-	color: #fff;
-	text-shadow: none;
-}
-
-
-/*********************************************
- * HEADERS
- *********************************************/
-
-.reveal h1,
-.reveal h2,
-.reveal h3,
-.reveal h4,
-.reveal h5,
-.reveal h6 {
-	-webkit-hyphens: auto;
-	   -moz-hyphens: auto;
-	        hyphens: auto;
-
-	word-wrap: break-word;
-	line-height: 1;
-}
-
-.reveal h1 { font-size: 3.77em; }
-.reveal h2 { font-size: 2.11em;	}
-.reveal h3 { font-size: 1.55em;	}
-.reveal h4 { font-size: 1em;	}
-
-
-/*********************************************
- * VIEW FRAGMENTS
- *********************************************/
-
-.reveal .slides section .fragment {
-	opacity: 0;
-
-	-webkit-transition: all .2s ease;
-	   -moz-transition: all .2s ease;
-	    -ms-transition: all .2s ease;
-	     -o-transition: all .2s ease;
-	        transition: all .2s ease;
-}
-	.reveal .slides section .fragment.visible {
-		opacity: 1;
-	}
-
-.reveal .slides section .fragment.grow {
-	opacity: 1;
-}
-	.reveal .slides section .fragment.grow.visible {
-		-webkit-transform: scale( 1.3 );
-		   -moz-transform: scale( 1.3 );
-		    -ms-transform: scale( 1.3 );
-		     -o-transform: scale( 1.3 );
-		        transform: scale( 1.3 );
-	}
-
-.reveal .slides section .fragment.shrink {
-	opacity: 1;
-}
-	.reveal .slides section .fragment.shrink.visible {
-		-webkit-transform: scale( 0.7 );
-		   -moz-transform: scale( 0.7 );
-		    -ms-transform: scale( 0.7 );
-		     -o-transform: scale( 0.7 );
-		        transform: scale( 0.7 );
-	}
-
-.reveal .slides section .fragment.zoom-in {
-	opacity: 0;
-
-	-webkit-transform: scale( 0.1 );
-	   -moz-transform: scale( 0.1 );
-	    -ms-transform: scale( 0.1 );
-	     -o-transform: scale( 0.1 );
-	        transform: scale( 0.1 );
-}
-
-	.reveal .slides section .fragment.zoom-in.visible {
-		opacity: 1;
-
-		-webkit-transform: scale( 1 );
-		   -moz-transform: scale( 1 );
-		    -ms-transform: scale( 1 );
-		     -o-transform: scale( 1 );
-		        transform: scale( 1 );
-	}
-
-.reveal .slides section .fragment.roll-in {
-	opacity: 0;
-
-	-webkit-transform: rotateX( 90deg );
-	   -moz-transform: rotateX( 90deg );
-	    -ms-transform: rotateX( 90deg );
-	     -o-transform: rotateX( 90deg );
-	        transform: rotateX( 90deg );
-}
-	.reveal .slides section .fragment.roll-in.visible {
-		opacity: 1;
-
-		-webkit-transform: rotateX( 0 );
-		   -moz-transform: rotateX( 0 );
-		    -ms-transform: rotateX( 0 );
-		     -o-transform: rotateX( 0 );
-		        transform: rotateX( 0 );
-	}
-
-.reveal .slides section .fragment.fade-out {
-	opacity: 1;
-}
-	.reveal .slides section .fragment.fade-out.visible {
-		opacity: 0;
-	}
-
-.reveal .slides section .fragment.semi-fade-out {
-	opacity: 1;
-}
-	.reveal .slides section .fragment.semi-fade-out.visible {
-		opacity: 0.5;
-	}
-
-.reveal .slides section .fragment.current-visible {
-	opacity:0;
-}
-
-.reveal .slides section .fragment.current-visible.current-fragment {
-	opacity:1;
-}
-
-.reveal .slides section .fragment.highlight-red,
-.reveal .slides section .fragment.highlight-current-red,
-.reveal .slides section .fragment.highlight-green,
-.reveal .slides section .fragment.highlight-current-green,
-.reveal .slides section .fragment.highlight-blue,
-.reveal .slides section .fragment.highlight-current-blue {
-	opacity: 1;
-}
-	.reveal .slides section .fragment.highlight-red.visible {
-		color: #ff2c2d
-	}
-	.reveal .slides section .fragment.highlight-green.visible {
-		color: #17ff2e;
-	}
-	.reveal .slides section .fragment.highlight-blue.visible {
-		color: #1b91ff;
-	}
-
-.reveal .slides section .fragment.highlight-current-red.current-fragment {
-	color: #ff2c2d
-}
-.reveal .slides section .fragment.highlight-current-green.current-fragment {
-	color: #17ff2e;
-}
-.reveal .slides section .fragment.highlight-current-blue.current-fragment {
-	color: #1b91ff;
-}
-
-
-/*********************************************
- * DEFAULT ELEMENT STYLES
- *********************************************/
-
-/* Fixes issue in Chrome where italic fonts did not appear when printing to PDF */
-.reveal:after {
-  content: '';
-  font-style: italic;
-}
-
-.reveal iframe {
-	z-index: 1;
-}
-
-/* Ensure certain elements are never larger than the slide itself */
-.reveal img,
-.reveal video,
-.reveal iframe {
-	max-width: 95%;
-	max-height: 95%;
-}
-
-/** Prevents layering issues in certain browser/transition combinations */
-.reveal a {
-	position: relative;
-}
-
-.reveal strong,
-.reveal b {
-	font-weight: bold;
-}
-
-.reveal em,
-.reveal i {
-	font-style: italic;
-}
-
-.reveal ol,
-.reveal ul {
-	display: inline-block;
-
-	text-align: left;
-	margin: 0 0 0 1em;
-}
-
-.reveal ol {
-	list-style-type: decimal;
-}
-
-.reveal ul {
-	list-style-type: disc;
-}
-
-.reveal ul ul {
-	list-style-type: square;
-}
-
-.reveal ul ul ul {
-	list-style-type: circle;
-}
-
-.reveal ul ul,
-.reveal ul ol,
-.reveal ol ol,
-.reveal ol ul {
-	display: block;
-	margin-left: 40px;
-}
-
-.reveal p {
-	margin-bottom: 10px;
-	line-height: 1.2em;
-}
-
-.reveal q,
-.reveal blockquote {
-	quotes: none;
-}
-
-.reveal blockquote {
-	display: block;
-	position: relative;
-	width: 70%;
-	margin: 5px auto;
-	padding: 5px;
-
-	font-style: italic;
-	background: rgba(255, 255, 255, 0.05);
-	box-shadow: 0px 0px 2px rgba(0,0,0,0.2);
-}
-	.reveal blockquote p:first-child,
-	.reveal blockquote p:last-child {
-		display: inline-block;
-	}
-
-.reveal q {
-	font-style: italic;
-}
-
-.reveal pre {
-	display: block;
-	position: relative;
-	width: 90%;
-	margin: 15px auto;
-
-	text-align: left;
-	font-size: 0.55em;
-	font-family: monospace;
-	line-height: 1.2em;
-
-	word-wrap: break-word;
-
-	box-shadow: 0px 0px 6px rgba(0,0,0,0.3);
-}
-.reveal code {
-	font-family: monospace;
-}
-.reveal pre code {
-	padding: 5px;
-	overflow: auto;
-	max-height: 400px;
-	word-wrap: normal;
-}
-.reveal pre.stretch code {
-	height: 100%;
-	max-height: 100%;
-
-	-webkit-box-sizing: border-box;
-	   -moz-box-sizing: border-box;
-	        box-sizing: border-box;
-}
-
-.reveal table th,
-.reveal table td {
-	text-align: left;
-	padding-right: .3em;
-}
-
-.reveal table th {
-	font-weight: bold;
-}
-
-.reveal sup {
-	vertical-align: super;
-}
-.reveal sub {
-	vertical-align: sub;
-}
-
-.reveal small {
-	display: inline-block;
-	font-size: 0.6em;
-	line-height: 1.2em;
-	vertical-align: top;
-}
-
-.reveal small * {
-	vertical-align: top;
-}
-
-.reveal .stretch {
-	max-width: none;
-	max-height: none;
-}
-
-
-/*********************************************
- * CONTROLS
- *********************************************/
-
-.reveal .controls {
-	display: none;
-	position: fixed;
-	width: 110px;
-	height: 110px;
-	z-index: 30;
-	right: 10px;
-	bottom: 10px;
-}
-
-.reveal .controls div {
-	position: absolute;
-	opacity: 0.05;
-	width: 0;
-	height: 0;
-	border: 12px solid transparent;
-
-	-moz-transform: scale(.9999);
-
-	-webkit-transition: all 0.2s ease;
-	   -moz-transition: all 0.2s ease;
-	    -ms-transition: all 0.2s ease;
-	     -o-transition: all 0.2s ease;
-	        transition: all 0.2s ease;
-}
-
-.reveal .controls div.enabled {
-	opacity: 0.7;
-	cursor: pointer;
-}
-
-.reveal .controls div.enabled:active {
-	margin-top: 1px;
-}
-
-	.reveal .controls div.navigate-left {
-		top: 42px;
-
-		border-right-width: 22px;
-		border-right-color: #eee;
-	}
-		.reveal .controls div.navigate-left.fragmented {
-			opacity: 0.3;
-		}
-
-	.reveal .controls div.navigate-right {
-		left: 74px;
-		top: 42px;
-
-		border-left-width: 22px;
-		border-left-color: #eee;
-	}
-		.reveal .controls div.navigate-right.fragmented {
-			opacity: 0.3;
-		}
-
-	.reveal .controls div.navigate-up {
-		left: 42px;
-
-		border-bottom-width: 22px;
-		border-bottom-color: #eee;
-	}
-		.reveal .controls div.navigate-up.fragmented {
-			opacity: 0.3;
-		}
-
-	.reveal .controls div.navigate-down {
-		left: 42px;
-		top: 74px;
-
-		border-top-width: 22px;
-		border-top-color: #eee;
-	}
-		.reveal .controls div.navigate-down.fragmented {
-			opacity: 0.3;
-		}
-
-
-/*********************************************
- * PROGRESS BAR
- *********************************************/
-
-.reveal .progress {
-	position: fixed;
-	display: none;
-	height: 3px;
-	width: 100%;
-	bottom: 0;
-	left: 0;
-	z-index: 10;
-}
-	.reveal .progress:after {
-		content: '';
-		display: 'block';
-		position: absolute;
-		height: 20px;
-		width: 100%;
-		top: -20px;
-	}
-	.reveal .progress span {
-		display: block;
-		height: 100%;
-		width: 0px;
-
-		-webkit-transition: width 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-		   -moz-transition: width 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-		    -ms-transition: width 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-		     -o-transition: width 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-		        transition: width 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-	}
-
-/*********************************************
- * SLIDE NUMBER
- *********************************************/
-
-.reveal .slide-number {
-	position: fixed;
-	display: block;
-	right: 15px;
-	bottom: 15px;
-	opacity: 0.5;
-	z-index: 31;
-	font-size: 12px;
-}
-
-/*********************************************
- * SLIDES
- *********************************************/
-
-.reveal {
-	position: relative;
-	width: 100%;
-	height: 100%;
-
-	-ms-touch-action: none;
-}
-
-.reveal .slides {
-	position: absolute;
-	width: 100%;
-	height: 100%;
-	left: 50%;
-	top: 50%;
-
-	overflow: visible;
-	z-index: 1;
-	text-align: center;
-
-	-webkit-transition: -webkit-perspective .4s ease;
-	   -moz-transition: -moz-perspective .4s ease;
-	    -ms-transition: -ms-perspective .4s ease;
-	     -o-transition: -o-perspective .4s ease;
-	        transition: perspective .4s ease;
-
-	-webkit-perspective: 600px;
-	   -moz-perspective: 600px;
-	    -ms-perspective: 600px;
-	        perspective: 600px;
-
-	-webkit-perspective-origin: 0px -100px;
-	   -moz-perspective-origin: 0px -100px;
-	    -ms-perspective-origin: 0px -100px;
-	        perspective-origin: 0px -100px;
-}
-
-.reveal .slides>section {
-	-ms-perspective: 600px;
-}
-
-.reveal .slides>section,
-.reveal .slides>section>section {
-	display: none;
-	position: absolute;
-	width: 100%;
-	padding: 20px 0px;
-
-	z-index: 10;
-	line-height: 1.2em;
-	font-weight: inherit;
-
-	-webkit-transform-style: preserve-3d;
-	   -moz-transform-style: preserve-3d;
-	    -ms-transform-style: preserve-3d;
-	        transform-style: preserve-3d;
-
-	-webkit-transition: -webkit-transform-origin 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985),
-						-webkit-transform 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985),
-						visibility 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985),
-						opacity 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-	   -moz-transition: -moz-transform-origin 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985),
-						-moz-transform 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985),
-						visibility 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985),
-						opacity 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-	    -ms-transition: -ms-transform-origin 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985),
-						-ms-transform 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985),
-						visibility 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985),
-						opacity 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-	     -o-transition: -o-transform-origin 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985),
-						-o-transform 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985),
-						visibility 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985),
-						opacity 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-	        transition: transform-origin 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985),
-						transform 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985),
-						visibility 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985),
-						opacity 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-}
-
-/* Global transition speed settings */
-.reveal[data-transition-speed="fast"] .slides section {
-	-webkit-transition-duration: 400ms;
-	   -moz-transition-duration: 400ms;
-	    -ms-transition-duration: 400ms;
-	        transition-duration: 400ms;
-}
-.reveal[data-transition-speed="slow"] .slides section {
-	-webkit-transition-duration: 1200ms;
-	   -moz-transition-duration: 1200ms;
-	    -ms-transition-duration: 1200ms;
-	        transition-duration: 1200ms;
-}
-
-/* Slide-specific transition speed overrides */
-.reveal .slides section[data-transition-speed="fast"] {
-	-webkit-transition-duration: 400ms;
-	   -moz-transition-duration: 400ms;
-	    -ms-transition-duration: 400ms;
-	        transition-duration: 400ms;
-}
-.reveal .slides section[data-transition-speed="slow"] {
-	-webkit-transition-duration: 1200ms;
-	   -moz-transition-duration: 1200ms;
-	    -ms-transition-duration: 1200ms;
-	        transition-duration: 1200ms;
-}
-
-.reveal .slides>section {
-	left: -50%;
-	top: -50%;
-}
-
-.reveal .slides>section.stack {
-	padding-top: 0;
-	padding-bottom: 0;
-}
-
-.reveal .slides>section.present,
-.reveal .slides>section>section.present {
-	display: block;
-	z-index: 11;
-	opacity: 1;
-}
-
-.reveal.center,
-.reveal.center .slides,
-.reveal.center .slides section {
-	min-height: auto !important;
-}
-
-/* Don't allow interaction with invisible slides */
-.reveal .slides>section.future,
-.reveal .slides>section>section.future,
-.reveal .slides>section.past,
-.reveal .slides>section>section.past {
-	pointer-events: none;
-}
-
-.reveal.overview .slides>section,
-.reveal.overview .slides>section>section {
-	pointer-events: auto;
-}
-
-
-
-/*********************************************
- * DEFAULT TRANSITION
- *********************************************/
-
-.reveal .slides>section[data-transition=default].past,
-.reveal .slides>section.past {
-	display: block;
-	opacity: 0;
-
-	-webkit-transform: translate3d(-100%, 0, 0) rotateY(-90deg) translate3d(-100%, 0, 0);
-	   -moz-transform: translate3d(-100%, 0, 0) rotateY(-90deg) translate3d(-100%, 0, 0);
-	    -ms-transform: translate3d(-100%, 0, 0) rotateY(-90deg) translate3d(-100%, 0, 0);
-	        transform: translate3d(-100%, 0, 0) rotateY(-90deg) translate3d(-100%, 0, 0);
-}
-.reveal .slides>section[data-transition=default].future,
-.reveal .slides>section.future {
-	display: block;
-	opacity: 0;
-
-	-webkit-transform: translate3d(100%, 0, 0) rotateY(90deg) translate3d(100%, 0, 0);
-	   -moz-transform: translate3d(100%, 0, 0) rotateY(90deg) translate3d(100%, 0, 0);
-	    -ms-transform: translate3d(100%, 0, 0) rotateY(90deg) translate3d(100%, 0, 0);
-	        transform: translate3d(100%, 0, 0) rotateY(90deg) translate3d(100%, 0, 0);
-}
-
-.reveal .slides>section>section[data-transition=default].past,
-.reveal .slides>section>section.past {
-	display: block;
-	opacity: 0;
-
-	-webkit-transform: translate3d(0, -300px, 0) rotateX(70deg) translate3d(0, -300px, 0);
-	   -moz-transform: translate3d(0, -300px, 0) rotateX(70deg) translate3d(0, -300px, 0);
-	    -ms-transform: translate3d(0, -300px, 0) rotateX(70deg) translate3d(0, -300px, 0);
-	        transform: translate3d(0, -300px, 0) rotateX(70deg) translate3d(0, -300px, 0);
-}
-.reveal .slides>section>section[data-transition=default].future,
-.reveal .slides>section>section.future {
-	display: block;
-	opacity: 0;
-
-	-webkit-transform: translate3d(0, 300px, 0) rotateX(-70deg) translate3d(0, 300px, 0);
-	   -moz-transform: translate3d(0, 300px, 0) rotateX(-70deg) translate3d(0, 300px, 0);
-	    -ms-transform: translate3d(0, 300px, 0) rotateX(-70deg) translate3d(0, 300px, 0);
-	        transform: translate3d(0, 300px, 0) rotateX(-70deg) translate3d(0, 300px, 0);
-}
-
-
-/*********************************************
- * CONCAVE TRANSITION
- *********************************************/
-
-.reveal .slides>section[data-transition=concave].past,
-.reveal.concave  .slides>section.past {
-	-webkit-transform: translate3d(-100%, 0, 0) rotateY(90deg) translate3d(-100%, 0, 0);
-	   -moz-transform: translate3d(-100%, 0, 0) rotateY(90deg) translate3d(-100%, 0, 0);
-	    -ms-transform: translate3d(-100%, 0, 0) rotateY(90deg) translate3d(-100%, 0, 0);
-	        transform: translate3d(-100%, 0, 0) rotateY(90deg) translate3d(-100%, 0, 0);
-}
-.reveal .slides>section[data-transition=concave].future,
-.reveal.concave .slides>section.future {
-	-webkit-transform: translate3d(100%, 0, 0) rotateY(-90deg) translate3d(100%, 0, 0);
-	   -moz-transform: translate3d(100%, 0, 0) rotateY(-90deg) translate3d(100%, 0, 0);
-	    -ms-transform: translate3d(100%, 0, 0) rotateY(-90deg) translate3d(100%, 0, 0);
-	        transform: translate3d(100%, 0, 0) rotateY(-90deg) translate3d(100%, 0, 0);
-}
-
-.reveal .slides>section>section[data-transition=concave].past,
-.reveal.concave .slides>section>section.past {
-	-webkit-transform: translate3d(0, -80%, 0) rotateX(-70deg) translate3d(0, -80%, 0);
-	   -moz-transform: translate3d(0, -80%, 0) rotateX(-70deg) translate3d(0, -80%, 0);
-	    -ms-transform: translate3d(0, -80%, 0) rotateX(-70deg) translate3d(0, -80%, 0);
-	        transform: translate3d(0, -80%, 0) rotateX(-70deg) translate3d(0, -80%, 0);
-}
-.reveal .slides>section>section[data-transition=concave].future,
-.reveal.concave .slides>section>section.future {
-	-webkit-transform: translate3d(0, 80%, 0) rotateX(70deg) translate3d(0, 80%, 0);
-	   -moz-transform: translate3d(0, 80%, 0) rotateX(70deg) translate3d(0, 80%, 0);
-	    -ms-transform: translate3d(0, 80%, 0) rotateX(70deg) translate3d(0, 80%, 0);
-	        transform: translate3d(0, 80%, 0) rotateX(70deg) translate3d(0, 80%, 0);
-}
-
-
-/*********************************************
- * ZOOM TRANSITION
- *********************************************/
-
-.reveal .slides>section[data-transition=zoom],
-.reveal.zoom .slides>section {
-	-webkit-transition-timing-function: ease;
-	   -moz-transition-timing-function: ease;
-	    -ms-transition-timing-function: ease;
-	     -o-transition-timing-function: ease;
-	        transition-timing-function: ease;
-}
-
-.reveal .slides>section[data-transition=zoom].past,
-.reveal.zoom .slides>section.past {
-	opacity: 0;
-	visibility: hidden;
-
-	-webkit-transform: scale(16);
-	   -moz-transform: scale(16);
-	    -ms-transform: scale(16);
-	     -o-transform: scale(16);
-	        transform: scale(16);
-}
-.reveal .slides>section[data-transition=zoom].future,
-.reveal.zoom .slides>section.future {
-	opacity: 0;
-	visibility: hidden;
-
-	-webkit-transform: scale(0.2);
-	   -moz-transform: scale(0.2);
-	    -ms-transform: scale(0.2);
-	     -o-transform: scale(0.2);
-	        transform: scale(0.2);
-}
-
-.reveal .slides>section>section[data-transition=zoom].past,
-.reveal.zoom .slides>section>section.past {
-	-webkit-transform: translate(0, -150%);
-	   -moz-transform: translate(0, -150%);
-	    -ms-transform: translate(0, -150%);
-	     -o-transform: translate(0, -150%);
-	        transform: translate(0, -150%);
-}
-.reveal .slides>section>section[data-transition=zoom].future,
-.reveal.zoom .slides>section>section.future {
-	-webkit-transform: translate(0, 150%);
-	   -moz-transform: translate(0, 150%);
-	    -ms-transform: translate(0, 150%);
-	     -o-transform: translate(0, 150%);
-	        transform: translate(0, 150%);
-}
-
-
-/*********************************************
- * LINEAR TRANSITION
- *********************************************/
-
-.reveal.linear section {
-	-webkit-backface-visibility: hidden;
-	   -moz-backface-visibility: hidden;
-	    -ms-backface-visibility: hidden;
-	        backface-visibility: hidden;
-}
-
-.reveal .slides>section[data-transition=linear].past,
-.reveal.linear .slides>section.past {
-	-webkit-transform: translate(-150%, 0);
-	   -moz-transform: translate(-150%, 0);
-	    -ms-transform: translate(-150%, 0);
-	     -o-transform: translate(-150%, 0);
-	        transform: translate(-150%, 0);
-}
-.reveal .slides>section[data-transition=linear].future,
-.reveal.linear .slides>section.future {
-	-webkit-transform: translate(150%, 0);
-	   -moz-transform: translate(150%, 0);
-	    -ms-transform: translate(150%, 0);
-	     -o-transform: translate(150%, 0);
-	        transform: translate(150%, 0);
-}
-
-.reveal .slides>section>section[data-transition=linear].past,
-.reveal.linear .slides>section>section.past {
-	-webkit-transform: translate(0, -150%);
-	   -moz-transform: translate(0, -150%);
-	    -ms-transform: translate(0, -150%);
-	     -o-transform: translate(0, -150%);
-	        transform: translate(0, -150%);
-}
-.reveal .slides>section>section[data-transition=linear].future,
-.reveal.linear .slides>section>section.future {
-	-webkit-transform: translate(0, 150%);
-	   -moz-transform: translate(0, 150%);
-	    -ms-transform: translate(0, 150%);
-	     -o-transform: translate(0, 150%);
-	        transform: translate(0, 150%);
-}
-
-
-/*********************************************
- * CUBE TRANSITION
- *********************************************/
-
-.reveal.cube .slides {
-	-webkit-perspective: 1300px;
-	   -moz-perspective: 1300px;
-	    -ms-perspective: 1300px;
-	        perspective: 1300px;
-}
-
-.reveal.cube .slides section {
-	padding: 30px;
-	min-height: 700px;
-
-	-webkit-backface-visibility: hidden;
-	   -moz-backface-visibility: hidden;
-	    -ms-backface-visibility: hidden;
-	        backface-visibility: hidden;
-
-	-webkit-box-sizing: border-box;
-	   -moz-box-sizing: border-box;
-	        box-sizing: border-box;
-}
-	.reveal.center.cube .slides section {
-		min-height: auto;
-	}
-	.reveal.cube .slides section:not(.stack):before {
-		content: '';
-		position: absolute;
-		display: block;
-		width: 100%;
-		height: 100%;
-		left: 0;
-		top: 0;
-		background: rgba(0,0,0,0.1);
-		border-radius: 4px;
-
-		-webkit-transform: translateZ( -20px );
-		   -moz-transform: translateZ( -20px );
-		    -ms-transform: translateZ( -20px );
-		     -o-transform: translateZ( -20px );
-		        transform: translateZ( -20px );
-	}
-	.reveal.cube .slides section:not(.stack):after {
-		content: '';
-		position: absolute;
-		display: block;
-		width: 90%;
-		height: 30px;
-		left: 5%;
-		bottom: 0;
-		background: none;
-		z-index: 1;
-
-		border-radius: 4px;
-		box-shadow: 0px 95px 25px rgba(0,0,0,0.2);
-
-		-webkit-transform: translateZ(-90px) rotateX( 65deg );
-		   -moz-transform: translateZ(-90px) rotateX( 65deg );
-		    -ms-transform: translateZ(-90px) rotateX( 65deg );
-		     -o-transform: translateZ(-90px) rotateX( 65deg );
-		        transform: translateZ(-90px) rotateX( 65deg );
-	}
-
-.reveal.cube .slides>section.stack {
-	padding: 0;
-	background: none;
-}
-
-.reveal.cube .slides>section.past {
-	-webkit-transform-origin: 100% 0%;
-	   -moz-transform-origin: 100% 0%;
-	    -ms-transform-origin: 100% 0%;
-	        transform-origin: 100% 0%;
-
-	-webkit-transform: translate3d(-100%, 0, 0) rotateY(-90deg);
-	   -moz-transform: translate3d(-100%, 0, 0) rotateY(-90deg);
-	    -ms-transform: translate3d(-100%, 0, 0) rotateY(-90deg);
-	        transform: translate3d(-100%, 0, 0) rotateY(-90deg);
-}
-
-.reveal.cube .slides>section.future {
-	-webkit-transform-origin: 0% 0%;
-	   -moz-transform-origin: 0% 0%;
-	    -ms-transform-origin: 0% 0%;
-	        transform-origin: 0% 0%;
-
-	-webkit-transform: translate3d(100%, 0, 0) rotateY(90deg);
-	   -moz-transform: translate3d(100%, 0, 0) rotateY(90deg);
-	    -ms-transform: translate3d(100%, 0, 0) rotateY(90deg);
-	        transform: translate3d(100%, 0, 0) rotateY(90deg);
-}
-
-.reveal.cube .slides>section>section.past {
-	-webkit-transform-origin: 0% 100%;
-	   -moz-transform-origin: 0% 100%;
-	    -ms-transform-origin: 0% 100%;
-	        transform-origin: 0% 100%;
-
-	-webkit-transform: translate3d(0, -100%, 0) rotateX(90deg);
-	   -moz-transform: translate3d(0, -100%, 0) rotateX(90deg);
-	    -ms-transform: translate3d(0, -100%, 0) rotateX(90deg);
-	        transform: translate3d(0, -100%, 0) rotateX(90deg);
-}
-
-.reveal.cube .slides>section>section.future {
-	-webkit-transform-origin: 0% 0%;
-	   -moz-transform-origin: 0% 0%;
-	    -ms-transform-origin: 0% 0%;
-	        transform-origin: 0% 0%;
-
-	-webkit-transform: translate3d(0, 100%, 0) rotateX(-90deg);
-	   -moz-transform: translate3d(0, 100%, 0) rotateX(-90deg);
-	    -ms-transform: translate3d(0, 100%, 0) rotateX(-90deg);
-	        transform: translate3d(0, 100%, 0) rotateX(-90deg);
-}
-
-
-/*********************************************
- * PAGE TRANSITION
- *********************************************/
-
-.reveal.page .slides {
-	-webkit-perspective-origin: 0% 50%;
-	   -moz-perspective-origin: 0% 50%;
-	    -ms-perspective-origin: 0% 50%;
-	        perspective-origin: 0% 50%;
-
-	-webkit-perspective: 3000px;
-	   -moz-perspective: 3000px;
-	    -ms-perspective: 3000px;
-	        perspective: 3000px;
-}
-
-.reveal.page .slides section {
-	padding: 30px;
-	min-height: 700px;
-
-	-webkit-box-sizing: border-box;
-	   -moz-box-sizing: border-box;
-	        box-sizing: border-box;
-}
-	.reveal.page .slides section.past {
-		z-index: 12;
-	}
-	.reveal.page .slides section:not(.stack):before {
-		content: '';
-		position: absolute;
-		display: block;
-		width: 100%;
-		height: 100%;
-		left: 0;
-		top: 0;
-		background: rgba(0,0,0,0.1);
-
-		-webkit-transform: translateZ( -20px );
-		   -moz-transform: translateZ( -20px );
-		    -ms-transform: translateZ( -20px );
-		     -o-transform: translateZ( -20px );
-		        transform: translateZ( -20px );
-	}
-	.reveal.page .slides section:not(.stack):after {
-		content: '';
-		position: absolute;
-		display: block;
-		width: 90%;
-		height: 30px;
-		left: 5%;
-		bottom: 0;
-		background: none;
-		z-index: 1;
-
-		border-radius: 4px;
-		box-shadow: 0px 95px 25px rgba(0,0,0,0.2);
-
-		-webkit-transform: translateZ(-90px) rotateX( 65deg );
-	}
-
-.reveal.page .slides>section.stack {
-	padding: 0;
-	background: none;
-}
-
-.reveal.page .slides>section.past {
-	-webkit-transform-origin: 0% 0%;
-	   -moz-transform-origin: 0% 0%;
-	    -ms-transform-origin: 0% 0%;
-	        transform-origin: 0% 0%;
-
-	-webkit-transform: translate3d(-40%, 0, 0) rotateY(-80deg);
-	   -moz-transform: translate3d(-40%, 0, 0) rotateY(-80deg);
-	    -ms-transform: translate3d(-40%, 0, 0) rotateY(-80deg);
-	        transform: translate3d(-40%, 0, 0) rotateY(-80deg);
-}
-
-.reveal.page .slides>section.future {
-	-webkit-transform-origin: 100% 0%;
-	   -moz-transform-origin: 100% 0%;
-	    -ms-transform-origin: 100% 0%;
-	        transform-origin: 100% 0%;
-
-	-webkit-transform: translate3d(0, 0, 0);
-	   -moz-transform: translate3d(0, 0, 0);
-	    -ms-transform: translate3d(0, 0, 0);
-	        transform: translate3d(0, 0, 0);
-}
-
-.reveal.page .slides>section>section.past {
-	-webkit-transform-origin: 0% 0%;
-	   -moz-transform-origin: 0% 0%;
-	    -ms-transform-origin: 0% 0%;
-	        transform-origin: 0% 0%;
-
-	-webkit-transform: translate3d(0, -40%, 0) rotateX(80deg);
-	   -moz-transform: translate3d(0, -40%, 0) rotateX(80deg);
-	    -ms-transform: translate3d(0, -40%, 0) rotateX(80deg);
-	        transform: translate3d(0, -40%, 0) rotateX(80deg);
-}
-
-.reveal.page .slides>section>section.future {
-	-webkit-transform-origin: 0% 100%;
-	   -moz-transform-origin: 0% 100%;
-	    -ms-transform-origin: 0% 100%;
-	        transform-origin: 0% 100%;
-
-	-webkit-transform: translate3d(0, 0, 0);
-	   -moz-transform: translate3d(0, 0, 0);
-	    -ms-transform: translate3d(0, 0, 0);
-	        transform: translate3d(0, 0, 0);
-}
-
-
-/*********************************************
- * FADE TRANSITION
- *********************************************/
-
-.reveal .slides section[data-transition=fade],
-.reveal.fade .slides section,
-.reveal.fade .slides>section>section {
-    -webkit-transform: none;
-	   -moz-transform: none;
-	    -ms-transform: none;
-	     -o-transform: none;
-	        transform: none;
-
-	-webkit-transition: opacity 0.5s;
-	   -moz-transition: opacity 0.5s;
-	    -ms-transition: opacity 0.5s;
-	     -o-transition: opacity 0.5s;
-	        transition: opacity 0.5s;
-}
-
-
-.reveal.fade.overview .slides section,
-.reveal.fade.overview .slides>section>section,
-.reveal.fade.overview-deactivating .slides section,
-.reveal.fade.overview-deactivating .slides>section>section {
-	-webkit-transition: none;
-	   -moz-transition: none;
-	    -ms-transition: none;
-	     -o-transition: none;
-	        transition: none;
-}
-
-
-/*********************************************
- * NO TRANSITION
- *********************************************/
-
-.reveal .slides section[data-transition=none],
-.reveal.none .slides section {
-	-webkit-transform: none;
-	   -moz-transform: none;
-	    -ms-transform: none;
-	     -o-transform: none;
-	        transform: none;
-
-	-webkit-transition: none;
-	   -moz-transition: none;
-	    -ms-transition: none;
-	     -o-transition: none;
-	        transition: none;
-}
-
-
-/*********************************************
- * OVERVIEW
- *********************************************/
-
-.reveal.overview .slides {
-	-webkit-perspective-origin: 0% 0%;
-	   -moz-perspective-origin: 0% 0%;
-	    -ms-perspective-origin: 0% 0%;
-	        perspective-origin: 0% 0%;
-
-	-webkit-perspective: 700px;
-	   -moz-perspective: 700px;
-	    -ms-perspective: 700px;
-	        perspective: 700px;
-}
-
-.reveal.overview .slides section {
-	height: 600px;
-	top: -300px !important;
-	overflow: hidden;
-	opacity: 1 !important;
-	visibility: visible !important;
-	cursor: pointer;
-	background: rgba(0,0,0,0.1);
-}
-.reveal.overview .slides section .fragment {
-	opacity: 1;
-}
-.reveal.overview .slides section:after,
-.reveal.overview .slides section:before {
-	display: none !important;
-}
-.reveal.overview .slides section>section {
-	opacity: 1;
-	cursor: pointer;
-}
-	.reveal.overview .slides section:hover {
-		background: rgba(0,0,0,0.3);
-	}
-	.reveal.overview .slides section.present {
-		background: rgba(0,0,0,0.3);
-	}
-.reveal.overview .slides>section.stack {
-	padding: 0;
-	top: 0 !important;
-	background: none;
-	overflow: visible;
-}
-
-
-/*********************************************
- * PAUSED MODE
- *********************************************/
-
-.reveal .pause-overlay {
-	position: absolute;
-	top: 0;
-	left: 0;
-	width: 100%;
-	height: 100%;
-	background: black;
-	visibility: hidden;
-	opacity: 0;
-	z-index: 100;
-
-	-webkit-transition: all 1s ease;
-	   -moz-transition: all 1s ease;
-	    -ms-transition: all 1s ease;
-	     -o-transition: all 1s ease;
-	        transition: all 1s ease;
-}
-.reveal.paused .pause-overlay {
-	visibility: visible;
-	opacity: 1;
-}
-
-
-/*********************************************
- * FALLBACK
- *********************************************/
-
-.no-transforms {
-	overflow-y: auto;
-}
-
-.no-transforms .reveal .slides {
-	position: relative;
-	width: 80%;
-	height: auto !important;
-	top: 0;
-	left: 50%;
-	margin: 0;
-	text-align: center;
-}
-
-.no-transforms .reveal .controls,
-.no-transforms .reveal .progress {
-	display: none !important;
-}
-
-.no-transforms .reveal .slides section {
-	display: block !important;
-	opacity: 1 !important;
-	position: relative !important;
-	height: auto;
-	min-height: auto;
-	top: 0;
-	left: -50%;
-	margin: 70px 0;
-
-	-webkit-transform: none;
-	   -moz-transform: none;
-	    -ms-transform: none;
-	     -o-transform: none;
-	        transform: none;
-}
-
-.no-transforms .reveal .slides section section {
-	left: 0;
-}
-
-.reveal .no-transition,
-.reveal .no-transition * {
-	-webkit-transition: none !important;
-	   -moz-transition: none !important;
-	    -ms-transition: none !important;
-	     -o-transition: none !important;
-	        transition: none !important;
-}
-
-
-/*********************************************
- * BACKGROUND STATES [DEPRECATED]
- *********************************************/
-
-.reveal .state-background {
-	position: absolute;
-	width: 100%;
-	height: 100%;
-	background: rgba( 0, 0, 0, 0 );
-
-	-webkit-transition: background 800ms ease;
-	   -moz-transition: background 800ms ease;
-	    -ms-transition: background 800ms ease;
-	     -o-transition: background 800ms ease;
-	        transition: background 800ms ease;
-}
-.alert .reveal .state-background {
-	background: rgba( 200, 50, 30, 0.6 );
-}
-.soothe .reveal .state-background {
-	background: rgba( 50, 200, 90, 0.4 );
-}
-.blackout .reveal .state-background {
-	background: rgba( 0, 0, 0, 0.6 );
-}
-.whiteout .reveal .state-background {
-	background: rgba( 255, 255, 255, 0.6 );
-}
-.cobalt .reveal .state-background {
-	background: rgba( 22, 152, 213, 0.6 );
-}
-.mint .reveal .state-background {
-	background: rgba( 22, 213, 75, 0.6 );
-}
-.submerge .reveal .state-background {
-	background: rgba( 12, 25, 77, 0.6);
-}
-.lila .reveal .state-background {
-	background: rgba( 180, 50, 140, 0.6 );
-}
-.sunset .reveal .state-background {
-	background: rgba( 255, 122, 0, 0.6 );
-}
-
-
-/*********************************************
- * PER-SLIDE BACKGROUNDS
- *********************************************/
-
-.reveal>.backgrounds {
-	position: absolute;
-	width: 100%;
-	height: 100%;
-
-	-webkit-perspective: 600px;
-	   -moz-perspective: 600px;
-	    -ms-perspective: 600px;
-	        perspective: 600px;
-}
-	.reveal .slide-background {
-		position: absolute;
-		width: 100%;
-		height: 100%;
-		opacity: 0;
-		visibility: hidden;
-
-		background-color: rgba( 0, 0, 0, 0 );
-		background-position: 50% 50%;
-		background-repeat: no-repeat;
-		background-size: cover;
-
-		-webkit-transition: all 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-		   -moz-transition: all 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-		    -ms-transition: all 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-		     -o-transition: all 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-		        transition: all 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-	}
-	.reveal .slide-background.present {
-		opacity: 1;
-		visibility: visible;
-	}
-
-	.print-pdf .reveal .slide-background {
-		opacity: 1 !important;
-		visibility: visible !important;
-	}
-
-/* Immediate transition style */
-.reveal[data-background-transition=none]>.backgrounds .slide-background,
-.reveal>.backgrounds .slide-background[data-background-transition=none] {
-	-webkit-transition: none;
-	   -moz-transition: none;
-	    -ms-transition: none;
-	     -o-transition: none;
-	        transition: none;
-}
-
-/* 2D slide */
-.reveal[data-background-transition=slide]>.backgrounds .slide-background,
-.reveal>.backgrounds .slide-background[data-background-transition=slide] {
-	opacity: 1;
-
-	-webkit-backface-visibility: hidden;
-	   -moz-backface-visibility: hidden;
-	    -ms-backface-visibility: hidden;
-	        backface-visibility: hidden;
-}
-	.reveal[data-background-transition=slide]>.backgrounds .slide-background.past,
-	.reveal>.backgrounds .slide-background.past[data-background-transition=slide] {
-		-webkit-transform: translate(-100%, 0);
-		   -moz-transform: translate(-100%, 0);
-		    -ms-transform: translate(-100%, 0);
-		     -o-transform: translate(-100%, 0);
-		        transform: translate(-100%, 0);
-	}
-	.reveal[data-background-transition=slide]>.backgrounds .slide-background.future,
-	.reveal>.backgrounds .slide-background.future[data-background-transition=slide] {
-		-webkit-transform: translate(100%, 0);
-		   -moz-transform: translate(100%, 0);
-		    -ms-transform: translate(100%, 0);
-		     -o-transform: translate(100%, 0);
-		        transform: translate(100%, 0);
-	}
-
-	.reveal[data-background-transition=slide]>.backgrounds .slide-background>.slide-background.past,
-	.reveal>.backgrounds .slide-background>.slide-background.past[data-background-transition=slide] {
-		-webkit-transform: translate(0, -100%);
-		   -moz-transform: translate(0, -100%);
-		    -ms-transform: translate(0, -100%);
-		     -o-transform: translate(0, -100%);
-		        transform: translate(0, -100%);
-	}
-	.reveal[data-background-transition=slide]>.backgrounds .slide-background>.slide-background.future,
-	.reveal>.backgrounds .slide-background>.slide-background.future[data-background-transition=slide] {
-		-webkit-transform: translate(0, 100%);
-		   -moz-transform: translate(0, 100%);
-		    -ms-transform: translate(0, 100%);
-		     -o-transform: translate(0, 100%);
-		        transform: translate(0, 100%);
-	}
-
-
-/* Convex */
-.reveal[data-background-transition=convex]>.backgrounds .slide-background.past,
-.reveal>.backgrounds .slide-background.past[data-background-transition=convex] {
-	opacity: 0;
-
-	-webkit-transform: translate3d(-100%, 0, 0) rotateY(-90deg) translate3d(-100%, 0, 0);
-	   -moz-transform: translate3d(-100%, 0, 0) rotateY(-90deg) translate3d(-100%, 0, 0);
-	    -ms-transform: translate3d(-100%, 0, 0) rotateY(-90deg) translate3d(-100%, 0, 0);
-	        transform: translate3d(-100%, 0, 0) rotateY(-90deg) translate3d(-100%, 0, 0);
-}
-.reveal[data-background-transition=convex]>.backgrounds .slide-background.future,
-.reveal>.backgrounds .slide-background.future[data-background-transition=convex] {
-	opacity: 0;
-
-	-webkit-transform: translate3d(100%, 0, 0) rotateY(90deg) translate3d(100%, 0, 0);
-	   -moz-transform: translate3d(100%, 0, 0) rotateY(90deg) translate3d(100%, 0, 0);
-	    -ms-transform: translate3d(100%, 0, 0) rotateY(90deg) translate3d(100%, 0, 0);
-	        transform: translate3d(100%, 0, 0) rotateY(90deg) translate3d(100%, 0, 0);
-}
-
-.reveal[data-background-transition=convex]>.backgrounds .slide-background>.slide-background.past,
-.reveal>.backgrounds .slide-background>.slide-background.past[data-background-transition=convex] {
-	opacity: 0;
-
-	-webkit-transform: translate3d(0, -100%, 0) rotateX(90deg) translate3d(0, -100%, 0);
-	   -moz-transform: translate3d(0, -100%, 0) rotateX(90deg) translate3d(0, -100%, 0);
-	    -ms-transform: translate3d(0, -100%, 0) rotateX(90deg) translate3d(0, -100%, 0);
-	        transform: translate3d(0, -100%, 0) rotateX(90deg) translate3d(0, -100%, 0);
-}
-.reveal[data-background-transition=convex]>.backgrounds .slide-background>.slide-background.future,
-.reveal>.backgrounds .slide-background>.slide-background.future[data-background-transition=convex] {
-	opacity: 0;
-
-	-webkit-transform: translate3d(0, 100%, 0) rotateX(-90deg) translate3d(0, 100%, 0);
-	   -moz-transform: translate3d(0, 100%, 0) rotateX(-90deg) translate3d(0, 100%, 0);
-	    -ms-transform: translate3d(0, 100%, 0) rotateX(-90deg) translate3d(0, 100%, 0);
-	        transform: translate3d(0, 100%, 0) rotateX(-90deg) translate3d(0, 100%, 0);
-}
-
-
-/* Concave */
-.reveal[data-background-transition=concave]>.backgrounds .slide-background.past,
-.reveal>.backgrounds .slide-background.past[data-background-transition=concave] {
-	opacity: 0;
-
-	-webkit-transform: translate3d(-100%, 0, 0) rotateY(90deg) translate3d(-100%, 0, 0);
-	   -moz-transform: translate3d(-100%, 0, 0) rotateY(90deg) translate3d(-100%, 0, 0);
-	    -ms-transform: translate3d(-100%, 0, 0) rotateY(90deg) translate3d(-100%, 0, 0);
-	        transform: translate3d(-100%, 0, 0) rotateY(90deg) translate3d(-100%, 0, 0);
-}
-.reveal[data-background-transition=concave]>.backgrounds .slide-background.future,
-.reveal>.backgrounds .slide-background.future[data-background-transition=concave] {
-	opacity: 0;
-
-	-webkit-transform: translate3d(100%, 0, 0) rotateY(-90deg) translate3d(100%, 0, 0);
-	   -moz-transform: translate3d(100%, 0, 0) rotateY(-90deg) translate3d(100%, 0, 0);
-	    -ms-transform: translate3d(100%, 0, 0) rotateY(-90deg) translate3d(100%, 0, 0);
-	        transform: translate3d(100%, 0, 0) rotateY(-90deg) translate3d(100%, 0, 0);
-}
-
-.reveal[data-background-transition=concave]>.backgrounds .slide-background>.slide-background.past,
-.reveal>.backgrounds .slide-background>.slide-background.past[data-background-transition=concave] {
-	opacity: 0;
-
-	-webkit-transform: translate3d(0, -100%, 0) rotateX(-90deg) translate3d(0, -100%, 0);
-	   -moz-transform: translate3d(0, -100%, 0) rotateX(-90deg) translate3d(0, -100%, 0);
-	    -ms-transform: translate3d(0, -100%, 0) rotateX(-90deg) translate3d(0, -100%, 0);
-	        transform: translate3d(0, -100%, 0) rotateX(-90deg) translate3d(0, -100%, 0);
-}
-.reveal[data-background-transition=concave]>.backgrounds .slide-background>.slide-background.future,
-.reveal>.backgrounds .slide-background>.slide-background.future[data-background-transition=concave] {
-	opacity: 0;
-
-	-webkit-transform: translate3d(0, 100%, 0) rotateX(90deg) translate3d(0, 100%, 0);
-	   -moz-transform: translate3d(0, 100%, 0) rotateX(90deg) translate3d(0, 100%, 0);
-	    -ms-transform: translate3d(0, 100%, 0) rotateX(90deg) translate3d(0, 100%, 0);
-	        transform: translate3d(0, 100%, 0) rotateX(90deg) translate3d(0, 100%, 0);
-}
-
-/* Zoom */
-.reveal[data-background-transition=zoom]>.backgrounds .slide-background,
-.reveal>.backgrounds .slide-background[data-background-transition=zoom] {
-	-webkit-transition-timing-function: ease;
-	   -moz-transition-timing-function: ease;
-	    -ms-transition-timing-function: ease;
-	     -o-transition-timing-function: ease;
-	        transition-timing-function: ease;
-}
-
-.reveal[data-background-transition=zoom]>.backgrounds .slide-background.past,
-.reveal>.backgrounds .slide-background.past[data-background-transition=zoom] {
-	opacity: 0;
-	visibility: hidden;
-
-	-webkit-transform: scale(16);
-	   -moz-transform: scale(16);
-	    -ms-transform: scale(16);
-	     -o-transform: scale(16);
-	        transform: scale(16);
-}
-.reveal[data-background-transition=zoom]>.backgrounds .slide-background.future,
-.reveal>.backgrounds .slide-background.future[data-background-transition=zoom] {
-	opacity: 0;
-	visibility: hidden;
-
-	-webkit-transform: scale(0.2);
-	   -moz-transform: scale(0.2);
-	    -ms-transform: scale(0.2);
-	     -o-transform: scale(0.2);
-	        transform: scale(0.2);
-}
-
-.reveal[data-background-transition=zoom]>.backgrounds .slide-background>.slide-background.past,
-.reveal>.backgrounds .slide-background>.slide-background.past[data-background-transition=zoom] {
-	opacity: 0;
-		visibility: hidden;
-
-		-webkit-transform: scale(16);
-		   -moz-transform: scale(16);
-		    -ms-transform: scale(16);
-		     -o-transform: scale(16);
-		        transform: scale(16);
-}
-.reveal[data-background-transition=zoom]>.backgrounds .slide-background>.slide-background.future,
-.reveal>.backgrounds .slide-background>.slide-background.future[data-background-transition=zoom] {
-	opacity: 0;
-	visibility: hidden;
-
-	-webkit-transform: scale(0.2);
-	   -moz-transform: scale(0.2);
-	    -ms-transform: scale(0.2);
-	     -o-transform: scale(0.2);
-	        transform: scale(0.2);
-}
-
-
-/* Global transition speed settings */
-.reveal[data-transition-speed="fast"]>.backgrounds .slide-background {
-	-webkit-transition-duration: 400ms;
-	   -moz-transition-duration: 400ms;
-	    -ms-transition-duration: 400ms;
-	        transition-duration: 400ms;
-}
-.reveal[data-transition-speed="slow"]>.backgrounds .slide-background {
-	-webkit-transition-duration: 1200ms;
-	   -moz-transition-duration: 1200ms;
-	    -ms-transition-duration: 1200ms;
-	        transition-duration: 1200ms;
-}
-
-
-/*********************************************
- * RTL SUPPORT
- *********************************************/
-
-.reveal.rtl .slides,
-.reveal.rtl .slides h1,
-.reveal.rtl .slides h2,
-.reveal.rtl .slides h3,
-.reveal.rtl .slides h4,
-.reveal.rtl .slides h5,
-.reveal.rtl .slides h6 {
-	direction: rtl;
-	font-family: sans-serif;
-}
-
-.reveal.rtl pre,
-.reveal.rtl code {
-	direction: ltr;
-}
-
-.reveal.rtl ol,
-.reveal.rtl ul {
-	text-align: right;
-}
-
-.reveal.rtl .progress span {
-	float: right
-}
-
-/*********************************************
- * PARALLAX BACKGROUND
- *********************************************/
-
-.reveal.has-parallax-background .backgrounds {
-	-webkit-transition: all 0.8s ease;
-	   -moz-transition: all 0.8s ease;
-	    -ms-transition: all 0.8s ease;
-	        transition: all 0.8s ease;
-}
-
-/* Global transition speed settings */
-.reveal.has-parallax-background[data-transition-speed="fast"] .backgrounds {
-	-webkit-transition-duration: 400ms;
-	   -moz-transition-duration: 400ms;
-	    -ms-transition-duration: 400ms;
-	        transition-duration: 400ms;
-}
-.reveal.has-parallax-background[data-transition-speed="slow"] .backgrounds {
-	-webkit-transition-duration: 1200ms;
-	   -moz-transition-duration: 1200ms;
-	    -ms-transition-duration: 1200ms;
-	        transition-duration: 1200ms;
-}
-
-
-/*********************************************
- * LINK PREVIEW OVERLAY
- *********************************************/
-
- .reveal .preview-link-overlay {
- 	position: absolute;
- 	top: 0;
- 	left: 0;
- 	width: 100%;
- 	height: 100%;
- 	z-index: 1000;
- 	background: rgba( 0, 0, 0, 0.9 );
- 	opacity: 0;
- 	visibility: hidden;
-
- 	-webkit-transition: all 0.3s ease;
- 	   -moz-transition: all 0.3s ease;
- 	    -ms-transition: all 0.3s ease;
- 	        transition: all 0.3s ease;
- }
- 	.reveal .preview-link-overlay.visible {
- 		opacity: 1;
- 		visibility: visible;
- 	}
-
- 	.reveal .preview-link-overlay .spinner {
- 		position: absolute;
- 		display: block;
- 		top: 50%;
- 		left: 50%;
- 		width: 32px;
- 		height: 32px;
- 		margin: -16px 0 0 -16px;
- 		z-index: 10;
- 		background-image: url(data:image/gif;base64,R0lGODlhIAAgAPMAAJmZmf%2F%2F%2F6%2Bvr8nJybW1tcDAwOjo6Nvb26ioqKOjo7Ozs%2FLy8vz8%2FAAAAAAAAAAAACH%2FC05FVFNDQVBFMi4wAwEAAAAh%2FhpDcmVhdGVkIHdpdGggYWpheGxvYWQuaW5mbwAh%2BQQJCgAAACwAAAAAIAAgAAAE5xDISWlhperN52JLhSSdRgwVo1ICQZRUsiwHpTJT4iowNS8vyW2icCF6k8HMMBkCEDskxTBDAZwuAkkqIfxIQyhBQBFvAQSDITM5VDW6XNE4KagNh6Bgwe60smQUB3d4Rz1ZBApnFASDd0hihh12BkE9kjAJVlycXIg7CQIFA6SlnJ87paqbSKiKoqusnbMdmDC2tXQlkUhziYtyWTxIfy6BE8WJt5YJvpJivxNaGmLHT0VnOgSYf0dZXS7APdp [...]
-
- 		visibility: visible;
- 		opacity: 0.6;
-
- 		-webkit-transition: all 0.3s ease;
- 		   -moz-transition: all 0.3s ease;
- 		    -ms-transition: all 0.3s ease;
- 		        transition: all 0.3s ease;
- 	}
-
- 	.reveal .preview-link-overlay header {
- 		position: absolute;
- 		left: 0;
- 		top: 0;
- 		width: 100%;
- 		height: 40px;
- 		z-index: 2;
- 		border-bottom: 1px solid #222;
- 	}
- 		.reveal .preview-link-overlay header a {
- 			display: inline-block;
- 			width: 40px;
- 			height: 40px;
- 			padding: 0 10px;
- 			float: right;
- 			opacity: 0.6;
-
- 			box-sizing: border-box;
- 		}
- 			.reveal .preview-link-overlay header a:hover {
- 				opacity: 1;
- 			}
- 			.reveal .preview-link-overlay header a .icon {
- 				display: inline-block;
- 				width: 20px;
- 				height: 20px;
-
- 				background-position: 50% 50%;
- 				background-size: 100%;
- 				background-repeat: no-repeat;
- 			}
- 			.reveal .preview-link-overlay header a.close .icon {
- 				background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAABkklEQVRYR8WX4VHDMAxG6wnoJrABZQPYBCaBTWAD2g1gE5gg6OOsXuxIlr40d81dfrSJ9V4c2VLK7spHuTJ/5wpM07QXuXc5X0opX2tEJcadjHuV80li/FgxTIEK/5QBCICBD6xEhSMGHgQPgBgLiYVAB1dpSqKDawxTohFw4JSEA3clzgIBPCURwE2JucBR7rhPJJv5OpJwDX+SfDjgx1wACQeJG1aChP9K/IMmdZ8DtESV1WyP3Bt4MwM6sj4NMxMYiqUWHQu4KYA/SYkIjOsm3BXYWMKFDwU2khjCQ4ELJUJ4SmClRArOCmSXGuKma0fYD5CbzHxFpCSGAhfAVSSUGDUk2BWZaff2g6GE15BsBQ9nwmpIGDiyHQddwNTMKkbZaf9fajX [...]
- 			}
- 			.reveal .preview-link-overlay header a.external .icon {
- 				background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAcElEQVRYR+2WSQoAIQwEzf8f7XiOMkUQxUPlGkM3hVmiQfQR9GYnH1SsAQlI4DiBqkCMoNb9y2e90IAEJPAcgdznU9+engMaeJ7Azh5Y1U67gAho4DqBqmB1buAf0MB1AlVBek83ZPkmJMGc1wAR+AAqod/B97TRpQAAAABJRU5ErkJggg==);
- 			}
-
- 	.reveal .preview-link-overlay .viewport {
- 		position: absolute;
- 		top: 40px;
- 		right: 0;
- 		bottom: 0;
- 		left: 0;
- 	}
-
- 	.reveal .preview-link-overlay .viewport iframe {
- 		width: 100%;
- 		height: 100%;
- 		max-width: 100%;
- 		max-height: 100%;
- 		border: 0;
-
- 		opacity: 0;
- 		visibility: hidden;
-
- 		-webkit-transition: all 0.3s ease;
- 		   -moz-transition: all 0.3s ease;
- 		    -ms-transition: all 0.3s ease;
- 		        transition: all 0.3s ease;
- 	}
-
- 	.reveal .preview-link-overlay.loaded .viewport iframe {
- 		opacity: 1;
- 		visibility: visible;
- 	}
-
- 	.reveal .preview-link-overlay.loaded .spinner {
- 		opacity: 0;
- 		visibility: hidden;
-
- 		-webkit-transform: scale(0.2);
- 		   -moz-transform: scale(0.2);
- 		    -ms-transform: scale(0.2);
- 		        transform: scale(0.2);
- 	}
-
-
-
-/*********************************************
- * PLAYBACK COMPONENT
- *********************************************/
-
-.reveal .playback {
-	position: fixed;
-	left: 15px;
-	bottom: 15px;
-	z-index: 30;
-	cursor: pointer;
-
-	-webkit-transition: all 400ms ease;
-	   -moz-transition: all 400ms ease;
-	    -ms-transition: all 400ms ease;
-	        transition: all 400ms ease;
-}
-
-.reveal.overview .playback {
-	opacity: 0;
-	visibility: hidden;
-}
-
-
-/*********************************************
- * ROLLING LINKS
- *********************************************/
-
-.reveal .roll {
-	display: inline-block;
-	line-height: 1.2;
-	overflow: hidden;
-
-	vertical-align: top;
-
-	-webkit-perspective: 400px;
-	   -moz-perspective: 400px;
-	    -ms-perspective: 400px;
-	        perspective: 400px;
-
-	-webkit-perspective-origin: 50% 50%;
-	   -moz-perspective-origin: 50% 50%;
-	    -ms-perspective-origin: 50% 50%;
-	        perspective-origin: 50% 50%;
-}
-	.reveal .roll:hover {
-		background: none;
-		text-shadow: none;
-	}
-.reveal .roll span {
-	display: block;
-	position: relative;
-	padding: 0 2px;
-
-	pointer-events: none;
-
-	-webkit-transition: all 400ms ease;
-	   -moz-transition: all 400ms ease;
-	    -ms-transition: all 400ms ease;
-	        transition: all 400ms ease;
-
-	-webkit-transform-origin: 50% 0%;
-	   -moz-transform-origin: 50% 0%;
-	    -ms-transform-origin: 50% 0%;
-	        transform-origin: 50% 0%;
-
-	-webkit-transform-style: preserve-3d;
-	   -moz-transform-style: preserve-3d;
-	    -ms-transform-style: preserve-3d;
-	        transform-style: preserve-3d;
-
-	-webkit-backface-visibility: hidden;
-	   -moz-backface-visibility: hidden;
-	        backface-visibility: hidden;
-}
-	.reveal .roll:hover span {
-	    background: rgba(0,0,0,0.5);
-
-	    -webkit-transform: translate3d( 0px, 0px, -45px ) rotateX( 90deg );
-	       -moz-transform: translate3d( 0px, 0px, -45px ) rotateX( 90deg );
-	        -ms-transform: translate3d( 0px, 0px, -45px ) rotateX( 90deg );
-	            transform: translate3d( 0px, 0px, -45px ) rotateX( 90deg );
-	}
-.reveal .roll span:after {
-	content: attr(data-title);
-
-	display: block;
-	position: absolute;
-	left: 0;
-	top: 0;
-	padding: 0 2px;
-
-	-webkit-backface-visibility: hidden;
-	   -moz-backface-visibility: hidden;
-	        backface-visibility: hidden;
-
-	-webkit-transform-origin: 50% 0%;
-	   -moz-transform-origin: 50% 0%;
-	    -ms-transform-origin: 50% 0%;
-	        transform-origin: 50% 0%;
-
-	-webkit-transform: translate3d( 0px, 110%, 0px ) rotateX( -90deg );
-	   -moz-transform: translate3d( 0px, 110%, 0px ) rotateX( -90deg );
-	    -ms-transform: translate3d( 0px, 110%, 0px ) rotateX( -90deg );
-	        transform: translate3d( 0px, 110%, 0px ) rotateX( -90deg );
-}
-
-
-/*********************************************
- * SPEAKER NOTES
- *********************************************/
-
-.reveal aside.notes {
-	display: none;
-}
-
-
-/*********************************************
- * ZOOM PLUGIN
- *********************************************/
-
-.zoomed .reveal *,
-.zoomed .reveal *:before,
-.zoomed .reveal *:after {
-	-webkit-transform: none !important;
-	   -moz-transform: none !important;
-	    -ms-transform: none !important;
-	        transform: none !important;
-
-	-webkit-backface-visibility: visible !important;
-	   -moz-backface-visibility: visible !important;
-	    -ms-backface-visibility: visible !important;
-	        backface-visibility: visible !important;
-}
-
-.zoomed .reveal .progress,
-.zoomed .reveal .controls {
-	opacity: 0;
-}
-
-.zoomed .reveal .roll span {
-	background: none;
-}
-
-.zoomed .reveal .roll span:after {
-	visibility: hidden;
-}
-
-
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/reveal.min.css b/uflacs-merge-into-ffc/doc/roadmap/css/reveal.min.css
deleted file mode 100644
index 6808992..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/reveal.min.css
+++ /dev/null
@@ -1,7 +0,0 @@
- at charset "UTF-8";/*!
- * reveal.js
- * http://lab.hakim.se/reveal-js
- * MIT licensed
- *
- * Copyright (C) 2014 Hakim El Hattab, http://hakim.se
- */ html,body,.reveal div,.reveal span,.reveal applet,.reveal object,.reveal iframe,.reveal h1,.reveal h2,.reveal h3,.reveal h4,.reveal h5,.reveal h6,.reveal p,.reveal blockquote,.reveal pre,.reveal a,.reveal abbr,.reveal acronym,.reveal address,.reveal big,.reveal cite,.reveal code,.reveal del,.reveal dfn,.reveal em,.reveal img,.reveal ins,.reveal kbd,.reveal q,.reveal s,.reveal samp,.reveal small,.reveal strike,.reveal strong,.reveal sub,.reveal sup,.reveal tt,.reveal var,.reveal b,.re [...]
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/README.md b/uflacs-merge-into-ffc/doc/roadmap/css/theme/README.md
deleted file mode 100644
index 8237586..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-## Dependencies
-
-Themes are written using Sass to keep things modular and reduce the need for repeated selectors across files. Make sure that you have the reveal.js development environment including the Grunt dependencies installed before proceding: https://github.com/hakimel/reveal.js#full-setup
-
-You also need to install Ruby and then Sass (with `gem install sass`).
-
-## Creating a Theme
-
-To create your own theme, start by duplicating any ```.scss``` file in [/css/theme/source](https://github.com/hakimel/reveal.js/blob/master/css/theme/source) and adding it to the compilation list in the [Gruntfile](https://github.com/hakimel/reveal.js/blob/master/Gruntfile.js).
-
-Each theme file does four things in the following order:
-
-1. **Include [/css/theme/template/mixins.scss](https://github.com/hakimel/reveal.js/blob/master/css/theme/template/mixins.scss)**
-Shared utility functions.
-
-2. **Include [/css/theme/template/settings.scss](https://github.com/hakimel/reveal.js/blob/master/css/theme/template/settings.scss)**
-Declares a set of custom variables that the template file (step 4) expects. Can be overridden in step 3.
-
-3. **Override**
-This is where you override the default theme. Either by specifying variables (see [settings.scss](https://github.com/hakimel/reveal.js/blob/master/css/theme/template/settings.scss) for reference) or by adding full selectors with hardcoded styles.
-
-4. **Include [/css/theme/template/theme.scss](https://github.com/hakimel/reveal.js/blob/master/css/theme/template/theme.scss)**
-The template theme file which will generate final CSS output based on the currently defined variables.
-
-When you are done, run `grunt themes` to compile the Sass file to CSS and you are ready to use your new theme.
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/beige.css b/uflacs-merge-into-ffc/doc/roadmap/css/theme/beige.css
deleted file mode 100644
index 089cb7b..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/beige.css
+++ /dev/null
@@ -1,148 +0,0 @@
- at import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic);
-/**
- * Beige theme for reveal.js.
- *
- * Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
- */
- at font-face {
-  font-family: 'League Gothic';
-  src: url("../../lib/font/league_gothic-webfont.eot");
-  src: url("../../lib/font/league_gothic-webfont.eot?#iefix") format("embedded-opentype"), url("../../lib/font/league_gothic-webfont.woff") format("woff"), url("../../lib/font/league_gothic-webfont.ttf") format("truetype"), url("../../lib/font/league_gothic-webfont.svg#LeagueGothicRegular") format("svg");
-  font-weight: normal;
-  font-style: normal; }
-
-/*********************************************
- * GLOBAL STYLES
- *********************************************/
-body {
-  background: #f7f2d3;
-  background: -moz-radial-gradient(center, circle cover, white 0%, #f7f2d3 100%);
-  background: -webkit-gradient(radial, center center, 0px, center center, 100%, color-stop(0%, white), color-stop(100%, #f7f2d3));
-  background: -webkit-radial-gradient(center, circle cover, white 0%, #f7f2d3 100%);
-  background: -o-radial-gradient(center, circle cover, white 0%, #f7f2d3 100%);
-  background: -ms-radial-gradient(center, circle cover, white 0%, #f7f2d3 100%);
-  background: radial-gradient(center, circle cover, white 0%, #f7f2d3 100%);
-  background-color: #f7f3de; }
-
-.reveal {
-  font-family: "Lato", sans-serif;
-  font-size: 36px;
-  font-weight: normal;
-  letter-spacing: -0.02em;
-  color: #333333; }
-
-::selection {
-  color: white;
-  background: rgba(79, 64, 28, 0.99);
-  text-shadow: none; }
-
-/*********************************************
- * HEADERS
- *********************************************/
-.reveal h1,
-.reveal h2,
-.reveal h3,
-.reveal h4,
-.reveal h5,
-.reveal h6 {
-  margin: 0 0 20px 0;
-  color: #333333;
-  font-family: "League Gothic", Impact, sans-serif;
-  line-height: 0.9em;
-  letter-spacing: 0.02em;
-  text-transform: uppercase;
-  text-shadow: none; }
-
-.reveal h1 {
-  text-shadow: 0 1px 0 #cccccc, 0 2px 0 #c9c9c9, 0 3px 0 #bbbbbb, 0 4px 0 #b9b9b9, 0 5px 0 #aaaaaa, 0 6px 1px rgba(0, 0, 0, 0.1), 0 0 5px rgba(0, 0, 0, 0.1), 0 1px 3px rgba(0, 0, 0, 0.3), 0 3px 5px rgba(0, 0, 0, 0.2), 0 5px 10px rgba(0, 0, 0, 0.25), 0 20px 20px rgba(0, 0, 0, 0.15); }
-
-/*********************************************
- * LINKS
- *********************************************/
-.reveal a:not(.image) {
-  color: #8b743d;
-  text-decoration: none;
-  -webkit-transition: color .15s ease;
-  -moz-transition: color .15s ease;
-  -ms-transition: color .15s ease;
-  -o-transition: color .15s ease;
-  transition: color .15s ease; }
-
-.reveal a:not(.image):hover {
-  color: #c0a86e;
-  text-shadow: none;
-  border: none; }
-
-.reveal .roll span:after {
-  color: #fff;
-  background: #564826; }
-
-/*********************************************
- * IMAGES
- *********************************************/
-.reveal section img {
-  margin: 15px 0px;
-  background: rgba(255, 255, 255, 0.12);
-  border: 4px solid #333333;
-  box-shadow: 0 0 10px rgba(0, 0, 0, 0.15);
-  -webkit-transition: all .2s linear;
-  -moz-transition: all .2s linear;
-  -ms-transition: all .2s linear;
-  -o-transition: all .2s linear;
-  transition: all .2s linear; }
-
-.reveal a:hover img {
-  background: rgba(255, 255, 255, 0.2);
-  border-color: #8b743d;
-  box-shadow: 0 0 20px rgba(0, 0, 0, 0.55); }
-
-/*********************************************
- * NAVIGATION CONTROLS
- *********************************************/
-.reveal .controls div.navigate-left,
-.reveal .controls div.navigate-left.enabled {
-  border-right-color: #8b743d; }
-
-.reveal .controls div.navigate-right,
-.reveal .controls div.navigate-right.enabled {
-  border-left-color: #8b743d; }
-
-.reveal .controls div.navigate-up,
-.reveal .controls div.navigate-up.enabled {
-  border-bottom-color: #8b743d; }
-
-.reveal .controls div.navigate-down,
-.reveal .controls div.navigate-down.enabled {
-  border-top-color: #8b743d; }
-
-.reveal .controls div.navigate-left.enabled:hover {
-  border-right-color: #c0a86e; }
-
-.reveal .controls div.navigate-right.enabled:hover {
-  border-left-color: #c0a86e; }
-
-.reveal .controls div.navigate-up.enabled:hover {
-  border-bottom-color: #c0a86e; }
-
-.reveal .controls div.navigate-down.enabled:hover {
-  border-top-color: #c0a86e; }
-
-/*********************************************
- * PROGRESS BAR
- *********************************************/
-.reveal .progress {
-  background: rgba(0, 0, 0, 0.2); }
-
-.reveal .progress span {
-  background: #8b743d;
-  -webkit-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -moz-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -ms-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -o-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); }
-
-/*********************************************
- * SLIDE NUMBER
- *********************************************/
-.reveal .slide-number {
-  color: #8b743d; }
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/blood.css b/uflacs-merge-into-ffc/doc/roadmap/css/theme/blood.css
deleted file mode 100644
index 0aefdd9..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/blood.css
+++ /dev/null
@@ -1,175 +0,0 @@
- at import url(https://fonts.googleapis.com/css?family=Ubuntu:300,700,300italic,700italic);
-/**
- * Blood theme for reveal.js
- * Author: Walther http://github.com/Walther
- *
- * Designed to be used with highlight.js theme
- * "monokai_sublime.css" available from
- * https://github.com/isagalaev/highlight.js/
- *
- * For other themes, change $codeBackground accordingly.
- *
- */
-/*********************************************
- * GLOBAL STYLES
- *********************************************/
-body {
-  background: #222222;
-  background: -moz-radial-gradient(center, circle cover, #626262 0%, #222222 100%);
-  background: -webkit-gradient(radial, center center, 0px, center center, 100%, color-stop(0%, #626262), color-stop(100%, #222222));
-  background: -webkit-radial-gradient(center, circle cover, #626262 0%, #222222 100%);
-  background: -o-radial-gradient(center, circle cover, #626262 0%, #222222 100%);
-  background: -ms-radial-gradient(center, circle cover, #626262 0%, #222222 100%);
-  background: radial-gradient(center, circle cover, #626262 0%, #222222 100%);
-  background-color: #2b2b2b; }
-
-.reveal {
-  font-family: Ubuntu, "sans-serif";
-  font-size: 36px;
-  font-weight: normal;
-  letter-spacing: -0.02em;
-  color: #eeeeee; }
-
-::selection {
-  color: white;
-  background: #aa2233;
-  text-shadow: none; }
-
-/*********************************************
- * HEADERS
- *********************************************/
-.reveal h1,
-.reveal h2,
-.reveal h3,
-.reveal h4,
-.reveal h5,
-.reveal h6 {
-  margin: 0 0 20px 0;
-  color: #eeeeee;
-  font-family: Ubuntu, "sans-serif";
-  line-height: 0.9em;
-  letter-spacing: 0.02em;
-  text-transform: uppercase;
-  text-shadow: 2px 2px 2px #222222; }
-
-.reveal h1 {
-  text-shadow: 0 1px 0 #cccccc, 0 2px 0 #c9c9c9, 0 3px 0 #bbbbbb, 0 4px 0 #b9b9b9, 0 5px 0 #aaaaaa, 0 6px 1px rgba(0, 0, 0, 0.1), 0 0 5px rgba(0, 0, 0, 0.1), 0 1px 3px rgba(0, 0, 0, 0.3), 0 3px 5px rgba(0, 0, 0, 0.2), 0 5px 10px rgba(0, 0, 0, 0.25), 0 20px 20px rgba(0, 0, 0, 0.15); }
-
-/*********************************************
- * LINKS
- *********************************************/
-.reveal a:not(.image) {
-  color: #aa2233;
-  text-decoration: none;
-  -webkit-transition: color .15s ease;
-  -moz-transition: color .15s ease;
-  -ms-transition: color .15s ease;
-  -o-transition: color .15s ease;
-  transition: color .15s ease; }
-
-.reveal a:not(.image):hover {
-  color: #dd5566;
-  text-shadow: none;
-  border: none; }
-
-.reveal .roll span:after {
-  color: #fff;
-  background: #6a1520; }
-
-/*********************************************
- * IMAGES
- *********************************************/
-.reveal section img {
-  margin: 15px 0px;
-  background: rgba(255, 255, 255, 0.12);
-  border: 4px solid #eeeeee;
-  box-shadow: 0 0 10px rgba(0, 0, 0, 0.15);
-  -webkit-transition: all .2s linear;
-  -moz-transition: all .2s linear;
-  -ms-transition: all .2s linear;
-  -o-transition: all .2s linear;
-  transition: all .2s linear; }
-
-.reveal a:hover img {
-  background: rgba(255, 255, 255, 0.2);
-  border-color: #aa2233;
-  box-shadow: 0 0 20px rgba(0, 0, 0, 0.55); }
-
-/*********************************************
- * NAVIGATION CONTROLS
- *********************************************/
-.reveal .controls div.navigate-left,
-.reveal .controls div.navigate-left.enabled {
-  border-right-color: #aa2233; }
-
-.reveal .controls div.navigate-right,
-.reveal .controls div.navigate-right.enabled {
-  border-left-color: #aa2233; }
-
-.reveal .controls div.navigate-up,
-.reveal .controls div.navigate-up.enabled {
-  border-bottom-color: #aa2233; }
-
-.reveal .controls div.navigate-down,
-.reveal .controls div.navigate-down.enabled {
-  border-top-color: #aa2233; }
-
-.reveal .controls div.navigate-left.enabled:hover {
-  border-right-color: #dd5566; }
-
-.reveal .controls div.navigate-right.enabled:hover {
-  border-left-color: #dd5566; }
-
-.reveal .controls div.navigate-up.enabled:hover {
-  border-bottom-color: #dd5566; }
-
-.reveal .controls div.navigate-down.enabled:hover {
-  border-top-color: #dd5566; }
-
-/*********************************************
- * PROGRESS BAR
- *********************************************/
-.reveal .progress {
-  background: rgba(0, 0, 0, 0.2); }
-
-.reveal .progress span {
-  background: #aa2233;
-  -webkit-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -moz-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -ms-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -o-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); }
-
-/*********************************************
- * SLIDE NUMBER
- *********************************************/
-.reveal .slide-number {
-  color: #aa2233; }
-
-.reveal p {
-  font-weight: 300;
-  text-shadow: 1px 1px #222222; }
-
-.reveal h1,
-.reveal h2,
-.reveal h3,
-.reveal h4,
-.reveal h5,
-.reveal h6 {
-  font-weight: 700; }
-
-.reveal a:not(.image),
-.reveal a:not(.image):hover {
-  text-shadow: 2px 2px 2px #000; }
-
-.reveal small a:not(.image),
-.reveal small a:not(.image):hover {
-  text-shadow: 1px 1px 1px #000; }
-
-.reveal p code {
-  background-color: #23241f;
-  display: inline-block;
-  border-radius: 7px; }
-
-.reveal small code {
-  vertical-align: baseline; }
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/default.css b/uflacs-merge-into-ffc/doc/roadmap/css/theme/default.css
deleted file mode 100644
index a234861..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/default.css
+++ /dev/null
@@ -1,148 +0,0 @@
- at import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic);
-/**
- * Default theme for reveal.js.
- *
- * Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
- */
- at font-face {
-  font-family: 'League Gothic';
-  src: url("../../lib/font/league_gothic-webfont.eot");
-  src: url("../../lib/font/league_gothic-webfont.eot?#iefix") format("embedded-opentype"), url("../../lib/font/league_gothic-webfont.woff") format("woff"), url("../../lib/font/league_gothic-webfont.ttf") format("truetype"), url("../../lib/font/league_gothic-webfont.svg#LeagueGothicRegular") format("svg");
-  font-weight: normal;
-  font-style: normal; }
-
-/*********************************************
- * GLOBAL STYLES
- *********************************************/
-body {
-  background: #1c1e20;
-  background: -moz-radial-gradient(center, circle cover, #555a5f 0%, #1c1e20 100%);
-  background: -webkit-gradient(radial, center center, 0px, center center, 100%, color-stop(0%, #555a5f), color-stop(100%, #1c1e20));
-  background: -webkit-radial-gradient(center, circle cover, #555a5f 0%, #1c1e20 100%);
-  background: -o-radial-gradient(center, circle cover, #555a5f 0%, #1c1e20 100%);
-  background: -ms-radial-gradient(center, circle cover, #555a5f 0%, #1c1e20 100%);
-  background: radial-gradient(center, circle cover, #555a5f 0%, #1c1e20 100%);
-  background-color: #2b2b2b; }
-
-.reveal {
-  font-family: "Lato", sans-serif;
-  font-size: 36px;
-  font-weight: normal;
-  letter-spacing: -0.02em;
-  color: #eeeeee; }
-
-::selection {
-  color: white;
-  background: #ff5e99;
-  text-shadow: none; }
-
-/*********************************************
- * HEADERS
- *********************************************/
-.reveal h1,
-.reveal h2,
-.reveal h3,
-.reveal h4,
-.reveal h5,
-.reveal h6 {
-  margin: 0 0 20px 0;
-  color: #eeeeee;
-  font-family: "League Gothic", Impact, sans-serif;
-  line-height: 0.9em;
-  letter-spacing: 0.02em;
-  text-transform: uppercase;
-  text-shadow: 0px 0px 6px rgba(0, 0, 0, 0.2); }
-
-.reveal h1 {
-  text-shadow: 0 1px 0 #cccccc, 0 2px 0 #c9c9c9, 0 3px 0 #bbbbbb, 0 4px 0 #b9b9b9, 0 5px 0 #aaaaaa, 0 6px 1px rgba(0, 0, 0, 0.1), 0 0 5px rgba(0, 0, 0, 0.1), 0 1px 3px rgba(0, 0, 0, 0.3), 0 3px 5px rgba(0, 0, 0, 0.2), 0 5px 10px rgba(0, 0, 0, 0.25), 0 20px 20px rgba(0, 0, 0, 0.15); }
-
-/*********************************************
- * LINKS
- *********************************************/
-.reveal a:not(.image) {
-  color: #13daec;
-  text-decoration: none;
-  -webkit-transition: color .15s ease;
-  -moz-transition: color .15s ease;
-  -ms-transition: color .15s ease;
-  -o-transition: color .15s ease;
-  transition: color .15s ease; }
-
-.reveal a:not(.image):hover {
-  color: #71e9f4;
-  text-shadow: none;
-  border: none; }
-
-.reveal .roll span:after {
-  color: #fff;
-  background: #0d99a5; }
-
-/*********************************************
- * IMAGES
- *********************************************/
-.reveal section img {
-  margin: 15px 0px;
-  background: rgba(255, 255, 255, 0.12);
-  border: 4px solid #eeeeee;
-  box-shadow: 0 0 10px rgba(0, 0, 0, 0.15);
-  -webkit-transition: all .2s linear;
-  -moz-transition: all .2s linear;
-  -ms-transition: all .2s linear;
-  -o-transition: all .2s linear;
-  transition: all .2s linear; }
-
-.reveal a:hover img {
-  background: rgba(255, 255, 255, 0.2);
-  border-color: #13daec;
-  box-shadow: 0 0 20px rgba(0, 0, 0, 0.55); }
-
-/*********************************************
- * NAVIGATION CONTROLS
- *********************************************/
-.reveal .controls div.navigate-left,
-.reveal .controls div.navigate-left.enabled {
-  border-right-color: #13daec; }
-
-.reveal .controls div.navigate-right,
-.reveal .controls div.navigate-right.enabled {
-  border-left-color: #13daec; }
-
-.reveal .controls div.navigate-up,
-.reveal .controls div.navigate-up.enabled {
-  border-bottom-color: #13daec; }
-
-.reveal .controls div.navigate-down,
-.reveal .controls div.navigate-down.enabled {
-  border-top-color: #13daec; }
-
-.reveal .controls div.navigate-left.enabled:hover {
-  border-right-color: #71e9f4; }
-
-.reveal .controls div.navigate-right.enabled:hover {
-  border-left-color: #71e9f4; }
-
-.reveal .controls div.navigate-up.enabled:hover {
-  border-bottom-color: #71e9f4; }
-
-.reveal .controls div.navigate-down.enabled:hover {
-  border-top-color: #71e9f4; }
-
-/*********************************************
- * PROGRESS BAR
- *********************************************/
-.reveal .progress {
-  background: rgba(0, 0, 0, 0.2); }
-
-.reveal .progress span {
-  background: #13daec;
-  -webkit-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -moz-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -ms-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -o-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); }
-
-/*********************************************
- * SLIDE NUMBER
- *********************************************/
-.reveal .slide-number {
-  color: #13daec; }
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/moon.css b/uflacs-merge-into-ffc/doc/roadmap/css/theme/moon.css
deleted file mode 100644
index 3c15b00..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/moon.css
+++ /dev/null
@@ -1,148 +0,0 @@
- at import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic);
-/**
- * Solarized Dark theme for reveal.js.
- * Author: Achim Staebler
- */
- at font-face {
-  font-family: 'League Gothic';
-  src: url("../../lib/font/league_gothic-webfont.eot");
-  src: url("../../lib/font/league_gothic-webfont.eot?#iefix") format("embedded-opentype"), url("../../lib/font/league_gothic-webfont.woff") format("woff"), url("../../lib/font/league_gothic-webfont.ttf") format("truetype"), url("../../lib/font/league_gothic-webfont.svg#LeagueGothicRegular") format("svg");
-  font-weight: normal;
-  font-style: normal; }
-
-/**
- * Solarized colors by Ethan Schoonover
- */
-html * {
-  color-profile: sRGB;
-  rendering-intent: auto; }
-
-/*********************************************
- * GLOBAL STYLES
- *********************************************/
-body {
-  background: #002b36;
-  background-color: #002b36; }
-
-.reveal {
-  font-family: "Lato", sans-serif;
-  font-size: 36px;
-  font-weight: normal;
-  letter-spacing: -0.02em;
-  color: #93a1a1; }
-
-::selection {
-  color: white;
-  background: #d33682;
-  text-shadow: none; }
-
-/*********************************************
- * HEADERS
- *********************************************/
-.reveal h1,
-.reveal h2,
-.reveal h3,
-.reveal h4,
-.reveal h5,
-.reveal h6 {
-  margin: 0 0 20px 0;
-  color: #eee8d5;
-  font-family: "League Gothic", Impact, sans-serif;
-  line-height: 0.9em;
-  letter-spacing: 0.02em;
-  text-transform: uppercase;
-  text-shadow: none; }
-
-.reveal h1 {
-  text-shadow: 0px 0px 6px rgba(0, 0, 0, 0.2); }
-
-/*********************************************
- * LINKS
- *********************************************/
-.reveal a:not(.image) {
-  color: #268bd2;
-  text-decoration: none;
-  -webkit-transition: color .15s ease;
-  -moz-transition: color .15s ease;
-  -ms-transition: color .15s ease;
-  -o-transition: color .15s ease;
-  transition: color .15s ease; }
-
-.reveal a:not(.image):hover {
-  color: #78b9e6;
-  text-shadow: none;
-  border: none; }
-
-.reveal .roll span:after {
-  color: #fff;
-  background: #1a6091; }
-
-/*********************************************
- * IMAGES
- *********************************************/
-.reveal section img {
-  margin: 15px 0px;
-  background: rgba(255, 255, 255, 0.12);
-  border: 4px solid #93a1a1;
-  box-shadow: 0 0 10px rgba(0, 0, 0, 0.15);
-  -webkit-transition: all .2s linear;
-  -moz-transition: all .2s linear;
-  -ms-transition: all .2s linear;
-  -o-transition: all .2s linear;
-  transition: all .2s linear; }
-
-.reveal a:hover img {
-  background: rgba(255, 255, 255, 0.2);
-  border-color: #268bd2;
-  box-shadow: 0 0 20px rgba(0, 0, 0, 0.55); }
-
-/*********************************************
- * NAVIGATION CONTROLS
- *********************************************/
-.reveal .controls div.navigate-left,
-.reveal .controls div.navigate-left.enabled {
-  border-right-color: #268bd2; }
-
-.reveal .controls div.navigate-right,
-.reveal .controls div.navigate-right.enabled {
-  border-left-color: #268bd2; }
-
-.reveal .controls div.navigate-up,
-.reveal .controls div.navigate-up.enabled {
-  border-bottom-color: #268bd2; }
-
-.reveal .controls div.navigate-down,
-.reveal .controls div.navigate-down.enabled {
-  border-top-color: #268bd2; }
-
-.reveal .controls div.navigate-left.enabled:hover {
-  border-right-color: #78b9e6; }
-
-.reveal .controls div.navigate-right.enabled:hover {
-  border-left-color: #78b9e6; }
-
-.reveal .controls div.navigate-up.enabled:hover {
-  border-bottom-color: #78b9e6; }
-
-.reveal .controls div.navigate-down.enabled:hover {
-  border-top-color: #78b9e6; }
-
-/*********************************************
- * PROGRESS BAR
- *********************************************/
-.reveal .progress {
-  background: rgba(0, 0, 0, 0.2); }
-
-.reveal .progress span {
-  background: #268bd2;
-  -webkit-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -moz-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -ms-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -o-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); }
-
-/*********************************************
- * SLIDE NUMBER
- *********************************************/
-.reveal .slide-number {
-  color: #268bd2; }
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/night.css b/uflacs-merge-into-ffc/doc/roadmap/css/theme/night.css
deleted file mode 100644
index e8703a5..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/night.css
+++ /dev/null
@@ -1,136 +0,0 @@
- at import url(https://fonts.googleapis.com/css?family=Montserrat:700);
- at import url(https://fonts.googleapis.com/css?family=Open+Sans:400,700,400italic,700italic);
-/**
- * Black theme for reveal.js.
- *
- * Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
- */
-/*********************************************
- * GLOBAL STYLES
- *********************************************/
-body {
-  background: #111111;
-  background-color: #111111; }
-
-.reveal {
-  font-family: "Open Sans", sans-serif;
-  font-size: 30px;
-  font-weight: normal;
-  letter-spacing: -0.02em;
-  color: #eeeeee; }
-
-::selection {
-  color: white;
-  background: #e7ad52;
-  text-shadow: none; }
-
-/*********************************************
- * HEADERS
- *********************************************/
-.reveal h1,
-.reveal h2,
-.reveal h3,
-.reveal h4,
-.reveal h5,
-.reveal h6 {
-  margin: 0 0 20px 0;
-  color: #eeeeee;
-  font-family: "Montserrat", Impact, sans-serif;
-  line-height: 0.9em;
-  letter-spacing: -0.03em;
-  text-transform: none;
-  text-shadow: none; }
-
-.reveal h1 {
-  text-shadow: 0px 0px 6px rgba(0, 0, 0, 0.2); }
-
-/*********************************************
- * LINKS
- *********************************************/
-.reveal a:not(.image) {
-  color: #e7ad52;
-  text-decoration: none;
-  -webkit-transition: color .15s ease;
-  -moz-transition: color .15s ease;
-  -ms-transition: color .15s ease;
-  -o-transition: color .15s ease;
-  transition: color .15s ease; }
-
-.reveal a:not(.image):hover {
-  color: #f3d7ac;
-  text-shadow: none;
-  border: none; }
-
-.reveal .roll span:after {
-  color: #fff;
-  background: #d08a1d; }
-
-/*********************************************
- * IMAGES
- *********************************************/
-.reveal section img {
-  margin: 15px 0px;
-  background: rgba(255, 255, 255, 0.12);
-  border: 4px solid #eeeeee;
-  box-shadow: 0 0 10px rgba(0, 0, 0, 0.15);
-  -webkit-transition: all .2s linear;
-  -moz-transition: all .2s linear;
-  -ms-transition: all .2s linear;
-  -o-transition: all .2s linear;
-  transition: all .2s linear; }
-
-.reveal a:hover img {
-  background: rgba(255, 255, 255, 0.2);
-  border-color: #e7ad52;
-  box-shadow: 0 0 20px rgba(0, 0, 0, 0.55); }
-
-/*********************************************
- * NAVIGATION CONTROLS
- *********************************************/
-.reveal .controls div.navigate-left,
-.reveal .controls div.navigate-left.enabled {
-  border-right-color: #e7ad52; }
-
-.reveal .controls div.navigate-right,
-.reveal .controls div.navigate-right.enabled {
-  border-left-color: #e7ad52; }
-
-.reveal .controls div.navigate-up,
-.reveal .controls div.navigate-up.enabled {
-  border-bottom-color: #e7ad52; }
-
-.reveal .controls div.navigate-down,
-.reveal .controls div.navigate-down.enabled {
-  border-top-color: #e7ad52; }
-
-.reveal .controls div.navigate-left.enabled:hover {
-  border-right-color: #f3d7ac; }
-
-.reveal .controls div.navigate-right.enabled:hover {
-  border-left-color: #f3d7ac; }
-
-.reveal .controls div.navigate-up.enabled:hover {
-  border-bottom-color: #f3d7ac; }
-
-.reveal .controls div.navigate-down.enabled:hover {
-  border-top-color: #f3d7ac; }
-
-/*********************************************
- * PROGRESS BAR
- *********************************************/
-.reveal .progress {
-  background: rgba(0, 0, 0, 0.2); }
-
-.reveal .progress span {
-  background: #e7ad52;
-  -webkit-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -moz-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -ms-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -o-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); }
-
-/*********************************************
- * SLIDE NUMBER
- *********************************************/
-.reveal .slide-number {
-  color: #e7ad52; }
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/serif.css b/uflacs-merge-into-ffc/doc/roadmap/css/theme/serif.css
deleted file mode 100644
index d5f0ad8..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/serif.css
+++ /dev/null
@@ -1,138 +0,0 @@
-/**
- * A simple theme for reveal.js presentations, similar
- * to the default theme. The accent color is brown.
- *
- * This theme is Copyright (C) 2012-2013 Owen Versteeg, http://owenversteeg.com - it is MIT licensed.
- */
-.reveal a:not(.image) {
-  line-height: 1.3em; }
-
-/*********************************************
- * GLOBAL STYLES
- *********************************************/
-body {
-  background: #f0f1eb;
-  background-color: #f0f1eb; }
-
-.reveal {
-  font-family: "Palatino Linotype", "Book Antiqua", Palatino, FreeSerif, serif;
-  font-size: 36px;
-  font-weight: normal;
-  letter-spacing: -0.02em;
-  color: black; }
-
-::selection {
-  color: white;
-  background: #26351c;
-  text-shadow: none; }
-
-/*********************************************
- * HEADERS
- *********************************************/
-.reveal h1,
-.reveal h2,
-.reveal h3,
-.reveal h4,
-.reveal h5,
-.reveal h6 {
-  margin: 0 0 20px 0;
-  color: #383d3d;
-  font-family: "Palatino Linotype", "Book Antiqua", Palatino, FreeSerif, serif;
-  line-height: 0.9em;
-  letter-spacing: 0.02em;
-  text-transform: none;
-  text-shadow: none; }
-
-.reveal h1 {
-  text-shadow: 0px 0px 6px rgba(0, 0, 0, 0.2); }
-
-/*********************************************
- * LINKS
- *********************************************/
-.reveal a:not(.image) {
-  color: #51483d;
-  text-decoration: none;
-  -webkit-transition: color .15s ease;
-  -moz-transition: color .15s ease;
-  -ms-transition: color .15s ease;
-  -o-transition: color .15s ease;
-  transition: color .15s ease; }
-
-.reveal a:not(.image):hover {
-  color: #8b7c69;
-  text-shadow: none;
-  border: none; }
-
-.reveal .roll span:after {
-  color: #fff;
-  background: #25211c; }
-
-/*********************************************
- * IMAGES
- *********************************************/
-.reveal section img {
-  margin: 15px 0px;
-  background: rgba(255, 255, 255, 0.12);
-  border: 4px solid black;
-  box-shadow: 0 0 10px rgba(0, 0, 0, 0.15);
-  -webkit-transition: all .2s linear;
-  -moz-transition: all .2s linear;
-  -ms-transition: all .2s linear;
-  -o-transition: all .2s linear;
-  transition: all .2s linear; }
-
-.reveal a:hover img {
-  background: rgba(255, 255, 255, 0.2);
-  border-color: #51483d;
-  box-shadow: 0 0 20px rgba(0, 0, 0, 0.55); }
-
-/*********************************************
- * NAVIGATION CONTROLS
- *********************************************/
-.reveal .controls div.navigate-left,
-.reveal .controls div.navigate-left.enabled {
-  border-right-color: #51483d; }
-
-.reveal .controls div.navigate-right,
-.reveal .controls div.navigate-right.enabled {
-  border-left-color: #51483d; }
-
-.reveal .controls div.navigate-up,
-.reveal .controls div.navigate-up.enabled {
-  border-bottom-color: #51483d; }
-
-.reveal .controls div.navigate-down,
-.reveal .controls div.navigate-down.enabled {
-  border-top-color: #51483d; }
-
-.reveal .controls div.navigate-left.enabled:hover {
-  border-right-color: #8b7c69; }
-
-.reveal .controls div.navigate-right.enabled:hover {
-  border-left-color: #8b7c69; }
-
-.reveal .controls div.navigate-up.enabled:hover {
-  border-bottom-color: #8b7c69; }
-
-.reveal .controls div.navigate-down.enabled:hover {
-  border-top-color: #8b7c69; }
-
-/*********************************************
- * PROGRESS BAR
- *********************************************/
-.reveal .progress {
-  background: rgba(0, 0, 0, 0.2); }
-
-.reveal .progress span {
-  background: #51483d;
-  -webkit-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -moz-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -ms-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -o-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); }
-
-/*********************************************
- * SLIDE NUMBER
- *********************************************/
-.reveal .slide-number {
-  color: #51483d; }
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/simple.css b/uflacs-merge-into-ffc/doc/roadmap/css/theme/simple.css
deleted file mode 100644
index 504606c..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/simple.css
+++ /dev/null
@@ -1,138 +0,0 @@
- at import url(https://fonts.googleapis.com/css?family=News+Cycle:400,700);
- at import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic);
-/**
- * A simple theme for reveal.js presentations, similar
- * to the default theme. The accent color is darkblue.
- *
- * This theme is Copyright (C) 2012 Owen Versteeg, https://github.com/StereotypicalApps. It is MIT licensed.
- * reveal.js is Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
- */
-/*********************************************
- * GLOBAL STYLES
- *********************************************/
-body {
-  background: white;
-  background-color: white; }
-
-.reveal {
-  font-family: "Lato", sans-serif;
-  font-size: 36px;
-  font-weight: normal;
-  letter-spacing: -0.02em;
-  color: black; }
-
-::selection {
-  color: white;
-  background: rgba(0, 0, 0, 0.99);
-  text-shadow: none; }
-
-/*********************************************
- * HEADERS
- *********************************************/
-.reveal h1,
-.reveal h2,
-.reveal h3,
-.reveal h4,
-.reveal h5,
-.reveal h6 {
-  margin: 0 0 20px 0;
-  color: black;
-  font-family: "News Cycle", Impact, sans-serif;
-  line-height: 0.9em;
-  letter-spacing: 0.02em;
-  text-transform: none;
-  text-shadow: none; }
-
-.reveal h1 {
-  text-shadow: 0px 0px 6px rgba(0, 0, 0, 0.2); }
-
-/*********************************************
- * LINKS
- *********************************************/
-.reveal a:not(.image) {
-  color: darkblue;
-  text-decoration: none;
-  -webkit-transition: color .15s ease;
-  -moz-transition: color .15s ease;
-  -ms-transition: color .15s ease;
-  -o-transition: color .15s ease;
-  transition: color .15s ease; }
-
-.reveal a:not(.image):hover {
-  color: #0000f1;
-  text-shadow: none;
-  border: none; }
-
-.reveal .roll span:after {
-  color: #fff;
-  background: #00003f; }
-
-/*********************************************
- * IMAGES
- *********************************************/
-.reveal section img {
-  margin: 15px 0px;
-  background: rgba(255, 255, 255, 0.12);
-  border: 4px solid black;
-  box-shadow: 0 0 10px rgba(0, 0, 0, 0.15);
-  -webkit-transition: all .2s linear;
-  -moz-transition: all .2s linear;
-  -ms-transition: all .2s linear;
-  -o-transition: all .2s linear;
-  transition: all .2s linear; }
-
-.reveal a:hover img {
-  background: rgba(255, 255, 255, 0.2);
-  border-color: darkblue;
-  box-shadow: 0 0 20px rgba(0, 0, 0, 0.55); }
-
-/*********************************************
- * NAVIGATION CONTROLS
- *********************************************/
-.reveal .controls div.navigate-left,
-.reveal .controls div.navigate-left.enabled {
-  border-right-color: darkblue; }
-
-.reveal .controls div.navigate-right,
-.reveal .controls div.navigate-right.enabled {
-  border-left-color: darkblue; }
-
-.reveal .controls div.navigate-up,
-.reveal .controls div.navigate-up.enabled {
-  border-bottom-color: darkblue; }
-
-.reveal .controls div.navigate-down,
-.reveal .controls div.navigate-down.enabled {
-  border-top-color: darkblue; }
-
-.reveal .controls div.navigate-left.enabled:hover {
-  border-right-color: #0000f1; }
-
-.reveal .controls div.navigate-right.enabled:hover {
-  border-left-color: #0000f1; }
-
-.reveal .controls div.navigate-up.enabled:hover {
-  border-bottom-color: #0000f1; }
-
-.reveal .controls div.navigate-down.enabled:hover {
-  border-top-color: #0000f1; }
-
-/*********************************************
- * PROGRESS BAR
- *********************************************/
-.reveal .progress {
-  background: rgba(0, 0, 0, 0.2); }
-
-.reveal .progress span {
-  background: darkblue;
-  -webkit-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -moz-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -ms-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -o-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); }
-
-/*********************************************
- * SLIDE NUMBER
- *********************************************/
-.reveal .slide-number {
-  color: darkblue; }
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/sky.css b/uflacs-merge-into-ffc/doc/roadmap/css/theme/sky.css
deleted file mode 100644
index 1a44760..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/sky.css
+++ /dev/null
@@ -1,145 +0,0 @@
- at import url(https://fonts.googleapis.com/css?family=Quicksand:400,700,400italic,700italic);
- at import url(https://fonts.googleapis.com/css?family=Open+Sans:400italic,700italic,400,700);
-/**
- * Sky theme for reveal.js.
- *
- * Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
- */
-.reveal a:not(.image) {
-  line-height: 1.3em; }
-
-/*********************************************
- * GLOBAL STYLES
- *********************************************/
-body {
-  background: #add9e4;
-  background: -moz-radial-gradient(center, circle cover, #f7fbfc 0%, #add9e4 100%);
-  background: -webkit-gradient(radial, center center, 0px, center center, 100%, color-stop(0%, #f7fbfc), color-stop(100%, #add9e4));
-  background: -webkit-radial-gradient(center, circle cover, #f7fbfc 0%, #add9e4 100%);
-  background: -o-radial-gradient(center, circle cover, #f7fbfc 0%, #add9e4 100%);
-  background: -ms-radial-gradient(center, circle cover, #f7fbfc 0%, #add9e4 100%);
-  background: radial-gradient(center, circle cover, #f7fbfc 0%, #add9e4 100%);
-  background-color: #f7fbfc; }
-
-.reveal {
-  font-family: "Open Sans", sans-serif;
-  font-size: 36px;
-  font-weight: normal;
-  letter-spacing: -0.02em;
-  color: #333333; }
-
-::selection {
-  color: white;
-  background: #134674;
-  text-shadow: none; }
-
-/*********************************************
- * HEADERS
- *********************************************/
-.reveal h1,
-.reveal h2,
-.reveal h3,
-.reveal h4,
-.reveal h5,
-.reveal h6 {
-  margin: 0 0 20px 0;
-  color: #333333;
-  font-family: "Quicksand", sans-serif;
-  line-height: 0.9em;
-  letter-spacing: -0.08em;
-  text-transform: uppercase;
-  text-shadow: none; }
-
-.reveal h1 {
-  text-shadow: 0px 0px 6px rgba(0, 0, 0, 0.2); }
-
-/*********************************************
- * LINKS
- *********************************************/
-.reveal a:not(.image) {
-  color: #3b759e;
-  text-decoration: none;
-  -webkit-transition: color .15s ease;
-  -moz-transition: color .15s ease;
-  -ms-transition: color .15s ease;
-  -o-transition: color .15s ease;
-  transition: color .15s ease; }
-
-.reveal a:not(.image):hover {
-  color: #74a7cb;
-  text-shadow: none;
-  border: none; }
-
-.reveal .roll span:after {
-  color: #fff;
-  background: #264c66; }
-
-/*********************************************
- * IMAGES
- *********************************************/
-.reveal section img {
-  margin: 15px 0px;
-  background: rgba(255, 255, 255, 0.12);
-  border: 4px solid #333333;
-  box-shadow: 0 0 10px rgba(0, 0, 0, 0.15);
-  -webkit-transition: all .2s linear;
-  -moz-transition: all .2s linear;
-  -ms-transition: all .2s linear;
-  -o-transition: all .2s linear;
-  transition: all .2s linear; }
-
-.reveal a:hover img {
-  background: rgba(255, 255, 255, 0.2);
-  border-color: #3b759e;
-  box-shadow: 0 0 20px rgba(0, 0, 0, 0.55); }
-
-/*********************************************
- * NAVIGATION CONTROLS
- *********************************************/
-.reveal .controls div.navigate-left,
-.reveal .controls div.navigate-left.enabled {
-  border-right-color: #3b759e; }
-
-.reveal .controls div.navigate-right,
-.reveal .controls div.navigate-right.enabled {
-  border-left-color: #3b759e; }
-
-.reveal .controls div.navigate-up,
-.reveal .controls div.navigate-up.enabled {
-  border-bottom-color: #3b759e; }
-
-.reveal .controls div.navigate-down,
-.reveal .controls div.navigate-down.enabled {
-  border-top-color: #3b759e; }
-
-.reveal .controls div.navigate-left.enabled:hover {
-  border-right-color: #74a7cb; }
-
-.reveal .controls div.navigate-right.enabled:hover {
-  border-left-color: #74a7cb; }
-
-.reveal .controls div.navigate-up.enabled:hover {
-  border-bottom-color: #74a7cb; }
-
-.reveal .controls div.navigate-down.enabled:hover {
-  border-top-color: #74a7cb; }
-
-/*********************************************
- * PROGRESS BAR
- *********************************************/
-.reveal .progress {
-  background: rgba(0, 0, 0, 0.2); }
-
-.reveal .progress span {
-  background: #3b759e;
-  -webkit-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -moz-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -ms-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -o-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); }
-
-/*********************************************
- * SLIDE NUMBER
- *********************************************/
-.reveal .slide-number {
-  color: #3b759e; }
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/solarized.css b/uflacs-merge-into-ffc/doc/roadmap/css/theme/solarized.css
deleted file mode 100644
index c8bff7c..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/solarized.css
+++ /dev/null
@@ -1,148 +0,0 @@
- at import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic);
-/**
- * Solarized Light theme for reveal.js.
- * Author: Achim Staebler
- */
- at font-face {
-  font-family: 'League Gothic';
-  src: url("../../lib/font/league_gothic-webfont.eot");
-  src: url("../../lib/font/league_gothic-webfont.eot?#iefix") format("embedded-opentype"), url("../../lib/font/league_gothic-webfont.woff") format("woff"), url("../../lib/font/league_gothic-webfont.ttf") format("truetype"), url("../../lib/font/league_gothic-webfont.svg#LeagueGothicRegular") format("svg");
-  font-weight: normal;
-  font-style: normal; }
-
-/**
- * Solarized colors by Ethan Schoonover
- */
-html * {
-  color-profile: sRGB;
-  rendering-intent: auto; }
-
-/*********************************************
- * GLOBAL STYLES
- *********************************************/
-body {
-  background: #fdf6e3;
-  background-color: #fdf6e3; }
-
-.reveal {
-  font-family: "Lato", sans-serif;
-  font-size: 36px;
-  font-weight: normal;
-  letter-spacing: -0.02em;
-  color: #657b83; }
-
-::selection {
-  color: white;
-  background: #d33682;
-  text-shadow: none; }
-
-/*********************************************
- * HEADERS
- *********************************************/
-.reveal h1,
-.reveal h2,
-.reveal h3,
-.reveal h4,
-.reveal h5,
-.reveal h6 {
-  margin: 0 0 20px 0;
-  color: #586e75;
-  font-family: "League Gothic", Impact, sans-serif;
-  line-height: 0.9em;
-  letter-spacing: 0.02em;
-  text-transform: uppercase;
-  text-shadow: none; }
-
-.reveal h1 {
-  text-shadow: 0px 0px 6px rgba(0, 0, 0, 0.2); }
-
-/*********************************************
- * LINKS
- *********************************************/
-.reveal a:not(.image) {
-  color: #268bd2;
-  text-decoration: none;
-  -webkit-transition: color .15s ease;
-  -moz-transition: color .15s ease;
-  -ms-transition: color .15s ease;
-  -o-transition: color .15s ease;
-  transition: color .15s ease; }
-
-.reveal a:not(.image):hover {
-  color: #78b9e6;
-  text-shadow: none;
-  border: none; }
-
-.reveal .roll span:after {
-  color: #fff;
-  background: #1a6091; }
-
-/*********************************************
- * IMAGES
- *********************************************/
-.reveal section img {
-  margin: 15px 0px;
-  background: rgba(255, 255, 255, 0.12);
-  border: 4px solid #657b83;
-  box-shadow: 0 0 10px rgba(0, 0, 0, 0.15);
-  -webkit-transition: all .2s linear;
-  -moz-transition: all .2s linear;
-  -ms-transition: all .2s linear;
-  -o-transition: all .2s linear;
-  transition: all .2s linear; }
-
-.reveal a:hover img {
-  background: rgba(255, 255, 255, 0.2);
-  border-color: #268bd2;
-  box-shadow: 0 0 20px rgba(0, 0, 0, 0.55); }
-
-/*********************************************
- * NAVIGATION CONTROLS
- *********************************************/
-.reveal .controls div.navigate-left,
-.reveal .controls div.navigate-left.enabled {
-  border-right-color: #268bd2; }
-
-.reveal .controls div.navigate-right,
-.reveal .controls div.navigate-right.enabled {
-  border-left-color: #268bd2; }
-
-.reveal .controls div.navigate-up,
-.reveal .controls div.navigate-up.enabled {
-  border-bottom-color: #268bd2; }
-
-.reveal .controls div.navigate-down,
-.reveal .controls div.navigate-down.enabled {
-  border-top-color: #268bd2; }
-
-.reveal .controls div.navigate-left.enabled:hover {
-  border-right-color: #78b9e6; }
-
-.reveal .controls div.navigate-right.enabled:hover {
-  border-left-color: #78b9e6; }
-
-.reveal .controls div.navigate-up.enabled:hover {
-  border-bottom-color: #78b9e6; }
-
-.reveal .controls div.navigate-down.enabled:hover {
-  border-top-color: #78b9e6; }
-
-/*********************************************
- * PROGRESS BAR
- *********************************************/
-.reveal .progress {
-  background: rgba(0, 0, 0, 0.2); }
-
-.reveal .progress span {
-  background: #268bd2;
-  -webkit-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -moz-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -ms-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  -o-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985);
-  transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); }
-
-/*********************************************
- * SLIDE NUMBER
- *********************************************/
-.reveal .slide-number {
-  color: #268bd2; }
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/beige.scss b/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/beige.scss
deleted file mode 100644
index c31956c..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/beige.scss
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Beige theme for reveal.js.
- *
- * Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
- */
-
-
-// Default mixins and settings -----------------
- at import "../template/mixins";
- at import "../template/settings";
-// ---------------------------------------------
-
-
-
-// Include theme-specific fonts
- at font-face {
-	font-family: 'League Gothic';
-	src: url('../../lib/font/league_gothic-webfont.eot');
-	src: url('../../lib/font/league_gothic-webfont.eot?#iefix') format('embedded-opentype'),
-		 url('../../lib/font/league_gothic-webfont.woff') format('woff'),
-		 url('../../lib/font/league_gothic-webfont.ttf') format('truetype'),
-		 url('../../lib/font/league_gothic-webfont.svg#LeagueGothicRegular') format('svg');
-
-	font-weight: normal;
-	font-style: normal;
-}
-
- at import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic);
-
-
-// Override theme settings (see ../template/settings.scss)
-$mainColor: #333;
-$headingColor: #333;
-$headingTextShadow: none;
-$backgroundColor: #f7f3de;
-$linkColor: #8b743d;
-$linkColorHover: lighten( $linkColor, 20% );
-$selectionBackgroundColor: rgba(79, 64, 28, 0.99);
-$heading1TextShadow: 0 1px 0 #ccc, 0 2px 0 #c9c9c9, 0 3px 0 #bbb, 0 4px 0 #b9b9b9, 0 5px 0 #aaa, 0 6px 1px rgba(0,0,0,.1), 0 0 5px rgba(0,0,0,.1), 0 1px 3px rgba(0,0,0,.3), 0 3px 5px rgba(0,0,0,.2), 0 5px 10px rgba(0,0,0,.25), 0 20px 20px rgba(0,0,0,.15);
-
-// Background generator
- at mixin bodyBackground() {
-	@include radial-gradient( rgba(247,242,211,1), rgba(255,255,255,1) );
-}
-
-
-
-// Theme template ------------------------------
- at import "../template/theme";
-// ---------------------------------------------
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/blood.scss b/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/blood.scss
deleted file mode 100644
index a9925a1..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/blood.scss
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Blood theme for reveal.js
- * Author: Walther http://github.com/Walther
- *
- * Designed to be used with highlight.js theme
- * "monokai_sublime.css" available from
- * https://github.com/isagalaev/highlight.js/
- *
- * For other themes, change $codeBackground accordingly.
- *
- */
-
- // Default mixins and settings -----------------
- at import "../template/mixins";
- at import "../template/settings";
-// ---------------------------------------------
-
-// Include theme-specific fonts
-
- at import url(https://fonts.googleapis.com/css?family=Ubuntu:300,700,300italic,700italic);
-
-// Colors used in the theme
-$blood: #a23;
-$coal: #222;
-$codeBackground: #23241f;
-
-// Main text
-$mainFont: Ubuntu, 'sans-serif';
-$mainFontSize: 36px;
-$mainColor: #eee;
-
-// Headings
-$headingFont: Ubuntu, 'sans-serif';
-$headingTextShadow: 2px 2px 2px $coal;
-
-// h1 shadow, borrowed humbly from 
-// (c) Default theme by Hakim El Hattab
-$heading1TextShadow: 0 1px 0 #ccc, 0 2px 0 #c9c9c9, 0 3px 0 #bbb, 0 4px 0 #b9b9b9, 0 5px 0 #aaa, 0 6px 1px rgba(0,0,0,.1), 0 0 5px rgba(0,0,0,.1), 0 1px 3px rgba(0,0,0,.3), 0 3px 5px rgba(0,0,0,.2), 0 5px 10px rgba(0,0,0,.25), 0 20px 20px rgba(0,0,0,.15);
-
-// Links
-$linkColor: $blood;
-$linkColorHover: lighten( $linkColor, 20% );
-
-// Text selection
-$selectionBackgroundColor: $blood;
-$selectionColor: #fff;
-
-// Background generator
- at mixin bodyBackground() {
-    @include radial-gradient( $coal, lighten( $coal, 25% ) );
-}
-
-// Theme template ------------------------------
- at import "../template/theme";
-// ---------------------------------------------
-
-// some overrides after theme template import
-
-.reveal p {
-    font-weight: 300;
-    text-shadow: 1px 1px $coal;
-}
-
-.reveal h1, 
-.reveal h2,
-.reveal h3,
-.reveal h4,
-.reveal h5,
-.reveal h6 {
-    font-weight: 700;
-}
-
-.reveal a:not(.image),
-.reveal a:not(.image):hover {
-    text-shadow: 2px 2px 2px #000;
-}
-
-.reveal small a:not(.image),
-.reveal small a:not(.image):hover {
-    text-shadow: 1px 1px 1px #000;
-}
-
-.reveal p code {
-    background-color: $codeBackground;
-    display: inline-block;
-    border-radius: 7px;
-}
-
-.reveal small code {
-    vertical-align: baseline;
-}
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/default.scss b/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/default.scss
deleted file mode 100644
index 1117b65..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/default.scss
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Default theme for reveal.js.
- *
- * Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
- */
-
-
-// Default mixins and settings -----------------
- at import "../template/mixins";
- at import "../template/settings";
-// ---------------------------------------------
-
-
-
-// Include theme-specific fonts
- at font-face {
-	font-family: 'League Gothic';
-	src: url('../../lib/font/league_gothic-webfont.eot');
-	src: url('../../lib/font/league_gothic-webfont.eot?#iefix') format('embedded-opentype'),
-		 url('../../lib/font/league_gothic-webfont.woff') format('woff'),
-		 url('../../lib/font/league_gothic-webfont.ttf') format('truetype'),
-		 url('../../lib/font/league_gothic-webfont.svg#LeagueGothicRegular') format('svg');
-
-	font-weight: normal;
-	font-style: normal;
-}
-
- at import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic);
-
-// Override theme settings (see ../template/settings.scss)
-$heading1TextShadow: 0 1px 0 #ccc, 0 2px 0 #c9c9c9, 0 3px 0 #bbb, 0 4px 0 #b9b9b9, 0 5px 0 #aaa, 0 6px 1px rgba(0,0,0,.1), 0 0 5px rgba(0,0,0,.1), 0 1px 3px rgba(0,0,0,.3), 0 3px 5px rgba(0,0,0,.2), 0 5px 10px rgba(0,0,0,.25), 0 20px 20px rgba(0,0,0,.15);
-
-// Background generator
- at mixin bodyBackground() {
-	@include radial-gradient( rgba(28,30,32,1), rgba(85,90,95,1) );
-}
-
-
-
-// Theme template ------------------------------
- at import "../template/theme";
-// ---------------------------------------------
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/moon.scss b/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/moon.scss
deleted file mode 100644
index a722adc..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/moon.scss
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Solarized Dark theme for reveal.js.
- * Author: Achim Staebler
- */
-
-
-// Default mixins and settings -----------------
- at import "../template/mixins";
- at import "../template/settings";
-// ---------------------------------------------
-
-
-
-// Include theme-specific fonts
- at font-face {
-	font-family: 'League Gothic';
-	src: url('../../lib/font/league_gothic-webfont.eot');
-	src: url('../../lib/font/league_gothic-webfont.eot?#iefix') format('embedded-opentype'),
-		 url('../../lib/font/league_gothic-webfont.woff') format('woff'),
-		 url('../../lib/font/league_gothic-webfont.ttf') format('truetype'),
-		 url('../../lib/font/league_gothic-webfont.svg#LeagueGothicRegular') format('svg');
-
-	font-weight: normal;
-	font-style: normal;
-}
-
- at import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic);
-
-/**
- * Solarized colors by Ethan Schoonover
- */
-html * {
-	color-profile: sRGB;
-	rendering-intent: auto;
-}
-
-// Solarized colors
-$base03:    #002b36;
-$base02:    #073642;
-$base01:    #586e75;
-$base00:    #657b83;
-$base0:     #839496;
-$base1:     #93a1a1;
-$base2:     #eee8d5;
-$base3:     #fdf6e3;
-$yellow:    #b58900;
-$orange:    #cb4b16;
-$red:       #dc322f;
-$magenta:   #d33682;
-$violet:    #6c71c4;
-$blue:      #268bd2;
-$cyan:      #2aa198;
-$green:     #859900;
-
-// Override theme settings (see ../template/settings.scss)
-$mainColor: $base1;
-$headingColor: $base2;
-$headingTextShadow: none;
-$backgroundColor: $base03;
-$linkColor: $blue;
-$linkColorHover: lighten( $linkColor, 20% );
-$selectionBackgroundColor: $magenta;
-
-
-
-// Theme template ------------------------------
- at import "../template/theme";
-// ---------------------------------------------
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/night.scss b/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/night.scss
deleted file mode 100644
index b0cb57f..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/night.scss
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Black theme for reveal.js.
- *
- * Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
- */
-
-
-// Default mixins and settings -----------------
- at import "../template/mixins";
- at import "../template/settings";
-// ---------------------------------------------
-
-
-// Include theme-specific fonts
- at import url(https://fonts.googleapis.com/css?family=Montserrat:700);
- at import url(https://fonts.googleapis.com/css?family=Open+Sans:400,700,400italic,700italic);
-
-
-// Override theme settings (see ../template/settings.scss)
-$backgroundColor: #111;
-
-$mainFont: 'Open Sans', sans-serif;
-$linkColor: #e7ad52;
-$linkColorHover: lighten( $linkColor, 20% );
-$headingFont: 'Montserrat', Impact, sans-serif;
-$headingTextShadow: none;
-$headingLetterSpacing: -0.03em;
-$headingTextTransform: none;
-$selectionBackgroundColor: #e7ad52;
-$mainFontSize: 30px;
-
-
-// Theme template ------------------------------
- at import "../template/theme";
-// ---------------------------------------------
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/serif.scss b/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/serif.scss
deleted file mode 100644
index 404b8bf..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/serif.scss
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * A simple theme for reveal.js presentations, similar
- * to the default theme. The accent color is brown.
- *
- * This theme is Copyright (C) 2012-2013 Owen Versteeg, http://owenversteeg.com - it is MIT licensed.
- */
-
-
-// Default mixins and settings -----------------
- at import "../template/mixins";
- at import "../template/settings";
-// ---------------------------------------------
-
-
-
-// Override theme settings (see ../template/settings.scss)
-$mainFont: 'Palatino Linotype', 'Book Antiqua', Palatino, FreeSerif, serif;
-$mainColor: #000;
-$headingFont: 'Palatino Linotype', 'Book Antiqua', Palatino, FreeSerif, serif;
-$headingColor: #383D3D;
-$headingTextShadow: none;
-$headingTextTransform: none;
-$backgroundColor: #F0F1EB;
-$linkColor: #51483D;
-$linkColorHover: lighten( $linkColor, 20% );
-$selectionBackgroundColor: #26351C;
-
-.reveal a:not(.image) {
-  line-height: 1.3em;
-}
-
-
-// Theme template ------------------------------
- at import "../template/theme";
-// ---------------------------------------------
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/simple.scss b/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/simple.scss
deleted file mode 100644
index 84c7d9b..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/simple.scss
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * A simple theme for reveal.js presentations, similar
- * to the default theme. The accent color is darkblue.
- *
- * This theme is Copyright (C) 2012 Owen Versteeg, https://github.com/StereotypicalApps. It is MIT licensed.
- * reveal.js is Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
- */
-
-
-// Default mixins and settings -----------------
- at import "../template/mixins";
- at import "../template/settings";
-// ---------------------------------------------
-
-
-
-// Include theme-specific fonts
- at import url(https://fonts.googleapis.com/css?family=News+Cycle:400,700);
- at import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic);
-
-
-// Override theme settings (see ../template/settings.scss)
-$mainFont: 'Lato', sans-serif;
-$mainColor: #000;
-$headingFont: 'News Cycle', Impact, sans-serif;
-$headingColor: #000;
-$headingTextShadow: none;
-$headingTextTransform: none;
-$backgroundColor: #fff;
-$linkColor: #00008B;
-$linkColorHover: lighten( $linkColor, 20% );
-$selectionBackgroundColor: rgba(0, 0, 0, 0.99);
-
-
-
-// Theme template ------------------------------
- at import "../template/theme";
-// ---------------------------------------------
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/sky.scss b/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/sky.scss
deleted file mode 100644
index 72a3a90..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/sky.scss
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Sky theme for reveal.js.
- *
- * Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
- */
-
-
-// Default mixins and settings -----------------
- at import "../template/mixins";
- at import "../template/settings";
-// ---------------------------------------------
-
-
-
-// Include theme-specific fonts
- at import url(https://fonts.googleapis.com/css?family=Quicksand:400,700,400italic,700italic);
- at import url(https://fonts.googleapis.com/css?family=Open+Sans:400italic,700italic,400,700);
-
-
-// Override theme settings (see ../template/settings.scss)
-$mainFont: 'Open Sans', sans-serif;
-$mainColor: #333;
-$headingFont: 'Quicksand', sans-serif;
-$headingColor: #333;
-$headingLetterSpacing: -0.08em;
-$headingTextShadow: none;
-$backgroundColor: #f7fbfc;
-$linkColor: #3b759e;
-$linkColorHover: lighten( $linkColor, 20% );
-$selectionBackgroundColor: #134674;
-
-// Fix links so they are not cut off
-.reveal a:not(.image) {
-	line-height: 1.3em;
-}
-
-// Background generator
- at mixin bodyBackground() {
-	@include radial-gradient( #add9e4, #f7fbfc );
-}
-
-
-
-// Theme template ------------------------------
- at import "../template/theme";
-// ---------------------------------------------
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/solarized.scss b/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/solarized.scss
deleted file mode 100644
index 8217b8f..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/source/solarized.scss
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Solarized Light theme for reveal.js.
- * Author: Achim Staebler
- */
-
-
-// Default mixins and settings -----------------
- at import "../template/mixins";
- at import "../template/settings";
-// ---------------------------------------------
-
-
-
-// Include theme-specific fonts
- at font-face {
-	font-family: 'League Gothic';
-	src: url('../../lib/font/league_gothic-webfont.eot');
-	src: url('../../lib/font/league_gothic-webfont.eot?#iefix') format('embedded-opentype'),
-		 url('../../lib/font/league_gothic-webfont.woff') format('woff'),
-		 url('../../lib/font/league_gothic-webfont.ttf') format('truetype'),
-		 url('../../lib/font/league_gothic-webfont.svg#LeagueGothicRegular') format('svg');
-
-	font-weight: normal;
-	font-style: normal;
-}
-
- at import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic);
-
-
-/**
- * Solarized colors by Ethan Schoonover
- */
-html * {
-	color-profile: sRGB;
-	rendering-intent: auto;
-}
-
-// Solarized colors
-$base03:    #002b36;
-$base02:    #073642;
-$base01:    #586e75;
-$base00:    #657b83;
-$base0:     #839496;
-$base1:     #93a1a1;
-$base2:     #eee8d5;
-$base3:     #fdf6e3;
-$yellow:    #b58900;
-$orange:    #cb4b16;
-$red:       #dc322f;
-$magenta:   #d33682;
-$violet:    #6c71c4;
-$blue:      #268bd2;
-$cyan:      #2aa198;
-$green:     #859900;
-
-// Override theme settings (see ../template/settings.scss)
-$mainColor: $base00;
-$headingColor: $base01;
-$headingTextShadow: none;
-$backgroundColor: $base3;
-$linkColor: $blue;
-$linkColorHover: lighten( $linkColor, 20% );
-$selectionBackgroundColor: $magenta;
-
-// Background generator
-// @mixin bodyBackground() {
-// 	@include radial-gradient( rgba($base3,1), rgba(lighten($base3, 20%),1) );
-// }
-
-
-
-// Theme template ------------------------------
- at import "../template/theme";
-// ---------------------------------------------
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/template/mixins.scss b/uflacs-merge-into-ffc/doc/roadmap/css/theme/template/mixins.scss
deleted file mode 100644
index e0c5606..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/template/mixins.scss
+++ /dev/null
@@ -1,29 +0,0 @@
- at mixin vertical-gradient( $top, $bottom ) {
-	background: $top;
-	background: -moz-linear-gradient( top, $top 0%, $bottom 100% );
-	background: -webkit-gradient( linear, left top, left bottom, color-stop(0%,$top), color-stop(100%,$bottom) );
-	background: -webkit-linear-gradient( top, $top 0%, $bottom 100% );
-	background: -o-linear-gradient( top, $top 0%, $bottom 100% );
-	background: -ms-linear-gradient( top, $top 0%, $bottom 100% );
-	background: linear-gradient( top, $top 0%, $bottom 100% );
-}
-
- at mixin horizontal-gradient( $top, $bottom ) {
-	background: $top;
-	background: -moz-linear-gradient( left, $top 0%, $bottom 100% );
-	background: -webkit-gradient( linear, left top, right top, color-stop(0%,$top), color-stop(100%,$bottom) );
-	background: -webkit-linear-gradient( left, $top 0%, $bottom 100% );
-	background: -o-linear-gradient( left, $top 0%, $bottom 100% );
-	background: -ms-linear-gradient( left, $top 0%, $bottom 100% );
-	background: linear-gradient( left, $top 0%, $bottom 100% );
-}
-
- at mixin radial-gradient( $outer, $inner, $type: circle ) {
-	background: $outer;
-	background: -moz-radial-gradient( center, $type cover,  $inner 0%, $outer 100% );
-	background: -webkit-gradient( radial, center center, 0px, center center, 100%, color-stop(0%,$inner), color-stop(100%,$outer) );
-	background: -webkit-radial-gradient( center, $type cover,  $inner 0%, $outer 100% );
-	background: -o-radial-gradient( center, $type cover,  $inner 0%, $outer 100% );
-	background: -ms-radial-gradient( center, $type cover,  $inner 0%, $outer 100% );
-	background: radial-gradient( center, $type cover,  $inner 0%, $outer 100% );
-}
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/template/settings.scss b/uflacs-merge-into-ffc/doc/roadmap/css/theme/template/settings.scss
deleted file mode 100644
index 739a609..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/template/settings.scss
+++ /dev/null
@@ -1,34 +0,0 @@
-// Base settings for all themes that can optionally be
-// overridden by the super-theme
-
-// Background of the presentation
-$backgroundColor: #2b2b2b;
-
-// Primary/body text
-$mainFont: 'Lato', sans-serif;
-$mainFontSize: 36px;
-$mainColor: #eee;
-
-// Headings
-$headingMargin: 0 0 20px 0;
-$headingFont: 'League Gothic', Impact, sans-serif;
-$headingColor: #eee;
-$headingLineHeight: 0.9em;
-$headingLetterSpacing: 0.02em;
-$headingTextTransform: uppercase;
-$headingTextShadow: 0px 0px 6px rgba(0,0,0,0.2);
-$heading1TextShadow: $headingTextShadow;
-
-// Links and actions
-$linkColor: #13DAEC;
-$linkColorHover: lighten( $linkColor, 20% );
-
-// Text selection
-$selectionBackgroundColor: #FF5E99;
-$selectionColor: #fff;
-
-// Generates the presentation background, can be overridden
-// to return a background image or gradient
- at mixin bodyBackground() {
-	background: $backgroundColor;
-}
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/css/theme/template/theme.scss b/uflacs-merge-into-ffc/doc/roadmap/css/theme/template/theme.scss
deleted file mode 100644
index 1562b54..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/css/theme/template/theme.scss
+++ /dev/null
@@ -1,170 +0,0 @@
-// Base theme template for reveal.js
-
-/*********************************************
- * GLOBAL STYLES
- *********************************************/
-
-body {
-	@include bodyBackground();
-	background-color: $backgroundColor;
-}
-
-.reveal {
-	font-family: $mainFont;
-	font-size: $mainFontSize;
-	font-weight: normal;
-	letter-spacing: -0.02em;
-	color: $mainColor;
-}
-
-::selection {
-	color: $selectionColor;
-	background: $selectionBackgroundColor;
-	text-shadow: none;
-}
-
-/*********************************************
- * HEADERS
- *********************************************/
-
-.reveal h1,
-.reveal h2,
-.reveal h3,
-.reveal h4,
-.reveal h5,
-.reveal h6 {
-	margin: $headingMargin;
-	color: $headingColor;
-
-	font-family: $headingFont;
-	line-height: $headingLineHeight;
-	letter-spacing: $headingLetterSpacing;
-
-	text-transform: $headingTextTransform;
-	text-shadow: $headingTextShadow;
-}
-
-.reveal h1 {
-	text-shadow: $heading1TextShadow;
-}
-
-
-/*********************************************
- * LINKS
- *********************************************/
-
-.reveal a:not(.image) {
-	color: $linkColor;
-	text-decoration: none;
-
-	-webkit-transition: color .15s ease;
-	   -moz-transition: color .15s ease;
-	    -ms-transition: color .15s ease;
-	     -o-transition: color .15s ease;
-	        transition: color .15s ease;
-}
-	.reveal a:not(.image):hover {
-		color: $linkColorHover;
-
-		text-shadow: none;
-		border: none;
-	}
-
-.reveal .roll span:after {
-	color: #fff;
-	background: darken( $linkColor, 15% );
-}
-
-
-/*********************************************
- * IMAGES
- *********************************************/
-
-.reveal section img {
-	margin: 15px 0px;
-	background: rgba(255,255,255,0.12);
-	border: 4px solid $mainColor;
-
-	box-shadow: 0 0 10px rgba(0, 0, 0, 0.15);
-
-	-webkit-transition: all .2s linear;
-	   -moz-transition: all .2s linear;
-	    -ms-transition: all .2s linear;
-	     -o-transition: all .2s linear;
-	        transition: all .2s linear;
-}
-
-	.reveal a:hover img {
-		background: rgba(255,255,255,0.2);
-		border-color: $linkColor;
-
-		box-shadow: 0 0 20px rgba(0, 0, 0, 0.55);
-	}
-
-
-/*********************************************
- * NAVIGATION CONTROLS
- *********************************************/
-
-.reveal .controls div.navigate-left,
-.reveal .controls div.navigate-left.enabled {
-	border-right-color: $linkColor;
-}
-
-.reveal .controls div.navigate-right,
-.reveal .controls div.navigate-right.enabled {
-	border-left-color: $linkColor;
-}
-
-.reveal .controls div.navigate-up,
-.reveal .controls div.navigate-up.enabled {
-	border-bottom-color: $linkColor;
-}
-
-.reveal .controls div.navigate-down,
-.reveal .controls div.navigate-down.enabled {
-	border-top-color: $linkColor;
-}
-
-.reveal .controls div.navigate-left.enabled:hover {
-	border-right-color: $linkColorHover;
-}
-
-.reveal .controls div.navigate-right.enabled:hover {
-	border-left-color: $linkColorHover;
-}
-
-.reveal .controls div.navigate-up.enabled:hover {
-	border-bottom-color: $linkColorHover;
-}
-
-.reveal .controls div.navigate-down.enabled:hover {
-	border-top-color: $linkColorHover;
-}
-
-
-/*********************************************
- * PROGRESS BAR
- *********************************************/
-
-.reveal .progress {
-	background: rgba(0,0,0,0.2);
-}
-	.reveal .progress span {
-		background: $linkColor;
-
-		-webkit-transition: width 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-		   -moz-transition: width 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-		    -ms-transition: width 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-		     -o-transition: width 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-		        transition: width 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985);
-	}
-
-/*********************************************
- * SLIDE NUMBER
- *********************************************/
-.reveal .slide-number {
-  color: $linkColor;
-}
-
-
diff --git a/uflacs-merge-into-ffc/doc/roadmap/images/image.jpg b/uflacs-merge-into-ffc/doc/roadmap/images/image.jpg
deleted file mode 100644
index a52f515..0000000
Binary files a/uflacs-merge-into-ffc/doc/roadmap/images/image.jpg and /dev/null differ
diff --git a/uflacs-merge-into-ffc/doc/roadmap/index.html b/uflacs-merge-into-ffc/doc/roadmap/index.html
deleted file mode 100644
index 3c30966..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/index.html
+++ /dev/null
@@ -1,407 +0,0 @@
-<!doctype html>
-<html lang="en">
-<head>
-  <meta charset="utf-8">
-
-  <title>Roadmap of uflacs development</title>
-  <meta name="description" content="Roadmap for development of uflacs towards feature completeness.">
-  <meta name="author" content="Martin Sandve Alnæs">
-
-  <link rel="stylesheet" href="css/reveal.min.css">
-  <link rel="stylesheet" href="css/theme/night.css" id="theme">
-
-  <!-- Add support for old IE -->
-  <!--[if lt IE 9]>
-  <script src="lib/js/html5shiv.js"></script>
-  <![endif]-->
-
-</head>
-
-<body>
-  <div class="reveal">
-    <div class="slides">
-
-      <section id="title-slide">
-        <h2><script>document.write(document.title);</script></h2>
-        <h4>Martin Sandve Alnæs</h4>
-        <h4>10/6 2014</h4>
-      </section>
-
-
-      <!-- Work in progress, current focus -->
-      <section id="wip">
-
-        <section id="wip-priorities">
-          <a href="#/wip-priorities"><h3>Work in progress - current top priorities</h3></a>
-          <ul>
-            <li>...</li>
-          </ul>
-        </section>
-
-        <section id="wip-backlog">
-          <a href="#/wip-backlog"><h3>Main points left for release</h3></a>
-          <ul>
-            <li>Geometry - Circumradius, CellVolume, FacetArea, MaxFacetEdgeLength, MinFacetEdgeLength.</li>
-            <li>Conditionals.</li>
-            <li>Point integral.</li>
-            <li>Custom integral.</li>
-            <li>Non-affine vector-element mappings - implement ReferenceValueOf in UFL.</li>
-            <li>RestrictedElement.</li>
-          </ul>
-        </section>
-
-        <section id="wip-optimize-compiler">
-          <a href="#/wip-optimize-compiler"><h3>Optimization of code generation</h3></a>
-          <ul>
-            <li>Factorization algorithm in unoptimized and slow.</li>
-            <li>Profile!</li>
-          </ul>
-        </section>
-
-        <section id="wip-optimize-kernel">
-          <a href="#/wip-optimize-kernel"><h3>Optimization of generated code</h3></a>
-          <ul>
-            <li>At this point performance is competitive but has potential.</li>
-            <li>Extract constant-to-a-scale tensor parts out of quadrature loop.</li>
-            <li>Fuse loops of same size.</li>
-            <li>Add nonzero table column support similar to quadrature representation.</li>
-            <li>Auto-vectorization with AVX2 intrinsics.</li>
-            <li>Tune unstructured expressions better - generate FMAs?</li>
-          </ul>
-        </section>
-
-      </section>
-
-
-      <!-- Features section -->
-      <section id="status-geometry">
-
-        <section>
-          <h3>Status of geometry types</h3>
-          <ul>
-            <li>25 types</li>
-            <li>3 cell/facet types</li>
-            <li>5 integral types</li>
-            <li>= 375 of combinations to test...</li>
-            <li>Overview below is not up to date!</li>
-          </ul>
-        </section>
-
-        <section>
-          <h3>Implemented geometry types</h3>
-        </section>
-        <section tagcloud>
-          SpatialCoordinate
-          FacetNormal
-        </section>
-
-        <section>
-          <h3>Geometry types likely working</h3>
-        </section>
-        <section tagcloud>
-          CellCoordinate
-          FacetCoordinate
-          CellOrigin
-          FacetOrigin
-          CellFacetOrigin
-          Jacobian
-          JacobianDeterminant
-          JacobianInverse
-          FacetJacobian
-          FacetJacobianDeterminant
-          FacetJacobianInverse
-          CellFacetJacobian
-          CellFacetJacobianDeterminant
-          CellFacetJacobianInverse
-          CellNormal
-          CellOrientation
-          FacetOrientation
-          QuadratureWeight
-        </section>
-
-        <section>
-          <h3>Missing geometry types</h3>
-        </section>
-        <section tagcloud>
-          CellVolume
-          Circumradius
-          FacetArea
-          MinFacetEdgeLength
-          MaxFacetEdgeLength
-        </section>
-
-      </section>
-
-
-      <!-- Features section -->
-      <section id="status-integrals">
-
-        <section>
-          <h3>Status of integral types</h3>
-          <ul>
-            <li>Need unit testing of geometry.</li>
-            <li>Need unit testing of gradient mappings.</li>
-            <li>Need unit testing of integration scaling.</li>
-          </ul>
-        </section>
-
-        <section tagcloud>
-          cell
-          exterior_facet
-          interior_facet
-          custom
-          point
-        </section>
-
-        </section>
-
-      </section>
-
-
-      <!-- Testing techniques -->
-      <section id="testing-commands">
-
-        <section>
-          <h3>Commands to test uflacs with ffc:</h3>
-          <pre><code data-trim contenteditable>
-ffc -r uflacs
-./test.py --ext-uflacs --skip-download --permissive --print-timing
-./test.py --ext-uflacs --skip-download --permissive --print-timing --bench
-          </code></pre>
-        </section>
-
-        <section>
-          <h3>Python unit testing status</h3>
-        </section>
-
-        <section>
-          <h3>C++ unit testing status</h3>
-        </section>
-
-      </section>
-
-
-      <!-- FFC regression demos working -->
-      <section id="regression-correct">
-
-        <section>
-          <a href="#/regression-correct">
-          <h3>Overview of which FFC regression test demos that produce CORRECT results</h3>
-          </a>
-        </section>
-
-        <section tagcloud>
-          Mass
-          Poisson1D
-          Poisson
-          VectorPoisson
-          TensorWeightedPoisson
-          Heat
-          Stokes
-          NavierStokes
-          Division
-          NeumannProblem
-          StabilisedStokes
-          Optimization
-          MixedMixedElement
-          Mini
-          SpatialCoordinates
-          SubDomain
-          P5tet
-          P5tri
-          Constant
-          Equation
-          ReactionDiffusion
-          EnergyNorm
-          AlgebraOperators
-          Components
-          MathFunctions
-          QuadratureElement
-          MetaData
-          CoefficientOperators
-          Elasticity
-          HyperElasticity
-          SubDomains
-          FacetIntegrals
-          FacetRestrictionAD
-        </section>
-
-      </section>
-
-
-      <!-- FFC regression demos not working -->
-      <section id="regression-fail">
-
-        <section>
-          <a href="#/regression-fail">
-          <h3>Overview of which FFC regression test demos that produce
-          incorrect results or fail to generate code</h3>
-          </a>
-          <p>Note:</p>
-          Biharmonic and PoissonDG dolfin demos work if replacing CellSize with a Constant.
-        </section>
-
-        <!-- Split by reason -->
-
-        <section>
-          <h3>Missing geometry</h3>
-          CellGeometry
-          (Biharmonic)
-          (PoissonDG)
-        </section>
-
-        <section>
-          <h3>Missing element mappings</h3>
-          MixedPoissonDual
-          MixedPoisson
-        </section>
-
-        <section>
-          <h3>Missing integral type handling</h3>
-          PointMeasure
-          CustomIntegral
-        </section>
-
-        <section>
-          <h3>Incorrect manifold dimensions</h3>
-          ProjectionManifold
-        </section>
-
-        <section>
-          <h3>Missing conditional operators</h3>
-          Conditional
-        </section>
-
-        <section>
-          <h3>Missing element table definitions</h3>
-          AdaptivePoisson
-          VectorLaplaceGradCurl
-        </section>
-
-        <section>
-          <h3>Missing element type handling</h3>
-          RestrictedElement
-        </section>
-
-      </section>
-
-      <!-- Future code improvement -->
-      <section>
-
-        <section>
-          <h3>Clean up graph building code</h3>
-        </section>
-
-        <section>
-          <h3>Rewrite ffc table generation</h3>
-        </section>
-
-        <section>
-          <h3>Merge into ffc?</h3>
-        </section>
-
-      </section>
-
-
-      <!-- Future optimization -->
-      <section>
-
-        <section>
-          <h3>Loop invariant code motion</h3>
-        </section>
-
-        <section>
-          <h3>Loop fusion</h3>
-        </section>
-
-        <section>
-          <h3>Identify relation to tensor representation</h3>
-        </section>
-
-        <section>
-          <h3>AVX vectorization</h3>
-        </section>
-
-        <section>
-          <h3>BLAS</h3>
-        </section>
-
-      </section>
-
-
-      <!-- End of talk -->
-
-
-<!-- Example slides
-      <section>
-
-        <section>
-          <h3>Example slides below</h3>
-        </section>
-
-        <section>
-          <h3>Code example</h3>
-          <pre><code data-trim contenteditable>
-              foo = bar();
-          </code></pre>
-        </section>
-
-        <section>
-          <h3>Point list</h3>
-          <ul>
-            <li>...</li>
-            <li>...</li>
-            <li>...</li>
-          </ul>
-        </section>
-
-        <section>
-          <h3>Image</h3>
-          <img src="images/image.jpg">
-        </section>
-
-        <section>
-          <h3>Image and pointlist (fix alignment)</h3>
-          <div style="text-align: left;">
-            <ul>
-              <li>...</li>
-              <li>...</li>
-              <li>...</li>
-            </ul>
-          </div>
-          <div style="text-align: right;">
-            <img src="images/image.jpg">
-          </div>
-        </section>
-
-      </section>
-
--->
-
-    </div>
-
-    <script src="lib/js/head.min.js"></script>
-    <script src="js/reveal.min.js"></script>
-
-    <script>
-        // Required, even if empty
-        Reveal.initialize({
-          //controls: true,
-          //progress: true,
-          //history=true,
-          //center=false,
-
-          math: {
-            // mathjax: 'http://cdn.mathjax.org/mathjax/latest/MathJax.js',
-            config: 'TeX-AMS_HTML-full'
-          },
-
-          dependencies: [
-            { src: 'plugin/highlight/highlight.js', async: true, callback: function() { hljs.initHighlightingOnLoad(); } },
-            { src: 'plugin/tagcloud/tagcloud.js', async: true },
-            { src: 'plugin/math/math.js', async: true },
-          ]
-        })
-    </script>
-</body>
-</html>
diff --git a/uflacs-merge-into-ffc/doc/roadmap/js/reveal.js b/uflacs-merge-into-ffc/doc/roadmap/js/reveal.js
deleted file mode 100644
index 5cbb3ff..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/js/reveal.js
+++ /dev/null
@@ -1,3382 +0,0 @@
-/*!
- * reveal.js
- * http://lab.hakim.se/reveal-js
- * MIT licensed
- *
- * Copyright (C) 2014 Hakim El Hattab, http://hakim.se
- */
-var Reveal = (function(){
-
-	'use strict';
-
-	var SLIDES_SELECTOR = '.reveal .slides section',
-		HORIZONTAL_SLIDES_SELECTOR = '.reveal .slides>section',
-		VERTICAL_SLIDES_SELECTOR = '.reveal .slides>section.present>section',
-		HOME_SLIDE_SELECTOR = '.reveal .slides>section:first-of-type',
-
-		// Configurations defaults, can be overridden at initialization time
-		config = {
-
-			// The "normal" size of the presentation, aspect ratio will be preserved
-			// when the presentation is scaled to fit different resolutions
-			width: 960,
-			height: 700,
-
-			// Factor of the display size that should remain empty around the content
-			margin: 0.1,
-
-			// Bounds for smallest/largest possible scale to apply to content
-			minScale: 0.2,
-			maxScale: 1.0,
-
-			// Display controls in the bottom right corner
-			controls: true,
-
-			// Display a presentation progress bar
-			progress: true,
-
-			// Display the page number of the current slide
-			slideNumber: false,
-
-			// Push each slide change to the browser history
-			history: false,
-
-			// Enable keyboard shortcuts for navigation
-			keyboard: true,
-
-			// Enable the slide overview mode
-			overview: true,
-
-			// Vertical centering of slides
-			center: true,
-
-			// Enables touch navigation on devices with touch input
-			touch: true,
-
-			// Loop the presentation
-			loop: false,
-
-			// Change the presentation direction to be RTL
-			rtl: false,
-
-			// Turns fragments on and off globally
-			fragments: true,
-
-			// Flags if the presentation is running in an embedded mode,
-			// i.e. contained within a limited portion of the screen
-			embedded: false,
-
-			// Number of milliseconds between automatically proceeding to the
-			// next slide, disabled when set to 0, this value can be overwritten
-			// by using a data-autoslide attribute on your slides
-			autoSlide: 0,
-
-			// Stop auto-sliding after user input
-			autoSlideStoppable: true,
-
-			// Enable slide navigation via mouse wheel
-			mouseWheel: false,
-
-			// Apply a 3D roll to links on hover
-			rollingLinks: false,
-
-			// Hides the address bar on mobile devices
-			hideAddressBar: true,
-
-			// Opens links in an iframe preview overlay
-			previewLinks: false,
-
-			// Focuses body when page changes visiblity to ensure keyboard shortcuts work
-			focusBodyOnPageVisiblityChange: true,
-
-			// Theme (see /css/theme)
-			theme: null,
-
-			// Transition style
-			transition: 'default', // default/cube/page/concave/zoom/linear/fade/none
-
-			// Transition speed
-			transitionSpeed: 'default', // default/fast/slow
-
-			// Transition style for full page slide backgrounds
-			backgroundTransition: 'default', // default/linear/none
-
-			// Parallax background image
-			parallaxBackgroundImage: '', // CSS syntax, e.g. "a.jpg"
-
-			// Parallax background size
-			parallaxBackgroundSize: '', // CSS syntax, e.g. "3000px 2000px"
-
-			// Number of slides away from the current that are visible
-			viewDistance: 3,
-
-			// Script dependencies to load
-			dependencies: []
-
-		},
-
-		// Flags if reveal.js is loaded (has dispatched the 'ready' event)
-		loaded = false,
-
-		// The horizontal and vertical index of the currently active slide
-		indexh,
-		indexv,
-
-		// The previous and current slide HTML elements
-		previousSlide,
-		currentSlide,
-
-		previousBackground,
-
-		// Slides may hold a data-state attribute which we pick up and apply
-		// as a class to the body. This list contains the combined state of
-		// all current slides.
-		state = [],
-
-		// The current scale of the presentation (see width/height config)
-		scale = 1,
-
-		// Cached references to DOM elements
-		dom = {},
-
-		// Features supported by the browser, see #checkCapabilities()
-		features = {},
-
-		// Client is a mobile device, see #checkCapabilities()
-		isMobileDevice,
-
-		// Throttles mouse wheel navigation
-		lastMouseWheelStep = 0,
-
-		// Delays updates to the URL due to a Chrome thumbnailer bug
-		writeURLTimeout = 0,
-
-		// A delay used to activate the overview mode
-		activateOverviewTimeout = 0,
-
-		// A delay used to deactivate the overview mode
-		deactivateOverviewTimeout = 0,
-
-		// Flags if the interaction event listeners are bound
-		eventsAreBound = false,
-
-		// The current auto-slide duration
-		autoSlide = 0,
-
-		// Auto slide properties
-		autoSlidePlayer,
-		autoSlideTimeout = 0,
-		autoSlideStartTime = -1,
-		autoSlidePaused = false,
-
-		// Holds information about the currently ongoing touch input
-		touch = {
-			startX: 0,
-			startY: 0,
-			startSpan: 0,
-			startCount: 0,
-			captured: false,
-			threshold: 40
-		};
-
-	/**
-	 * Starts up the presentation if the client is capable.
-	 */
-	function initialize( options ) {
-
-		checkCapabilities();
-
-		if( !features.transforms2d && !features.transforms3d ) {
-			document.body.setAttribute( 'class', 'no-transforms' );
-
-			// If the browser doesn't support core features we won't be
-			// using JavaScript to control the presentation
-			return;
-		}
-
-		// Force a layout when the whole page, incl fonts, has loaded
-		window.addEventListener( 'load', layout, false );
-
-		var query = Reveal.getQueryHash();
-
-		// Do not accept new dependencies via query config to avoid
-		// the potential of malicious script injection
-		if( typeof query['dependencies'] !== 'undefined' ) delete query['dependencies'];
-
-		// Copy options over to our config object
-		extend( config, options );
-		extend( config, query );
-
-		// Hide the address bar in mobile browsers
-		hideAddressBar();
-
-		// Loads the dependencies and continues to #start() once done
-		load();
-
-	}
-
-	/**
-	 * Inspect the client to see what it's capable of, this
-	 * should only happens once per runtime.
-	 */
-	function checkCapabilities() {
-
-		features.transforms3d = 'WebkitPerspective' in document.body.style ||
-								'MozPerspective' in document.body.style ||
-								'msPerspective' in document.body.style ||
-								'OPerspective' in document.body.style ||
-								'perspective' in document.body.style;
-
-		features.transforms2d = 'WebkitTransform' in document.body.style ||
-								'MozTransform' in document.body.style ||
-								'msTransform' in document.body.style ||
-								'OTransform' in document.body.style ||
-								'transform' in document.body.style;
-
-		features.requestAnimationFrameMethod = window.requestAnimationFrame || window.webkitRequestAnimationFrame || window.mozRequestAnimationFrame;
-		features.requestAnimationFrame = typeof features.requestAnimationFrameMethod === 'function';
-
-		features.canvas = !!document.createElement( 'canvas' ).getContext;
-
-		isMobileDevice = navigator.userAgent.match( /(iphone|ipod|android)/gi );
-
-	}
-
-
-    /**
-     * Loads the dependencies of reveal.js. Dependencies are
-     * defined via the configuration option 'dependencies'
-     * and will be loaded prior to starting/binding reveal.js.
-     * Some dependencies may have an 'async' flag, if so they
-     * will load after reveal.js has been started up.
-     */
-	function load() {
-
-		var scripts = [],
-			scriptsAsync = [],
-			scriptsToPreload = 0;
-
-		// Called once synchronous scripts finish loading
-		function proceed() {
-			if( scriptsAsync.length ) {
-				// Load asynchronous scripts
-				head.js.apply( null, scriptsAsync );
-			}
-
-			start();
-		}
-
-		function loadScript( s ) {
-			head.ready( s.src.match( /([\w\d_\-]*)\.?js$|[^\\\/]*$/i )[0], function() {
-				// Extension may contain callback functions
-				if( typeof s.callback === 'function' ) {
-					s.callback.apply( this );
-				}
-
-				if( --scriptsToPreload === 0 ) {
-					proceed();
-				}
-			});
-		}
-
-		for( var i = 0, len = config.dependencies.length; i < len; i++ ) {
-			var s = config.dependencies[i];
-
-			// Load if there's no condition or the condition is truthy
-			if( !s.condition || s.condition() ) {
-				if( s.async ) {
-					scriptsAsync.push( s.src );
-				}
-				else {
-					scripts.push( s.src );
-				}
-
-				loadScript( s );
-			}
-		}
-
-		if( scripts.length ) {
-			scriptsToPreload = scripts.length;
-
-			// Load synchronous scripts
-			head.js.apply( null, scripts );
-		}
-		else {
-			proceed();
-		}
-
-	}
-
-	/**
-	 * Starts up reveal.js by binding input events and navigating
-	 * to the current URL deeplink if there is one.
-	 */
-	function start() {
-
-		// Make sure we've got all the DOM elements we need
-		setupDOM();
-
-		// Resets all vertical slides so that only the first is visible
-		resetVerticalSlides();
-
-		// Updates the presentation to match the current configuration values
-		configure();
-
-		// Read the initial hash
-		readURL();
-
-		// Update all backgrounds
-		updateBackground( true );
-
-		// Notify listeners that the presentation is ready but use a 1ms
-		// timeout to ensure it's not fired synchronously after #initialize()
-		setTimeout( function() {
-			// Enable transitions now that we're loaded
-			dom.slides.classList.remove( 'no-transition' );
-
-			loaded = true;
-
-			dispatchEvent( 'ready', {
-				'indexh': indexh,
-				'indexv': indexv,
-				'currentSlide': currentSlide
-			} );
-		}, 1 );
-
-	}
-
-	/**
-	 * Finds and stores references to DOM elements which are
-	 * required by the presentation. If a required element is
-	 * not found, it is created.
-	 */
-	function setupDOM() {
-
-		// Cache references to key DOM elements
-		dom.theme = document.querySelector( '#theme' );
-		dom.wrapper = document.querySelector( '.reveal' );
-		dom.slides = document.querySelector( '.reveal .slides' );
-
-		// Prevent transitions while we're loading
-		dom.slides.classList.add( 'no-transition' );
-
-		// Background element
-		dom.background = createSingletonNode( dom.wrapper, 'div', 'backgrounds', null );
-
-		// Progress bar
-		dom.progress = createSingletonNode( dom.wrapper, 'div', 'progress', '<span></span>' );
-		dom.progressbar = dom.progress.querySelector( 'span' );
-
-		// Arrow controls
-		createSingletonNode( dom.wrapper, 'aside', 'controls',
-			'<div class="navigate-left"></div>' +
-			'<div class="navigate-right"></div>' +
-			'<div class="navigate-up"></div>' +
-			'<div class="navigate-down"></div>' );
-
-		// Slide number
-		dom.slideNumber = createSingletonNode( dom.wrapper, 'div', 'slide-number', '' );
-
-		// State background element [DEPRECATED]
-		createSingletonNode( dom.wrapper, 'div', 'state-background', null );
-
-		// Overlay graphic which is displayed during the paused mode
-		createSingletonNode( dom.wrapper, 'div', 'pause-overlay', null );
-
-		// Cache references to elements
-		dom.controls = document.querySelector( '.reveal .controls' );
-
-		// There can be multiple instances of controls throughout the page
-		dom.controlsLeft = toArray( document.querySelectorAll( '.navigate-left' ) );
-		dom.controlsRight = toArray( document.querySelectorAll( '.navigate-right' ) );
-		dom.controlsUp = toArray( document.querySelectorAll( '.navigate-up' ) );
-		dom.controlsDown = toArray( document.querySelectorAll( '.navigate-down' ) );
-		dom.controlsPrev = toArray( document.querySelectorAll( '.navigate-prev' ) );
-		dom.controlsNext = toArray( document.querySelectorAll( '.navigate-next' ) );
-
-	}
-
-	/**
-	 * Creates an HTML element and returns a reference to it.
-	 * If the element already exists the existing instance will
-	 * be returned.
-	 */
-	function createSingletonNode( container, tagname, classname, innerHTML ) {
-
-		var node = container.querySelector( '.' + classname );
-		if( !node ) {
-			node = document.createElement( tagname );
-			node.classList.add( classname );
-			if( innerHTML !== null ) {
-				node.innerHTML = innerHTML;
-			}
-			container.appendChild( node );
-		}
-		return node;
-
-	}
-
-	/**
-	 * Creates the slide background elements and appends them
-	 * to the background container. One element is created per
-	 * slide no matter if the given slide has visible background.
-	 */
-	function createBackgrounds() {
-
-		if( isPrintingPDF() ) {
-			document.body.classList.add( 'print-pdf' );
-		}
-
-		// Clear prior backgrounds
-		dom.background.innerHTML = '';
-		dom.background.classList.add( 'no-transition' );
-
-		// Helper method for creating a background element for the
-		// given slide
-		function _createBackground( slide, container ) {
-
-			var data = {
-				background: slide.getAttribute( 'data-background' ),
-				backgroundSize: slide.getAttribute( 'data-background-size' ),
-				backgroundImage: slide.getAttribute( 'data-background-image' ),
-				backgroundColor: slide.getAttribute( 'data-background-color' ),
-				backgroundRepeat: slide.getAttribute( 'data-background-repeat' ),
-				backgroundPosition: slide.getAttribute( 'data-background-position' ),
-				backgroundTransition: slide.getAttribute( 'data-background-transition' )
-			};
-
-			var element = document.createElement( 'div' );
-			element.className = 'slide-background';
-
-			if( data.background ) {
-				// Auto-wrap image urls in url(...)
-				if( /^(http|file|\/\/)/gi.test( data.background ) || /\.(svg|png|jpg|jpeg|gif|bmp)$/gi.test( data.background ) ) {
-					element.style.backgroundImage = 'url('+ data.background +')';
-				}
-				else {
-					element.style.background = data.background;
-				}
-			}
-
-			if( data.background || data.backgroundColor || data.backgroundImage ) {
-				element.setAttribute( 'data-background-hash', data.background + data.backgroundSize + data.backgroundImage + data.backgroundColor + data.backgroundRepeat + data.backgroundPosition + data.backgroundTransition );
-			}
-
-			// Additional and optional background properties
-			if( data.backgroundSize ) element.style.backgroundSize = data.backgroundSize;
-			if( data.backgroundImage ) element.style.backgroundImage = 'url("' + data.backgroundImage + '")';
-			if( data.backgroundColor ) element.style.backgroundColor = data.backgroundColor;
-			if( data.backgroundRepeat ) element.style.backgroundRepeat = data.backgroundRepeat;
-			if( data.backgroundPosition ) element.style.backgroundPosition = data.backgroundPosition;
-			if( data.backgroundTransition ) element.setAttribute( 'data-background-transition', data.backgroundTransition );
-
-			container.appendChild( element );
-
-			return element;
-
-		}
-
-		// Iterate over all horizontal slides
-		toArray( document.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ) ).forEach( function( slideh ) {
-
-			var backgroundStack;
-
-			if( isPrintingPDF() ) {
-				backgroundStack = _createBackground( slideh, slideh );
-			}
-			else {
-				backgroundStack = _createBackground( slideh, dom.background );
-			}
-
-			// Iterate over all vertical slides
-			toArray( slideh.querySelectorAll( 'section' ) ).forEach( function( slidev ) {
-
-				if( isPrintingPDF() ) {
-					_createBackground( slidev, slidev );
-				}
-				else {
-					_createBackground( slidev, backgroundStack );
-				}
-
-			} );
-
-		} );
-
-		// Add parallax background if specified
-		if( config.parallaxBackgroundImage ) {
-
-			dom.background.style.backgroundImage = 'url("' + config.parallaxBackgroundImage + '")';
-			dom.background.style.backgroundSize = config.parallaxBackgroundSize;
-
-			// Make sure the below properties are set on the element - these properties are
-			// needed for proper transitions to be set on the element via CSS. To remove
-			// annoying background slide-in effect when the presentation starts, apply
-			// these properties after short time delay
-			setTimeout( function() {
-				dom.wrapper.classList.add( 'has-parallax-background' );
-			}, 1 );
-
-		}
-		else {
-
-			dom.background.style.backgroundImage = '';
-			dom.wrapper.classList.remove( 'has-parallax-background' );
-
-		}
-
-	}
-
-	/**
-	 * Applies the configuration settings from the config
-	 * object. May be called multiple times.
-	 */
-	function configure( options ) {
-
-		var numberOfSlides = document.querySelectorAll( SLIDES_SELECTOR ).length;
-
-		dom.wrapper.classList.remove( config.transition );
-
-		// New config options may be passed when this method
-		// is invoked through the API after initialization
-		if( typeof options === 'object' ) extend( config, options );
-
-		// Force linear transition based on browser capabilities
-		if( features.transforms3d === false ) config.transition = 'linear';
-
-		dom.wrapper.classList.add( config.transition );
-
-		dom.wrapper.setAttribute( 'data-transition-speed', config.transitionSpeed );
-		dom.wrapper.setAttribute( 'data-background-transition', config.backgroundTransition );
-
-		dom.controls.style.display = config.controls ? 'block' : 'none';
-		dom.progress.style.display = config.progress ? 'block' : 'none';
-
-		if( config.rtl ) {
-			dom.wrapper.classList.add( 'rtl' );
-		}
-		else {
-			dom.wrapper.classList.remove( 'rtl' );
-		}
-
-		if( config.center ) {
-			dom.wrapper.classList.add( 'center' );
-		}
-		else {
-			dom.wrapper.classList.remove( 'center' );
-		}
-
-		if( config.mouseWheel ) {
-			document.addEventListener( 'DOMMouseScroll', onDocumentMouseScroll, false ); // FF
-			document.addEventListener( 'mousewheel', onDocumentMouseScroll, false );
-		}
-		else {
-			document.removeEventListener( 'DOMMouseScroll', onDocumentMouseScroll, false ); // FF
-			document.removeEventListener( 'mousewheel', onDocumentMouseScroll, false );
-		}
-
-		// Rolling 3D links
-		if( config.rollingLinks ) {
-			enableRollingLinks();
-		}
-		else {
-			disableRollingLinks();
-		}
-
-		// Iframe link previews
-		if( config.previewLinks ) {
-			enablePreviewLinks();
-		}
-		else {
-			disablePreviewLinks();
-			enablePreviewLinks( '[data-preview-link]' );
-		}
-
-		// Auto-slide playback controls
-		if( numberOfSlides > 1 && config.autoSlide && config.autoSlideStoppable && features.canvas && features.requestAnimationFrame ) {
-			autoSlidePlayer = new Playback( dom.wrapper, function() {
-				return Math.min( Math.max( ( Date.now() - autoSlideStartTime ) / autoSlide, 0 ), 1 );
-			} );
-
-			autoSlidePlayer.on( 'click', onAutoSlidePlayerClick );
-			autoSlidePaused = false;
-		}
-		else if( autoSlidePlayer ) {
-			autoSlidePlayer.destroy();
-			autoSlidePlayer = null;
-		}
-
-		// Load the theme in the config, if it's not already loaded
-		if( config.theme && dom.theme ) {
-			var themeURL = dom.theme.getAttribute( 'href' );
-			var themeFinder = /[^\/]*?(?=\.css)/;
-			var themeName = themeURL.match(themeFinder)[0];
-
-			if(  config.theme !== themeName ) {
-				themeURL = themeURL.replace(themeFinder, config.theme);
-				dom.theme.setAttribute( 'href', themeURL );
-			}
-		}
-
-		sync();
-
-	}
-
-	/**
-	 * Binds all event listeners.
-	 */
-	function addEventListeners() {
-
-		eventsAreBound = true;
-
-		window.addEventListener( 'hashchange', onWindowHashChange, false );
-		window.addEventListener( 'resize', onWindowResize, false );
-
-		if( config.touch ) {
-			dom.wrapper.addEventListener( 'touchstart', onTouchStart, false );
-			dom.wrapper.addEventListener( 'touchmove', onTouchMove, false );
-			dom.wrapper.addEventListener( 'touchend', onTouchEnd, false );
-
-			// Support pointer-style touch interaction as well
-			if( window.navigator.msPointerEnabled ) {
-				dom.wrapper.addEventListener( 'MSPointerDown', onPointerDown, false );
-				dom.wrapper.addEventListener( 'MSPointerMove', onPointerMove, false );
-				dom.wrapper.addEventListener( 'MSPointerUp', onPointerUp, false );
-			}
-		}
-
-		if( config.keyboard ) {
-			document.addEventListener( 'keydown', onDocumentKeyDown, false );
-		}
-
-		if( config.progress && dom.progress ) {
-			dom.progress.addEventListener( 'click', onProgressClicked, false );
-		}
-
-		if( config.focusBodyOnPageVisiblityChange ) {
-			var visibilityChange;
-
-			if( 'hidden' in document ) {
-				visibilityChange = 'visibilitychange';
-			}
-			else if( 'msHidden' in document ) {
-				visibilityChange = 'msvisibilitychange';
-			}
-			else if( 'webkitHidden' in document ) {
-				visibilityChange = 'webkitvisibilitychange';
-			}
-
-			if( visibilityChange ) {
-				document.addEventListener( visibilityChange, onPageVisibilityChange, false );
-			}
-		}
-
-		[ 'touchstart', 'click' ].forEach( function( eventName ) {
-			dom.controlsLeft.forEach( function( el ) { el.addEventListener( eventName, onNavigateLeftClicked, false ); } );
-			dom.controlsRight.forEach( function( el ) { el.addEventListener( eventName, onNavigateRightClicked, false ); } );
-			dom.controlsUp.forEach( function( el ) { el.addEventListener( eventName, onNavigateUpClicked, false ); } );
-			dom.controlsDown.forEach( function( el ) { el.addEventListener( eventName, onNavigateDownClicked, false ); } );
-			dom.controlsPrev.forEach( function( el ) { el.addEventListener( eventName, onNavigatePrevClicked, false ); } );
-			dom.controlsNext.forEach( function( el ) { el.addEventListener( eventName, onNavigateNextClicked, false ); } );
-		} );
-
-	}
-
-	/**
-	 * Unbinds all event listeners.
-	 */
-	function removeEventListeners() {
-
-		eventsAreBound = false;
-
-		document.removeEventListener( 'keydown', onDocumentKeyDown, false );
-		window.removeEventListener( 'hashchange', onWindowHashChange, false );
-		window.removeEventListener( 'resize', onWindowResize, false );
-
-		dom.wrapper.removeEventListener( 'touchstart', onTouchStart, false );
-		dom.wrapper.removeEventListener( 'touchmove', onTouchMove, false );
-		dom.wrapper.removeEventListener( 'touchend', onTouchEnd, false );
-
-		if( window.navigator.msPointerEnabled ) {
-			dom.wrapper.removeEventListener( 'MSPointerDown', onPointerDown, false );
-			dom.wrapper.removeEventListener( 'MSPointerMove', onPointerMove, false );
-			dom.wrapper.removeEventListener( 'MSPointerUp', onPointerUp, false );
-		}
-
-		if ( config.progress && dom.progress ) {
-			dom.progress.removeEventListener( 'click', onProgressClicked, false );
-		}
-
-		[ 'touchstart', 'click' ].forEach( function( eventName ) {
-			dom.controlsLeft.forEach( function( el ) { el.removeEventListener( eventName, onNavigateLeftClicked, false ); } );
-			dom.controlsRight.forEach( function( el ) { el.removeEventListener( eventName, onNavigateRightClicked, false ); } );
-			dom.controlsUp.forEach( function( el ) { el.removeEventListener( eventName, onNavigateUpClicked, false ); } );
-			dom.controlsDown.forEach( function( el ) { el.removeEventListener( eventName, onNavigateDownClicked, false ); } );
-			dom.controlsPrev.forEach( function( el ) { el.removeEventListener( eventName, onNavigatePrevClicked, false ); } );
-			dom.controlsNext.forEach( function( el ) { el.removeEventListener( eventName, onNavigateNextClicked, false ); } );
-		} );
-
-	}
-
-	/**
-	 * Extend object a with the properties of object b.
-	 * If there's a conflict, object b takes precedence.
-	 */
-	function extend( a, b ) {
-
-		for( var i in b ) {
-			a[ i ] = b[ i ];
-		}
-
-	}
-
-	/**
-	 * Converts the target object to an array.
-	 */
-	function toArray( o ) {
-
-		return Array.prototype.slice.call( o );
-
-	}
-
-	/**
-	 * Measures the distance in pixels between point a
-	 * and point b.
-	 *
-	 * @param {Object} a point with x/y properties
-	 * @param {Object} b point with x/y properties
-	 */
-	function distanceBetween( a, b ) {
-
-		var dx = a.x - b.x,
-			dy = a.y - b.y;
-
-		return Math.sqrt( dx*dx + dy*dy );
-
-	}
-
-	/**
-	 * Applies a CSS transform to the target element.
-	 */
-	function transformElement( element, transform ) {
-
-		element.style.WebkitTransform = transform;
-		element.style.MozTransform = transform;
-		element.style.msTransform = transform;
-		element.style.OTransform = transform;
-		element.style.transform = transform;
-
-	}
-
-	/**
-	 * Retrieves the height of the given element by looking
-	 * at the position and height of its immediate children.
-	 */
-	function getAbsoluteHeight( element ) {
-
-		var height = 0;
-
-		if( element ) {
-			var absoluteChildren = 0;
-
-			toArray( element.childNodes ).forEach( function( child ) {
-
-				if( typeof child.offsetTop === 'number' && child.style ) {
-					// Count # of abs children
-					if( child.style.position === 'absolute' ) {
-						absoluteChildren += 1;
-					}
-
-					height = Math.max( height, child.offsetTop + child.offsetHeight );
-				}
-
-			} );
-
-			// If there are no absolute children, use offsetHeight
-			if( absoluteChildren === 0 ) {
-				height = element.offsetHeight;
-			}
-
-		}
-
-		return height;
-
-	}
-
-	/**
-	 * Returns the remaining height within the parent of the
-	 * target element after subtracting the height of all
-	 * siblings.
-	 *
-	 * remaining height = [parent height] - [ siblings height]
-	 */
-	function getRemainingHeight( element, height ) {
-
-		height = height || 0;
-
-		if( element ) {
-			var parent = element.parentNode;
-			var siblings = parent.childNodes;
-
-			// Subtract the height of each sibling
-			toArray( siblings ).forEach( function( sibling ) {
-
-				if( typeof sibling.offsetHeight === 'number' && sibling !== element ) {
-
-					var styles = window.getComputedStyle( sibling ),
-						marginTop = parseInt( styles.marginTop, 10 ),
-						marginBottom = parseInt( styles.marginBottom, 10 );
-
-					height -= sibling.offsetHeight + marginTop + marginBottom;
-
-				}
-
-			} );
-
-			var elementStyles = window.getComputedStyle( element );
-
-			// Subtract the margins of the target element
-			height -= parseInt( elementStyles.marginTop, 10 ) +
-						parseInt( elementStyles.marginBottom, 10 );
-
-		}
-
-		return height;
-
-	}
-
-	/**
-	 * Checks if this instance is being used to print a PDF.
-	 */
-	function isPrintingPDF() {
-
-		return ( /print-pdf/gi ).test( window.location.search );
-
-	}
-
-	/**
-	 * Hides the address bar if we're on a mobile device.
-	 */
-	function hideAddressBar() {
-
-		if( config.hideAddressBar && isMobileDevice ) {
-			// Events that should trigger the address bar to hide
-			window.addEventListener( 'load', removeAddressBar, false );
-			window.addEventListener( 'orientationchange', removeAddressBar, false );
-		}
-
-	}
-
-	/**
-	 * Causes the address bar to hide on mobile devices,
-	 * more vertical space ftw.
-	 */
-	function removeAddressBar() {
-
-		setTimeout( function() {
-			window.scrollTo( 0, 1 );
-		}, 10 );
-
-	}
-
-	/**
-	 * Dispatches an event of the specified type from the
-	 * reveal DOM element.
-	 */
-	function dispatchEvent( type, properties ) {
-
-		var event = document.createEvent( "HTMLEvents", 1, 2 );
-		event.initEvent( type, true, true );
-		extend( event, properties );
-		dom.wrapper.dispatchEvent( event );
-
-	}
-
-	/**
-	 * Wrap all links in 3D goodness.
-	 */
-	function enableRollingLinks() {
-
-		if( features.transforms3d && !( 'msPerspective' in document.body.style ) ) {
-			var anchors = document.querySelectorAll( SLIDES_SELECTOR + ' a:not(.image)' );
-
-			for( var i = 0, len = anchors.length; i < len; i++ ) {
-				var anchor = anchors[i];
-
-				if( anchor.textContent && !anchor.querySelector( '*' ) && ( !anchor.className || !anchor.classList.contains( anchor, 'roll' ) ) ) {
-					var span = document.createElement('span');
-					span.setAttribute('data-title', anchor.text);
-					span.innerHTML = anchor.innerHTML;
-
-					anchor.classList.add( 'roll' );
-					anchor.innerHTML = '';
-					anchor.appendChild(span);
-				}
-			}
-		}
-
-	}
-
-	/**
-	 * Unwrap all 3D links.
-	 */
-	function disableRollingLinks() {
-
-		var anchors = document.querySelectorAll( SLIDES_SELECTOR + ' a.roll' );
-
-		for( var i = 0, len = anchors.length; i < len; i++ ) {
-			var anchor = anchors[i];
-			var span = anchor.querySelector( 'span' );
-
-			if( span ) {
-				anchor.classList.remove( 'roll' );
-				anchor.innerHTML = span.innerHTML;
-			}
-		}
-
-	}
-
-	/**
-	 * Bind preview frame links.
-	 */
-	function enablePreviewLinks( selector ) {
-
-		var anchors = toArray( document.querySelectorAll( selector ? selector : 'a' ) );
-
-		anchors.forEach( function( element ) {
-			if( /^(http|www)/gi.test( element.getAttribute( 'href' ) ) ) {
-				element.addEventListener( 'click', onPreviewLinkClicked, false );
-			}
-		} );
-
-	}
-
-	/**
-	 * Unbind preview frame links.
-	 */
-	function disablePreviewLinks() {
-
-		var anchors = toArray( document.querySelectorAll( 'a' ) );
-
-		anchors.forEach( function( element ) {
-			if( /^(http|www)/gi.test( element.getAttribute( 'href' ) ) ) {
-				element.removeEventListener( 'click', onPreviewLinkClicked, false );
-			}
-		} );
-
-	}
-
-	/**
-	 * Opens a preview window for the target URL.
-	 */
-	function openPreview( url ) {
-
-		closePreview();
-
-		dom.preview = document.createElement( 'div' );
-		dom.preview.classList.add( 'preview-link-overlay' );
-		dom.wrapper.appendChild( dom.preview );
-
-		dom.preview.innerHTML = [
-			'<header>',
-				'<a class="close" href="#"><span class="icon"></span></a>',
-				'<a class="external" href="'+ url +'" target="_blank"><span class="icon"></span></a>',
-			'</header>',
-			'<div class="spinner"></div>',
-			'<div class="viewport">',
-				'<iframe src="'+ url +'"></iframe>',
-			'</div>'
-		].join('');
-
-		dom.preview.querySelector( 'iframe' ).addEventListener( 'load', function( event ) {
-			dom.preview.classList.add( 'loaded' );
-		}, false );
-
-		dom.preview.querySelector( '.close' ).addEventListener( 'click', function( event ) {
-			closePreview();
-			event.preventDefault();
-		}, false );
-
-		dom.preview.querySelector( '.external' ).addEventListener( 'click', function( event ) {
-			closePreview();
-		}, false );
-
-		setTimeout( function() {
-			dom.preview.classList.add( 'visible' );
-		}, 1 );
-
-	}
-
-	/**
-	 * Closes the iframe preview window.
-	 */
-	function closePreview() {
-
-		if( dom.preview ) {
-			dom.preview.setAttribute( 'src', '' );
-			dom.preview.parentNode.removeChild( dom.preview );
-			dom.preview = null;
-		}
-
-	}
-
-	/**
-	 * Applies JavaScript-controlled layout rules to the
-	 * presentation.
-	 */
-	function layout() {
-
-		if( dom.wrapper && !isPrintingPDF() ) {
-
-			// Available space to scale within
-			var availableWidth = dom.wrapper.offsetWidth,
-				availableHeight = dom.wrapper.offsetHeight;
-
-			// Reduce available space by margin
-			availableWidth -= ( availableHeight * config.margin );
-			availableHeight -= ( availableHeight * config.margin );
-
-			// Dimensions of the content
-			var slideWidth = config.width,
-				slideHeight = config.height,
-				slidePadding = 20; // TODO Dig this out of DOM
-
-			// Layout the contents of the slides
-			layoutSlideContents( config.width, config.height, slidePadding );
-
-			// Slide width may be a percentage of available width
-			if( typeof slideWidth === 'string' && /%$/.test( slideWidth ) ) {
-				slideWidth = parseInt( slideWidth, 10 ) / 100 * availableWidth;
-			}
-
-			// Slide height may be a percentage of available height
-			if( typeof slideHeight === 'string' && /%$/.test( slideHeight ) ) {
-				slideHeight = parseInt( slideHeight, 10 ) / 100 * availableHeight;
-			}
-
-			dom.slides.style.width = slideWidth + 'px';
-			dom.slides.style.height = slideHeight + 'px';
-
-			// Determine scale of content to fit within available space
-			scale = Math.min( availableWidth / slideWidth, availableHeight / slideHeight );
-
-			// Respect max/min scale settings
-			scale = Math.max( scale, config.minScale );
-			scale = Math.min( scale, config.maxScale );
-
-			// Prefer applying scale via zoom since Chrome blurs scaled content
-			// with nested transforms
-			if( typeof dom.slides.style.zoom !== 'undefined' && !navigator.userAgent.match( /(iphone|ipod|ipad|android)/gi ) ) {
-				dom.slides.style.zoom = scale;
-			}
-			// Apply scale transform as a fallback
-			else {
-				transformElement( dom.slides, 'translate(-50%, -50%) scale('+ scale +') translate(50%, 50%)' );
-			}
-
-			// Select all slides, vertical and horizontal
-			var slides = toArray( document.querySelectorAll( SLIDES_SELECTOR ) );
-
-			for( var i = 0, len = slides.length; i < len; i++ ) {
-				var slide = slides[ i ];
-
-				// Don't bother updating invisible slides
-				if( slide.style.display === 'none' ) {
-					continue;
-				}
-
-				if( config.center || slide.classList.contains( 'center' ) ) {
-					// Vertical stacks are not centred since their section
-					// children will be
-					if( slide.classList.contains( 'stack' ) ) {
-						slide.style.top = 0;
-					}
-					else {
-						slide.style.top = Math.max( - ( getAbsoluteHeight( slide ) / 2 ) - slidePadding, -slideHeight / 2 ) + 'px';
-					}
-				}
-				else {
-					slide.style.top = '';
-				}
-
-			}
-
-			updateProgress();
-			updateParallax();
-
-		}
-
-	}
-
-	/**
-	 * Applies layout logic to the contents of all slides in
-	 * the presentation.
-	 */
-	function layoutSlideContents( width, height, padding ) {
-
-		// Handle sizing of elements with the 'stretch' class
-		toArray( dom.slides.querySelectorAll( 'section > .stretch' ) ).forEach( function( element ) {
-
-			// Determine how much vertical space we can use
-			var remainingHeight = getRemainingHeight( element, ( height - ( padding * 2 ) ) );
-
-			// Consider the aspect ratio of media elements
-			if( /(img|video)/gi.test( element.nodeName ) ) {
-				var nw = element.naturalWidth || element.videoWidth,
-					nh = element.naturalHeight || element.videoHeight;
-
-				var es = Math.min( width / nw, remainingHeight / nh );
-
-				element.style.width = ( nw * es ) + 'px';
-				element.style.height = ( nh * es ) + 'px';
-
-			}
-			else {
-				element.style.width = width + 'px';
-				element.style.height = remainingHeight + 'px';
-			}
-
-		} );
-
-	}
-
-	/**
-	 * Stores the vertical index of a stack so that the same
-	 * vertical slide can be selected when navigating to and
-	 * from the stack.
-	 *
-	 * @param {HTMLElement} stack The vertical stack element
-	 * @param {int} v Index to memorize
-	 */
-	function setPreviousVerticalIndex( stack, v ) {
-
-		if( typeof stack === 'object' && typeof stack.setAttribute === 'function' ) {
-			stack.setAttribute( 'data-previous-indexv', v || 0 );
-		}
-
-	}
-
-	/**
-	 * Retrieves the vertical index which was stored using
-	 * #setPreviousVerticalIndex() or 0 if no previous index
-	 * exists.
-	 *
-	 * @param {HTMLElement} stack The vertical stack element
-	 */
-	function getPreviousVerticalIndex( stack ) {
-
-		if( typeof stack === 'object' && typeof stack.setAttribute === 'function' && stack.classList.contains( 'stack' ) ) {
-			// Prefer manually defined start-indexv
-			var attributeName = stack.hasAttribute( 'data-start-indexv' ) ? 'data-start-indexv' : 'data-previous-indexv';
-
-			return parseInt( stack.getAttribute( attributeName ) || 0, 10 );
-		}
-
-		return 0;
-
-	}
-
-	/**
-	 * Displays the overview of slides (quick nav) by
-	 * scaling down and arranging all slide elements.
-	 *
-	 * Experimental feature, might be dropped if perf
-	 * can't be improved.
-	 */
-	function activateOverview() {
-
-		// Only proceed if enabled in config
-		if( config.overview ) {
-
-			// Don't auto-slide while in overview mode
-			cancelAutoSlide();
-
-			var wasActive = dom.wrapper.classList.contains( 'overview' );
-
-			// Vary the depth of the overview based on screen size
-			var depth = window.innerWidth < 400 ? 1000 : 2500;
-
-			dom.wrapper.classList.add( 'overview' );
-			dom.wrapper.classList.remove( 'overview-deactivating' );
-
-			clearTimeout( activateOverviewTimeout );
-			clearTimeout( deactivateOverviewTimeout );
-
-			// Not the pretties solution, but need to let the overview
-			// class apply first so that slides are measured accurately
-			// before we can position them
-			activateOverviewTimeout = setTimeout( function() {
-
-				var horizontalSlides = document.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR );
-
-				for( var i = 0, len1 = horizontalSlides.length; i < len1; i++ ) {
-					var hslide = horizontalSlides[i],
-						hoffset = config.rtl ? -105 : 105;
-
-					hslide.setAttribute( 'data-index-h', i );
-
-					// Apply CSS transform
-					transformElement( hslide, 'translateZ(-'+ depth +'px) translate(' + ( ( i - indexh ) * hoffset ) + '%, 0%)' );
-
-					if( hslide.classList.contains( 'stack' ) ) {
-
-						var verticalSlides = hslide.querySelectorAll( 'section' );
-
-						for( var j = 0, len2 = verticalSlides.length; j < len2; j++ ) {
-							var verticalIndex = i === indexh ? indexv : getPreviousVerticalIndex( hslide );
-
-							var vslide = verticalSlides[j];
-
-							vslide.setAttribute( 'data-index-h', i );
-							vslide.setAttribute( 'data-index-v', j );
-
-							// Apply CSS transform
-							transformElement( vslide, 'translate(0%, ' + ( ( j - verticalIndex ) * 105 ) + '%)' );
-
-							// Navigate to this slide on click
-							vslide.addEventListener( 'click', onOverviewSlideClicked, true );
-						}
-
-					}
-					else {
-
-						// Navigate to this slide on click
-						hslide.addEventListener( 'click', onOverviewSlideClicked, true );
-
-					}
-				}
-
-				updateSlidesVisibility();
-
-				layout();
-
-				if( !wasActive ) {
-					// Notify observers of the overview showing
-					dispatchEvent( 'overviewshown', {
-						'indexh': indexh,
-						'indexv': indexv,
-						'currentSlide': currentSlide
-					} );
-				}
-
-			}, 10 );
-
-		}
-
-	}
-
-	/**
-	 * Exits the slide overview and enters the currently
-	 * active slide.
-	 */
-	function deactivateOverview() {
-
-		// Only proceed if enabled in config
-		if( config.overview ) {
-
-			clearTimeout( activateOverviewTimeout );
-			clearTimeout( deactivateOverviewTimeout );
-
-			dom.wrapper.classList.remove( 'overview' );
-
-			// Temporarily add a class so that transitions can do different things
-			// depending on whether they are exiting/entering overview, or just
-			// moving from slide to slide
-			dom.wrapper.classList.add( 'overview-deactivating' );
-
-			deactivateOverviewTimeout = setTimeout( function () {
-				dom.wrapper.classList.remove( 'overview-deactivating' );
-			}, 1 );
-
-			// Select all slides
-			toArray( document.querySelectorAll( SLIDES_SELECTOR ) ).forEach( function( slide ) {
-				// Resets all transforms to use the external styles
-				transformElement( slide, '' );
-
-				slide.removeEventListener( 'click', onOverviewSlideClicked, true );
-			} );
-
-			slide( indexh, indexv );
-
-			cueAutoSlide();
-
-			// Notify observers of the overview hiding
-			dispatchEvent( 'overviewhidden', {
-				'indexh': indexh,
-				'indexv': indexv,
-				'currentSlide': currentSlide
-			} );
-
-		}
-	}
-
-	/**
-	 * Toggles the slide overview mode on and off.
-	 *
-	 * @param {Boolean} override Optional flag which overrides the
-	 * toggle logic and forcibly sets the desired state. True means
-	 * overview is open, false means it's closed.
-	 */
-	function toggleOverview( override ) {
-
-		if( typeof override === 'boolean' ) {
-			override ? activateOverview() : deactivateOverview();
-		}
-		else {
-			isOverview() ? deactivateOverview() : activateOverview();
-		}
-
-	}
-
-	/**
-	 * Checks if the overview is currently active.
-	 *
-	 * @return {Boolean} true if the overview is active,
-	 * false otherwise
-	 */
-	function isOverview() {
-
-		return dom.wrapper.classList.contains( 'overview' );
-
-	}
-
-	/**
-	 * Checks if the current or specified slide is vertical
-	 * (nested within another slide).
-	 *
-	 * @param {HTMLElement} slide [optional] The slide to check
-	 * orientation of
-	 */
-	function isVerticalSlide( slide ) {
-
-		// Prefer slide argument, otherwise use current slide
-		slide = slide ? slide : currentSlide;
-
-		return slide && slide.parentNode && !!slide.parentNode.nodeName.match( /section/i );
-
-	}
-
-	/**
-	 * Handling the fullscreen functionality via the fullscreen API
-	 *
-	 * @see http://fullscreen.spec.whatwg.org/
-	 * @see https://developer.mozilla.org/en-US/docs/DOM/Using_fullscreen_mode
-	 */
-	function enterFullscreen() {
-
-		var element = document.body;
-
-		// Check which implementation is available
-		var requestMethod = element.requestFullScreen ||
-							element.webkitRequestFullscreen ||
-							element.webkitRequestFullScreen ||
-							element.mozRequestFullScreen ||
-							element.msRequestFullScreen;
-
-		if( requestMethod ) {
-			requestMethod.apply( element );
-		}
-
-	}
-
-	/**
-	 * Enters the paused mode which fades everything on screen to
-	 * black.
-	 */
-	function pause() {
-
-		var wasPaused = dom.wrapper.classList.contains( 'paused' );
-
-		cancelAutoSlide();
-		dom.wrapper.classList.add( 'paused' );
-
-		if( wasPaused === false ) {
-			dispatchEvent( 'paused' );
-		}
-
-	}
-
-	/**
-	 * Exits from the paused mode.
-	 */
-	function resume() {
-
-		var wasPaused = dom.wrapper.classList.contains( 'paused' );
-		dom.wrapper.classList.remove( 'paused' );
-
-		cueAutoSlide();
-
-		if( wasPaused ) {
-			dispatchEvent( 'resumed' );
-		}
-
-	}
-
-	/**
-	 * Toggles the paused mode on and off.
-	 */
-	function togglePause() {
-
-		if( isPaused() ) {
-			resume();
-		}
-		else {
-			pause();
-		}
-
-	}
-
-	/**
-	 * Checks if we are currently in the paused mode.
-	 */
-	function isPaused() {
-
-		return dom.wrapper.classList.contains( 'paused' );
-
-	}
-
-	/**
-	 * Steps from the current point in the presentation to the
-	 * slide which matches the specified horizontal and vertical
-	 * indices.
-	 *
-	 * @param {int} h Horizontal index of the target slide
-	 * @param {int} v Vertical index of the target slide
-	 * @param {int} f Optional index of a fragment within the
-	 * target slide to activate
-	 * @param {int} o Optional origin for use in multimaster environments
-	 */
-	function slide( h, v, f, o ) {
-
-		// Remember where we were at before
-		previousSlide = currentSlide;
-
-		// Query all horizontal slides in the deck
-		var horizontalSlides = document.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR );
-
-		// If no vertical index is specified and the upcoming slide is a
-		// stack, resume at its previous vertical index
-		if( v === undefined ) {
-			v = getPreviousVerticalIndex( horizontalSlides[ h ] );
-		}
-
-		// If we were on a vertical stack, remember what vertical index
-		// it was on so we can resume at the same position when returning
-		if( previousSlide && previousSlide.parentNode && previousSlide.parentNode.classList.contains( 'stack' ) ) {
-			setPreviousVerticalIndex( previousSlide.parentNode, indexv );
-		}
-
-		// Remember the state before this slide
-		var stateBefore = state.concat();
-
-		// Reset the state array
-		state.length = 0;
-
-		var indexhBefore = indexh || 0,
-			indexvBefore = indexv || 0;
-
-		// Activate and transition to the new slide
-		indexh = updateSlides( HORIZONTAL_SLIDES_SELECTOR, h === undefined ? indexh : h );
-		indexv = updateSlides( VERTICAL_SLIDES_SELECTOR, v === undefined ? indexv : v );
-
-		// Update the visibility of slides now that the indices have changed
-		updateSlidesVisibility();
-
-		layout();
-
-		// Apply the new state
-		stateLoop: for( var i = 0, len = state.length; i < len; i++ ) {
-			// Check if this state existed on the previous slide. If it
-			// did, we will avoid adding it repeatedly
-			for( var j = 0; j < stateBefore.length; j++ ) {
-				if( stateBefore[j] === state[i] ) {
-					stateBefore.splice( j, 1 );
-					continue stateLoop;
-				}
-			}
-
-			document.documentElement.classList.add( state[i] );
-
-			// Dispatch custom event matching the state's name
-			dispatchEvent( state[i] );
-		}
-
-		// Clean up the remains of the previous state
-		while( stateBefore.length ) {
-			document.documentElement.classList.remove( stateBefore.pop() );
-		}
-
-		// If the overview is active, re-activate it to update positions
-		if( isOverview() ) {
-			activateOverview();
-		}
-
-		// Find the current horizontal slide and any possible vertical slides
-		// within it
-		var currentHorizontalSlide = horizontalSlides[ indexh ],
-			currentVerticalSlides = currentHorizontalSlide.querySelectorAll( 'section' );
-
-		// Store references to the previous and current slides
-		currentSlide = currentVerticalSlides[ indexv ] || currentHorizontalSlide;
-
-		// Show fragment, if specified
-		if( typeof f !== 'undefined' ) {
-			navigateFragment( f );
-		}
-
-		// Dispatch an event if the slide changed
-		var slideChanged = ( indexh !== indexhBefore || indexv !== indexvBefore );
-		if( slideChanged ) {
-			dispatchEvent( 'slidechanged', {
-				'indexh': indexh,
-				'indexv': indexv,
-				'previousSlide': previousSlide,
-				'currentSlide': currentSlide,
-				'origin': o
-			} );
-		}
-		else {
-			// Ensure that the previous slide is never the same as the current
-			previousSlide = null;
-		}
-
-		// Solves an edge case where the previous slide maintains the
-		// 'present' class when navigating between adjacent vertical
-		// stacks
-		if( previousSlide ) {
-			previousSlide.classList.remove( 'present' );
-
-			// Reset all slides upon navigate to home
-			// Issue: #285
-			if ( document.querySelector( HOME_SLIDE_SELECTOR ).classList.contains( 'present' ) ) {
-				// Launch async task
-				setTimeout( function () {
-					var slides = toArray( document.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR + '.stack') ), i;
-					for( i in slides ) {
-						if( slides[i] ) {
-							// Reset stack
-							setPreviousVerticalIndex( slides[i], 0 );
-						}
-					}
-				}, 0 );
-			}
-		}
-
-		// Handle embedded content
-		if( slideChanged ) {
-			stopEmbeddedContent( previousSlide );
-			startEmbeddedContent( currentSlide );
-		}
-
-		updateControls();
-		updateProgress();
-		updateBackground();
-		updateParallax();
-		updateSlideNumber();
-
-		// Update the URL hash
-		writeURL();
-
-		cueAutoSlide();
-
-	}
-
-	/**
-	 * Syncs the presentation with the current DOM. Useful
-	 * when new slides or control elements are added or when
-	 * the configuration has changed.
-	 */
-	function sync() {
-
-		// Subscribe to input
-		removeEventListeners();
-		addEventListeners();
-
-		// Force a layout to make sure the current config is accounted for
-		layout();
-
-		// Reflect the current autoSlide value
-		autoSlide = config.autoSlide;
-
-		// Start auto-sliding if it's enabled
-		cueAutoSlide();
-
-		// Re-create the slide backgrounds
-		createBackgrounds();
-
-		sortAllFragments();
-
-		updateControls();
-		updateProgress();
-		updateBackground( true );
-		updateSlideNumber();
-
-	}
-
-	/**
-	 * Resets all vertical slides so that only the first
-	 * is visible.
-	 */
-	function resetVerticalSlides() {
-
-		var horizontalSlides = toArray( document.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ) );
-		horizontalSlides.forEach( function( horizontalSlide ) {
-
-			var verticalSlides = toArray( horizontalSlide.querySelectorAll( 'section' ) );
-			verticalSlides.forEach( function( verticalSlide, y ) {
-
-				if( y > 0 ) {
-					verticalSlide.classList.remove( 'present' );
-					verticalSlide.classList.remove( 'past' );
-					verticalSlide.classList.add( 'future' );
-				}
-
-			} );
-
-		} );
-
-	}
-
-	/**
-	 * Sorts and formats all of fragments in the
-	 * presentation.
-	 */
-	function sortAllFragments() {
-
-		var horizontalSlides = toArray( document.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ) );
-		horizontalSlides.forEach( function( horizontalSlide ) {
-
-			var verticalSlides = toArray( horizontalSlide.querySelectorAll( 'section' ) );
-			verticalSlides.forEach( function( verticalSlide, y ) {
-
-				sortFragments( verticalSlide.querySelectorAll( '.fragment' ) );
-
-			} );
-
-			if( verticalSlides.length === 0 ) sortFragments( horizontalSlide.querySelectorAll( '.fragment' ) );
-
-		} );
-
-	}
-
-	/**
-	 * Updates one dimension of slides by showing the slide
-	 * with the specified index.
-	 *
-	 * @param {String} selector A CSS selector that will fetch
-	 * the group of slides we are working with
-	 * @param {Number} index The index of the slide that should be
-	 * shown
-	 *
-	 * @return {Number} The index of the slide that is now shown,
-	 * might differ from the passed in index if it was out of
-	 * bounds.
-	 */
-	function updateSlides( selector, index ) {
-
-		// Select all slides and convert the NodeList result to
-		// an array
-		var slides = toArray( document.querySelectorAll( selector ) ),
-			slidesLength = slides.length;
-
-		if( slidesLength ) {
-
-			// Should the index loop?
-			if( config.loop ) {
-				index %= slidesLength;
-
-				if( index < 0 ) {
-					index = slidesLength + index;
-				}
-			}
-
-			// Enforce max and minimum index bounds
-			index = Math.max( Math.min( index, slidesLength - 1 ), 0 );
-
-			for( var i = 0; i < slidesLength; i++ ) {
-				var element = slides[i];
-
-				var reverse = config.rtl && !isVerticalSlide( element );
-
-				element.classList.remove( 'past' );
-				element.classList.remove( 'present' );
-				element.classList.remove( 'future' );
-
-				// http://www.w3.org/html/wg/drafts/html/master/editing.html#the-hidden-attribute
-				element.setAttribute( 'hidden', '' );
-
-				if( i < index ) {
-					// Any element previous to index is given the 'past' class
-					element.classList.add( reverse ? 'future' : 'past' );
-
-					var pastFragments = toArray( element.querySelectorAll( '.fragment' ) );
-
-					// Show all fragments on prior slides
-					while( pastFragments.length ) {
-						var pastFragment = pastFragments.pop();
-						pastFragment.classList.add( 'visible' );
-						pastFragment.classList.remove( 'current-fragment' );
-					}
-				}
-				else if( i > index ) {
-					// Any element subsequent to index is given the 'future' class
-					element.classList.add( reverse ? 'past' : 'future' );
-
-					var futureFragments = toArray( element.querySelectorAll( '.fragment.visible' ) );
-
-					// No fragments in future slides should be visible ahead of time
-					while( futureFragments.length ) {
-						var futureFragment = futureFragments.pop();
-						futureFragment.classList.remove( 'visible' );
-						futureFragment.classList.remove( 'current-fragment' );
-					}
-				}
-
-				// If this element contains vertical slides
-				if( element.querySelector( 'section' ) ) {
-					element.classList.add( 'stack' );
-				}
-			}
-
-			// Mark the current slide as present
-			slides[index].classList.add( 'present' );
-			slides[index].removeAttribute( 'hidden' );
-
-			// If this slide has a state associated with it, add it
-			// onto the current state of the deck
-			var slideState = slides[index].getAttribute( 'data-state' );
-			if( slideState ) {
-				state = state.concat( slideState.split( ' ' ) );
-			}
-
-		}
-		else {
-			// Since there are no slides we can't be anywhere beyond the
-			// zeroth index
-			index = 0;
-		}
-
-		return index;
-
-	}
-
-	/**
-	 * Optimization method; hide all slides that are far away
-	 * from the present slide.
-	 */
-	function updateSlidesVisibility() {
-
-		// Select all slides and convert the NodeList result to
-		// an array
-		var horizontalSlides = toArray( document.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ) ),
-			horizontalSlidesLength = horizontalSlides.length,
-			distanceX,
-			distanceY;
-
-		if( horizontalSlidesLength ) {
-
-			// The number of steps away from the present slide that will
-			// be visible
-			var viewDistance = isOverview() ? 10 : config.viewDistance;
-
-			// Limit view distance on weaker devices
-			if( isMobileDevice ) {
-				viewDistance = isOverview() ? 6 : 1;
-			}
-
-			for( var x = 0; x < horizontalSlidesLength; x++ ) {
-				var horizontalSlide = horizontalSlides[x];
-
-				var verticalSlides = toArray( horizontalSlide.querySelectorAll( 'section' ) ),
-					verticalSlidesLength = verticalSlides.length;
-
-				// Loops so that it measures 1 between the first and last slides
-				distanceX = Math.abs( ( indexh - x ) % ( horizontalSlidesLength - viewDistance ) ) || 0;
-
-				// Show the horizontal slide if it's within the view distance
-				horizontalSlide.style.display = distanceX > viewDistance ? 'none' : 'block';
-
-				if( verticalSlidesLength ) {
-
-					var oy = getPreviousVerticalIndex( horizontalSlide );
-
-					for( var y = 0; y < verticalSlidesLength; y++ ) {
-						var verticalSlide = verticalSlides[y];
-
-						distanceY = x === indexh ? Math.abs( indexv - y ) : Math.abs( y - oy );
-
-						verticalSlide.style.display = ( distanceX + distanceY ) > viewDistance ? 'none' : 'block';
-					}
-
-				}
-			}
-
-		}
-
-	}
-
-	/**
-	 * Updates the progress bar to reflect the current slide.
-	 */
-	function updateProgress() {
-
-		// Update progress if enabled
-		if( config.progress && dom.progress ) {
-
-			var horizontalSlides = toArray( document.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ) );
-
-			// The number of past and total slides
-			var totalCount = document.querySelectorAll( SLIDES_SELECTOR + ':not(.stack)' ).length;
-			var pastCount = 0;
-
-			// Step through all slides and count the past ones
-			mainLoop: for( var i = 0; i < horizontalSlides.length; i++ ) {
-
-				var horizontalSlide = horizontalSlides[i];
-				var verticalSlides = toArray( horizontalSlide.querySelectorAll( 'section' ) );
-
-				for( var j = 0; j < verticalSlides.length; j++ ) {
-
-					// Stop as soon as we arrive at the present
-					if( verticalSlides[j].classList.contains( 'present' ) ) {
-						break mainLoop;
-					}
-
-					pastCount++;
-
-				}
-
-				// Stop as soon as we arrive at the present
-				if( horizontalSlide.classList.contains( 'present' ) ) {
-					break;
-				}
-
-				// Don't count the wrapping section for vertical slides
-				if( horizontalSlide.classList.contains( 'stack' ) === false ) {
-					pastCount++;
-				}
-
-			}
-
-			dom.progressbar.style.width = ( pastCount / ( totalCount - 1 ) ) * window.innerWidth + 'px';
-
-		}
-
-	}
-
-	/**
-	 * Updates the slide number div to reflect the current slide.
-	 */
-	function updateSlideNumber() {
-
-		// Update slide number if enabled
-		if( config.slideNumber && dom.slideNumber) {
-
-			// Display the number of the page using 'indexh - indexv' format
-			var indexString = indexh;
-			if( indexv > 0 ) {
-				indexString += ' - ' + indexv;
-			}
-
-			dom.slideNumber.innerHTML = indexString;
-		}
-
-	}
-
-	/**
-	 * Updates the state of all control/navigation arrows.
-	 */
-	function updateControls() {
-
-		var routes = availableRoutes();
-		var fragments = availableFragments();
-
-		// Remove the 'enabled' class from all directions
-		dom.controlsLeft.concat( dom.controlsRight )
-						.concat( dom.controlsUp )
-						.concat( dom.controlsDown )
-						.concat( dom.controlsPrev )
-						.concat( dom.controlsNext ).forEach( function( node ) {
-			node.classList.remove( 'enabled' );
-			node.classList.remove( 'fragmented' );
-		} );
-
-		// Add the 'enabled' class to the available routes
-		if( routes.left ) dom.controlsLeft.forEach( function( el ) { el.classList.add( 'enabled' );	} );
-		if( routes.right ) dom.controlsRight.forEach( function( el ) { el.classList.add( 'enabled' ); } );
-		if( routes.up ) dom.controlsUp.forEach( function( el ) { el.classList.add( 'enabled' );	} );
-		if( routes.down ) dom.controlsDown.forEach( function( el ) { el.classList.add( 'enabled' ); } );
-
-		// Prev/next buttons
-		if( routes.left || routes.up ) dom.controlsPrev.forEach( function( el ) { el.classList.add( 'enabled' ); } );
-		if( routes.right || routes.down ) dom.controlsNext.forEach( function( el ) { el.classList.add( 'enabled' ); } );
-
-		// Highlight fragment directions
-		if( currentSlide ) {
-
-			// Always apply fragment decorator to prev/next buttons
-			if( fragments.prev ) dom.controlsPrev.forEach( function( el ) { el.classList.add( 'fragmented', 'enabled' ); } );
-			if( fragments.next ) dom.controlsNext.forEach( function( el ) { el.classList.add( 'fragmented', 'enabled' ); } );
-
-			// Apply fragment decorators to directional buttons based on
-			// what slide axis they are in
-			if( isVerticalSlide( currentSlide ) ) {
-				if( fragments.prev ) dom.controlsUp.forEach( function( el ) { el.classList.add( 'fragmented', 'enabled' ); } );
-				if( fragments.next ) dom.controlsDown.forEach( function( el ) { el.classList.add( 'fragmented', 'enabled' ); } );
-			}
-			else {
-				if( fragments.prev ) dom.controlsLeft.forEach( function( el ) { el.classList.add( 'fragmented', 'enabled' ); } );
-				if( fragments.next ) dom.controlsRight.forEach( function( el ) { el.classList.add( 'fragmented', 'enabled' ); } );
-			}
-
-		}
-
-	}
-
-	/**
-	 * Updates the background elements to reflect the current
-	 * slide.
-	 *
-	 * @param {Boolean} includeAll If true, the backgrounds of
-	 * all vertical slides (not just the present) will be updated.
-	 */
-	function updateBackground( includeAll ) {
-
-		var currentBackground = null;
-
-		// Reverse past/future classes when in RTL mode
-		var horizontalPast = config.rtl ? 'future' : 'past',
-			horizontalFuture = config.rtl ? 'past' : 'future';
-
-		// Update the classes of all backgrounds to match the
-		// states of their slides (past/present/future)
-		toArray( dom.background.childNodes ).forEach( function( backgroundh, h ) {
-
-			if( h < indexh ) {
-				backgroundh.className = 'slide-background ' + horizontalPast;
-			}
-			else if ( h > indexh ) {
-				backgroundh.className = 'slide-background ' + horizontalFuture;
-			}
-			else {
-				backgroundh.className = 'slide-background present';
-
-				// Store a reference to the current background element
-				currentBackground = backgroundh;
-			}
-
-			if( includeAll || h === indexh ) {
-				toArray( backgroundh.childNodes ).forEach( function( backgroundv, v ) {
-
-					if( v < indexv ) {
-						backgroundv.className = 'slide-background past';
-					}
-					else if ( v > indexv ) {
-						backgroundv.className = 'slide-background future';
-					}
-					else {
-						backgroundv.className = 'slide-background present';
-
-						// Only if this is the present horizontal and vertical slide
-						if( h === indexh ) currentBackground = backgroundv;
-					}
-
-				} );
-			}
-
-		} );
-
-		// Don't transition between identical backgrounds. This
-		// prevents unwanted flicker.
-		if( currentBackground ) {
-			var previousBackgroundHash = previousBackground ? previousBackground.getAttribute( 'data-background-hash' ) : null;
-			var currentBackgroundHash = currentBackground.getAttribute( 'data-background-hash' );
-			if( currentBackgroundHash && currentBackgroundHash === previousBackgroundHash && currentBackground !== previousBackground ) {
-				dom.background.classList.add( 'no-transition' );
-			}
-
-			previousBackground = currentBackground;
-		}
-
-		// Allow the first background to apply without transition
-		setTimeout( function() {
-			dom.background.classList.remove( 'no-transition' );
-		}, 1 );
-
-	}
-
-	/**
-	 * Updates the position of the parallax background based
-	 * on the current slide index.
-	 */
-	function updateParallax() {
-
-		if( config.parallaxBackgroundImage ) {
-
-			var horizontalSlides = document.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ),
-				verticalSlides = document.querySelectorAll( VERTICAL_SLIDES_SELECTOR );
-
-			var backgroundSize = dom.background.style.backgroundSize.split( ' ' ),
-				backgroundWidth, backgroundHeight;
-
-			if( backgroundSize.length === 1 ) {
-				backgroundWidth = backgroundHeight = parseInt( backgroundSize[0], 10 );
-			}
-			else {
-				backgroundWidth = parseInt( backgroundSize[0], 10 );
-				backgroundHeight = parseInt( backgroundSize[1], 10 );
-			}
-
-			var slideWidth = dom.background.offsetWidth;
-			var horizontalSlideCount = horizontalSlides.length;
-			var horizontalOffset = -( backgroundWidth - slideWidth ) / ( horizontalSlideCount-1 ) * indexh;
-
-			var slideHeight = dom.background.offsetHeight;
-			var verticalSlideCount = verticalSlides.length;
-			var verticalOffset = verticalSlideCount > 0 ? -( backgroundHeight - slideHeight ) / ( verticalSlideCount-1 ) * indexv : 0;
-
-			dom.background.style.backgroundPosition = horizontalOffset + 'px ' + verticalOffset + 'px';
-
-		}
-
-	}
-
-	/**
-	 * Determine what available routes there are for navigation.
-	 *
-	 * @return {Object} containing four booleans: left/right/up/down
-	 */
-	function availableRoutes() {
-
-		var horizontalSlides = document.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ),
-			verticalSlides = document.querySelectorAll( VERTICAL_SLIDES_SELECTOR );
-
-		var routes = {
-			left: indexh > 0 || config.loop,
-			right: indexh < horizontalSlides.length - 1 || config.loop,
-			up: indexv > 0,
-			down: indexv < verticalSlides.length - 1
-		};
-
-		// reverse horizontal controls for rtl
-		if( config.rtl ) {
-			var left = routes.left;
-			routes.left = routes.right;
-			routes.right = left;
-		}
-
-		return routes;
-
-	}
-
-	/**
-	 * Returns an object describing the available fragment
-	 * directions.
-	 *
-	 * @return {Object} two boolean properties: prev/next
-	 */
-	function availableFragments() {
-
-		if( currentSlide && config.fragments ) {
-			var fragments = currentSlide.querySelectorAll( '.fragment' );
-			var hiddenFragments = currentSlide.querySelectorAll( '.fragment:not(.visible)' );
-
-			return {
-				prev: fragments.length - hiddenFragments.length > 0,
-				next: !!hiddenFragments.length
-			};
-		}
-		else {
-			return { prev: false, next: false };
-		}
-
-	}
-
-	/**
-	 * Start playback of any embedded content inside of
-	 * the targeted slide.
-	 */
-	function startEmbeddedContent( slide ) {
-
-		if( slide && !isSpeakerNotes() ) {
-			// HTML5 media elements
-			toArray( slide.querySelectorAll( 'video, audio' ) ).forEach( function( el ) {
-				if( el.hasAttribute( 'data-autoplay' ) ) {
-					el.play();
-				}
-			} );
-
-			// iframe embeds
-			toArray( slide.querySelectorAll( 'iframe' ) ).forEach( function( el ) {
-				el.contentWindow.postMessage( 'slide:start', '*' );
-			});
-
-			// YouTube embeds
-			toArray( slide.querySelectorAll( 'iframe[src*="youtube.com/embed/"]' ) ).forEach( function( el ) {
-				if( el.hasAttribute( 'data-autoplay' ) ) {
-					el.contentWindow.postMessage( '{"event":"command","func":"playVideo","args":""}', '*' );
-				}
-			});
-		}
-
-	}
-
-	/**
-	 * Stop playback of any embedded content inside of
-	 * the targeted slide.
-	 */
-	function stopEmbeddedContent( slide ) {
-
-		if( slide ) {
-			// HTML5 media elements
-			toArray( slide.querySelectorAll( 'video, audio' ) ).forEach( function( el ) {
-				if( !el.hasAttribute( 'data-ignore' ) ) {
-					el.pause();
-				}
-			} );
-
-			// iframe embeds
-			toArray( slide.querySelectorAll( 'iframe' ) ).forEach( function( el ) {
-				el.contentWindow.postMessage( 'slide:stop', '*' );
-			});
-
-			// YouTube embeds
-			toArray( slide.querySelectorAll( 'iframe[src*="youtube.com/embed/"]' ) ).forEach( function( el ) {
-				if( !el.hasAttribute( 'data-ignore' ) && typeof el.contentWindow.postMessage === 'function' ) {
-					el.contentWindow.postMessage( '{"event":"command","func":"pauseVideo","args":""}', '*' );
-				}
-			});
-		}
-
-	}
-
-	/**
-	 * Checks if this presentation is running inside of the
-	 * speaker notes window.
-	 */
-	function isSpeakerNotes() {
-
-		return !!window.location.search.match( /receiver/gi );
-
-	}
-
-	/**
-	 * Reads the current URL (hash) and navigates accordingly.
-	 */
-	function readURL() {
-
-		var hash = window.location.hash;
-
-		// Attempt to parse the hash as either an index or name
-		var bits = hash.slice( 2 ).split( '/' ),
-			name = hash.replace( /#|\//gi, '' );
-
-		// If the first bit is invalid and there is a name we can
-		// assume that this is a named link
-		if( isNaN( parseInt( bits[0], 10 ) ) && name.length ) {
-			// Find the slide with the specified name
-			var element = document.querySelector( '#' + name );
-
-			if( element ) {
-				// Find the position of the named slide and navigate to it
-				var indices = Reveal.getIndices( element );
-				slide( indices.h, indices.v );
-			}
-			// If the slide doesn't exist, navigate to the current slide
-			else {
-				slide( indexh || 0, indexv || 0 );
-			}
-		}
-		else {
-			// Read the index components of the hash
-			var h = parseInt( bits[0], 10 ) || 0,
-				v = parseInt( bits[1], 10 ) || 0;
-
-			if( h !== indexh || v !== indexv ) {
-				slide( h, v );
-			}
-		}
-
-	}
-
-	/**
-	 * Updates the page URL (hash) to reflect the current
-	 * state.
-	 *
-	 * @param {Number} delay The time in ms to wait before
-	 * writing the hash
-	 */
-	function writeURL( delay ) {
-
-		if( config.history ) {
-
-			// Make sure there's never more than one timeout running
-			clearTimeout( writeURLTimeout );
-
-			// If a delay is specified, timeout this call
-			if( typeof delay === 'number' ) {
-				writeURLTimeout = setTimeout( writeURL, delay );
-			}
-			else {
-				var url = '/';
-
-				// If the current slide has an ID, use that as a named link
-				if( currentSlide && typeof currentSlide.getAttribute( 'id' ) === 'string' ) {
-					url = '/' + currentSlide.getAttribute( 'id' );
-				}
-				// Otherwise use the /h/v index
-				else {
-					if( indexh > 0 || indexv > 0 ) url += indexh;
-					if( indexv > 0 ) url += '/' + indexv;
-				}
-
-				window.location.hash = url;
-			}
-		}
-
-	}
-
-	/**
-	 * Retrieves the h/v location of the current, or specified,
-	 * slide.
-	 *
-	 * @param {HTMLElement} slide If specified, the returned
-	 * index will be for this slide rather than the currently
-	 * active one
-	 *
-	 * @return {Object} { h: <int>, v: <int>, f: <int> }
-	 */
-	function getIndices( slide ) {
-
-		// By default, return the current indices
-		var h = indexh,
-			v = indexv,
-			f;
-
-		// If a slide is specified, return the indices of that slide
-		if( slide ) {
-			var isVertical = isVerticalSlide( slide );
-			var slideh = isVertical ? slide.parentNode : slide;
-
-			// Select all horizontal slides
-			var horizontalSlides = toArray( document.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ) );
-
-			// Now that we know which the horizontal slide is, get its index
-			h = Math.max( horizontalSlides.indexOf( slideh ), 0 );
-
-			// If this is a vertical slide, grab the vertical index
-			if( isVertical ) {
-				v = Math.max( toArray( slide.parentNode.querySelectorAll( 'section' ) ).indexOf( slide ), 0 );
-			}
-		}
-
-		if( !slide && currentSlide ) {
-			var hasFragments = currentSlide.querySelectorAll( '.fragment' ).length > 0;
-			if( hasFragments ) {
-				var visibleFragments = currentSlide.querySelectorAll( '.fragment.visible' );
-				f = visibleFragments.length - 1;
-			}
-		}
-
-		return { h: h, v: v, f: f };
-
-	}
-
-	/**
-	 * Return a sorted fragments list, ordered by an increasing
-	 * "data-fragment-index" attribute.
-	 *
-	 * Fragments will be revealed in the order that they are returned by
-	 * this function, so you can use the index attributes to control the
-	 * order of fragment appearance.
-	 *
-	 * To maintain a sensible default fragment order, fragments are presumed
-	 * to be passed in document order. This function adds a "fragment-index"
-	 * attribute to each node if such an attribute is not already present,
-	 * and sets that attribute to an integer value which is the position of
-	 * the fragment within the fragments list.
-	 */
-	function sortFragments( fragments ) {
-
-		fragments = toArray( fragments );
-
-		var ordered = [],
-			unordered = [],
-			sorted = [];
-
-		// Group ordered and unordered elements
-		fragments.forEach( function( fragment, i ) {
-			if( fragment.hasAttribute( 'data-fragment-index' ) ) {
-				var index = parseInt( fragment.getAttribute( 'data-fragment-index' ), 10 );
-
-				if( !ordered[index] ) {
-					ordered[index] = [];
-				}
-
-				ordered[index].push( fragment );
-			}
-			else {
-				unordered.push( [ fragment ] );
-			}
-		} );
-
-		// Append fragments without explicit indices in their
-		// DOM order
-		ordered = ordered.concat( unordered );
-
-		// Manually count the index up per group to ensure there
-		// are no gaps
-		var index = 0;
-
-		// Push all fragments in their sorted order to an array,
-		// this flattens the groups
-		ordered.forEach( function( group ) {
-			group.forEach( function( fragment ) {
-				sorted.push( fragment );
-				fragment.setAttribute( 'data-fragment-index', index );
-			} );
-
-			index ++;
-		} );
-
-		return sorted;
-
-	}
-
-	/**
-	 * Navigate to the specified slide fragment.
-	 *
-	 * @param {Number} index The index of the fragment that
-	 * should be shown, -1 means all are invisible
-	 * @param {Number} offset Integer offset to apply to the
-	 * fragment index
-	 *
-	 * @return {Boolean} true if a change was made in any
-	 * fragments visibility as part of this call
-	 */
-	function navigateFragment( index, offset ) {
-
-		if( currentSlide && config.fragments ) {
-
-			var fragments = sortFragments( currentSlide.querySelectorAll( '.fragment' ) );
-			if( fragments.length ) {
-
-				// If no index is specified, find the current
-				if( typeof index !== 'number' ) {
-					var lastVisibleFragment = sortFragments( currentSlide.querySelectorAll( '.fragment.visible' ) ).pop();
-
-					if( lastVisibleFragment ) {
-						index = parseInt( lastVisibleFragment.getAttribute( 'data-fragment-index' ) || 0, 10 );
-					}
-					else {
-						index = -1;
-					}
-				}
-
-				// If an offset is specified, apply it to the index
-				if( typeof offset === 'number' ) {
-					index += offset;
-				}
-
-				var fragmentsShown = [],
-					fragmentsHidden = [];
-
-				toArray( fragments ).forEach( function( element, i ) {
-
-					if( element.hasAttribute( 'data-fragment-index' ) ) {
-						i = parseInt( element.getAttribute( 'data-fragment-index' ), 10 );
-					}
-
-					// Visible fragments
-					if( i <= index ) {
-						if( !element.classList.contains( 'visible' ) ) fragmentsShown.push( element );
-						element.classList.add( 'visible' );
-						element.classList.remove( 'current-fragment' );
-
-						if( i === index ) {
-							element.classList.add( 'current-fragment' );
-						}
-					}
-					// Hidden fragments
-					else {
-						if( element.classList.contains( 'visible' ) ) fragmentsHidden.push( element );
-						element.classList.remove( 'visible' );
-						element.classList.remove( 'current-fragment' );
-					}
-
-
-				} );
-
-				if( fragmentsHidden.length ) {
-					dispatchEvent( 'fragmenthidden', { fragment: fragmentsHidden[0], fragments: fragmentsHidden } );
-				}
-
-				if( fragmentsShown.length ) {
-					dispatchEvent( 'fragmentshown', { fragment: fragmentsShown[0], fragments: fragmentsShown } );
-				}
-
-				updateControls();
-
-				return !!( fragmentsShown.length || fragmentsHidden.length );
-
-			}
-
-		}
-
-		return false;
-
-	}
-
-	/**
-	 * Navigate to the next slide fragment.
-	 *
-	 * @return {Boolean} true if there was a next fragment,
-	 * false otherwise
-	 */
-	function nextFragment() {
-
-		return navigateFragment( null, 1 );
-
-	}
-
-	/**
-	 * Navigate to the previous slide fragment.
-	 *
-	 * @return {Boolean} true if there was a previous fragment,
-	 * false otherwise
-	 */
-	function previousFragment() {
-
-		return navigateFragment( null, -1 );
-
-	}
-
-	/**
-	 * Cues a new automated slide if enabled in the config.
-	 */
-	function cueAutoSlide() {
-
-		cancelAutoSlide();
-
-		if( currentSlide ) {
-
-			var parentAutoSlide = currentSlide.parentNode ? currentSlide.parentNode.getAttribute( 'data-autoslide' ) : null;
-			var slideAutoSlide = currentSlide.getAttribute( 'data-autoslide' );
-
-			// Pick value in the following priority order:
-			// 1. Current slide's data-autoslide
-			// 2. Parent slide's data-autoslide
-			// 3. Global autoSlide setting
-			if( slideAutoSlide ) {
-				autoSlide = parseInt( slideAutoSlide, 10 );
-			}
-			else if( parentAutoSlide ) {
-				autoSlide = parseInt( parentAutoSlide, 10 );
-			}
-			else {
-				autoSlide = config.autoSlide;
-			}
-
-			// If there are media elements with data-autoplay,
-			// automatically set the autoSlide duration to the
-			// length of that media
-			toArray( currentSlide.querySelectorAll( 'video, audio' ) ).forEach( function( el ) {
-				if( el.hasAttribute( 'data-autoplay' ) ) {
-					if( autoSlide && el.duration * 1000 > autoSlide ) {
-						autoSlide = ( el.duration * 1000 ) + 1000;
-					}
-				}
-			} );
-
-			// Cue the next auto-slide if:
-			// - There is an autoSlide value
-			// - Auto-sliding isn't paused by the user
-			// - The presentation isn't paused
-			// - The overview isn't active
-			// - The presentation isn't over
-			if( autoSlide && !autoSlidePaused && !isPaused() && !isOverview() && ( !Reveal.isLastSlide() || config.loop === true ) ) {
-				autoSlideTimeout = setTimeout( navigateNext, autoSlide );
-				autoSlideStartTime = Date.now();
-			}
-
-			if( autoSlidePlayer ) {
-				autoSlidePlayer.setPlaying( autoSlideTimeout !== -1 );
-			}
-
-		}
-
-	}
-
-	/**
-	 * Cancels any ongoing request to auto-slide.
-	 */
-	function cancelAutoSlide() {
-
-		clearTimeout( autoSlideTimeout );
-		autoSlideTimeout = -1;
-
-	}
-
-	function pauseAutoSlide() {
-
-		autoSlidePaused = true;
-		clearTimeout( autoSlideTimeout );
-
-		if( autoSlidePlayer ) {
-			autoSlidePlayer.setPlaying( false );
-		}
-
-	}
-
-	function resumeAutoSlide() {
-
-		autoSlidePaused = false;
-		cueAutoSlide();
-
-	}
-
-	function navigateLeft() {
-
-		// Reverse for RTL
-		if( config.rtl ) {
-			if( ( isOverview() || nextFragment() === false ) && availableRoutes().left ) {
-				slide( indexh + 1 );
-			}
-		}
-		// Normal navigation
-		else if( ( isOverview() || previousFragment() === false ) && availableRoutes().left ) {
-			slide( indexh - 1 );
-		}
-
-	}
-
-	function navigateRight() {
-
-		// Reverse for RTL
-		if( config.rtl ) {
-			if( ( isOverview() || previousFragment() === false ) && availableRoutes().right ) {
-				slide( indexh - 1 );
-			}
-		}
-		// Normal navigation
-		else if( ( isOverview() || nextFragment() === false ) && availableRoutes().right ) {
-			slide( indexh + 1 );
-		}
-
-	}
-
-	function navigateUp() {
-
-		// Prioritize hiding fragments
-		if( ( isOverview() || previousFragment() === false ) && availableRoutes().up ) {
-			slide( indexh, indexv - 1 );
-		}
-
-	}
-
-	function navigateDown() {
-
-		// Prioritize revealing fragments
-		if( ( isOverview() || nextFragment() === false ) && availableRoutes().down ) {
-			slide( indexh, indexv + 1 );
-		}
-
-	}
-
-	/**
-	 * Navigates backwards, prioritized in the following order:
-	 * 1) Previous fragment
-	 * 2) Previous vertical slide
-	 * 3) Previous horizontal slide
-	 */
-	function navigatePrev() {
-
-		// Prioritize revealing fragments
-		if( previousFragment() === false ) {
-			if( availableRoutes().up ) {
-				navigateUp();
-			}
-			else {
-				// Fetch the previous horizontal slide, if there is one
-				var previousSlide = document.querySelector( HORIZONTAL_SLIDES_SELECTOR + '.past:nth-child(' + indexh + ')' );
-
-				if( previousSlide ) {
-					var v = ( previousSlide.querySelectorAll( 'section' ).length - 1 ) || undefined;
-					var h = indexh - 1;
-					slide( h, v );
-				}
-			}
-		}
-
-	}
-
-	/**
-	 * Same as #navigatePrev() but navigates forwards.
-	 */
-	function navigateNext() {
-
-		// Prioritize revealing fragments
-		if( nextFragment() === false ) {
-			availableRoutes().down ? navigateDown() : navigateRight();
-		}
-
-		// If auto-sliding is enabled we need to cue up
-		// another timeout
-		cueAutoSlide();
-
-	}
-
-
-	// --------------------------------------------------------------------//
-	// ----------------------------- EVENTS -------------------------------//
-	// --------------------------------------------------------------------//
-
-	/**
-	 * Called by all event handlers that are based on user
-	 * input.
-	 */
-	function onUserInput( event ) {
-
-		if( config.autoSlideStoppable ) {
-			pauseAutoSlide();
-		}
-
-	}
-
-	/**
-	 * Handler for the document level 'keydown' event.
-	 */
-	function onDocumentKeyDown( event ) {
-
-		onUserInput( event );
-
-		// Check if there's a focused element that could be using
-		// the keyboard
-		var activeElement = document.activeElement;
-		var hasFocus = !!( document.activeElement && ( document.activeElement.type || document.activeElement.href || document.activeElement.contentEditable !== 'inherit' ) );
-
-		// Disregard the event if there's a focused element or a
-		// keyboard modifier key is present
-		if( hasFocus || (event.shiftKey && event.keyCode !== 32) || event.altKey || event.ctrlKey || event.metaKey ) return;
-
-		// While paused only allow "unpausing" keyboard events (b and .)
-		if( isPaused() && [66,190,191].indexOf( event.keyCode ) === -1 ) {
-			return false;
-		}
-
-		var triggered = false;
-
-		// 1. User defined key bindings
-		if( typeof config.keyboard === 'object' ) {
-
-			for( var key in config.keyboard ) {
-
-				// Check if this binding matches the pressed key
-				if( parseInt( key, 10 ) === event.keyCode ) {
-
-					var value = config.keyboard[ key ];
-
-					// Callback function
-					if( typeof value === 'function' ) {
-						value.apply( null, [ event ] );
-					}
-					// String shortcuts to reveal.js API
-					else if( typeof value === 'string' && typeof Reveal[ value ] === 'function' ) {
-						Reveal[ value ].call();
-					}
-
-					triggered = true;
-
-				}
-
-			}
-
-		}
-
-		// 2. System defined key bindings
-		if( triggered === false ) {
-
-			// Assume true and try to prove false
-			triggered = true;
-
-			switch( event.keyCode ) {
-				// p, page up
-				case 80: case 33: navigatePrev(); break;
-				// n, page down
-				case 78: case 34: navigateNext(); break;
-				// h, left
-				case 72: case 37: navigateLeft(); break;
-				// l, right
-				case 76: case 39: navigateRight(); break;
-				// k, up
-				case 75: case 38: navigateUp(); break;
-				// j, down
-				case 74: case 40: navigateDown(); break;
-				// home
-				case 36: slide( 0 ); break;
-				// end
-				case 35: slide( Number.MAX_VALUE ); break;
-				// space
-				case 32: isOverview() ? deactivateOverview() : event.shiftKey ? navigatePrev() : navigateNext(); break;
-				// return
-				case 13: isOverview() ? deactivateOverview() : triggered = false; break;
-				// b, period, Logitech presenter tools "black screen" button
-				case 66: case 190: case 191: togglePause(); break;
-				// f
-				case 70: enterFullscreen(); break;
-				default:
-					triggered = false;
-			}
-
-		}
-
-		// If the input resulted in a triggered action we should prevent
-		// the browsers default behavior
-		if( triggered ) {
-			event.preventDefault();
-		}
-		// ESC or O key
-		else if ( ( event.keyCode === 27 || event.keyCode === 79 ) && features.transforms3d ) {
-			if( dom.preview ) {
-				closePreview();
-			}
-			else {
-				toggleOverview();
-			}
-
-			event.preventDefault();
-		}
-
-		// If auto-sliding is enabled we need to cue up
-		// another timeout
-		cueAutoSlide();
-
-	}
-
-	/**
-	 * Handler for the 'touchstart' event, enables support for
-	 * swipe and pinch gestures.
-	 */
-	function onTouchStart( event ) {
-
-		touch.startX = event.touches[0].clientX;
-		touch.startY = event.touches[0].clientY;
-		touch.startCount = event.touches.length;
-
-		// If there's two touches we need to memorize the distance
-		// between those two points to detect pinching
-		if( event.touches.length === 2 && config.overview ) {
-			touch.startSpan = distanceBetween( {
-				x: event.touches[1].clientX,
-				y: event.touches[1].clientY
-			}, {
-				x: touch.startX,
-				y: touch.startY
-			} );
-		}
-
-	}
-
-	/**
-	 * Handler for the 'touchmove' event.
-	 */
-	function onTouchMove( event ) {
-
-		// Each touch should only trigger one action
-		if( !touch.captured ) {
-			onUserInput( event );
-
-			var currentX = event.touches[0].clientX;
-			var currentY = event.touches[0].clientY;
-
-			// If the touch started with two points and still has
-			// two active touches; test for the pinch gesture
-			if( event.touches.length === 2 && touch.startCount === 2 && config.overview ) {
-
-				// The current distance in pixels between the two touch points
-				var currentSpan = distanceBetween( {
-					x: event.touches[1].clientX,
-					y: event.touches[1].clientY
-				}, {
-					x: touch.startX,
-					y: touch.startY
-				} );
-
-				// If the span is larger than the desire amount we've got
-				// ourselves a pinch
-				if( Math.abs( touch.startSpan - currentSpan ) > touch.threshold ) {
-					touch.captured = true;
-
-					if( currentSpan < touch.startSpan ) {
-						activateOverview();
-					}
-					else {
-						deactivateOverview();
-					}
-				}
-
-				event.preventDefault();
-
-			}
-			// There was only one touch point, look for a swipe
-			else if( event.touches.length === 1 && touch.startCount !== 2 ) {
-
-				var deltaX = currentX - touch.startX,
-					deltaY = currentY - touch.startY;
-
-				if( deltaX > touch.threshold && Math.abs( deltaX ) > Math.abs( deltaY ) ) {
-					touch.captured = true;
-					navigateLeft();
-				}
-				else if( deltaX < -touch.threshold && Math.abs( deltaX ) > Math.abs( deltaY ) ) {
-					touch.captured = true;
-					navigateRight();
-				}
-				else if( deltaY > touch.threshold ) {
-					touch.captured = true;
-					navigateUp();
-				}
-				else if( deltaY < -touch.threshold ) {
-					touch.captured = true;
-					navigateDown();
-				}
-
-				// If we're embedded, only block touch events if they have
-				// triggered an action
-				if( config.embedded ) {
-					if( touch.captured || isVerticalSlide( currentSlide ) ) {
-						event.preventDefault();
-					}
-				}
-				// Not embedded? Block them all to avoid needless tossing
-				// around of the viewport in iOS
-				else {
-					event.preventDefault();
-				}
-
-			}
-		}
-		// There's a bug with swiping on some Android devices unless
-		// the default action is always prevented
-		else if( navigator.userAgent.match( /android/gi ) ) {
-			event.preventDefault();
-		}
-
-	}
-
-	/**
-	 * Handler for the 'touchend' event.
-	 */
-	function onTouchEnd( event ) {
-
-		touch.captured = false;
-
-	}
-
-	/**
-	 * Convert pointer down to touch start.
-	 */
-	function onPointerDown( event ) {
-
-		if( event.pointerType === event.MSPOINTER_TYPE_TOUCH ) {
-			event.touches = [{ clientX: event.clientX, clientY: event.clientY }];
-			onTouchStart( event );
-		}
-
-	}
-
-	/**
-	 * Convert pointer move to touch move.
-	 */
-	function onPointerMove( event ) {
-
-		if( event.pointerType === event.MSPOINTER_TYPE_TOUCH ) {
-			event.touches = [{ clientX: event.clientX, clientY: event.clientY }];
-			onTouchMove( event );
-		}
-
-	}
-
-	/**
-	 * Convert pointer up to touch end.
-	 */
-	function onPointerUp( event ) {
-
-		if( event.pointerType === event.MSPOINTER_TYPE_TOUCH ) {
-			event.touches = [{ clientX: event.clientX, clientY: event.clientY }];
-			onTouchEnd( event );
-		}
-
-	}
-
-	/**
-	 * Handles mouse wheel scrolling, throttled to avoid skipping
-	 * multiple slides.
-	 */
-	function onDocumentMouseScroll( event ) {
-
-		if( Date.now() - lastMouseWheelStep > 600 ) {
-
-			lastMouseWheelStep = Date.now();
-
-			var delta = event.detail || -event.wheelDelta;
-			if( delta > 0 ) {
-				navigateNext();
-			}
-			else {
-				navigatePrev();
-			}
-
-		}
-
-	}
-
-	/**
-	 * Clicking on the progress bar results in a navigation to the
-	 * closest approximate horizontal slide using this equation:
-	 *
-	 * ( clickX / presentationWidth ) * numberOfSlides
-	 */
-	function onProgressClicked( event ) {
-
-		onUserInput( event );
-
-		event.preventDefault();
-
-		var slidesTotal = toArray( document.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ) ).length;
-		var slideIndex = Math.floor( ( event.clientX / dom.wrapper.offsetWidth ) * slidesTotal );
-
-		slide( slideIndex );
-
-	}
-
-	/**
-	 * Event handler for navigation control buttons.
-	 */
-	function onNavigateLeftClicked( event ) { event.preventDefault(); onUserInput(); navigateLeft(); }
-	function onNavigateRightClicked( event ) { event.preventDefault(); onUserInput(); navigateRight(); }
-	function onNavigateUpClicked( event ) { event.preventDefault(); onUserInput(); navigateUp(); }
-	function onNavigateDownClicked( event ) { event.preventDefault(); onUserInput(); navigateDown(); }
-	function onNavigatePrevClicked( event ) { event.preventDefault(); onUserInput(); navigatePrev(); }
-	function onNavigateNextClicked( event ) { event.preventDefault(); onUserInput(); navigateNext(); }
-
-	/**
-	 * Handler for the window level 'hashchange' event.
-	 */
-	function onWindowHashChange( event ) {
-
-		readURL();
-
-	}
-
-	/**
-	 * Handler for the window level 'resize' event.
-	 */
-	function onWindowResize( event ) {
-
-		layout();
-
-	}
-
-	/**
-	 * Handle for the window level 'visibilitychange' event.
-	 */
-	function onPageVisibilityChange( event ) {
-
-		var isHidden =  document.webkitHidden ||
-						document.msHidden ||
-						document.hidden;
-
-		// If, after clicking a link or similar and we're coming back,
-		// focus the document.body to ensure we can use keyboard shortcuts
-		if( isHidden === false && document.activeElement !== document.body ) {
-			document.activeElement.blur();
-			document.body.focus();
-		}
-
-	}
-
-	/**
-	 * Invoked when a slide is and we're in the overview.
-	 */
-	function onOverviewSlideClicked( event ) {
-
-		// TODO There's a bug here where the event listeners are not
-		// removed after deactivating the overview.
-		if( eventsAreBound && isOverview() ) {
-			event.preventDefault();
-
-			var element = event.target;
-
-			while( element && !element.nodeName.match( /section/gi ) ) {
-				element = element.parentNode;
-			}
-
-			if( element && !element.classList.contains( 'disabled' ) ) {
-
-				deactivateOverview();
-
-				if( element.nodeName.match( /section/gi ) ) {
-					var h = parseInt( element.getAttribute( 'data-index-h' ), 10 ),
-						v = parseInt( element.getAttribute( 'data-index-v' ), 10 );
-
-					slide( h, v );
-				}
-
-			}
-		}
-
-	}
-
-	/**
-	 * Handles clicks on links that are set to preview in the
-	 * iframe overlay.
-	 */
-	function onPreviewLinkClicked( event ) {
-
-		var url = event.target.getAttribute( 'href' );
-		if( url ) {
-			openPreview( url );
-			event.preventDefault();
-		}
-
-	}
-
-	/**
-	 * Handles click on the auto-sliding controls element.
-	 */
-	function onAutoSlidePlayerClick( event ) {
-
-		// Replay
-		if( Reveal.isLastSlide() && config.loop === false ) {
-			slide( 0, 0 );
-			resumeAutoSlide();
-		}
-		// Resume
-		else if( autoSlidePaused ) {
-			resumeAutoSlide();
-		}
-		// Pause
-		else {
-			pauseAutoSlide();
-		}
-
-	}
-
-
-	// --------------------------------------------------------------------//
-	// ------------------------ PLAYBACK COMPONENT ------------------------//
-	// --------------------------------------------------------------------//
-
-
-	/**
-	 * Constructor for the playback component, which displays
-	 * play/pause/progress controls.
-	 *
-	 * @param {HTMLElement} container The component will append
-	 * itself to this
-	 * @param {Function} progressCheck A method which will be
-	 * called frequently to get the current progress on a range
-	 * of 0-1
-	 */
-	function Playback( container, progressCheck ) {
-
-		// Cosmetics
-		this.diameter = 50;
-		this.thickness = 3;
-
-		// Flags if we are currently playing
-		this.playing = false;
-
-		// Current progress on a 0-1 range
-		this.progress = 0;
-
-		// Used to loop the animation smoothly
-		this.progressOffset = 1;
-
-		this.container = container;
-		this.progressCheck = progressCheck;
-
-		this.canvas = document.createElement( 'canvas' );
-		this.canvas.className = 'playback';
-		this.canvas.width = this.diameter;
-		this.canvas.height = this.diameter;
-		this.context = this.canvas.getContext( '2d' );
-
-		this.container.appendChild( this.canvas );
-
-		this.render();
-
-	}
-
-	Playback.prototype.setPlaying = function( value ) {
-
-		var wasPlaying = this.playing;
-
-		this.playing = value;
-
-		// Start repainting if we weren't already
-		if( !wasPlaying && this.playing ) {
-			this.animate();
-		}
-		else {
-			this.render();
-		}
-
-	};
-
-	Playback.prototype.animate = function() {
-
-		var progressBefore = this.progress;
-
-		this.progress = this.progressCheck();
-
-		// When we loop, offset the progress so that it eases
-		// smoothly rather than immediately resetting
-		if( progressBefore > 0.8 && this.progress < 0.2 ) {
-			this.progressOffset = this.progress;
-		}
-
-		this.render();
-
-		if( this.playing ) {
-			features.requestAnimationFrameMethod.call( window, this.animate.bind( this ) );
-		}
-
-	};
-
-	/**
-	 * Renders the current progress and playback state.
-	 */
-	Playback.prototype.render = function() {
-
-		var progress = this.playing ? this.progress : 0,
-			radius = ( this.diameter / 2 ) - this.thickness,
-			x = this.diameter / 2,
-			y = this.diameter / 2,
-			iconSize = 14;
-
-		// Ease towards 1
-		this.progressOffset += ( 1 - this.progressOffset ) * 0.1;
-
-		var endAngle = ( - Math.PI / 2 ) + ( progress * ( Math.PI * 2 ) );
-		var startAngle = ( - Math.PI / 2 ) + ( this.progressOffset * ( Math.PI * 2 ) );
-
-		this.context.save();
-		this.context.clearRect( 0, 0, this.diameter, this.diameter );
-
-		// Solid background color
-		this.context.beginPath();
-		this.context.arc( x, y, radius + 2, 0, Math.PI * 2, false );
-		this.context.fillStyle = 'rgba( 0, 0, 0, 0.4 )';
-		this.context.fill();
-
-		// Draw progress track
-		this.context.beginPath();
-		this.context.arc( x, y, radius, 0, Math.PI * 2, false );
-		this.context.lineWidth = this.thickness;
-		this.context.strokeStyle = '#666';
-		this.context.stroke();
-
-		if( this.playing ) {
-			// Draw progress on top of track
-			this.context.beginPath();
-			this.context.arc( x, y, radius, startAngle, endAngle, false );
-			this.context.lineWidth = this.thickness;
-			this.context.strokeStyle = '#fff';
-			this.context.stroke();
-		}
-
-		this.context.translate( x - ( iconSize / 2 ), y - ( iconSize / 2 ) );
-
-		// Draw play/pause icons
-		if( this.playing ) {
-			this.context.fillStyle = '#fff';
-			this.context.fillRect( 0, 0, iconSize / 2 - 2, iconSize );
-			this.context.fillRect( iconSize / 2 + 2, 0, iconSize / 2 - 2, iconSize );
-		}
-		else {
-			this.context.beginPath();
-			this.context.translate( 2, 0 );
-			this.context.moveTo( 0, 0 );
-			this.context.lineTo( iconSize - 2, iconSize / 2 );
-			this.context.lineTo( 0, iconSize );
-			this.context.fillStyle = '#fff';
-			this.context.fill();
-		}
-
-		this.context.restore();
-
-	};
-
-	Playback.prototype.on = function( type, listener ) {
-		this.canvas.addEventListener( type, listener, false );
-	};
-
-	Playback.prototype.off = function( type, listener ) {
-		this.canvas.removeEventListener( type, listener, false );
-	};
-
-	Playback.prototype.destroy = function() {
-
-		this.playing = false;
-
-		if( this.canvas.parentNode ) {
-			this.container.removeChild( this.canvas );
-		}
-
-	};
-
-
-	// --------------------------------------------------------------------//
-	// ------------------------------- API --------------------------------//
-	// --------------------------------------------------------------------//
-
-
-	return {
-		initialize: initialize,
-		configure: configure,
-		sync: sync,
-
-		// Navigation methods
-		slide: slide,
-		left: navigateLeft,
-		right: navigateRight,
-		up: navigateUp,
-		down: navigateDown,
-		prev: navigatePrev,
-		next: navigateNext,
-
-		// Fragment methods
-		navigateFragment: navigateFragment,
-		prevFragment: previousFragment,
-		nextFragment: nextFragment,
-
-		// Deprecated aliases
-		navigateTo: slide,
-		navigateLeft: navigateLeft,
-		navigateRight: navigateRight,
-		navigateUp: navigateUp,
-		navigateDown: navigateDown,
-		navigatePrev: navigatePrev,
-		navigateNext: navigateNext,
-
-		// Forces an update in slide layout
-		layout: layout,
-
-		// Returns an object with the available routes as booleans (left/right/top/bottom)
-		availableRoutes: availableRoutes,
-
-		// Returns an object with the available fragments as booleans (prev/next)
-		availableFragments: availableFragments,
-
-		// Toggles the overview mode on/off
-		toggleOverview: toggleOverview,
-
-		// Toggles the "black screen" mode on/off
-		togglePause: togglePause,
-
-		// State checks
-		isOverview: isOverview,
-		isPaused: isPaused,
-
-		// Adds or removes all internal event listeners (such as keyboard)
-		addEventListeners: addEventListeners,
-		removeEventListeners: removeEventListeners,
-
-		// Returns the indices of the current, or specified, slide
-		getIndices: getIndices,
-
-		// Returns the slide at the specified index, y is optional
-		getSlide: function( x, y ) {
-			var horizontalSlide = document.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR )[ x ];
-			var verticalSlides = horizontalSlide && horizontalSlide.querySelectorAll( 'section' );
-
-			if( typeof y !== 'undefined' ) {
-				return verticalSlides ? verticalSlides[ y ] : undefined;
-			}
-
-			return horizontalSlide;
-		},
-
-		// Returns the previous slide element, may be null
-		getPreviousSlide: function() {
-			return previousSlide;
-		},
-
-		// Returns the current slide element
-		getCurrentSlide: function() {
-			return currentSlide;
-		},
-
-		// Returns the current scale of the presentation content
-		getScale: function() {
-			return scale;
-		},
-
-		// Returns the current configuration object
-		getConfig: function() {
-			return config;
-		},
-
-		// Helper method, retrieves query string as a key/value hash
-		getQueryHash: function() {
-			var query = {};
-
-			location.search.replace( /[A-Z0-9]+?=([\w\.%-]*)/gi, function(a) {
-				query[ a.split( '=' ).shift() ] = a.split( '=' ).pop();
-			} );
-
-			// Basic deserialization
-			for( var i in query ) {
-				var value = query[ i ];
-
-				query[ i ] = unescape( value );
-
-				if( value === 'null' ) query[ i ] = null;
-				else if( value === 'true' ) query[ i ] = true;
-				else if( value === 'false' ) query[ i ] = false;
-				else if( value.match( /^\d+$/ ) ) query[ i ] = parseFloat( value );
-			}
-
-			return query;
-		},
-
-		// Returns true if we're currently on the first slide
-		isFirstSlide: function() {
-			return document.querySelector( SLIDES_SELECTOR + '.past' ) == null ? true : false;
-		},
-
-		// Returns true if we're currently on the last slide
-		isLastSlide: function() {
-			if( currentSlide ) {
-				// Does this slide has next a sibling?
-				if( currentSlide.nextElementSibling ) return false;
-
-				// If it's vertical, does its parent have a next sibling?
-				if( isVerticalSlide( currentSlide ) && currentSlide.parentNode.nextElementSibling ) return false;
-
-				return true;
-			}
-
-			return false;
-		},
-
-		// Checks if reveal.js has been loaded and is ready for use
-		isReady: function() {
-			return loaded;
-		},
-
-		// Forward event binding to the reveal DOM element
-		addEventListener: function( type, listener, useCapture ) {
-			if( 'addEventListener' in window ) {
-				( dom.wrapper || document.querySelector( '.reveal' ) ).addEventListener( type, listener, useCapture );
-			}
-		},
-		removeEventListener: function( type, listener, useCapture ) {
-			if( 'addEventListener' in window ) {
-				( dom.wrapper || document.querySelector( '.reveal' ) ).removeEventListener( type, listener, useCapture );
-			}
-		}
-	};
-
-})();
diff --git a/uflacs-merge-into-ffc/doc/roadmap/js/reveal.min.js b/uflacs-merge-into-ffc/doc/roadmap/js/reveal.min.js
deleted file mode 100644
index a13bd48..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/js/reveal.min.js
+++ /dev/null
@@ -1,9 +0,0 @@
-/*!
- * reveal.js 2.6.1 (2014-03-13, 09:22)
- * http://lab.hakim.se/reveal-js
- * MIT licensed
- *
- * Copyright (C) 2014 Hakim El Hattab, http://hakim.se
- */
-var Reveal=function(){"use strict";function a(a){if(b(),!ec.transforms2d&&!ec.transforms3d)return document.body.setAttribute("class","no-transforms"),void 0;window.addEventListener("load",A,!1);var d=Reveal.getQueryHash();"undefined"!=typeof d.dependencies&&delete d.dependencies,k(_b,a),k(_b,d),r(),c()}function b(){ec.transforms3d="WebkitPerspective"in document.body.style||"MozPerspective"in document.body.style||"msPerspective"in document.body.style||"OPerspective"in document.body.style| [...]
-return"undefined"!=typeof b?d?d[b]:void 0:c},getPreviousSlide:function(){return Sb},getCurrentSlide:function(){return Tb},getScale:function(){return cc},getConfig:function(){return _b},getQueryHash:function(){var a={};location.search.replace(/[A-Z0-9]+?=([\w\.%-]*)/gi,function(b){a[b.split("=").shift()]=b.split("=").pop()});for(var b in a){var c=a[b];a[b]=unescape(c),"null"===c?a[b]=null:"true"===c?a[b]=!0:"false"===c?a[b]=!1:c.match(/^\d+$/)&&(a[b]=parseFloat(c))}return a},isFirstSlide: [...]
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/lib/css/zenburn.css b/uflacs-merge-into-ffc/doc/roadmap/lib/css/zenburn.css
deleted file mode 100644
index ab74139..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/lib/css/zenburn.css
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
-
-Zenburn style from voldmar.ru (c) Vladimir Epifanov <voldmar at voldmar.ru>
-based on dark.css by Ivan Sagalaev
-
-*/
-
-pre code {
-  display: block; padding: 0.5em;
-  background: #3F3F3F;
-  color: #DCDCDC;
-}
-
-pre .keyword,
-pre .tag,
-pre .css .class,
-pre .css .id,
-pre .lisp .title,
-pre .nginx .title,
-pre .request,
-pre .status,
-pre .clojure .attribute {
-  color: #E3CEAB;
-}
-
-pre .django .template_tag,
-pre .django .variable,
-pre .django .filter .argument {
-  color: #DCDCDC;
-}
-
-pre .number,
-pre .date {
-  color: #8CD0D3;
-}
-
-pre .dos .envvar,
-pre .dos .stream,
-pre .variable,
-pre .apache .sqbracket {
-  color: #EFDCBC;
-}
-
-pre .dos .flow,
-pre .diff .change,
-pre .python .exception,
-pre .python .built_in,
-pre .literal,
-pre .tex .special {
-  color: #EFEFAF;
-}
-
-pre .diff .chunk,
-pre .subst {
-  color: #8F8F8F;
-}
-
-pre .dos .keyword,
-pre .python .decorator,
-pre .title,
-pre .haskell .type,
-pre .diff .header,
-pre .ruby .class .parent,
-pre .apache .tag,
-pre .nginx .built_in,
-pre .tex .command,
-pre .prompt {
-    color: #efef8f;
-}
-
-pre .dos .winutils,
-pre .ruby .symbol,
-pre .ruby .symbol .string,
-pre .ruby .string {
-  color: #DCA3A3;
-}
-
-pre .diff .deletion,
-pre .string,
-pre .tag .value,
-pre .preprocessor,
-pre .built_in,
-pre .sql .aggregate,
-pre .javadoc,
-pre .smalltalk .class,
-pre .smalltalk .localvars,
-pre .smalltalk .array,
-pre .css .rules .value,
-pre .attr_selector,
-pre .pseudo,
-pre .apache .cbracket,
-pre .tex .formula {
-  color: #CC9393;
-}
-
-pre .shebang,
-pre .diff .addition,
-pre .comment,
-pre .java .annotation,
-pre .template_comment,
-pre .pi,
-pre .doctype {
-  color: #7F9F7F;
-}
-
-pre .coffeescript .javascript,
-pre .javascript .xml,
-pre .tex .formula,
-pre .xml .javascript,
-pre .xml .vbscript,
-pre .xml .css,
-pre .xml .cdata {
-  opacity: 0.5;
-}
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/lib/font/league_gothic-webfont.eot b/uflacs-merge-into-ffc/doc/roadmap/lib/font/league_gothic-webfont.eot
deleted file mode 100755
index 598dcbc..0000000
Binary files a/uflacs-merge-into-ffc/doc/roadmap/lib/font/league_gothic-webfont.eot and /dev/null differ
diff --git a/uflacs-merge-into-ffc/doc/roadmap/lib/font/league_gothic-webfont.svg b/uflacs-merge-into-ffc/doc/roadmap/lib/font/league_gothic-webfont.svg
deleted file mode 100644
index 201cfe1..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/lib/font/league_gothic-webfont.svg
+++ /dev/null
@@ -1,230 +0,0 @@
-<?xml version="1.0" standalone="no"?>
-<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
-<svg xmlns="http://www.w3.org/2000/svg">
-<metadata></metadata>
-<defs>
-<font id="LeagueGothicRegular" horiz-adv-x="724" >
-<font-face units-per-em="2048" ascent="1505" descent="-543" />
-<missing-glyph horiz-adv-x="315" />
-<glyph horiz-adv-x="0" />
-<glyph horiz-adv-x="2048" />
-<glyph unicode="&#xd;" horiz-adv-x="682" />
-<glyph unicode=" "  horiz-adv-x="315" />
-<glyph unicode="&#x09;" horiz-adv-x="315" />
-<glyph unicode="&#xa0;" horiz-adv-x="315" />
-<glyph unicode="!" horiz-adv-x="387" d="M74 1505h239l-55 -1099h-129zM86 0v227h215v-227h-215z" />
-<glyph unicode="&#x22;" horiz-adv-x="329" d="M57 1505h215l-30 -551h-154z" />
-<glyph unicode="#" horiz-adv-x="1232" d="M49 438l27 195h198l37 258h-196l26 194h197l57 420h197l-57 -420h260l57 420h197l-58 -420h193l-27 -194h-192l-37 -258h190l-26 -195h-191l-59 -438h-197l60 438h-261l-59 -438h-197l60 438h-199zM471 633h260l37 258h-260z" />
-<glyph unicode="$" horiz-adv-x="692" d="M37 358l192 13q12 -186 129 -187q88 0 93 185q0 74 -61 175q-21 36 -34 53l-40 55q-28 38 -65.5 90t-70.5 101.5t-70.5 141.5t-37.5 170q4 293 215 342v131h123v-125q201 -23 235 -282l-192 -25q-14 129 -93 125q-80 -2 -84 -162q0 -102 94 -227l41 -59q30 -42 37 -52 t33 -48l37 -52q41 -57 68 -109l26 -55q43 -94 43 -186q-4 -338 -245 -369v-217h-123v221q-236 41 -250 352z" />
-<glyph unicode="%" horiz-adv-x="1001" d="M55 911v437q0 110 82 156q33 18 90.5 18t97.5 -44t44 -87l4 -43v-437q0 -107 -81 -157q-32 -19 -77 -19q-129 0 -156 135zM158 0l553 1505h131l-547 -1505h-137zM178 911q-4 -55 37 -55q16 0 25.5 14.5t9.5 26.5v451q2 55 -35 55q-18 0 -27.5 -13.5t-9.5 -27.5v-451z M631 158v436q0 108 81 156q33 20 79 20q125 0 153 -135l4 -41v-436q0 -110 -80 -156q-32 -18 -90.5 -18t-98.5 43t-44 88zM754 158q-4 -57 37 -58q37 0 34 58v436q2 55 -34 55q-18 0 -27.5 -13t-9.5 -28v-450z" />
-<glyph unicode="&#x26;" horiz-adv-x="854" d="M49 304q0 126 44 225.5t126 222.5q-106 225 -106 442v18q0 94 47 180q70 130 223 130q203 0 252 -215q14 -61 12 -113q0 -162 -205 -434q76 -174 148 -285q33 96 47 211l176 -33q-16 -213 -92 -358q55 -63 92 -76v-235q-23 0 -86 37.5t-123 101.5q-123 -139 -252 -139 t-216 97t-87 223zM263 325.5q1 -65.5 28.5 -107.5t78.5 -42t117 86q-88 139 -174 295q-18 -30 -34.5 -98t-15.5 -133.5zM305 1194q0 -111 55 -246q101 156 101 252q-2 2 0 15.5t-2 36t-11 42.5q-19 52 -61.5 52t-6 [...]
-<glyph unicode="'" horiz-adv-x="309" d="M45 1012l72 266h-72v227h215v-227l-113 -266h-102z" />
-<glyph unicode="(" horiz-adv-x="561" d="M66 645q0 143 29.5 292.5t73.5 261.5q92 235 159 343l30 47l162 -84q-38 -53 -86.5 -148t-82.5 -189.5t-61.5 -238t-27.5 -284.5t26.5 -282.5t64.5 -240.5q80 -207 141 -296l26 -39l-162 -84q-41 61 -96 173t-94 217.5t-70.5 257t-31.5 294.5z" />
-<glyph unicode=")" horiz-adv-x="561" d="M41 -213q36 50 85.5 147t83.5 190t61.5 236.5t27.5 284.5t-26.5 282.5t-64.5 240.5q-78 205 -140 298l-27 39l162 84q41 -61 96 -173.5t94 -217t71 -257.5t32 -296t-30 -292.5t-74 -260.5q-92 -233 -159 -342l-30 -47z" />
-<glyph unicode="*" horiz-adv-x="677" d="M74 1251l43 148l164 -70l-19 176h154l-19 -176l164 70l43 -148l-172 -34l115 -138l-131 -80l-78 152l-76 -152l-131 80l115 138z" />
-<glyph unicode="+" horiz-adv-x="1060" d="M74 649v172h370v346h172v-346h371v-172h-371v-346h-172v346h-370z" />
-<glyph unicode="," horiz-adv-x="309" d="M45 0v227h215v-227l-113 -266h-102l72 266h-72z" />
-<glyph unicode="-" horiz-adv-x="444" d="M74 455v194h297v-194h-297z" />
-<glyph unicode="." horiz-adv-x="321" d="M53 0v227h215v-227h-215z" />
-<glyph unicode="/" horiz-adv-x="720" d="M8 -147l543 1652h162l-537 -1652h-168z" />
-<glyph unicode="0" d="M68 309v887q0 42 17 106t45 107t88.5 78t144 35t144 -34t88.5 -81q55 -93 60 -178l2 -33v-887q0 -42 -17 -106t-45 -107t-88.5 -77.5t-144 -34.5t-144 33.5t-88.5 81.5q-55 94 -60 175zM289 309q0 -46 19.5 -78t54 -32t53 27.5t18.5 56.5l2 26v887q0 46 -19.5 78.5 t-54 32.5t-53 -28t-18.5 -54l-2 -29v-887z" />
-<glyph unicode="1" horiz-adv-x="475" d="M25 1180v141q129 25 205 130q16 21 30 54h133v-1505h-221v1180h-147z" />
-<glyph unicode="2" horiz-adv-x="731" d="M55 0v219l39 62q25 39 88.5 152.5t112.5 220t91 241.5t44 238q0 184 -73.5 184t-73.5 -184v-105h-222v105q0 389 295 389t295 -375q0 -336 -346 -928h350v-219h-600z" />
-<glyph unicode="3" horiz-adv-x="686" d="M45 1071q0 249 63 343q29 42 84.5 75t134.5 33t136 -31t84.5 -71t44.5 -92q22 -71 22 -130q0 -291 -108 -399q127 -100 127 -414q0 -68 -19.5 -145.5t-47 -128t-85 -89t-136.5 -38.5t-135 31.5t-86 75.5t-48 113q-23 91 -23 230h217q2 -150 17.5 -203t59.5 -53t56.5 50.5 t12.5 104.5t1 102t0 63q-6 82 -14 95l-18 33q-12 22 -29 29q-55 22 -108 25h-19v184q133 7 156 73q12 34 12 91v105q0 146 -29 177q-16 17 -40 17q-41 0 -52.5 -49t-13.5 -207h-217z" />
-<glyph unicode="4" horiz-adv-x="684" d="M25 328v194l323 983h221v-983h103v-194h-103v-328h-202v328h-342zM213 522h154v516h-13z" />
-<glyph unicode="5" horiz-adv-x="704" d="M74 438h221v-59q0 -115 14.5 -159t52 -44t53 45t15.5 156v336q0 111 -70 110q-33 0 -59.5 -40t-26.5 -70h-186v792h535v-219h-344v-313q74 55 127 51q78 0 133 -40t77 -100q35 -98 35 -171v-336q0 -393 -289 -393q-78 0 -133 29.5t-84.5 71.5t-46.5 109q-24 98 -24 244z " />
-<glyph unicode="6" horiz-adv-x="700" d="M66 309v856q0 356 288.5 356.5t288.5 -356.5v-94h-221q0 162 -11.5 210t-53.5 48t-56 -37t-14 -127v-268q59 37 124.5 37t119 -36t75.5 -93q37 -92 37 -189v-307q0 -90 -42 -187q-26 -61 -89 -99.5t-157.5 -38.5t-158 38.5t-88.5 99.5q-42 98 -42 187zM287 244 q0 -20 17.5 -44t49 -24t50 24.5t18.5 43.5v450q0 18 -18.5 43t-49 25t-48 -20.5t-19.5 -41.5v-456z" />
-<glyph unicode="7" horiz-adv-x="589" d="M8 1286v219h557v-221l-221 -1284h-229l225 1286h-332z" />
-<glyph unicode="8" horiz-adv-x="696" d="M53 322v176q0 188 115 297q-102 102 -102 276v127q0 213 147 293q57 31 135 31t135.5 -31t84 -71t42.5 -93q21 -66 21 -129v-127q0 -174 -103 -276q115 -109 115 -297v-176q0 -222 -153 -306q-60 -32 -142 -32t-141.5 32.5t-88 73.5t-44.5 96q-21 69 -21 136zM269 422 q1 -139 16.5 -187.5t57.5 -48.5t59.5 30t21.5 71t4 158t-13.5 174t-66.5 57t-66.5 -57.5t-12.5 -196.5zM284 1116q-1 -123 11 -173t53 -50t53.5 50t12.5 170t-12.5 167t-51.5 47t-52 -44t-14 -167z" />
-<glyph unicode="9" horiz-adv-x="700" d="M57 340v94h222q0 -162 11 -210t53 -48t56.5 37t14.5 127v283q-59 -37 -125 -37t-119 35.5t-76 92.5q-37 96 -37 189v293q0 87 43 188q25 60 88.5 99t157.5 39t157.5 -39t88.5 -99q43 -101 43 -188v-856q0 -356 -289 -356t-289 356zM279 825q0 -18 18 -42.5t49 -24.5 t48.5 20.5t19.5 40.5v443q0 20 -17.5 43.5t-49.5 23.5t-50 -24.5t-18 -42.5v-437z" />
-<glyph unicode=":" horiz-adv-x="362" d="M74 0v227h215v-227h-215zM74 893v227h215v-227h-215z" />
-<glyph unicode=";" horiz-adv-x="362" d="M74 0v227h215v-227l-113 -266h-102l71 266h-71zM74 893v227h215v-227h-215z" />
-<glyph unicode="&#x3c;" horiz-adv-x="1058" d="M74 649v160l911 475v-199l-698 -356l698 -356v-199z" />
-<glyph unicode="=" horiz-adv-x="1058" d="M74 477v172h911v-172h-911zM74 864v172h911v-172h-911z" />
-<glyph unicode="&#x3e;" horiz-adv-x="1058" d="M74 174v199l698 356l-698 356v199l911 -475v-160z" />
-<glyph unicode="?" horiz-adv-x="645" d="M25 1260q24 67 78 131q105 128 235 122q82 -2 138 -33.5t82 -81.5q46 -88 46 -170.5t-80 -219.5l-57 -96q-18 -32 -42 -106.5t-24 -143.5v-256h-190v256q0 102 24.5 195t48 140t65.5 118t50 105t-9 67.5t-60 34.5t-78 -48t-49 -98zM199 0h215v227h-215v-227z" />
-<glyph unicode="@" horiz-adv-x="872" d="M66 303v889q0 97 73 200q39 56 117 93t184.5 37t184 -37t116.5 -93q74 -105 74 -200v-793h-164l-20 56q-14 -28 -46 -48t-67 -20q-145 0 -145 172v485q0 170 145 170q71 0 113 -67v45q0 51 -45 104.5t-145.5 53.5t-145.5 -53.5t-45 -104.5v-889q0 -53 44 -103t153.5 -50 t160.5 63l152 -86q-109 -143 -320 -143q-106 0 -184 35.5t-117 90.5q-73 102 -73 193zM535 573q0 -53 48 -53t48 53v455q0 53 -48 53t-48 -53v-455z" />
-<glyph unicode="A" horiz-adv-x="765" d="M20 0l228 1505h270l227 -1505h-215l-41 307h-213l-40 -307h-216zM307 541h152l-64 475l-6 39h-12z" />
-<glyph unicode="B" horiz-adv-x="745" d="M82 0v1505h194q205 0 304.5 -91t99.5 -308q0 -106 -29.5 -175t-107.5 -136q14 -5 47 -38.5t54 -71.5q52 -97 52 -259q0 -414 -342 -426h-272zM303 219q74 0 109 31q55 56 55 211t-63 195q-42 26 -93 26h-8v-463zM303 885q87 0 119 39q45 55 45 138t-14.5 124t-30.5 60.5 t-45 28.5q-35 11 -74 11v-401z" />
-<glyph unicode="C" horiz-adv-x="708" d="M68 309v887q0 42 17 106t45 107t88.5 78t144 35t144 -34t88.5 -81q55 -93 60 -178l2 -33v-207h-206v207q-2 0 0 11.5t-3.5 27.5t-12.5 33q-17 39 -68 39q-70 -10 -78 -111v-887q0 -43 21.5 -76.5t59.5 -33.5t59.5 27.5t21.5 56.5v233h206v-207q0 -42 -17 -106t-45 -107 t-88.5 -77.5t-144 -34.5t-144 33.5t-88.5 81.5q-55 94 -60 175z" />
-<glyph unicode="D" horiz-adv-x="761" d="M82 0v1505h174q270 0 346 -113q31 -46 50.5 -95.5t28.5 -139.5t12 -177t3 -228.5t-3 -228.5t-12 -176t-28.5 -138t-50.5 -95t-80 -68q-106 -46 -266 -46h-174zM303 221q117 0 140.5 78t23.5 399v111q0 322 -23.5 398.5t-140.5 76.5v-1063z" />
-<glyph unicode="E" horiz-adv-x="628" d="M82 0v1505h506v-227h-285v-395h205v-242h-205v-414h285v-227h-506z" />
-<glyph unicode="F" horiz-adv-x="616" d="M82 0v1505h526v-227h-305v-395h205v-228h-205v-655h-221z" />
-<glyph unicode="G" horiz-adv-x="737" d="M67 271.5q0 26.5 1 37.5v887q0 42 17 106t45 107t88.5 78t144 35t144 -34t88.5 -81q55 -93 60 -178l2 -33v-231h-221v231q0 46 -19.5 78.5t-54 32.5t-53 -28t-18.5 -54l-2 -29v-905q0 -46 19.5 -78.5t54 -32.5t53 28t18.5 54l2 29v272h-88v187h309v-750h-131l-26 72 q-70 -88 -172 -88q-203 0 -250 213q-11 48 -11 74.5z" />
-<glyph unicode="H" horiz-adv-x="778" d="M82 0v1505h221v-622h172v622h221v-1505h-221v655h-172v-655h-221z" />
-<glyph unicode="I" horiz-adv-x="385" d="M82 0v1505h221v-1505h-221z" />
-<glyph unicode="J" horiz-adv-x="423" d="M12 -14v217q4 0 12.5 -1t29 2t35.5 12t28.5 34.5t13.5 62.5v1192h221v-1226q0 -137 -74 -216q-74 -78 -223 -78h-4q-19 0 -39 1z" />
-<glyph unicode="K" horiz-adv-x="768" d="M82 0v1505h221v-526h8l195 526h215l-203 -495l230 -1010h-216l-153 655l-6 31h-6l-64 -154v-532h-221z" />
-<glyph unicode="L" horiz-adv-x="604" d="M82 0v1505h221v-1300h293v-205h-514z" />
-<glyph unicode="M" horiz-adv-x="991" d="M82 0v1505h270l131 -688l11 -80h4l10 80l131 688h270v-1505h-204v1010h-13l-149 -1010h-94l-142 946l-8 64h-12v-1010h-205z" />
-<glyph unicode="N" horiz-adv-x="808" d="M82 0v1505h197l215 -784l18 -70h12v854h203v-1505h-197l-215 784l-18 70h-12v-854h-203z" />
-<glyph unicode="O" d="M68 309v887q0 42 17 106t45 107t88.5 78t144 35t144 -34t88.5 -81q55 -93 60 -178l2 -33v-887q0 -42 -17 -106t-45 -107t-88.5 -77.5t-144 -34.5t-144 33.5t-88.5 81.5q-55 94 -60 175zM289 309q0 -46 19.5 -78t54 -32t53 27.5t18.5 56.5l2 26v887q0 46 -19.5 78.5 t-54 32.5t-53 -28t-18.5 -54l-2 -29v-887z" />
-<glyph unicode="P" horiz-adv-x="720" d="M82 0v1505h221q166 0 277.5 -105.5t111.5 -345t-111.5 -346t-277.5 -106.5v-602h-221zM303 827q102 0 134 45.5t32 175.5t-33 181t-133 51v-453z" />
-<glyph unicode="Q" horiz-adv-x="729" d="M68 309v887q0 42 17 106t45 107t88.5 78t144 35t144 -34t88.5 -81q55 -93 60 -178l2 -33v-887q0 -94 -45 -182q33 -43 88 -53v-189q-160 0 -227 117q-55 -18 -125 -18t-130 33.5t-88 81.5q-55 94 -60 175zM289 309q0 -46 19.5 -78t54 -32t53 27.5t18.5 56.5l2 26v887 q0 46 -19.5 78.5t-54 32.5t-53 -28t-18.5 -54l-2 -29v-887z" />
-<glyph unicode="R" horiz-adv-x="739" d="M82 0v1505h221q377 0 377 -434q0 -258 -123 -342l141 -729h-221l-115 635h-59v-635h-221zM303 840q117 0 149 98q15 49 15 125t-15.5 125t-45.5 68q-44 30 -103 30v-446z" />
-<glyph unicode="S" horiz-adv-x="702" d="M37 422l217 20q0 -256 104 -256q90 0 91 166q0 59 -32 117.5t-45 79.5l-54 79q-40 58 -77 113t-73.5 117t-68 148.5t-31.5 162.5q0 139 71.5 245t216.5 108h10q88 0 152 -36t94 -100q54 -120 54 -264l-217 -20q0 217 -89 217q-75 -2 -75 -146q0 -59 23 -105 q32 -66 58 -104l197 -296q31 -49 67 -139.5t36 -166.5q0 -378 -306 -378h-2q-229 0 -290 188q-31 99 -31 250z" />
-<glyph unicode="T" horiz-adv-x="647" d="M4 1278v227h639v-227h-209v-1278h-221v1278h-209z" />
-<glyph unicode="U" horiz-adv-x="749" d="M80 309v1196h221v-1196q0 -46 19.5 -78t54.5 -32t53 27.5t18 56.5l3 26v1196h221v-1196q0 -42 -17.5 -106t-45 -107t-88 -77.5t-144.5 -34.5t-144.5 33.5t-88.5 81.5q-55 97 -60 175z" />
-<glyph unicode="V" horiz-adv-x="716" d="M18 1505h215l111 -827l8 -64h13l118 891h215l-229 -1505h-221z" />
-<glyph unicode="W" horiz-adv-x="1036" d="M25 1505h204l88 -782l5 -49h16l100 831h160l100 -831h17l92 831h205l-203 -1505h-172l-115 801h-8l-115 -801h-172z" />
-<glyph unicode="X" horiz-adv-x="737" d="M16 0l244 791l-240 714h218l120 -381l7 -18h8l127 399h217l-240 -714l244 -791h-217l-127 449l-4 18h-8l-132 -467h-217z" />
-<glyph unicode="Y" horiz-adv-x="700" d="M14 1505h217l111 -481l6 -14h4l6 14l111 481h217l-225 -864v-641h-221v641z" />
-<glyph unicode="Z" horiz-adv-x="626" d="M20 0v238l347 1048h-297v219h536v-219l-352 -1067h352v-219h-586z" />
-<glyph unicode="[" horiz-adv-x="538" d="M82 -213v1718h399v-196h-202v-1325h202v-197h-399z" />
-<glyph unicode="\" horiz-adv-x="792" d="M8 1692h162l614 -1872h-168z" />
-<glyph unicode="]" horiz-adv-x="538" d="M57 -16h203v1325h-203v196h400v-1718h-400v197z" />
-<glyph unicode="^" horiz-adv-x="1101" d="M53 809l381 696h234l381 -696h-199l-299 543l-299 -543h-199z" />
-<glyph unicode="_" horiz-adv-x="1210" d="M74 -154h1063v-172h-1063v172z" />
-<glyph unicode="`" horiz-adv-x="1024" d="M293 1489h215l106 -184h-159z" />
-<glyph unicode="a" horiz-adv-x="681" d="M49 235q0 131 34 212t83 124t98 73t88 50.5t43 36.5v123q0 102 -57 102q-41 0 -50 -42t-9 -84v-39h-207v47q0 123 80.5 211t190 88t184.5 -74t75 -180v-688q0 -109 14 -195h-202q-18 20 -19 90h-14q-20 -37 -65.5 -71.5t-102.5 -34.5t-110.5 60t-53.5 191zM252 291 q0 -104 57 -105q35 0 60.5 19.5t25.5 48.5v287q-143 -62 -143 -250z" />
-<glyph unicode="b" horiz-adv-x="686" d="M82 0v1505h207v-458q88 90 165 90t117.5 -69t40.5 -150v-715q0 -82 -41 -150.5t-118 -68.5q-33 0 -74 22.5t-66 44.5l-24 23v-74h-207zM289 246q0 -29 19.5 -48.5t42 -19.5t39 19.5t16.5 48.5v628q0 29 -16.5 48.5t-39 19.5t-42 -21.5t-19.5 -46.5v-628z" />
-<glyph unicode="c" horiz-adv-x="645" d="M66 315v490q0 332 264 332q137 0 201.5 -71t64.5 -251v-88h-207v135q0 51 -12 70.5t-47 19.5q-58 0 -58 -90v-604q0 -90 58 -90q35 0 47 19.5t12 70.5v156h207v-109q0 -180 -64.5 -250.5t-201.5 -70.5q-264 0 -264 331z" />
-<glyph unicode="d" horiz-adv-x="686" d="M74 203v715q0 82 41 150.5t118 68.5q33 0 74 -22.5t66 -45.5l24 -22v458h207v-1505h-207v74q-88 -90 -165 -90t-117.5 68.5t-40.5 150.5zM281 246q0 -29 16 -48.5t38.5 -19.5t42 19.5t19.5 48.5v628q0 25 -19.5 46.5t-42 21.5t-38.5 -19.5t-16 -48.5v-628z" />
-<glyph unicode="e" horiz-adv-x="659" d="M66 279v563q0 36 16 94.5t42 97.5t81 71t129 32q199 0 252 -197q14 -51 14 -92v-326h-342v-256q0 -59 39 -88q16 -12 37 -12q70 10 74 113v122h192v-129q0 -37 -16.5 -93t-41 -95t-79.5 -69.5t-130 -30.5t-130.5 30.5t-80.5 73.5q-49 87 -54 160zM258 684h150v158 q0 48 -19.5 81t-53.5 33t-53.5 -28.5t-21.5 -57.5l-2 -28v-158z" />
-<glyph unicode="f" horiz-adv-x="475" d="M20 934v186h105v31q0 190 51 270q23 35 71 63.5t115 28.5l97 -14v-178q-27 8 -62 8q-65 0 -65 -175v-5v-29h104v-186h-104v-934h-207v934h-105z" />
-<glyph unicode="g" horiz-adv-x="700" d="M12 -184q0 94 162 170q-125 35 -125 149q0 45 40 93t89 75q-51 35 -80.5 95.5t-34.5 105.5l-4 43v305q0 35 16.5 91t41 94t79 69t126.5 31q135 0 206 -103q102 102 170 103v-185q-72 0 -120 -24l10 -70v-317q0 -37 -17.5 -90.5t-42 -90t-79 -66.5t-104.5 -30t-62 2 q-29 -25 -29 -46t11 -33.5t42 -20.5t45.5 -10t65.5 -10.5t95 -21.5t89 -41q96 -60 96 -205t-103 -212q-100 -65 -250 -65h-9q-156 2 -240 50t-84 165zM213 -150q0 -77 132 -77h3q59 0 108.5 19t49.5 54t-20.5 52.5t-90.5 2 [...]
-<glyph unicode="h" horiz-adv-x="690" d="M82 0v1505h207v-479l32 32q79 79 145.5 79t106 -69t39.5 -150v-918h-206v887q-1 49 -50 49q-41 0 -67 -53v-883h-207z" />
-<glyph unicode="i" horiz-adv-x="370" d="M82 0v1120h207v-1120h-207zM82 1298v207h207v-207h-207z" />
-<glyph unicode="j" horiz-adv-x="364" d="M-45 -182q29 -8 57 -8q64 0 64 142v1168h207v-1149q0 -186 -51 -266q-23 -35 -71 -62.5t-115 -27.5t-91 12v191zM76 1298v207h207v-207h-207z" />
-<glyph unicode="k" horiz-adv-x="641" d="M82 0v1505h207v-714h10l113 329h186l-149 -364l188 -756h-199l-102 453l-4 16h-10l-33 -82v-387h-207z" />
-<glyph unicode="l" horiz-adv-x="370" d="M82 0v1505h207v-1505h-207z" />
-<glyph unicode="m" horiz-adv-x="1021" d="M82 0v1120h207v-94q2 0 33 30q80 81 139 81q100 0 139 -125q125 125 194.5 125t109.5 -69t40 -150v-918h-194v887q-1 49 -56 49q-41 0 -78 -53v-883h-194v887q0 49 -55 49q-41 0 -78 -53v-883h-207z" />
-<glyph unicode="n" horiz-adv-x="690" d="M82 0v1120h207v-94l32 32q79 79 145.5 79t106 -69t39.5 -150v-918h-206v887q-1 49 -50 49q-41 0 -67 -53v-883h-207z" />
-<glyph unicode="o" horiz-adv-x="657" d="M63 279v563q0 40 15.5 96.5t40 95.5t80 71t129.5 32q199 0 252 -197q14 -51 14 -92v-576q0 -102 -56 -188q-26 -39 -80.5 -69.5t-129 -30.5t-130 30.5t-80.5 73.5q-52 92 -52 160zM257 259q0 -17 9 -44q18 -49 62 -49q70 10 71 113v563l1 19q0 19 -10 45q-18 50 -62 50 q-68 -10 -70 -114v-563q1 -1 1 -4z" />
-<glyph unicode="p" horiz-adv-x="686" d="M82 -385v1505h207v-73q88 90 165 90t117.5 -69t40.5 -150v-715q0 -82 -41 -150.5t-118 -68.5q-33 0 -74 22.5t-66 44.5l-24 23v-459h-207zM289 246q0 -25 19.5 -46.5t42 -21.5t39 19.5t16.5 48.5v628q0 29 -16.5 48.5t-39 19.5t-42 -19.5t-19.5 -48.5v-628z" />
-<glyph unicode="q" horiz-adv-x="686" d="M74 203v715q0 82 41 150.5t118 68.5q33 0 74 -22.5t66 -45.5l24 -22v73h207v-1505h-207v459q-88 -90 -165 -90t-117.5 68.5t-40.5 150.5zM281 246q0 -29 16 -48.5t38.5 -19.5t42 21.5t19.5 46.5v628q0 29 -19.5 48.5t-42 19.5t-38.5 -19.5t-16 -48.5v-628z" />
-<glyph unicode="r" horiz-adv-x="503" d="M82 0v1120h207v-125q8 41 58.5 91.5t148.5 50.5v-230q-34 11 -77 11t-86.5 -39t-43.5 -101v-778h-207z" />
-<glyph unicode="s" horiz-adv-x="630" d="M37 326h192q0 -170 97 -170q71 0 71 131q0 78 -129 202q-68 66 -98.5 99t-64 101.5t-33.5 134t12 114.5t39 95q59 100 201 104h11q161 0 211 -105q42 -86 42 -198h-193q0 131 -67 131q-63 -2 -64 -131q0 -33 23.5 -73t45 -62.5t66.5 -65.5q190 -182 191 -342 q0 -123 -64.5 -215t-199.5 -92q-197 0 -260 170q-29 76 -29 172z" />
-<glyph unicode="t" horiz-adv-x="501" d="M20 934v186h105v277h207v-277h141v-186h-141v-557q0 -184 65 -184l76 8v-203q-45 -14 -112 -14t-114.5 28.5t-70 64.5t-34.5 96q-17 79 -17 187v574h-105z" />
-<glyph unicode="u" horiz-adv-x="690" d="M78 203v917h207v-887q0 -49 49 -49q41 0 67 54v882h207v-1120h-207v94l-31 -32q-78 -78 -145.5 -78t-107 68.5t-39.5 150.5z" />
-<glyph unicode="v" horiz-adv-x="602" d="M16 1120h201l68 -649l8 -72h16l76 721h201l-183 -1120h-204z" />
-<glyph unicode="w" horiz-adv-x="905" d="M20 1120h189l65 -585l9 -64h12l96 649h123l86 -585l10 -64h13l73 649h189l-166 -1120h-172l-80 535l-10 63h-8l-91 -598h-172z" />
-<glyph unicode="x" horiz-adv-x="618" d="M16 0l193 578l-176 542h194l74 -262l6 -31h4l6 31l74 262h195l-176 -542l192 -578h-201l-84 283l-6 30h-4l-6 -30l-84 -283h-201z" />
-<glyph unicode="y" horiz-adv-x="634" d="M25 1120h202l82 -688l4 -57h9l4 57l82 688h202l-198 -1204q-16 -127 -94 -222t-193 -95l-92 4v184q16 -4 49 -4q61 6 97 61.5t36 122.5z" />
-<glyph unicode="z" horiz-adv-x="532" d="M12 0v168l285 764h-240v188h459v-168l-285 -764h285v-188h-504z" />
-<glyph unicode="{" horiz-adv-x="688" d="M61 453v163q72 0 102 49.5t30 90.5v397q0 223 96 298t342 71v-172q-135 2 -188.5 -38t-53.5 -159v-397q0 -143 -127 -221q127 -82 127 -222v-397q0 -119 53.5 -159t188.5 -38v-172q-246 -4 -342 71t-96 298v397q0 57 -41 97.5t-91 42.5z" />
-<glyph unicode="|" horiz-adv-x="356" d="M82 -512v2204h192v-2204h-192z" />
-<glyph unicode="}" horiz-adv-x="688" d="M57 -281q135 -2 188.5 38t53.5 159v397q0 139 127 222q-127 78 -127 221v397q0 119 -53 159t-189 38v172q246 4 342.5 -71t96.5 -298v-397q0 -63 41 -101.5t90 -38.5v-163q-72 -4 -101.5 -52.5t-29.5 -87.5v-397q0 -223 -96.5 -298t-342.5 -71v172z" />
-<glyph unicode="~" horiz-adv-x="1280" d="M113 1352q35 106 115 200q34 41 94.5 74t121 33t116.5 -18.5t82 -33t83 -51.5q106 -72 174 -71q109 0 178 153l13 29l135 -57q-63 -189 -206 -276q-56 -34 -120 -34q-121 0 -272 101q-115 74 -178.5 74t-113.5 -45.5t-69 -90.5l-18 -45z" />
-<glyph unicode="&#xa1;" horiz-adv-x="387" d="M74 -385l55 1100h129l55 -1100h-239zM86 893v227h215v-227h-215z" />
-<glyph unicode="&#xa2;" horiz-adv-x="636" d="M66 508v489q0 297 208 328v242h123v-244q98 -16 144.5 -88t46.5 -227v-88h-189v135q0 90 -72.5 90t-72.5 -90v-604q0 -90 72 -91q74 0 73 91v155h189v-108q0 -156 -46 -228.5t-145 -89.5v-303h-123v301q-209 31 -208 330z" />
-<glyph unicode="&#xa3;" horiz-adv-x="817" d="M4 63q8 20 23.5 53.5t70 91.5t117.5 68q37 111 37 189t-31 184h-188v137h147l-6 21q-78 254 -78 333t15.5 140t48.5 116q72 122 231 126q190 4 267 -126q65 -108 65 -276h-213q0 201 -115 197q-47 -2 -68.5 -51t-21.5 -139.5t70 -315.5l6 -25h211v-137h-174 q25 -100 24.5 -189t-57.5 -204q16 -8 44 -24q59 -35 89 -35q74 4 82 190l188 -22q-12 -182 -81.5 -281.5t-169.5 -99.5q-51 0 -143.5 51t-127.5 51t-63.5 -25.5t-40.5 -52.5l-12 -24z" />
-<glyph unicode="&#xa5;" horiz-adv-x="720" d="M25 1505h217l110 -481l6 -14h4l7 14l110 481h217l-196 -753h147v-138h-176v-137h176v-137h-176v-340h-221v340h-176v137h176v137h-176v138h147z" />
-<glyph unicode="&#xa8;" horiz-adv-x="1024" d="M272 1305v200h191v-200h-191zM561 1305v200h191v-200h-191z" />
-<glyph unicode="&#xa9;" horiz-adv-x="1644" d="M53 751.5q0 317.5 225.5 544t543 226.5t543.5 -226.5t226 -544t-226 -542.5t-543.5 -225t-543 225t-225.5 542.5zM172 751.5q0 -266.5 191.5 -458t457.5 -191.5t459 191.5t193 459t-191.5 459t-459 191.5t-459 -192.5t-191.5 -459zM627 487v531q0 122 97 174q40 22 95 22 q147 0 182 -147l7 -49v-125h-138v142q0 11 -12 28.5t-37 17.5q-47 -2 -49 -63v-531q0 -63 49 -63q53 2 49 63v125h138v-125q0 -68 -40 -127q-18 -26 -57 -47.5t-108.5 -21.5t-117.5 49t-54 98z" />
-<glyph unicode="&#xaa;" horiz-adv-x="681" d="M49 235q0 131 34 212t83 124t98 73t88 50.5t43 36.5v123q0 102 -57 102q-41 0 -50 -42t-9 -84v-39h-207v47q0 123 80.5 211t190 88t184.5 -74t75 -180v-688q0 -109 14 -195h-202q-18 20 -19 90h-14q-20 -37 -65.5 -71.5t-102.5 -34.5t-110.5 60t-53.5 191zM252 291 q0 -104 57 -105q35 0 60.5 19.5t25.5 48.5v287q-143 -62 -143 -250z" />
-<glyph unicode="&#xad;" horiz-adv-x="444" d="M74 455v194h297v-194h-297z" />
-<glyph unicode="&#xae;" horiz-adv-x="1644" d="M53 751.5q0 317.5 225.5 544t543 226.5t543.5 -226.5t226 -544t-226 -542.5t-543.5 -225t-543 225t-225.5 542.5zM172 751.5q0 -266.5 191.5 -458t457.5 -191.5t459 191.5t193 459t-191.5 459t-459 191.5t-459 -192.5t-191.5 -459zM625 313v879h196q231 0 232 -258 q0 -76 -16.5 -125t-71.5 -96l106 -400h-151l-95 365h-55v-365h-145zM770 805h45q43 0 65.5 21.5t27.5 45t5 61.5t-5 62.5t-27.5 46t-65.5 21.5h-45v-258z" />
-<glyph unicode="&#xaf;" horiz-adv-x="1024" d="M313 1315v162h398v-162h-398z" />
-<glyph unicode="&#xb2;" horiz-adv-x="731" d="M55 0v219l39 62q25 39 88.5 152.5t112.5 220t91 241.5t44 238q0 184 -73.5 184t-73.5 -184v-105h-222v105q0 389 295 389t295 -375q0 -336 -346 -928h350v-219h-600z" />
-<glyph unicode="&#xb3;" horiz-adv-x="686" d="M45 1071q0 249 63 343q29 42 84.5 75t134.5 33t136 -31t84.5 -71t44.5 -92q22 -71 22 -130q0 -291 -108 -399q127 -100 127 -414q0 -68 -19.5 -145.5t-47 -128t-85 -89t-136.5 -38.5t-135 31.5t-86 75.5t-48 113q-23 91 -23 230h217q2 -150 17.5 -203t59.5 -53t56.5 50.5 t12.5 104.5t1 102t0 63q-6 82 -14 95l-18 33q-12 22 -29 29q-55 22 -108 25h-19v184q133 7 156 73q12 34 12 91v105q0 146 -29 177q-16 17 -40 17q-41 0 -52.5 -49t-13.5 -207h-217z" />
-<glyph unicode="&#xb4;" horiz-adv-x="1024" d="M410 1305l106 184h215l-162 -184h-159z" />
-<glyph unicode="&#xb7;" horiz-adv-x="215" d="M0 649v228h215v-228h-215z" />
-<glyph unicode="&#xb8;" horiz-adv-x="1024" d="M426 -111h172v-141l-45 -133h-104l40 133h-63v141z" />
-<glyph unicode="&#xb9;" horiz-adv-x="475" d="M25 1180v141q129 25 205 130q16 21 30 54h133v-1505h-221v1180h-147z" />
-<glyph unicode="&#xba;" horiz-adv-x="657" d="M63 279v563q0 40 15.5 96.5t40 95.5t80 71t129.5 32q199 0 252 -197q14 -51 14 -92v-576q0 -102 -56 -188q-26 -39 -80.5 -69.5t-129 -30.5t-130 30.5t-80.5 73.5q-52 92 -52 160zM257 259q0 -17 9 -44q18 -49 62 -49q70 10 71 113v563l1 19q0 19 -10 45q-18 50 -62 50 q-68 -10 -70 -114v-563q1 -1 1 -4z" />
-<glyph unicode="&#xbf;" horiz-adv-x="645" d="M41 -106q0 82 80 219l57 95q18 32 42 106.5t24 144.5v256h190v-256q0 -102 -24.5 -195.5t-48 -140.5t-65.5 -118t-50 -104.5t9 -67.5t60 -35t78 48.5t49 98.5l179 -84q-24 -66 -78 -132q-104 -126 -236 -122q-163 4 -220 115q-46 90 -46 172zM231 893v227h215v-227h-215z " />
-<glyph unicode="&#xc0;" horiz-adv-x="765" d="M20 0l228 1505h270l227 -1505h-215l-41 307h-213l-40 -307h-216zM141 1823h215l107 -185h-160zM307 541h152l-64 475l-6 39h-12z" />
-<glyph unicode="&#xc1;" horiz-adv-x="765" d="M20 0l228 1505h270l227 -1505h-215l-41 307h-213l-40 -307h-216zM293 1638l106 185h215l-161 -185h-160zM307 541h152l-64 475l-6 39h-12z" />
-<glyph unicode="&#xc2;" horiz-adv-x="765" d="M20 0l228 1505h270l227 -1505h-215l-41 307h-213l-40 -307h-216zM133 1638l141 185h220l141 -185h-189l-63 72l-61 -72h-189zM307 541h152l-64 475l-6 39h-12z" />
-<glyph unicode="&#xc3;" horiz-adv-x="765" d="M20 0l228 1505h270l227 -1505h-215l-41 307h-213l-40 -307h-216zM184 1632v152q49 39 95.5 39t104.5 -18.5t100.5 -19.5t97.5 32v-152q-51 -39 -95.5 -39t-102.5 19.5t-98 19.5t-102 -33zM307 541h152l-64 475l-6 39h-12z" />
-<glyph unicode="&#xc4;" horiz-adv-x="765" d="M20 0l228 1505h270l227 -1505h-215l-41 307h-213l-40 -307h-216zM143 1638v201h191v-201h-191zM307 541h152l-64 475l-6 39h-12zM432 1638v201h191v-201h-191z" />
-<glyph unicode="&#xc5;" horiz-adv-x="765" d="M20 0l228 1505h270l227 -1505h-215l-41 307h-213l-40 -307h-216zM231 1761.5q0 61.5 45.5 102.5t109 41t107.5 -41t44 -102.5t-44 -102.5t-107.5 -41t-109 41t-45.5 102.5zM307 541h152l-64 475l-6 39h-12zM309 1761.5q0 -28.5 23.5 -50t52.5 -21.5t52.5 21.5t23.5 50 t-23.5 50t-52.5 21.5t-52.5 -21.5t-23.5 -50z" />
-<glyph unicode="&#xc6;" horiz-adv-x="1099" d="M16 0l420 1505h623v-227h-285v-395h205v-242h-205v-414h285v-227h-506v307h-227l-90 -307h-220zM393 541h160v514h-10z" />
-<glyph unicode="&#xc7;" horiz-adv-x="708" d="M68 309v887q0 42 17 106t45 107t88.5 78t144 35t144 -34t88.5 -81q55 -93 60 -178l2 -33v-207h-206v207q-2 0 0 11.5t-3.5 27.5t-12.5 33q-17 39 -68 39q-70 -10 -78 -111v-887q0 -43 21.5 -76.5t59.5 -33.5t59.5 27.5t21.5 56.5v233h206v-207q0 -42 -17 -106t-45 -107 t-88.5 -77.5t-144 -34.5t-144 33.5t-88.5 81.5q-55 94 -60 175zM268 -111v-141h64l-41 -133h104l45 133v141h-172z" />
-<glyph unicode="&#xc8;" horiz-adv-x="628" d="M82 0v1505h506v-227h-285v-395h205v-242h-205v-414h285v-227h-506zM111 1823h215l106 -185h-160z" />
-<glyph unicode="&#xc9;" horiz-adv-x="628" d="M82 0v1505h506v-227h-285v-395h205v-242h-205v-414h285v-227h-506zM236 1638l106 185h215l-162 -185h-159z" />
-<glyph unicode="&#xca;" horiz-adv-x="628" d="M82 0v1505h506v-227h-285v-395h205v-242h-205v-414h285v-227h-506zM84 1638l141 185h219l142 -185h-189l-63 72l-62 -72h-188z" />
-<glyph unicode="&#xcb;" horiz-adv-x="628" d="M82 0v1505h506v-227h-285v-395h205v-242h-205v-414h285v-227h-506zM94 1638v201h191v-201h-191zM383 1638v201h190v-201h-190z" />
-<glyph unicode="&#xcc;" horiz-adv-x="401" d="M-6 1823h215l106 -185h-159zM98 0v1505h221v-1505h-221z" />
-<glyph unicode="&#xcd;" horiz-adv-x="401" d="M82 0v1505h221v-1505h-221zM86 1638l107 185h215l-162 -185h-160z" />
-<glyph unicode="&#xce;" horiz-adv-x="370" d="M-66 1638l142 185h219l141 -185h-188l-64 72l-61 -72h-189zM74 0v1505h221v-1505h-221z" />
-<glyph unicode="&#xcf;" horiz-adv-x="372" d="M-53 1638v201h190v-201h-190zM76 0v1505h221v-1505h-221zM236 1638v201h190v-201h-190z" />
-<glyph unicode="&#xd0;" horiz-adv-x="761" d="M20 655v228h62v622h174q270 0 346 -113q31 -46 50.5 -95.5t28.5 -139.5t12 -177t3 -228.5t-3 -228.5t-12 -176t-28.5 -138t-50.5 -95t-80 -68q-106 -46 -266 -46h-174v655h-62zM303 221q117 0 141.5 81t22.5 452q2 371 -22.5 450.5t-141.5 79.5v-401h84v-228h-84v-434z " />
-<glyph unicode="&#xd1;" horiz-adv-x="808" d="M82 0v1505h197l215 -784l18 -70h12v854h203v-1505h-197l-215 784l-18 70h-12v-854h-203zM207 1632v152q49 39 95 39t104.5 -18.5t102.5 -19.5t95 32v-152q-51 -39 -95 -39t-102.5 19.5t-100 19.5t-99.5 -33z" />
-<glyph unicode="&#xd2;" d="M68 309v887q0 42 17 106t45 107t88.5 78t144 35t144 -34t88.5 -81q55 -93 60 -178l2 -33v-887q0 -42 -17 -106t-45 -107t-88.5 -77.5t-144 -34.5t-144 33.5t-88.5 81.5q-55 94 -60 175zM121 1823h215l106 -185h-159zM289 309q0 -46 19.5 -78t54 -32t53 27.5t18.5 56.5 l2 26v887q0 46 -19.5 78.5t-54 32.5t-53 -28t-18.5 -54l-2 -29v-887z" />
-<glyph unicode="&#xd3;" d="M68 309v887q0 42 17 106t45 107t88.5 78t144 35t144 -34t88.5 -81q55 -93 60 -178l2 -33v-887q0 -42 -17 -106t-45 -107t-88.5 -77.5t-144 -34.5t-144 33.5t-88.5 81.5q-55 94 -60 175zM285 1638l106 185h215l-162 -185h-159zM289 309q0 -46 19.5 -78t54 -32t53 27.5 t18.5 56.5l2 26v887q0 46 -19.5 78.5t-54 32.5t-53 -28t-18.5 -54l-2 -29v-887z" />
-<glyph unicode="&#xd4;" d="M68 309v887q0 42 17 106t45 107t88.5 78t144 35t144 -34t88.5 -81q55 -93 60 -178l2 -33v-887q0 -42 -17 -106t-45 -107t-88.5 -77.5t-144 -34.5t-144 33.5t-88.5 81.5q-55 94 -60 175zM113 1638l141 185h219l141 -185h-188l-64 72l-61 -72h-188zM289 309q0 -46 19.5 -78 t54 -32t53 27.5t18.5 56.5l2 26v887q0 46 -19.5 78.5t-54 32.5t-53 -28t-18.5 -54l-2 -29v-887z" />
-<glyph unicode="&#xd5;" d="M68 309v887q0 42 17 106t45 107t88.5 78t144 35t144 -34t88.5 -81q55 -93 60 -178l2 -33v-887q0 -42 -17 -106t-45 -107t-88.5 -77.5t-144 -34.5t-144 33.5t-88.5 81.5q-55 94 -60 175zM164 1632v152q49 39 95 39t104.5 -18.5t102.5 -19.5t95 32v-152q-51 -39 -95 -39 t-102.5 19.5t-100 19.5t-99.5 -33zM289 309q0 -46 19.5 -78t54 -32t53 27.5t18.5 56.5l2 26v887q0 46 -19.5 78.5t-54 32.5t-53 -28t-18.5 -54l-2 -29v-887z" />
-<glyph unicode="&#xd6;" d="M68 309v887q0 42 17 106t45 107t88.5 78t144 35t144 -34t88.5 -81q55 -93 60 -178l2 -33v-887q0 -42 -17 -106t-45 -107t-88.5 -77.5t-144 -34.5t-144 33.5t-88.5 81.5q-55 94 -60 175zM123 1638v201h190v-201h-190zM289 309q0 -46 19.5 -78t54 -32t53 27.5t18.5 56.5 l2 26v887q0 46 -19.5 78.5t-54 32.5t-53 -28t-18.5 -54l-2 -29v-887zM412 1638v201h190v-201h-190z" />
-<glyph unicode="&#xd8;" d="M59 -20l47 157q-36 74 -36 148l-2 24v887q0 42 17 106t45 107t88.5 78t148 35t153.5 -43l15 47h122l-45 -150q43 -84 43 -155l2 -25v-887q0 -42 -17 -106t-45 -107t-88.5 -77.5t-150.5 -34.5t-153 43l-15 -47h-129zM289 309q0 -46 19.5 -78t54 -32t53 27.5t18.5 56.5 l2 26v488zM289 727l147 479q-8 100 -74 101q-35 0 -53 -28t-18 -54l-2 -29v-469z" />
-<glyph unicode="&#xd9;" horiz-adv-x="749" d="M80 309q0 -42 17.5 -106t45 -107t88 -77.5t144.5 -34.5t144.5 33.5t88.5 81.5q55 97 60 175l2 35v1196h-221v-1196q0 -44 -19.5 -77t-54.5 -33t-53.5 27.5t-18.5 56.5l-2 26v1196h-221v-1196zM145 1823h215l107 -185h-160z" />
-<glyph unicode="&#xda;" horiz-adv-x="749" d="M80 309q0 -42 17.5 -106t45 -107t88 -77.5t144.5 -34.5t144.5 33.5t88.5 81.5q55 97 60 175l2 35v1196h-221v-1196q0 -44 -19.5 -77t-54.5 -33t-53.5 27.5t-18.5 56.5l-2 26v1196h-221v-1196zM307 1638l107 185h215l-162 -185h-160z" />
-<glyph unicode="&#xdb;" horiz-adv-x="749" d="M80 309q0 -42 17.5 -106t45 -107t88 -77.5t144.5 -34.5t144.5 33.5t88.5 81.5q55 97 60 175l2 35v1196h-221v-1196q0 -44 -19.5 -77t-54.5 -33t-53.5 27.5t-18.5 56.5l-2 26v1196h-221v-1196zM125 1638l141 185h219l142 -185h-189l-63 72l-62 -72h-188z" />
-<glyph unicode="&#xdc;" horiz-adv-x="749" d="M80 309v1196h221v-1196q0 -46 19.5 -78t54.5 -32t53 27.5t18 56.5l3 26v1196h221v-1196q0 -42 -17.5 -106t-45 -107t-88 -77.5t-144.5 -34.5t-144.5 33.5t-88.5 81.5q-55 97 -60 175zM135 1638v201h191v-201h-191zM424 1638v201h190v-201h-190z" />
-<glyph unicode="&#xdd;" horiz-adv-x="704" d="M16 1505l226 -864v-641h221v641l225 864h-217l-111 -481l-6 -14h-4l-6 14l-111 481h-217zM254 1638l106 185h215l-161 -185h-160z" />
-<glyph unicode="&#xde;" d="M82 0v1505h219v-241h2q166 0 277.5 -105.5t111.5 -345.5t-111.5 -346.5t-277.5 -106.5v-360h-221zM303 586q102 0 134 45t32 175t-33 181t-133 51v-452z" />
-<glyph unicode="&#xdf;" horiz-adv-x="733" d="M66 0v1235q0 123 70.5 205t206.5 82t204.5 -81t68.5 -197t-88 -181q152 -88 152 -488q0 -362 -87 -475q-46 -59 -102.5 -79.5t-144.5 -20.5v193q45 0 70 25q57 57 57 357q0 316 -57 377q-25 27 -70 27v141q35 0 60.5 33t25.5 84q0 100 -86 100q-74 0 -74 -102v-1235h-206 z" />
-<glyph unicode="&#xe0;" horiz-adv-x="681" d="M49 235q0 131 34 212t83 124t98 73t88 50.5t43 36.5v123q0 102 -57 102q-41 0 -50 -42t-9 -84v-39h-207v47q0 123 80.5 211t190 88t184.5 -74t75 -180v-688q0 -109 14 -195h-202q-18 20 -19 90h-14q-20 -37 -65.5 -71.5t-102.5 -34.5t-110.5 60t-53.5 191zM102 1489h215 l107 -184h-160zM252 291q0 -104 57 -105q35 0 60.5 19.5t25.5 48.5v287q-143 -62 -143 -250z" />
-<glyph unicode="&#xe1;" horiz-adv-x="681" d="M49 235q0 131 34 212t83 124t98 73t88 50.5t43 36.5v123q0 102 -57 102q-41 0 -50 -42t-9 -84v-39h-207v47q0 123 80.5 211t190 88t184.5 -74t75 -180v-688q0 -109 14 -195h-202q-18 20 -19 90h-14q-20 -37 -65.5 -71.5t-102.5 -34.5t-110.5 60t-53.5 191zM252 291 q0 -104 57 -105q35 0 60.5 19.5t25.5 48.5v287q-143 -62 -143 -250zM264 1305l107 184h215l-162 -184h-160z" />
-<glyph unicode="&#xe2;" horiz-adv-x="681" d="M49 235q0 131 34 212t83 124t98 73t88 50.5t43 36.5v123q0 102 -57 102q-41 0 -50 -42t-9 -84v-39h-207v47q0 123 80.5 211t190 88t184.5 -74t75 -180v-688q0 -109 14 -195h-202q-18 20 -19 90h-14q-20 -37 -65.5 -71.5t-102.5 -34.5t-110.5 60t-53.5 191zM90 1305 l141 184h220l141 -184h-189l-63 71l-61 -71h-189zM252 291q0 -104 57 -105q35 0 60.5 19.5t25.5 48.5v287q-143 -62 -143 -250z" />
-<glyph unicode="&#xe3;" horiz-adv-x="681" d="M49 235q0 131 34 212t83 124t98 73t88 50.5t43 36.5v123q0 102 -57 102q-41 0 -50 -42t-9 -84v-39h-207v47q0 123 80.5 211t190 88t184.5 -74t75 -180v-688q0 -109 14 -195h-202q-18 20 -19 90h-14q-20 -37 -65.5 -71.5t-102.5 -34.5t-110.5 60t-53.5 191zM143 1305v151 q49 39 95.5 39t104.5 -18.5t97 -19.5t101 32v-152q-51 -39 -95.5 -39t-102.5 19.5t-99 19.5t-101 -32zM252 291q0 -104 57 -105q35 0 60.5 19.5t25.5 48.5v287q-143 -62 -143 -250z" />
-<glyph unicode="&#xe4;" horiz-adv-x="681" d="M49 235q0 131 34 212t83 124t98 73t88 50.5t43 36.5v123q0 102 -57 102q-41 0 -50 -42t-9 -84v-39h-207v47q0 123 80.5 211t190 88t184.5 -74t75 -180v-688q0 -109 14 -195h-202q-18 20 -19 90h-14q-20 -37 -65.5 -71.5t-102.5 -34.5t-110.5 60t-53.5 191zM102 1305v200 h191v-200h-191zM252 291q0 -104 57 -105q35 0 60.5 19.5t25.5 48.5v287q-143 -62 -143 -250zM391 1305v200h191v-200h-191z" />
-<glyph unicode="&#xe5;" horiz-adv-x="681" d="M49 235q0 131 34 212t83 124t98 73t88 50.5t43 36.5v123q0 102 -57 102q-41 0 -50 -42t-9 -84v-39h-207v47q0 123 80.5 211t190 88t184.5 -74t75 -180v-688q0 -109 14 -195h-202q-18 20 -19 90h-14q-20 -37 -65.5 -71.5t-102.5 -34.5t-110.5 60t-53.5 191zM188 1421.5 q0 61.5 45.5 102.5t109 41t107.5 -41t44 -102.5t-44 -102.5t-107.5 -41t-109 41t-45.5 102.5zM252 291q0 -104 57 -105q35 0 60.5 19.5t25.5 48.5v287q-143 -62 -143 -250zM266 1421.5q0 -28.5 23.5 -50t52.5 -21. [...]
-<glyph unicode="&#xe6;" horiz-adv-x="989" d="M49 235q0 131 34 212t83 124t98 73t88 50.5t43 36.5v123q0 102 -57 102q-41 0 -50 -42t-9 -84v-39h-207v47q0 123 80.5 211t197.5 88q84 0 152 -52q66 51 162 52q199 0 251 -197q14 -51 15 -92v-326h-342v-256q0 -60 38 -88q17 -12 38 -12q70 10 73 113v122h193v-129 q0 -37 -16.5 -93t-41 -95t-80 -69.5t-130.5 -30.5q-158 0 -226 131q-102 -131 -221 -131q-59 0 -112.5 60t-53.5 191zM252 291q0 -104 57 -105q35 0 60.5 19.5t25.5 48.5v287q-143 -62 -143 -250zM588 684h149v158q [...]
-<glyph unicode="&#xe7;" horiz-adv-x="645" d="M66 315v490q0 332 264 332q137 0 201.5 -71t64.5 -251v-88h-207v135q0 51 -12 70.5t-47 19.5q-58 0 -58 -90v-604q0 -90 58 -90q35 0 47 19.5t12 70.5v156h207v-109q0 -180 -64.5 -250.5t-201.5 -70.5q-264 0 -264 331zM238 -111v-141h63l-41 -133h105l45 133v141h-172z " />
-<glyph unicode="&#xe8;" horiz-adv-x="659" d="M66 279v563q0 36 16 94.5t42 97.5t81 71t129 32q199 0 252 -197q14 -51 14 -92v-326h-342v-256q0 -59 39 -88q16 -12 37 -12q70 10 74 113v122h192v-129q0 -37 -16.5 -93t-41 -95t-79.5 -69.5t-130 -30.5t-130.5 30.5t-80.5 73.5q-49 87 -54 160zM102 1489h215l107 -184 h-160zM258 684h150v158q0 48 -19.5 81t-53.5 33t-53.5 -28.5t-21.5 -57.5l-2 -28v-158z" />
-<glyph unicode="&#xe9;" horiz-adv-x="659" d="M66 279v563q0 36 16 94.5t42 97.5t81 71t129 32q199 0 252 -197q14 -51 14 -92v-326h-342v-256q0 -59 39 -88q16 -12 37 -12q70 10 74 113v122h192v-129q0 -37 -16.5 -93t-41 -95t-79.5 -69.5t-130 -30.5t-130.5 30.5t-80.5 73.5q-49 87 -54 160zM258 684h150v158 q0 48 -19.5 81t-53.5 33t-53.5 -28.5t-21.5 -57.5l-2 -28v-158zM264 1305l107 184h215l-162 -184h-160z" />
-<glyph unicode="&#xea;" horiz-adv-x="659" d="M66 279v563q0 36 16 94.5t42 97.5t81 71t129 32q199 0 252 -197q14 -51 14 -92v-326h-342v-256q0 -59 39 -88q16 -12 37 -12q70 10 74 113v122h192v-129q0 -37 -16.5 -93t-41 -95t-79.5 -69.5t-130 -30.5t-130.5 30.5t-80.5 73.5q-49 87 -54 160zM80 1305l141 184h219 l142 -184h-189l-63 71l-62 -71h-188zM258 684h150v158q0 48 -19.5 81t-53.5 33t-53.5 -28.5t-21.5 -57.5l-2 -28v-158z" />
-<glyph unicode="&#xeb;" horiz-adv-x="659" d="M66 279v563q0 36 16 94.5t42 97.5t81 71t129 32q199 0 252 -197q14 -51 14 -92v-326h-342v-256q0 -59 39 -88q16 -12 37 -12q70 10 74 113v122h192v-129q0 -37 -16.5 -93t-41 -95t-79.5 -69.5t-130 -30.5t-130.5 30.5t-80.5 73.5q-49 87 -54 160zM90 1305v200h191v-200 h-191zM258 684h150v158q0 48 -19.5 81t-53.5 33t-53.5 -28.5t-21.5 -57.5l-2 -28v-158zM379 1305v200h190v-200h-190z" />
-<glyph unicode="&#xec;" horiz-adv-x="370" d="M-33 1489h215l107 -184h-160zM82 0h207v1120h-207v-1120z" />
-<glyph unicode="&#xed;" horiz-adv-x="370" d="M82 0h207v1120h-207v-1120zM82 1305l106 184h215l-161 -184h-160z" />
-<glyph unicode="&#xee;" horiz-adv-x="370" d="M-66 1305l142 184h219l141 -184h-188l-64 71l-61 -71h-189zM82 0h207v1120h-207v-1120z" />
-<glyph unicode="&#xef;" horiz-adv-x="372" d="M-53 1305v200h190v-200h-190zM82 0v1120h207v-1120h-207zM236 1305v200h190v-200h-190z" />
-<glyph unicode="&#xf0;" horiz-adv-x="673" d="M76 279v579q0 279 172 279q63 0 155 -78q-12 109 -51 203l-82 -72l-55 63l100 88l-45 66l109 100q25 -27 53 -61l94 82l56 -66l-101 -88q125 -201 125 -446v-656q0 -102 -56 -188q-26 -39 -80 -69.5t-129 -30.5t-130 30.5t-80 73.5q-53 91 -53 160zM270 267.5 q-2 -11.5 2 -29t10 -34.5q16 -38 58 -38q70 10 72 113v563q-2 0 0 11t-2 28.5t-10 34.5q-16 40 -60 40q-68 -10 -70 -114v-563q2 0 0 -11.5z" />
-<glyph unicode="&#xf1;" horiz-adv-x="690" d="M82 0v1120h207v-94l32 32q79 79 145.5 79t106 -69t39.5 -150v-918h-206v887q-1 49 -50 49q-41 0 -67 -53v-883h-207zM147 1305v151q49 39 95.5 39t105 -18.5t97 -19.5t100.5 32v-152q-51 -39 -95.5 -39t-102.5 19.5t-99 19.5t-101 -32z" />
-<glyph unicode="&#xf2;" horiz-adv-x="657" d="M63 279v563q0 40 15.5 96.5t40 95.5t80 71t129.5 32q199 0 252 -197q14 -51 14 -92v-576q0 -102 -56 -188q-26 -39 -80.5 -69.5t-129 -30.5t-130 30.5t-80.5 73.5q-52 92 -52 160zM98 1489h215l107 -184h-160zM258 267.5q-2 -11.5 2 -29t10 -34.5q14 -38 58 -38 q70 10 71 113v563q-2 0 0 11t-2 28.5t-10 34.5q-15 40 -59 40q-68 -10 -70 -114v-563q2 0 0 -11.5z" />
-<glyph unicode="&#xf3;" horiz-adv-x="657" d="M63 279v563q0 40 15.5 96.5t40 95.5t80 71t129.5 32q199 0 252 -197q14 -51 14 -92v-576q0 -102 -56 -188q-26 -39 -80.5 -69.5t-129 -30.5t-130 30.5t-80.5 73.5q-52 92 -52 160zM258 267.5q-2 -11.5 2 -29t10 -34.5q14 -38 58 -38q70 10 71 113v563q-2 0 0 11t-2 28.5 t-10 34.5q-15 40 -59 40q-68 -10 -70 -114v-563q2 0 0 -11.5zM260 1305l107 184h215l-162 -184h-160z" />
-<glyph unicode="&#xf4;" horiz-adv-x="657" d="M63 279v563q0 40 15.5 96.5t40 95.5t80 71t129.5 32q199 0 252 -197q14 -51 14 -92v-576q0 -102 -56 -188q-26 -39 -80.5 -69.5t-129 -30.5t-130 30.5t-80.5 73.5q-52 92 -52 160zM78 1305l141 184h219l142 -184h-189l-63 71l-62 -71h-188zM258 267.5q-2 -11.5 2 -29 t10 -34.5q14 -38 58 -38q70 10 71 113v563q-2 0 0 11t-2 28.5t-10 34.5q-15 40 -59 40q-68 -10 -70 -114v-563q2 0 0 -11.5z" />
-<glyph unicode="&#xf5;" horiz-adv-x="657" d="M63 279v563q0 40 15.5 96.5t40 95.5t80 71t129.5 32q199 0 252 -197q14 -51 14 -92v-576q0 -102 -56 -188q-26 -39 -80.5 -69.5t-129 -30.5t-130 30.5t-80.5 73.5q-52 92 -52 160zM131 1305v151q49 39 95.5 39t104.5 -18.5t98.5 -19.5t98.5 32v-152q-51 -39 -95 -39 t-102 19.5t-101 19.5t-99 -32zM258 267.5q-2 -11.5 2 -29t10 -34.5q14 -38 58 -38q70 10 71 113v563q-2 0 0 11t-2 28.5t-10 34.5q-15 40 -59 40q-68 -10 -70 -114v-563q2 0 0 -11.5z" />
-<glyph unicode="&#xf6;" horiz-adv-x="657" d="M63 279v563q0 40 15.5 96.5t40 95.5t80 71t129.5 32q199 0 252 -197q14 -51 14 -92v-576q0 -102 -56 -188q-26 -39 -80.5 -69.5t-129 -30.5t-130 30.5t-80.5 73.5q-52 92 -52 160zM90 1305v200h191v-200h-191zM258 267.5q-2 -11.5 2 -29t10 -34.5q14 -38 58 -38 q70 10 71 113v563q-2 0 0 11t-2 28.5t-10 34.5q-15 40 -59 40q-68 -10 -70 -114v-563q2 0 0 -11.5zM379 1305v200h190v-200h-190z" />
-<glyph unicode="&#xf8;" horiz-adv-x="657" d="M63 279v563q0 40 15.5 96.5t40 95.5t80 71t118 32t117.5 -19l21 80h75l-30 -121q88 -84 94 -229v-576q0 -102 -56 -188q-26 -39 -80.5 -69.5t-120.5 -30.5t-112 16l-20 -78h-80l31 121q-41 39 -64.5 97.5t-25.5 97.5zM258 436l125 486q-18 35 -55 34q-68 -10 -70 -114 v-406zM274 197q17 -31 54 -31q70 10 71 113v403z" />
-<glyph unicode="&#xf9;" horiz-adv-x="690" d="M78 203v917h207v-887q0 -49 49 -49q41 0 67 54v882h207v-1120h-207v94l-31 -32q-78 -78 -145.5 -78t-107 68.5t-39.5 150.5zM113 1489h215l106 -184h-160z" />
-<glyph unicode="&#xfa;" horiz-adv-x="690" d="M78 203v917h207v-887q0 -49 49 -49q41 0 67 54v882h207v-1120h-207v94l-31 -32q-78 -78 -145.5 -78t-107 68.5t-39.5 150.5zM274 1305l107 184h215l-162 -184h-160z" />
-<glyph unicode="&#xfb;" horiz-adv-x="690" d="M78 203v917h207v-887q0 -49 49 -49q41 0 67 54v882h207v-1120h-207v94l-31 -32q-78 -78 -145.5 -78t-107 68.5t-39.5 150.5zM94 1305l142 184h219l141 -184h-188l-64 71l-61 -71h-189z" />
-<glyph unicode="&#xfc;" horiz-adv-x="690" d="M78 203v917h207v-887q0 -49 49 -49q41 0 67 54v882h207v-1120h-207v94l-31 -32q-78 -78 -145.5 -78t-107 68.5t-39.5 150.5zM106 1305v200h191v-200h-191zM395 1305v200h191v-200h-191z" />
-<glyph unicode="&#xfd;" horiz-adv-x="634" d="M25 1120l190 -1153q0 -68 -36 -123t-97 -61l-49 4v-184q70 -4 92 -4q115 0 192.5 95t94.5 222l198 1204h-202l-82 -688l-4 -57h-9l-4 57l-82 688h-202zM231 1305l107 184h215l-162 -184h-160z" />
-<glyph unicode="&#xfe;" horiz-adv-x="686" d="M82 -385v1890h207v-458q88 90 165 90t117.5 -69t40.5 -150v-715q0 -82 -41 -150.5t-118 -68.5q-33 0 -74 22.5t-66 44.5l-24 23v-459h-207zM289 246q0 -25 19.5 -46.5t42 -21.5t39 19.5t16.5 48.5v628q0 29 -16.5 48.5t-39 19.5t-42 -19.5t-19.5 -48.5v-628z" />
-<glyph unicode="&#xff;" horiz-adv-x="634" d="M25 1120h202l82 -688l4 -57h9l4 57l82 688h202l-198 -1204q-16 -127 -94 -222t-193 -95l-92 4v184q16 -4 49 -4q61 6 97 61.5t36 122.5zM78 1305v200h190v-200h-190zM367 1305v200h190v-200h-190z" />
-<glyph unicode="&#x152;" horiz-adv-x="983" d="M68 309v887q0 41 17 101.5t45 100.5t88.5 73.5t143.5 33.5h580v-227h-285v-395h205v-242h-205v-414h285v-227h-580q-84 0 -144 31.5t-88 78.5q-55 91 -60 169zM289 309q0 -46 19.5 -78t54 -32t53 27.5t18.5 56.5l2 26v901q-6 96 -74 97q-35 0 -53 -28t-18 -54l-2 -29 v-887z" />
-<glyph unicode="&#x153;" horiz-adv-x="995" d="M63 279v563q0 40 15.5 96.5t40 95.5t80 71t145.5 32t156 -60q66 59 170 60q199 0 252 -197q14 -51 14 -92v-326h-342v-250q0 -46 22.5 -76t53.5 -30q70 10 73 113v122h193v-129q0 -37 -16.5 -93t-41 -95t-80 -69.5t-146 -30.5t-154.5 57q-68 -57 -156 -57t-143.5 30.5 t-80.5 73.5q-52 92 -52 160zM258 267.5q-2 -11.5 2 -29t10 -34.5q14 -38 58 -38q70 10 71 113v563q-2 0 0 11t-2 28.5t-10 34.5q-15 40 -59 40q-68 -10 -70 -114v-563q2 0 0 -11.5zM594 684h149v158q0 48 -19 81t [...]
-<glyph unicode="&#x178;" horiz-adv-x="704" d="M16 1505h217l111 -481l6 -14h4l6 14l111 481h217l-225 -864v-641h-221v641zM113 1638v201h190v-201h-190zM401 1638v201h191v-201h-191z" />
-<glyph unicode="&#x2c6;" horiz-adv-x="1021" d="M260 1305l141 184h220l141 -184h-189l-63 71l-61 -71h-189z" />
-<glyph unicode="&#x2dc;" horiz-adv-x="1024" d="M313 1305v151q49 39 95.5 39t104.5 -18.5t97 -19.5t101 32v-152q-51 -39 -95.5 -39t-102.5 19.5t-99 19.5t-101 -32z" />
-<glyph unicode="&#x2000;" horiz-adv-x="952" />
-<glyph unicode="&#x2001;" horiz-adv-x="1905" />
-<glyph unicode="&#x2002;" horiz-adv-x="952" />
-<glyph unicode="&#x2003;" horiz-adv-x="1905" />
-<glyph unicode="&#x2004;" horiz-adv-x="635" />
-<glyph unicode="&#x2005;" horiz-adv-x="476" />
-<glyph unicode="&#x2006;" horiz-adv-x="317" />
-<glyph unicode="&#x2007;" horiz-adv-x="317" />
-<glyph unicode="&#x2008;" horiz-adv-x="238" />
-<glyph unicode="&#x2009;" horiz-adv-x="381" />
-<glyph unicode="&#x200a;" horiz-adv-x="105" />
-<glyph unicode="&#x2010;" horiz-adv-x="444" d="M74 455v194h297v-194h-297z" />
-<glyph unicode="&#x2011;" horiz-adv-x="444" d="M74 455v194h297v-194h-297z" />
-<glyph unicode="&#x2012;" horiz-adv-x="444" d="M74 455v194h297v-194h-297z" />
-<glyph unicode="&#x2013;" horiz-adv-x="806" d="M74 649v195h659v-195h-659z" />
-<glyph unicode="&#x2014;" horiz-adv-x="972" d="M74 649v195h825v-195h-825z" />
-<glyph unicode="&#x2018;" horiz-adv-x="309" d="M49 1012v227l113 266h102l-71 -266h71v-227h-215z" />
-<glyph unicode="&#x2019;" horiz-adv-x="309" d="M45 1012l72 266h-72v227h215v-227l-113 -266h-102z" />
-<glyph unicode="&#x201a;" horiz-adv-x="309" d="M45 0v227h215v-227l-113 -266h-102l72 266h-72z" />
-<glyph unicode="&#x201c;" horiz-adv-x="624" d="M53 1012v227l113 266h102l-71 -266h71v-227h-215zM356 1012v227l113 266h102l-71 -266h71v-227h-215z" />
-<glyph unicode="&#x201d;" horiz-adv-x="624" d="M53 1012l72 266h-72v227h215v-227l-112 -266h-103zM356 1012l72 266h-72v227h215v-227l-112 -266h-103z" />
-<glyph unicode="&#x201e;" horiz-adv-x="624" d="M53 0v227h215v-227l-112 -266h-103l72 266h-72zM356 0v227h215v-227l-112 -266h-103l72 266h-72z" />
-<glyph unicode="&#x2022;" horiz-adv-x="663" d="M82 815q0 104 72.5 177t177 73t177.5 -72.5t73 -177t-73 -177.5t-177 -73t-177 73t-73 177z" />
-<glyph unicode="&#x2026;" horiz-adv-x="964" d="M53 0v227h215v-227h-215zM375 0v227h215v-227h-215zM696 0v227h215v-227h-215z" />
-<glyph unicode="&#x202f;" horiz-adv-x="381" />
-<glyph unicode="&#x2039;" horiz-adv-x="1058" d="M74 649v160l911 475v-199l-698 -356l698 -356v-199z" />
-<glyph unicode="&#x203a;" horiz-adv-x="1058" d="M74 174v199l698 356l-698 356v199l911 -475v-160z" />
-<glyph unicode="&#x205f;" horiz-adv-x="476" />
-<glyph unicode="&#x20ac;" horiz-adv-x="813" d="M53 547v137h107v137h-107v137h107v238q0 42 17.5 106t45 107t88 78t144.5 35t144 -34t88 -81q53 -90 61 -178l2 -33v-84h-207v84q-2 0 0 11.5t-3 27.5t-12 33q-18 39 -69 39q-70 -10 -78 -111v-238h233v-137h-233v-137h233v-137h-233v-238q0 -43 21.5 -76.5t59.5 -33.5 t58.5 27.5t20.5 56.5l2 26v84h207v-84q0 -38 -17.5 -104t-45.5 -109t-88 -77.5t-144 -34.5t-144.5 33.5t-88.5 81.5q-55 97 -60 175l-2 35v238h-107z" />
-<glyph unicode="&#x2122;" horiz-adv-x="937" d="M74 1401v104h321v-104h-104v-580h-113v580h-104zM440 821v684h138l67 -319h6l68 319h137v-684h-104v449l-78 -449h-51l-80 449v-449h-103z" />
-<glyph unicode="&#xe000;" horiz-adv-x="1120" d="M0 0v1120h1120v-1120h-1120z" />
-<glyph unicode="&#xfb01;" horiz-adv-x="772" d="M20 934v186h105v31q0 172 31 231q16 31 42 67q53 71 181 71q59 0 127 -13l20 -2v-184q-41 12 -91 12t-69.5 -18.5t-25.5 -58.5q-8 -52 -8 -107v-29h358v-1120h-207v934h-151v-934h-207v934h-105z" />
-<glyph unicode="&#xfb02;" horiz-adv-x="772" d="M20 934v186h105v31q0 172 31 231q16 31 42 67q53 71 181 71q59 0 127 -13l20 -2h164v-1505h-207v1329q-37 4 -67.5 4t-50 -18.5t-25.5 -58.5q-8 -52 -8 -107v-29h104v-186h-104v-934h-207v934h-105z" />
-<glyph unicode="&#xfb03;" horiz-adv-x="1320" d="M20 934v186h105v31q0 190 51 270q23 35 71 63.5t115 28.5l97 -14v-178q-27 8 -62 8q-66 0 -65 -180v-29h104v-186h-104v-934h-207v934h-105zM495 934v186h105v31q0 190 51 270q23 35 71 63.5t115 28.5l97 -14v-178q-27 8 -62 8q-66 0 -65 -180v-29h104v-186h-104v-934 h-207v934h-105zM1032 0v1120h207v-1120h-207zM1032 1298v207h207v-207h-207z" />
-<glyph unicode="&#xfb04;" horiz-adv-x="1320" d="M20 934v186h105v31q0 190 51 270q23 35 71 63.5t115 28.5l97 -14v-178q-27 8 -62 8q-66 0 -65 -180v-29h104v-186h-104v-934h-207v934h-105zM495 934v186h105v31q0 190 51 270q23 35 71 63.5t115 28.5l97 -14v-178q-27 8 -62 8q-66 0 -65 -180v-29h104v-186h-104v-934 h-207v934h-105zM1032 0v1505h207v-1505h-207z" />
-</font>
-</defs></svg> 
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/lib/font/league_gothic-webfont.ttf b/uflacs-merge-into-ffc/doc/roadmap/lib/font/league_gothic-webfont.ttf
deleted file mode 100644
index 29f896a..0000000
Binary files a/uflacs-merge-into-ffc/doc/roadmap/lib/font/league_gothic-webfont.ttf and /dev/null differ
diff --git a/uflacs-merge-into-ffc/doc/roadmap/lib/font/league_gothic-webfont.woff b/uflacs-merge-into-ffc/doc/roadmap/lib/font/league_gothic-webfont.woff
deleted file mode 100644
index 71117fb..0000000
Binary files a/uflacs-merge-into-ffc/doc/roadmap/lib/font/league_gothic-webfont.woff and /dev/null differ
diff --git a/uflacs-merge-into-ffc/doc/roadmap/lib/font/league_gothic_license b/uflacs-merge-into-ffc/doc/roadmap/lib/font/league_gothic_license
deleted file mode 100644
index 29513e9..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/lib/font/league_gothic_license
+++ /dev/null
@@ -1,2 +0,0 @@
-SIL Open Font License (OFL)
-http://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&id=OFL
diff --git a/uflacs-merge-into-ffc/doc/roadmap/lib/js/classList.js b/uflacs-merge-into-ffc/doc/roadmap/lib/js/classList.js
deleted file mode 100644
index 44f2b4c..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/lib/js/classList.js
+++ /dev/null
@@ -1,2 +0,0 @@
-/*! @source http://purl.eligrey.com/github/classList.js/blob/master/classList.js*/
-if(typeof document!=="undefined"&&!("classList" in document.createElement("a"))){(function(j){var a="classList",f="prototype",m=(j.HTMLElement||j.Element)[f],b=Object,k=String[f].trim||function(){return this.replace(/^\s+|\s+$/g,"")},c=Array[f].indexOf||function(q){var p=0,o=this.length;for(;p<o;p++){if(p in this&&this[p]===q){return p}}return -1},n=function(o,p){this.name=o;this.code=DOMException[o];this.message=p},g=function(p,o){if(o===""){throw new n("SYNTAX_ERR","An invalid or illeg [...]
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/lib/js/head.min.js b/uflacs-merge-into-ffc/doc/roadmap/lib/js/head.min.js
deleted file mode 100644
index 6242b0f..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/lib/js/head.min.js
+++ /dev/null
@@ -1,8 +0,0 @@
-/**
-    Head JS     The only script in your <HEAD>
-    Copyright   Tero Piirainen (tipiirai)
-    License     MIT / http://bit.ly/mit-license
-    Version     0.96
-
-    http://headjs.com
-*/(function(a){function z(){d||(d=!0,s(e,function(a){p(a)}))}function y(c,d){var e=a.createElement("script");e.type="text/"+(c.type||"javascript"),e.src=c.src||c,e.async=!1,e.onreadystatechange=e.onload=function(){var a=e.readyState;!d.done&&(!a||/loaded|complete/.test(a))&&(d.done=!0,d())},(a.body||b).appendChild(e)}function x(a,b){if(a.state==o)return b&&b();if(a.state==n)return k.ready(a.name,b);if(a.state==m)return a.onpreload.push(function(){x(a,b)});a.state=n,y(a.url,function(){a.s [...]
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/lib/js/html5shiv.js b/uflacs-merge-into-ffc/doc/roadmap/lib/js/html5shiv.js
deleted file mode 100644
index 50649b9..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/lib/js/html5shiv.js
+++ /dev/null
@@ -1,7 +0,0 @@
-document.createElement('header');
-document.createElement('nav');
-document.createElement('section');
-document.createElement('article');
-document.createElement('aside');
-document.createElement('footer');
-document.createElement('hgroup');
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/highlight/highlight.js b/uflacs-merge-into-ffc/doc/roadmap/plugin/highlight/highlight.js
deleted file mode 100644
index 3e6b894..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/highlight/highlight.js
+++ /dev/null
@@ -1,32 +0,0 @@
-// START CUSTOM REVEAL.JS INTEGRATION
-(function() {
-	if( typeof window.addEventListener === 'function' ) {
-		var hljs_nodes = document.querySelectorAll( 'pre code' );
-
-		for( var i = 0, len = hljs_nodes.length; i < len; i++ ) {
-			var element = hljs_nodes[i];
-
-			// trim whitespace if data-trim attribute is present
-			if( element.hasAttribute( 'data-trim' ) && typeof element.innerHTML.trim === 'function' ) {
-				element.innerHTML = element.innerHTML.trim();
-			}
-
-			// Now escape html unless prevented by author
-			if( ! element.hasAttribute( 'data-noescape' )) {
-				element.innerHTML = element.innerHTML.replace(/</g,"<").replace(/>/g,">");
-			}
-
-			// re-highlight when focus is lost (for edited code)
-			element.addEventListener( 'focusout', function( event ) {
-				hljs.highlightBlock( event.currentTarget );
-			}, false );
-		}
-	}
-})();
-// END CUSTOM REVEAL.JS INTEGRATION
-
-// highlight.js build includes support for:
-// All languages in master + fsharp
-
-
-var hljs=new function(){function l(o){return o.replace(/&/gm,"&").replace(/</gm,"<").replace(/>/gm,">")}function b(p){for(var o=p.firstChild;o;o=o.nextSibling){if(o.nodeName=="CODE"){return o}if(!(o.nodeType==3&&o.nodeValue.match(/\s+/))){break}}}function h(p,o){return Array.prototype.map.call(p.childNodes,function(q){if(q.nodeType==3){return o?q.nodeValue.replace(/\n/g,""):q.nodeValue}if(q.nodeName=="BR"){return"\n"}return h(q,o)}).join("")}function a(q){var p=(q.className+" " [...]
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/leap/leap.js b/uflacs-merge-into-ffc/doc/roadmap/plugin/leap/leap.js
deleted file mode 100644
index 9d5271a..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/leap/leap.js
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Copyright (c) 2013, Leap Motion, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR P [...]
- *
- * Version 0.2.0 - http://js.leapmotion.com/0.2.0/leap.min.js
- * Grab latest versions from http://js.leapmotion.com/
- */
-
-!function(e,t,n){function i(n,s){if(!t[n]){if(!e[n]){var o=typeof require=="function"&&require;if(!s&&o)return o(n,!0);if(r)return r(n,!0);throw new Error("Cannot find module '"+n+"'")}var u=t[n]={exports:{}};e[n][0].call(u.exports,function(t){var r=e[n][1][t];return i(r?r:t)},u,u.exports)}return t[n].exports}var r=typeof require=="function"&&require;for(var s=0;s<n.length;s++)i(n[s]);return i}({1:[function(require,module,exports){var chooseProtocol=require("./protocol").chooseProtocol,E [...]
-out[1]=Math.min(a[1],b[1]);return out};vec2.max=function(out,a,b){out[0]=Math.max(a[0],b[0]);out[1]=Math.max(a[1],b[1]);return out};vec2.scale=function(out,a,b){out[0]=a[0]*b;out[1]=a[1]*b;return out};vec2.dist=vec2.distance=function(a,b){var x=b[0]-a[0],y=b[1]-a[1];return Math.sqrt(x*x+y*y)};vec2.sqrDist=vec2.squaredDistance=function(a,b){var x=b[0]-a[0],y=b[1]-a[1];return x*x+y*y};vec2.len=vec2.length=function(a){var x=a[0],y=a[1];return Math.sqrt(x*x+y*y)};vec2.sqrLen=vec2.squaredLeng [...]
-var b=right.criteria;if(a!==b){if(a>b||a===void 0)return 1;if(a<b||b===void 0)return-1}return left.index<right.index?-1:1}),"value")};var group=function(obj,value,context,behavior){var result={};var iterator=lookupIterator(value||_.identity);each(obj,function(value,index){var key=iterator.call(context,value,index,obj);behavior(result,key,value)});return result};_.groupBy=function(obj,value,context){return group(obj,value,context,function(result,key,value){(_.has(result,key)?result[key]:r [...]
-
-/*
- * Leap Motion integration for Reveal.js.
- * James Sun  [sun16]
- * Rory Hardy [gneatgeek]
- */
-
-(function () {
-  var body        = document.body,
-      controller  = new Leap.Controller({ enableGestures: true }),
-      lastGesture = 0,
-      leapConfig  = Reveal.getConfig().leap,
-      pointer     = document.createElement( 'div' ),
-      config      = {
-        autoCenter       : true,      // Center pointer around detected position.
-        gestureDelay     : 500,       // How long to delay between gestures.
-        naturalSwipe     : true,      // Swipe as if it were a touch screen.
-        pointerColor     : '#00aaff', // Default color of the pointer.
-        pointerOpacity   : 0.7,       // Default opacity of the pointer.
-        pointerSize      : 15,        // Default minimum height/width of the pointer.
-        pointerTolerance : 120        // Bigger = slower pointer.
-      },
-      entered, enteredPosition, now, size, tipPosition; // Other vars we need later, but don't need to redeclare.
-
-      // Merge user defined settings with defaults
-      if( leapConfig ) {
-        for( key in leapConfig ) {
-          config[key] = leapConfig[key];
-        }
-      }
-
-      pointer.id = 'leap';
-
-      pointer.style.position        = 'absolute';
-      pointer.style.visibility      = 'hidden';
-      pointer.style.zIndex          = 50;
-      pointer.style.opacity         = config.pointerOpacity;
-      pointer.style.backgroundColor = config.pointerColor;
-
-      body.appendChild( pointer );
-
-  // Leap's loop
-  controller.on( 'frame', function ( frame ) {
-    // Timing code to rate limit gesture execution
-    now = new Date().getTime();
-
-    // Pointer: 1 to 2 fingers. Strictly one finger works but may cause innaccuracies.
-    // The innaccuracies were observed on a development model and may not be an issue with consumer models.
-    if( frame.fingers.length > 0 && frame.fingers.length < 3 ) {
-      // Invert direction and multiply by 3 for greater effect.
-      size = -3 * frame.fingers[0].tipPosition[2];
-
-      if( size < config.pointerSize ) {
-        size = config.pointerSize;
-      }
-
-      pointer.style.width        = size     + 'px';
-      pointer.style.height       = size     + 'px';
-      pointer.style.borderRadius = size - 5 + 'px';
-      pointer.style.visibility   = 'visible';
-
-      if( config.autoCenter ) {
-        tipPosition = frame.fingers[0].tipPosition;
-
-        // Check whether the finger has entered the z range of the Leap Motion. Used for the autoCenter option.
-        if( !entered ) {
-          entered         = true;
-          enteredPosition = frame.fingers[0].tipPosition;
-        }
-
-        pointer.style.top =
-          (-1 * (( tipPosition[1] - enteredPosition[1] ) * body.offsetHeight / config.pointerTolerance )) +
-            ( body.offsetHeight / 2 ) + 'px';
-
-        pointer.style.left =
-          (( tipPosition[0] - enteredPosition[0] ) * body.offsetWidth / config.pointerTolerance ) +
-            ( body.offsetWidth / 2 ) + 'px';
-      }
-      else {
-        pointer.style.top  = ( 1 - (( tipPosition[1] - 50) / config.pointerTolerance )) *
-          body.offsetHeight + 'px';
-
-        pointer.style.left = ( tipPosition[0] * body.offsetWidth / config.pointerTolerance ) +
-          ( body.offsetWidth / 2 ) + 'px';
-      }
-    }
-    else {
-      // Hide pointer on exit
-      entered                  = false;
-      pointer.style.visibility = 'hidden';
-    }
-
-    // Gestures
-    if( frame.gestures.length > 0 && (now - lastGesture) > config.gestureDelay ) {
-      var gesture = frame.gestures[0];
-
-      // One hand gestures
-      if( frame.hands.length === 1 ) {
-        // Swipe gestures. 3+ fingers.
-        if( frame.fingers.length > 2 && gesture.type === 'swipe' ) {
-          // Define here since some gestures will throw undefined for these.
-          var x = gesture.direction[0],
-              y = gesture.direction[1];
-
-          // Left/right swipe gestures
-          if( Math.abs( x ) > Math.abs( y )) {
-            if( x > 0 ) {
-              config.naturalSwipe ? Reveal.left() : Reveal.right();
-            }
-            else {
-              config.naturalSwipe ? Reveal.right() : Reveal.left();
-            }
-          }
-          // Up/down swipe gestures
-          else {
-            if( y > 0 ) {
-              config.naturalSwipe ? Reveal.down() : Reveal.up();
-            }
-            else {
-              config.naturalSwipe ? Reveal.up() : Reveal.down();
-            }
-          }
-
-          lastGesture = now;
-        }
-      }
-      // Two hand gestures
-      else if( frame.hands.length === 2 ) {
-        // Upward two hand swipe gesture
-        if( gesture.direction[1] > 0 && gesture.type === 'swipe' ) {
-          Reveal.toggleOverview();
-        }
-
-        lastGesture = now;
-      }
-    }
-  });
-
-  controller.connect();
-})();
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/markdown/example.html b/uflacs-merge-into-ffc/doc/roadmap/plugin/markdown/example.html
deleted file mode 100644
index 909639f..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/markdown/example.html
+++ /dev/null
@@ -1,129 +0,0 @@
-<!doctype html>
-<html lang="en">
-
-	<head>
-		<meta charset="utf-8">
-
-		<title>reveal.js - Markdown Demo</title>
-
-		<link rel="stylesheet" href="../../css/reveal.css">
-		<link rel="stylesheet" href="../../css/theme/default.css" id="theme">
-
-        <link rel="stylesheet" href="../../lib/css/zenburn.css">
-	</head>
-
-	<body>
-
-		<div class="reveal">
-
-			<div class="slides">
-
-                <!-- Use external markdown resource, separate slides by three newlines; vertical slides by two newlines -->
-                <section data-markdown="example.md" data-separator="^\n\n\n" data-vertical="^\n\n"></section>
-
-                <!-- Slides are separated by three dashes (quick 'n dirty regular expression) -->
-                <section data-markdown data-separator="---">
-                    <script type="text/template">
-                        ## Demo 1
-                        Slide 1
-                        ---
-                        ## Demo 1
-                        Slide 2
-                        ---
-                        ## Demo 1
-                        Slide 3
-                    </script>
-                </section>
-
-                <!-- Slides are separated by newline + three dashes + newline, vertical slides identical but two dashes -->
-                <section data-markdown data-separator="^\n---\n$" data-vertical="^\n--\n$">
-                    <script type="text/template">
-                        ## Demo 2
-                        Slide 1.1
-
-                        --
-
-                        ## Demo 2
-                        Slide 1.2
-
-                        ---
-
-                        ## Demo 2
-                        Slide 2
-                    </script>
-                </section>
-
-                <!-- No "extra" slides, since there are no separators defined (so they'll become horizontal rulers) -->
-                <section data-markdown>
-                    <script type="text/template">
-                        A
-
-                        ---
-
-                        B
-
-                        ---
-
-                        C
-                    </script>
-                </section>
-
-                <!-- Slide attributes -->
-                <section data-markdown>
-                    <script type="text/template">
-                        <!-- .slide: data-background="#000000" -->
-                        ## Slide attributes
-                    </script>
-                </section>
-
-                <!-- Element attributes -->
-                <section data-markdown>
-                    <script type="text/template">
-                        ## Element attributes
-                        - Item 1 <!-- .element: class="fragment" data-fragment-index="2" -->
-                        - Item 2 <!-- .element: class="fragment" data-fragment-index="1" -->
-                    </script>
-                </section>
-
-                <!-- Code -->
-                <section data-markdown>
-                    <script type="text/template">
-                        ```php
-                        public function foo()
-                        {
-                            $foo = array(
-                                'bar' => 'bar'
-                            )
-                        }
-                        ```
-                    </script>
-                </section>
-
-            </div>
-		</div>
-
-		<script src="../../lib/js/head.min.js"></script>
-		<script src="../../js/reveal.js"></script>
-
-		<script>
-
-			Reveal.initialize({
-				controls: true,
-				progress: true,
-				history: true,
-				center: true,
-
-				// Optional libraries used to extend on reveal.js
-				dependencies: [
-					{ src: '../../lib/js/classList.js', condition: function() { return !document.body.classList; } },
-					{ src: 'marked.js', condition: function() { return !!document.querySelector( '[data-markdown]' ); } },
-                    { src: 'markdown.js', condition: function() { return !!document.querySelector( '[data-markdown]' ); } },
-                    { src: '../highlight/highlight.js', async: true, callback: function() { hljs.initHighlightingOnLoad(); } },
-					{ src: '../notes/notes.js' }
-				]
-			});
-
-		</script>
-
-	</body>
-</html>
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/markdown/example.md b/uflacs-merge-into-ffc/doc/roadmap/plugin/markdown/example.md
deleted file mode 100644
index 6f6f577..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/markdown/example.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Markdown Demo
-
-
-
-## External 1.1
-
-Content 1.1
-
-Note: This will only appear in the speaker notes window.
-
-
-## External 1.2
-
-Content 1.2
-
-
-
-## External 2
-
-Content 2.1
-
-
-
-## External 3.1
-
-Content 3.1
-
-
-## External 3.2
-
-Content 3.2
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/markdown/markdown.js b/uflacs-merge-into-ffc/doc/roadmap/plugin/markdown/markdown.js
deleted file mode 100755
index 19aea28..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/markdown/markdown.js
+++ /dev/null
@@ -1,392 +0,0 @@
-/**
- * The reveal.js markdown plugin. Handles parsing of
- * markdown inside of presentations as well as loading
- * of external markdown documents.
- */
-(function( root, factory ) {
-	if( typeof exports === 'object' ) {
-		module.exports = factory( require( './marked' ) );
-	}
-	else {
-		// Browser globals (root is window)
-		root.RevealMarkdown = factory( root.marked );
-		root.RevealMarkdown.initialize();
-	}
-}( this, function( marked ) {
-
-	if( typeof marked === 'undefined' ) {
-		throw 'The reveal.js Markdown plugin requires marked to be loaded';
-	}
-
-	if( typeof hljs !== 'undefined' ) {
-		marked.setOptions({
-			highlight: function( lang, code ) {
-				return hljs.highlightAuto( lang, code ).value;
-			}
-		});
-	}
-
-	var DEFAULT_SLIDE_SEPARATOR = '^\n---\n$',
-		DEFAULT_NOTES_SEPARATOR = 'note:',
-		DEFAULT_ELEMENT_ATTRIBUTES_SEPARATOR = '\\\.element\\\s*?(.+?)$',
-		DEFAULT_SLIDE_ATTRIBUTES_SEPARATOR = '\\\.slide:\\\s*?(\\\S.+?)$';
-
-
-	/**
-	 * Retrieves the markdown contents of a slide section
-	 * element. Normalizes leading tabs/whitespace.
-	 */
-	function getMarkdownFromSlide( section ) {
-
-		var template = section.querySelector( 'script' );
-
-		// strip leading whitespace so it isn't evaluated as code
-		var text = ( template || section ).textContent;
-
-		var leadingWs = text.match( /^\n?(\s*)/ )[1].length,
-			leadingTabs = text.match( /^\n?(\t*)/ )[1].length;
-
-		if( leadingTabs > 0 ) {
-			text = text.replace( new RegExp('\\n?\\t{' + leadingTabs + '}','g'), '\n' );
-		}
-		else if( leadingWs > 1 ) {
-			text = text.replace( new RegExp('\\n? {' + leadingWs + '}','g'), '\n' );
-		}
-
-		return text;
-
-	}
-
-	/**
-	 * Given a markdown slide section element, this will
-	 * return all arguments that aren't related to markdown
-	 * parsing. Used to forward any other user-defined arguments
-	 * to the output markdown slide.
-	 */
-	function getForwardedAttributes( section ) {
-
-		var attributes = section.attributes;
-		var result = [];
-
-		for( var i = 0, len = attributes.length; i < len; i++ ) {
-			var name = attributes[i].name,
-				value = attributes[i].value;
-
-			// disregard attributes that are used for markdown loading/parsing
-			if( /data\-(markdown|separator|vertical|notes)/gi.test( name ) ) continue;
-
-			if( value ) {
-				result.push( name + '=' + value );
-			}
-			else {
-				result.push( name );
-			}
-		}
-
-		return result.join( ' ' );
-
-	}
-
-	/**
-	 * Inspects the given options and fills out default
-	 * values for what's not defined.
-	 */
-	function getSlidifyOptions( options ) {
-
-		options = options || {};
-		options.separator = options.separator || DEFAULT_SLIDE_SEPARATOR;
-		options.notesSeparator = options.notesSeparator || DEFAULT_NOTES_SEPARATOR;
-		options.attributes = options.attributes || '';
-
-		return options;
-
-	}
-
-	/**
-	 * Helper function for constructing a markdown slide.
-	 */
-	function createMarkdownSlide( content, options ) {
-
-		options = getSlidifyOptions( options );
-
-		var notesMatch = content.split( new RegExp( options.notesSeparator, 'mgi' ) );
-
-		if( notesMatch.length === 2 ) {
-			content = notesMatch[0] + '<aside class="notes" data-markdown>' + notesMatch[1].trim() + '</aside>';
-		}
-
-		return '<script type="text/template">' + content + '</script>';
-
-	}
-
-	/**
-	 * Parses a data string into multiple slides based
-	 * on the passed in separator arguments.
-	 */
-	function slidify( markdown, options ) {
-
-		options = getSlidifyOptions( options );
-
-		var separatorRegex = new RegExp( options.separator + ( options.verticalSeparator ? '|' + options.verticalSeparator : '' ), 'mg' ),
-			horizontalSeparatorRegex = new RegExp( options.separator );
-
-		var matches,
-			lastIndex = 0,
-			isHorizontal,
-			wasHorizontal = true,
-			content,
-			sectionStack = [];
-
-		// iterate until all blocks between separators are stacked up
-		while( matches = separatorRegex.exec( markdown ) ) {
-			notes = null;
-
-			// determine direction (horizontal by default)
-			isHorizontal = horizontalSeparatorRegex.test( matches[0] );
-
-			if( !isHorizontal && wasHorizontal ) {
-				// create vertical stack
-				sectionStack.push( [] );
-			}
-
-			// pluck slide content from markdown input
-			content = markdown.substring( lastIndex, matches.index );
-
-			if( isHorizontal && wasHorizontal ) {
-				// add to horizontal stack
-				sectionStack.push( content );
-			}
-			else {
-				// add to vertical stack
-				sectionStack[sectionStack.length-1].push( content );
-			}
-
-			lastIndex = separatorRegex.lastIndex;
-			wasHorizontal = isHorizontal;
-		}
-
-		// add the remaining slide
-		( wasHorizontal ? sectionStack : sectionStack[sectionStack.length-1] ).push( markdown.substring( lastIndex ) );
-
-		var markdownSections = '';
-
-		// flatten the hierarchical stack, and insert <section data-markdown> tags
-		for( var i = 0, len = sectionStack.length; i < len; i++ ) {
-			// vertical
-			if( sectionStack[i] instanceof Array ) {
-				markdownSections += '<section '+ options.attributes +'>';
-
-				sectionStack[i].forEach( function( child ) {
-					markdownSections += '<section data-markdown>' +  createMarkdownSlide( child, options ) + '</section>';
-				} );
-
-				markdownSections += '</section>';
-			}
-			else {
-				markdownSections += '<section '+ options.attributes +' data-markdown>' + createMarkdownSlide( sectionStack[i], options ) + '</section>';
-			}
-		}
-
-		return markdownSections;
-
-	}
-
-	/**
-	 * Parses any current data-markdown slides, splits
-	 * multi-slide markdown into separate sections and
-	 * handles loading of external markdown.
-	 */
-	function processSlides() {
-
-		var sections = document.querySelectorAll( '[data-markdown]'),
-			section;
-
-		for( var i = 0, len = sections.length; i < len; i++ ) {
-
-			section = sections[i];
-
-			if( section.getAttribute( 'data-markdown' ).length ) {
-
-				var xhr = new XMLHttpRequest(),
-					url = section.getAttribute( 'data-markdown' );
-
-				datacharset = section.getAttribute( 'data-charset' );
-
-				// see https://developer.mozilla.org/en-US/docs/Web/API/element.getAttribute#Notes
-				if( datacharset != null && datacharset != '' ) {
-					xhr.overrideMimeType( 'text/html; charset=' + datacharset );
-				}
-
-				xhr.onreadystatechange = function() {
-					if( xhr.readyState === 4 ) {
-						if ( xhr.status >= 200 && xhr.status < 300 ) {
-
-							section.outerHTML = slidify( xhr.responseText, {
-								separator: section.getAttribute( 'data-separator' ),
-								verticalSeparator: section.getAttribute( 'data-vertical' ),
-								notesSeparator: section.getAttribute( 'data-notes' ),
-								attributes: getForwardedAttributes( section )
-							});
-
-						}
-						else {
-
-							section.outerHTML = '<section data-state="alert">' +
-								'ERROR: The attempt to fetch ' + url + ' failed with HTTP status ' + xhr.status + '.' +
-								'Check your browser\'s JavaScript console for more details.' +
-								'<p>Remember that you need to serve the presentation HTML from a HTTP server.</p>' +
-								'</section>';
-
-						}
-					}
-				};
-
-				xhr.open( 'GET', url, false );
-
-				try {
-					xhr.send();
-				}
-				catch ( e ) {
-					alert( 'Failed to get the Markdown file ' + url + '. Make sure that the presentation and the file are served by a HTTP server and the file can be found there. ' + e );
-				}
-
-			}
-			else if( section.getAttribute( 'data-separator' ) || section.getAttribute( 'data-vertical' ) || section.getAttribute( 'data-notes' ) ) {
-
-				section.outerHTML = slidify( getMarkdownFromSlide( section ), {
-					separator: section.getAttribute( 'data-separator' ),
-					verticalSeparator: section.getAttribute( 'data-vertical' ),
-					notesSeparator: section.getAttribute( 'data-notes' ),
-					attributes: getForwardedAttributes( section )
-				});
-
-			}
-			else {
-				section.innerHTML = createMarkdownSlide( getMarkdownFromSlide( section ) );
-			}
-		}
-
-	}
-
-	/**
-	 * Check if a node value has the attributes pattern.
-	 * If yes, extract it and add that value as one or several attributes
-	 * the the terget element.
-	 *
-	 * You need Cache Killer on Chrome to see the effect on any FOM transformation
-	 * directly on refresh (F5)
-	 * http://stackoverflow.com/questions/5690269/disabling-chrome-cache-for-website-development/7000899#answer-11786277
-	 */
-	function addAttributeInElement( node, elementTarget, separator ) {
-
-		var mardownClassesInElementsRegex = new RegExp( separator, 'mg' );
-		var mardownClassRegex = new RegExp( "([^\"= ]+?)=\"([^\"=]+?)\"", 'mg' );
-		var nodeValue = node.nodeValue;
-		if( matches = mardownClassesInElementsRegex.exec( nodeValue ) ) {
-
-			var classes = matches[1];
-			nodeValue = nodeValue.substring( 0, matches.index ) + nodeValue.substring( mardownClassesInElementsRegex.lastIndex );
-			node.nodeValue = nodeValue;
-			while( matchesClass = mardownClassRegex.exec( classes ) ) {
-				elementTarget.setAttribute( matchesClass[1], matchesClass[2] );
-			}
-			return true;
-		}
-		return false;
-	}
-
-	/**
-	 * Add attributes to the parent element of a text node,
-	 * or the element of an attribute node.
-	 */
-	function addAttributes( section, element, previousElement, separatorElementAttributes, separatorSectionAttributes ) {
-
-		if ( element != null && element.childNodes != undefined && element.childNodes.length > 0 ) {
-			previousParentElement = element;
-			for( var i = 0; i < element.childNodes.length; i++ ) {
-				childElement = element.childNodes[i];
-				if ( i > 0 ) {
-					j = i - 1;
-					while ( j >= 0 ) {
-						aPreviousChildElement = element.childNodes[j];
-						if ( typeof aPreviousChildElement.setAttribute == 'function' && aPreviousChildElement.tagName != "BR" ) {
-							previousParentElement = aPreviousChildElement;
-							break;
-						}
-						j = j - 1;
-					}
-				}
-				parentSection = section;
-				if( childElement.nodeName ==  "section" ) {
-					parentSection = childElement ;
-					previousParentElement = childElement ;
-				}
-				if ( typeof childElement.setAttribute == 'function' || childElement.nodeType == Node.COMMENT_NODE ) {
-					addAttributes( parentSection, childElement, previousParentElement, separatorElementAttributes, separatorSectionAttributes );
-				}
-			}
-		}
-
-		if ( element.nodeType == Node.COMMENT_NODE ) {
-			if ( addAttributeInElement( element, previousElement, separatorElementAttributes ) == false ) {
-				addAttributeInElement( element, section, separatorSectionAttributes );
-			}
-		}
-	}
-
-	/**
-	 * Converts any current data-markdown slides in the
-	 * DOM to HTML.
-	 */
-	function convertSlides() {
-
-		var sections = document.querySelectorAll( '[data-markdown]');
-
-		for( var i = 0, len = sections.length; i < len; i++ ) {
-
-			var section = sections[i];
-
-			// Only parse the same slide once
-			if( !section.getAttribute( 'data-markdown-parsed' ) ) {
-
-				section.setAttribute( 'data-markdown-parsed', true )
-
-				var notes = section.querySelector( 'aside.notes' );
-				var markdown = getMarkdownFromSlide( section );
-
-				section.innerHTML = marked( markdown );
-				addAttributes( 	section, section, null, section.getAttribute( 'data-element-attributes' ) ||
-								section.parentNode.getAttribute( 'data-element-attributes' ) ||
-								DEFAULT_ELEMENT_ATTRIBUTES_SEPARATOR,
-								section.getAttribute( 'data-attributes' ) ||
-								section.parentNode.getAttribute( 'data-attributes' ) ||
-								DEFAULT_SLIDE_ATTRIBUTES_SEPARATOR);
-
-				// If there were notes, we need to re-add them after
-				// having overwritten the section's HTML
-				if( notes ) {
-					section.appendChild( notes );
-				}
-
-			}
-
-		}
-
-	}
-
-	// API
-	return {
-
-		initialize: function() {
-			processSlides();
-			convertSlides();
-		},
-
-		// TODO: Do these belong in the API?
-		processSlides: processSlides,
-		convertSlides: convertSlides,
-		slidify: slidify
-
-	};
-
-}));
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/markdown/marked.js b/uflacs-merge-into-ffc/doc/roadmap/plugin/markdown/marked.js
deleted file mode 100644
index ca558fb..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/markdown/marked.js
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * marked - a markdown parser
- * Copyright (c) 2011-2013, Christopher Jeffrey. (MIT Licensed)
- * https://github.com/chjj/marked
- */
-
-(function(){var block={newline:/^\n+/,code:/^( {4}[^\n]+\n*)+/,fences:noop,hr:/^( *[-*_]){3,} *(?:\n+|$)/,heading:/^ *(#{1,6}) *([^\n]+?) *#* *(?:\n+|$)/,nptable:noop,lheading:/^([^\n]+)\n *(=|-){3,} *\n*/,blockquote:/^( *>[^\n]+(\n[^\n]+)*\n*)+/,list:/^( *)(bull) [\s\S]+?(?:hr|\n{2,}(?! )(?!\1bull )\n*|\s*$)/,html:/^ *(?:comment|closed|closing) *(?:\n{2,}|\s*$)/,def:/^ *\[([^\]]+)\]: *<?([^\s>]+)>?(?: +["(]([^\n]+)[")])? *(?:\n+|$)/,table:noop,paragraph:/^((?:[^\n]+\n?(?!hr|heading|lhea [...]
-text:/^[^\n]+/};block.bullet=/(?:[*+-]|\d+\.)/;block.item=/^( *)(bull) [^\n]*(?:\n(?!\1bull )[^\n]*)*/;block.item=replace(block.item,"gm")(/bull/g,block.bullet)();block.list=replace(block.list)(/bull/g,block.bullet)("hr",/\n+(?=(?: *[-*_]){3,} *(?:\n+|$))/)();block._tag="(?!(?:"+"a|em|strong|small|s|cite|q|dfn|abbr|data|time|code"+"|var|samp|kbd|sub|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo"+"|span|br|wbr|ins|del|img)\\b)\\w+(?!:/|@)\\b";block.html=replace(block.html)("comment",/\x3c!--[\s\S]*?- [...]
-/<(tag)[\s\S]+?<\/\1>/)("closing",/<tag(?:"[^"]*"|'[^']*'|[^'">])*?>/)(/tag/g,block._tag)();block.paragraph=replace(block.paragraph)("hr",block.hr)("heading",block.heading)("lheading",block.lheading)("blockquote",block.blockquote)("tag","<"+block._tag)("def",block.def)();block.normal=merge({},block);block.gfm=merge({},block.normal,{fences:/^ *(`{3,}|~{3,}) *(\S+)? *\n([\s\S]+?)\s*\1 *(?:\n+|$)/,paragraph:/^/});block.gfm.paragraph=replace(block.paragraph)("(?!","(?!"+block.gfm.fences.sour [...]
-"\\2")+"|")();block.tables=merge({},block.gfm,{nptable:/^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)\n*/,table:/^ *\|(.+)\n *\|( *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*/});function Lexer(options){this.tokens=[];this.tokens.links={};this.options=options||marked.defaults;this.rules=block.normal;if(this.options.gfm)if(this.options.tables)this.rules=block.tables;else this.rules=block.gfm}Lexer.rules=block;Lexer.lex=function(src,options){var lexer=new Lexer(options);return lexe [...]
-Lexer.prototype.lex=function(src){src=src.replace(/\r\n|\r/g,"\n").replace(/\t/g,"    ").replace(/\u00a0/g," ").replace(/\u2424/g,"\n");return this.token(src,true)};Lexer.prototype.token=function(src,top){var src=src.replace(/^ +$/gm,""),next,loose,cap,bull,b,item,space,i,l;while(src){if(cap=this.rules.newline.exec(src)){src=src.substring(cap[0].length);if(cap[0].length>1)this.tokens.push({type:"space"})}if(cap=this.rules.code.exec(src)){src=src.substring(cap[0].length);cap=cap[0].replac [...]
-"");this.tokens.push({type:"code",text:!this.options.pedantic?cap.replace(/\n+$/,""):cap});continue}if(cap=this.rules.fences.exec(src)){src=src.substring(cap[0].length);this.tokens.push({type:"code",lang:cap[2],text:cap[3]});continue}if(cap=this.rules.heading.exec(src)){src=src.substring(cap[0].length);this.tokens.push({type:"heading",depth:cap[1].length,text:cap[2]});continue}if(top&&(cap=this.rules.nptable.exec(src))){src=src.substring(cap[0].length);item={type:"table",header:cap[1].re [...]
-"").split(/ *\| */),align:cap[2].replace(/^ *|\| *$/g,"").split(/ *\| */),cells:cap[3].replace(/\n$/,"").split("\n")};for(i=0;i<item.align.length;i++)if(/^ *-+: *$/.test(item.align[i]))item.align[i]="right";else if(/^ *:-+: *$/.test(item.align[i]))item.align[i]="center";else if(/^ *:-+ *$/.test(item.align[i]))item.align[i]="left";else item.align[i]=null;for(i=0;i<item.cells.length;i++)item.cells[i]=item.cells[i].split(/ *\| */);this.tokens.push(item);continue}if(cap=this.rules.lheading.e [...]
-src.substring(cap[0].length);this.tokens.push({type:"heading",depth:cap[2]==="="?1:2,text:cap[1]});continue}if(cap=this.rules.hr.exec(src)){src=src.substring(cap[0].length);this.tokens.push({type:"hr"});continue}if(cap=this.rules.blockquote.exec(src)){src=src.substring(cap[0].length);this.tokens.push({type:"blockquote_start"});cap=cap[0].replace(/^ *> ?/gm,"");this.token(cap,top);this.tokens.push({type:"blockquote_end"});continue}if(cap=this.rules.list.exec(src)){src=src.substring(cap[0] [...]
-bull=cap[2];this.tokens.push({type:"list_start",ordered:bull.length>1});cap=cap[0].match(this.rules.item);next=false;l=cap.length;i=0;for(;i<l;i++){item=cap[i];space=item.length;item=item.replace(/^ *([*+-]|\d+\.) +/,"");if(~item.indexOf("\n ")){space-=item.length;item=!this.options.pedantic?item.replace(new RegExp("^ {1,"+space+"}","gm"),""):item.replace(/^ {1,4}/gm,"")}if(this.options.smartLists&&i!==l-1){b=block.bullet.exec(cap[i+1])[0];if(bull!==b&&!(bull.length>1&&b.length>1)){src=c [...]
-1).join("\n")+src;i=l-1}}loose=next||/\n\n(?!\s*$)/.test(item);if(i!==l-1){next=item[item.length-1]==="\n";if(!loose)loose=next}this.tokens.push({type:loose?"loose_item_start":"list_item_start"});this.token(item,false);this.tokens.push({type:"list_item_end"})}this.tokens.push({type:"list_end"});continue}if(cap=this.rules.html.exec(src)){src=src.substring(cap[0].length);this.tokens.push({type:this.options.sanitize?"paragraph":"html",pre:cap[1]==="pre"||cap[1]==="script",text:cap[0]});cont [...]
-(cap=this.rules.def.exec(src))){src=src.substring(cap[0].length);this.tokens.links[cap[1].toLowerCase()]={href:cap[2],title:cap[3]};continue}if(top&&(cap=this.rules.table.exec(src))){src=src.substring(cap[0].length);item={type:"table",header:cap[1].replace(/^ *| *\| *$/g,"").split(/ *\| */),align:cap[2].replace(/^ *|\| *$/g,"").split(/ *\| */),cells:cap[3].replace(/(?: *\| *)?\n$/,"").split("\n")};for(i=0;i<item.align.length;i++)if(/^ *-+: *$/.test(item.align[i]))item.align[i]="right";el [...]
-"center";else if(/^ *:-+ *$/.test(item.align[i]))item.align[i]="left";else item.align[i]=null;for(i=0;i<item.cells.length;i++)item.cells[i]=item.cells[i].replace(/^ *\| *| *\| *$/g,"").split(/ *\| */);this.tokens.push(item);continue}if(top&&(cap=this.rules.paragraph.exec(src))){src=src.substring(cap[0].length);this.tokens.push({type:"paragraph",text:cap[1][cap[1].length-1]==="\n"?cap[1].slice(0,-1):cap[1]});continue}if(cap=this.rules.text.exec(src)){src=src.substring(cap[0].length);this. [...]
-text:cap[0]});continue}if(src)throw new Error("Infinite loop on byte: "+src.charCodeAt(0));}return this.tokens};var inline={escape:/^\\([\\`*{}\[\]()#+\-.!_>])/,autolink:/^<([^ >]+(@|:\/)[^ >]+)>/,url:noop,tag:/^\x3c!--[\s\S]*?--\x3e|^<\/?\w+(?:"[^"]*"|'[^']*'|[^'">])*?>/,link:/^!?\[(inside)\]\(href\)/,reflink:/^!?\[(inside)\]\s*\[([^\]]*)\]/,nolink:/^!?\[((?:\[[^\]]*\]|[^\[\]])*)\]/,strong:/^__([\s\S]+?)__(?!_)|^\*\*([\s\S]+?)\*\*(?!\*)/,em:/^\b_((?:__|[\s\S])+?)_\b|^\*((?:\*\*|[\s\S])+ [...]
-code:/^(`+)\s*([\s\S]*?[^`])\s*\1(?!`)/,br:/^ {2,}\n(?!\s*$)/,del:noop,text:/^[\s\S]+?(?=[\\<!\[_*`]| {2,}\n|$)/};inline._inside=/(?:\[[^\]]*\]|[^\]]|\](?=[^\[]*\]))*/;inline._href=/\s*<?([^\s]*?)>?(?:\s+['"]([\s\S]*?)['"])?\s*/;inline.link=replace(inline.link)("inside",inline._inside)("href",inline._href)();inline.reflink=replace(inline.reflink)("inside",inline._inside)();inline.normal=merge({},inline);inline.pedantic=merge({},inline.normal,{strong:/^__(?=\S)([\s\S]*?\S)__(?!_)|^\*\*(?= [...]
-em:/^_(?=\S)([\s\S]*?\S)_(?!_)|^\*(?=\S)([\s\S]*?\S)\*(?!\*)/});inline.gfm=merge({},inline.normal,{escape:replace(inline.escape)("])","~|])")(),url:/^(https?:\/\/[^\s<]+[^<.,:;"')\]\s])/,del:/^~~(?=\S)([\s\S]*?\S)~~/,text:replace(inline.text)("]|","~]|")("|","|https?://|")()});inline.breaks=merge({},inline.gfm,{br:replace(inline.br)("{2,}","*")(),text:replace(inline.gfm.text)("{2,}","*")()});function InlineLexer(links,options){this.options=options||marked.defaults;this.links=links;this.r [...]
-if(!this.links)throw new Error("Tokens array requires a `links` property.");if(this.options.gfm)if(this.options.breaks)this.rules=inline.breaks;else this.rules=inline.gfm;else if(this.options.pedantic)this.rules=inline.pedantic}InlineLexer.rules=inline;InlineLexer.output=function(src,links,options){var inline=new InlineLexer(links,options);return inline.output(src)};InlineLexer.prototype.output=function(src){var out="",link,text,href,cap;while(src){if(cap=this.rules.escape.exec(src)){src [...]
-out+=cap[1];continue}if(cap=this.rules.autolink.exec(src)){src=src.substring(cap[0].length);if(cap[2]==="@"){text=cap[1][6]===":"?this.mangle(cap[1].substring(7)):this.mangle(cap[1]);href=this.mangle("mailto:")+text}else{text=escape(cap[1]);href=text}out+='<a href="'+href+'">'+text+"</a>";continue}if(cap=this.rules.url.exec(src)){src=src.substring(cap[0].length);text=escape(cap[1]);href=text;out+='<a href="'+href+'">'+text+"</a>";continue}if(cap=this.rules.tag.exec(src)){src=src.substrin [...]
-out+=this.options.sanitize?escape(cap[0]):cap[0];continue}if(cap=this.rules.link.exec(src)){src=src.substring(cap[0].length);out+=this.outputLink(cap,{href:cap[2],title:cap[3]});continue}if((cap=this.rules.reflink.exec(src))||(cap=this.rules.nolink.exec(src))){src=src.substring(cap[0].length);link=(cap[2]||cap[1]).replace(/\s+/g," ");link=this.links[link.toLowerCase()];if(!link||!link.href){out+=cap[0][0];src=cap[0].substring(1)+src;continue}out+=this.outputLink(cap,link);continue}if(cap [...]
-src.substring(cap[0].length);out+="<strong>"+this.output(cap[2]||cap[1])+"</strong>";continue}if(cap=this.rules.em.exec(src)){src=src.substring(cap[0].length);out+="<em>"+this.output(cap[2]||cap[1])+"</em>";continue}if(cap=this.rules.code.exec(src)){src=src.substring(cap[0].length);out+="<code>"+escape(cap[2],true)+"</code>";continue}if(cap=this.rules.br.exec(src)){src=src.substring(cap[0].length);out+="<br>";continue}if(cap=this.rules.del.exec(src)){src=src.substring(cap[0].length);out+ [...]
-this.output(cap[1])+"</del>";continue}if(cap=this.rules.text.exec(src)){src=src.substring(cap[0].length);out+=escape(cap[0]);continue}if(src)throw new Error("Infinite loop on byte: "+src.charCodeAt(0));}return out};InlineLexer.prototype.outputLink=function(cap,link){if(cap[0][0]!=="!")return'<a href="'+escape(link.href)+'"'+(link.title?' title="'+escape(link.title)+'"':"")+">"+this.output(cap[1])+"</a>";else return'<img src="'+escape(link.href)+'" alt="'+escape(cap[1])+'"'+(link.title?'  [...]
-escape(link.title)+'"':"")+">"};InlineLexer.prototype.smartypants=function(text){if(!this.options.smartypants)return text;return text.replace(/--/g,"\u2014").replace(/'([^']*)'/g,"\u2018$1\u2019").replace(/"([^"]*)"/g,"\u201c$1\u201d").replace(/\.{3}/g,"\u2026")};InlineLexer.prototype.mangle=function(text){var out="",l=text.length,i=0,ch;for(;i<l;i++){ch=text.charCodeAt(i);if(Math.random()>0.5)ch="x"+ch.toString(16);out+="&#"+ch+";"}return out};function Parser(options){this.tokens=[];thi [...]
-this.options=options||marked.defaults}Parser.parse=function(src,options){var parser=new Parser(options);return parser.parse(src)};Parser.prototype.parse=function(src){this.inline=new InlineLexer(src.links,this.options);this.tokens=src.reverse();var out="";while(this.next())out+=this.tok();return out};Parser.prototype.next=function(){return this.token=this.tokens.pop()};Parser.prototype.peek=function(){return this.tokens[this.tokens.length-1]||0};Parser.prototype.parseText=function(){var  [...]
-while(this.peek().type==="text")body+="\n"+this.next().text;return this.inline.output(body)};Parser.prototype.tok=function(){switch(this.token.type){case "space":return"";case "hr":return"<hr>\n";case "heading":return"<h"+this.token.depth+">"+this.inline.output(this.token.text)+"</h"+this.token.depth+">\n";case "code":if(this.options.highlight){var code=this.options.highlight(this.token.text,this.token.lang);if(code!=null&&code!==this.token.text){this.token.escaped=true;this.token.text=c [...]
-escape(this.token.text,true);return"<pre><code"+(this.token.lang?' class="'+this.options.langPrefix+this.token.lang+'"':"")+">"+this.token.text+"</code></pre>\n";case "table":var body="",heading,i,row,cell,j;body+="<thead>\n<tr>\n";for(i=0;i<this.token.header.length;i++){heading=this.inline.output(this.token.header[i]);body+=this.token.align[i]?'<th align="'+this.token.align[i]+'">'+heading+"</th>\n":"<th>"+heading+"</th>\n"}body+="</tr>\n</thead>\n";body+="<tbody>\n";for(i=0;i<this.toke [...]
-this.token.cells[i];body+="<tr>\n";for(j=0;j<row.length;j++){cell=this.inline.output(row[j]);body+=this.token.align[j]?'<td align="'+this.token.align[j]+'">'+cell+"</td>\n":"<td>"+cell+"</td>\n"}body+="</tr>\n"}body+="</tbody>\n";return"<table>\n"+body+"</table>\n";case "blockquote_start":var body="";while(this.next().type!=="blockquote_end")body+=this.tok();return"<blockquote>\n"+body+"</blockquote>\n";case "list_start":var type=this.token.ordered?"ol":"ul",body="";while(this.next().typ [...]
-this.tok();return"<"+type+">\n"+body+"</"+type+">\n";case "list_item_start":var body="";while(this.next().type!=="list_item_end")body+=this.token.type==="text"?this.parseText():this.tok();return"<li>"+body+"</li>\n";case "loose_item_start":var body="";while(this.next().type!=="list_item_end")body+=this.tok();return"<li>"+body+"</li>\n";case "html":return!this.token.pre&&!this.options.pedantic?this.inline.output(this.token.text):this.token.text;case "paragraph":return"<p>"+this.inline.out [...]
-"</p>\n";case "text":return"<p>"+this.parseText()+"</p>\n"}};function escape(html,encode){return html.replace(!encode?/&(?!#?\w+;)/g:/&/g,"&").replace(/</g,"<").replace(/>/g,">").replace(/"/g,""").replace(/'/g,"'")}function replace(regex,opt){regex=regex.source;opt=opt||"";return function self(name,val){if(!name)return new RegExp(regex,opt);val=val.source||val;val=val.replace(/(^|[^\[])\^/g,"$1");regex=regex.replace(name,val);return self}}function noop(){}noop.exec=noo [...]
-1,target,key;for(;i<arguments.length;i++){target=arguments[i];for(key in target)if(Object.prototype.hasOwnProperty.call(target,key))obj[key]=target[key]}return obj}function marked(src,opt,callback){if(callback||typeof opt==="function"){if(!callback){callback=opt;opt=null}if(opt)opt=merge({},marked.defaults,opt);var tokens=Lexer.lex(tokens,opt),highlight=opt.highlight,pending=0,l=tokens.length,i=0;if(!highlight||highlight.length<3)return callback(null,Parser.parse(tokens,opt));var done=fu [...]
-var out=Parser.parse(tokens,opt);opt.highlight=highlight;return callback(null,out)};for(;i<l;i++)(function(token){if(token.type!=="code")return;pending++;return highlight(token.text,token.lang,function(err,code){if(code==null||code===token.text)return--pending||done();token.text=code;token.escaped=true;--pending||done()})})(tokens[i]);return}try{if(opt)opt=merge({},marked.defaults,opt);return Parser.parse(Lexer.lex(src,opt),opt)}catch(e){e.message+="\nPlease report this to https://github [...]
-if((opt||marked.defaults).silent)return"<p>An error occured:</p><pre>"+escape(e.message+"",true)+"</pre>";throw e;}}marked.options=marked.setOptions=function(opt){merge(marked.defaults,opt);return marked};marked.defaults={gfm:true,tables:true,breaks:false,pedantic:false,sanitize:false,smartLists:false,silent:false,highlight:null,langPrefix:""};marked.Parser=Parser;marked.parser=Parser.parse;marked.Lexer=Lexer;marked.lexer=Lexer.lex;marked.InlineLexer=InlineLexer;marked.inlineLexer=Inline [...]
-marked.parse=marked;if(typeof exports==="object")module.exports=marked;else if(typeof define==="function"&&define.amd)define(function(){return marked});else this.marked=marked}).call(function(){return this||(typeof window!=="undefined"?window:global)}());
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/math/math.js b/uflacs-merge-into-ffc/doc/roadmap/plugin/math/math.js
deleted file mode 100755
index d55d9d1..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/math/math.js
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * A plugin which enables rendering of math equations inside
- * of reveal.js slides. Essentially a thin wrapper for MathJax.
- *
- * @author Hakim El Hattab
- */
-var RevealMath = window.RevealMath || (function(){
-
-	var options = Reveal.getConfig().math || {};
-	options.mathjax = options.mathjax || 'http://cdn.mathjax.org/mathjax/latest/MathJax.js';
-	options.config = options.config || 'TeX-AMS_HTML-full';
-
-	loadScript( options.mathjax + '?config=' + options.config, function() {
-
-		MathJax.Hub.Config({
-			messageStyle: 'none',
-			tex2jax: { inlineMath: [['$','$'],['\\(','\\)']] },
-			skipStartupTypeset: true
-		});
-
-		// Typeset followed by an immediate reveal.js layout since
-		// the typesetting process could affect slide height
-		MathJax.Hub.Queue( [ 'Typeset', MathJax.Hub ] );
-		MathJax.Hub.Queue( Reveal.layout );
-
-		// Reprocess equations in slides when they turn visible
-		Reveal.addEventListener( 'slidechanged', function( event ) {
-
-			MathJax.Hub.Queue( [ 'Typeset', MathJax.Hub, event.currentSlide ] );
-
-		} );
-
-	} );
-
-	function loadScript( url, callback ) {
-
-		var head = document.querySelector( 'head' );
-		var script = document.createElement( 'script' );
-		script.type = 'text/javascript';
-		script.src = url;
-
-		// Wrapper for callback to make sure it only fires once
-		var finish = function() {
-			if( typeof callback === 'function' ) {
-				callback.call();
-				callback = null;
-			}
-		}
-
-		script.onload = finish;
-
-		// IE
-		script.onreadystatechange = function() {
-			if ( this.readyState === 'loaded' ) {
-				finish();
-			}
-		}
-
-		// Normal browsers
-		head.appendChild( script );
-
-	}
-
-})();
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/multiplex/client.js b/uflacs-merge-into-ffc/doc/roadmap/plugin/multiplex/client.js
deleted file mode 100644
index e6179f6..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/multiplex/client.js
+++ /dev/null
@@ -1,13 +0,0 @@
-(function() {
-	var multiplex = Reveal.getConfig().multiplex;
-	var socketId = multiplex.id;
-	var socket = io.connect(multiplex.url);
-
-	socket.on(multiplex.id, function(data) {
-		// ignore data from sockets that aren't ours
-		if (data.socketId !== socketId) { return; }
-		if( window.location.host === 'localhost:1947' ) return;
-
-		Reveal.slide(data.indexh, data.indexv, data.indexf, 'remote');
-	});
-}());
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/multiplex/index.js b/uflacs-merge-into-ffc/doc/roadmap/plugin/multiplex/index.js
deleted file mode 100644
index 6f5d8b1..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/multiplex/index.js
+++ /dev/null
@@ -1,56 +0,0 @@
-var express		= require('express');
-var fs			= require('fs');
-var io			= require('socket.io');
-var crypto		= require('crypto');
-
-var app			= express.createServer();
-var staticDir	= express.static;
-
-io				= io.listen(app);
-
-var opts = {
-	port: 1948,
-	baseDir : __dirname + '/../../'
-};
-
-io.sockets.on('connection', function(socket) {
-	socket.on('slidechanged', function(slideData) {
-		if (typeof slideData.secret == 'undefined' || slideData.secret == null || slideData.secret === '') return;
-		if (createHash(slideData.secret) === slideData.socketId) {
-			slideData.secret = null;
-			socket.broadcast.emit(slideData.socketId, slideData);
-		};
-	});
-});
-
-app.configure(function() {
-	[ 'css', 'js', 'plugin', 'lib' ].forEach(function(dir) {
-		app.use('/' + dir, staticDir(opts.baseDir + dir));
-	});
-});
-
-app.get("/", function(req, res) {
-	res.writeHead(200, {'Content-Type': 'text/html'});
-	fs.createReadStream(opts.baseDir + '/index.html').pipe(res);
-});
-
-app.get("/token", function(req,res) {
-	var ts = new Date().getTime();
-	var rand = Math.floor(Math.random()*9999999);
-	var secret = ts.toString() + rand.toString();
-	res.send({secret: secret, socketId: createHash(secret)});
-});
-
-var createHash = function(secret) {
-	var cipher = crypto.createCipher('blowfish', secret);
-	return(cipher.final('hex'));
-};
-
-// Actually listen
-app.listen(opts.port || null);
-
-var brown = '\033[33m',
-	green = '\033[32m',
-	reset = '\033[0m';
-
-console.log( brown + "reveal.js:" + reset + " Multiplex running on port " + green + opts.port + reset );
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/multiplex/master.js b/uflacs-merge-into-ffc/doc/roadmap/plugin/multiplex/master.js
deleted file mode 100644
index b6a7eb7..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/multiplex/master.js
+++ /dev/null
@@ -1,51 +0,0 @@
-(function() {
-	// Don't emit events from inside of notes windows
-	if ( window.location.search.match( /receiver/gi ) ) { return; }
-
-	var multiplex = Reveal.getConfig().multiplex;
-
-	var socket = io.connect(multiplex.url);
-
-	var notify = function( slideElement, indexh, indexv, origin ) {
-		if( typeof origin === 'undefined' && origin !== 'remote' ) {
-			var nextindexh;
-			var nextindexv;
-
-			var fragmentindex = Reveal.getIndices().f;
-			if (typeof fragmentindex == 'undefined') {
-				fragmentindex = 0;
-			}
-
-			if (slideElement.nextElementSibling && slideElement.parentNode.nodeName == 'SECTION') {
-				nextindexh = indexh;
-				nextindexv = indexv + 1;
-			} else {
-				nextindexh = indexh + 1;
-				nextindexv = 0;
-			}
-
-			var slideData = {
-				indexh : indexh,
-				indexv : indexv,
-				indexf : fragmentindex,
-				nextindexh : nextindexh,
-				nextindexv : nextindexv,
-				secret: multiplex.secret,
-				socketId : multiplex.id
-			};
-
-			socket.emit('slidechanged', slideData);
-		}
-	}
-
-	Reveal.addEventListener( 'slidechanged', function( event ) {
-		notify( event.currentSlide, event.indexh, event.indexv, event.origin );
-	} );
-
-	var fragmentNotify = function( event ) {
-		notify( Reveal.getCurrentSlide(), Reveal.getIndices().h, Reveal.getIndices().v, event.origin );
-	};
-
-	Reveal.addEventListener( 'fragmentshown', fragmentNotify );
-	Reveal.addEventListener( 'fragmenthidden', fragmentNotify );
-}());
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/notes-server/client.js b/uflacs-merge-into-ffc/doc/roadmap/plugin/notes-server/client.js
deleted file mode 100644
index 156cb9a..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/notes-server/client.js
+++ /dev/null
@@ -1,57 +0,0 @@
-(function() {
-	// don't emit events from inside the previews themselves
-	if ( window.location.search.match( /receiver/gi ) ) { return; }
-
-	var socket = io.connect(window.location.origin);
-	var socketId = Math.random().toString().slice(2);
-	
-	console.log('View slide notes at ' + window.location.origin + '/notes/' + socketId);
-	window.open(window.location.origin + '/notes/' + socketId, 'notes-' + socketId);
-
-	// Fires when a fragment is shown
-	Reveal.addEventListener( 'fragmentshown', function( event ) {
-		var fragmentData = {
-			fragment : 'next',
-			socketId : socketId
-		};
-		socket.emit('fragmentchanged', fragmentData);
-	} );
-
-	// Fires when a fragment is hidden
-	Reveal.addEventListener( 'fragmenthidden', function( event ) {
-		var fragmentData = {
-			fragment : 'previous',
-			socketId : socketId
-		};
-		socket.emit('fragmentchanged', fragmentData);
-	} );
-
-	// Fires when slide is changed
-	Reveal.addEventListener( 'slidechanged', function( event ) {
-		var nextindexh;
-		var nextindexv;
-		var slideElement = event.currentSlide;
-
-		if (slideElement.nextElementSibling && slideElement.parentNode.nodeName == 'SECTION') {
-			nextindexh = event.indexh;
-			nextindexv = event.indexv + 1;
-		} else {
-			nextindexh = event.indexh + 1;
-			nextindexv = 0;
-		}
-
-		var notes = slideElement.querySelector('aside.notes');
-		var slideData = {
-			notes : notes ? notes.innerHTML : '',
-			indexh : event.indexh,
-			indexv : event.indexv,
-			nextindexh : nextindexh,
-			nextindexv : nextindexv,
-			socketId : socketId,
-			markdown : notes ? typeof notes.getAttribute('data-markdown') === 'string' : false
-
-		};
-
-		socket.emit('slidechanged', slideData);
-	} );
-}());
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/notes-server/index.js b/uflacs-merge-into-ffc/doc/roadmap/plugin/notes-server/index.js
deleted file mode 100644
index 5535c90..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/notes-server/index.js
+++ /dev/null
@@ -1,59 +0,0 @@
-var express   = require('express');
-var fs        = require('fs');
-var io        = require('socket.io');
-var _         = require('underscore');
-var Mustache  = require('mustache');
-
-var app       = express.createServer();
-var staticDir = express.static;
-
-io            = io.listen(app);
-
-var opts = {
-	port :      1947,
-	baseDir :   __dirname + '/../../'
-};
-
-io.sockets.on('connection', function(socket) {
-	socket.on('slidechanged', function(slideData) {
-		socket.broadcast.emit('slidedata', slideData);
-	});
-	socket.on('fragmentchanged', function(fragmentData) {
-		socket.broadcast.emit('fragmentdata', fragmentData);
-	});
-});
-
-app.configure(function() {
-	[ 'css', 'js', 'images', 'plugin', 'lib' ].forEach(function(dir) {
-		app.use('/' + dir, staticDir(opts.baseDir + dir));
-	});
-});
-
-app.get("/", function(req, res) {
-	res.writeHead(200, {'Content-Type': 'text/html'});
-	fs.createReadStream(opts.baseDir + '/index.html').pipe(res);
-});
-
-app.get("/notes/:socketId", function(req, res) {
-
-	fs.readFile(opts.baseDir + 'plugin/notes-server/notes.html', function(err, data) {
-		res.send(Mustache.to_html(data.toString(), {
-			socketId : req.params.socketId
-		}));
-	});
-	// fs.createReadStream(opts.baseDir + 'notes-server/notes.html').pipe(res);
-});
-
-// Actually listen
-app.listen(opts.port || null);
-
-var brown = '\033[33m',
-	green = '\033[32m',
-	reset = '\033[0m';
-
-var slidesLocation = "http://localhost" + ( opts.port ? ( ':' + opts.port ) : '' );
-
-console.log( brown + "reveal.js - Speaker Notes" + reset );
-console.log( "1. Open the slides at " + green + slidesLocation + reset );
-console.log( "2. Click on the link your JS console to go to the notes page" );
-console.log( "3. Advance through your slides and your notes will advance automatically" );
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/notes-server/notes.html b/uflacs-merge-into-ffc/doc/roadmap/plugin/notes-server/notes.html
deleted file mode 100644
index 25d1a62..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/notes-server/notes.html
+++ /dev/null
@@ -1,142 +0,0 @@
-<!doctype html>
-<html lang="en">
-	<head>
-		<meta charset="utf-8">
-
-		<meta name="viewport" content="width=1150">
-
-		<title>reveal.js - Slide Notes</title>
-
-		<style>
-			body {
-				font-family: Helvetica;
-			}
-
-			#notes {
-				font-size: 24px;
-				width: 640px;
-				margin-top: 5px;
-				clear: left;
-			}
-
-			#wrap-current-slide {
-				width: 640px;
-				height: 512px;
-				float: left;
-				overflow: hidden;
-			}
-
-			#current-slide {
-				width: 1280px;
-				height: 1024px;
-				border: none;
-
-				-webkit-transform-origin: 0 0;
-				   -moz-transform-origin: 0 0;
-				    -ms-transform-origin: 0 0;
-				     -o-transform-origin: 0 0;
-				        transform-origin: 0 0;
-
-				-webkit-transform: scale(0.5);
-				   -moz-transform: scale(0.5);
-				    -ms-transform: scale(0.5);
-				     -o-transform: scale(0.5);
-				        transform: scale(0.5);
-			}
-
-			#wrap-next-slide {
-				width: 448px;
-				height: 358px;
-				float: left;
-				margin: 0 0 0 10px;
-				overflow: hidden;
-			}
-
-			#next-slide {
-				width: 1280px;
-				height: 1024px;
-				border: none;
-
-				-webkit-transform-origin: 0 0;
-				   -moz-transform-origin: 0 0;
-				    -ms-transform-origin: 0 0;
-				     -o-transform-origin: 0 0;
-				        transform-origin: 0 0;
-
-				-webkit-transform: scale(0.35);
-				   -moz-transform: scale(0.35);
-				    -ms-transform: scale(0.35);
-				     -o-transform: scale(0.35);
-				        transform: scale(0.35);
-			}
-
-			.slides {
-				position: relative;
-				margin-bottom: 10px;
-				border: 1px solid black;
-				border-radius: 2px;
-				background: rgb(28, 30, 32);
-			}
-
-			.slides span {
-				position: absolute;
-				top: 3px;
-				left: 3px;
-				font-weight: bold;
-				font-size: 14px;
-				color: rgba( 255, 255, 255, 0.9 );
-			}
-		</style>
-	</head>
-
-	<body>
-
-		<div id="wrap-current-slide" class="slides">
-			<iframe src="/?receiver" width="1280" height="1024" id="current-slide"></iframe>
-		</div>
-
-		<div id="wrap-next-slide" class="slides">
-			<iframe src="/?receiver" width="640" height="512" id="next-slide"></iframe>
-			<span>UPCOMING:</span>
-		</div>
-		<div id="notes"></div>
-
-		<script src="/socket.io/socket.io.js"></script>
-		<script src="/plugin/markdown/marked.js"></script>
-
-		<script>
-		var socketId = '{{socketId}}';
-		var socket = io.connect(window.location.origin);
-		var notes = document.getElementById('notes');
-		var currentSlide = document.getElementById('current-slide');
-		var nextSlide = document.getElementById('next-slide');
-
-		socket.on('slidedata', function(data) {
-			// ignore data from sockets that aren't ours
-			if (data.socketId !== socketId) { return; }
-
-			if (data.markdown) {
-				notes.innerHTML = marked(data.notes);
-			}
-			else {
-				notes.innerHTML = data.notes;
-			}
-
-			currentSlide.contentWindow.Reveal.slide(data.indexh, data.indexv);
-			nextSlide.contentWindow.Reveal.slide(data.nextindexh, data.nextindexv);
-		});
-		socket.on('fragmentdata', function(data) {
-			// ignore data from sockets that aren't ours
-			if (data.socketId !== socketId) { return; }
-
-			if (data.fragment === 'next') {
-				currentSlide.contentWindow.Reveal.nextFragment();
-			}
-			else if (data.fragment === 'previous') {
-				currentSlide.contentWindow.Reveal.prevFragment();
-			}
-		});
-		</script>
-
-	</body>
-</html>
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/notes/notes.html b/uflacs-merge-into-ffc/doc/roadmap/plugin/notes/notes.html
deleted file mode 100644
index 847499d..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/notes/notes.html
+++ /dev/null
@@ -1,267 +0,0 @@
-<!doctype html>
-<html lang="en">
-	<head>
-		<meta charset="utf-8">
-
-		<title>reveal.js - Slide Notes</title>
-
-		<style>
-			body {
-				font-family: Helvetica;
-			}
-
-			#notes {
-				font-size: 24px;
-				width: 640px;
-				margin-top: 5px;
-				clear: left;
-			}
-
-			#wrap-current-slide {
-				width: 640px;
-				height: 512px;
-				float: left;
-				overflow: hidden;
-			}
-
-			#current-slide {
-				width: 1280px;
-				height: 1024px;
-				border: none;
-
-				-webkit-transform-origin: 0 0;
-				   -moz-transform-origin: 0 0;
-					-ms-transform-origin: 0 0;
-					 -o-transform-origin: 0 0;
-						transform-origin: 0 0;
-
-				-webkit-transform: scale(0.5);
-				   -moz-transform: scale(0.5);
-					-ms-transform: scale(0.5);
-					 -o-transform: scale(0.5);
-						transform: scale(0.5);
-			}
-
-			#wrap-next-slide {
-				width: 448px;
-				height: 358px;
-				float: left;
-				margin: 0 0 0 10px;
-				overflow: hidden;
-			}
-
-			#next-slide {
-				width: 1280px;
-				height: 1024px;
-				border: none;
-
-				-webkit-transform-origin: 0 0;
-				   -moz-transform-origin: 0 0;
-					-ms-transform-origin: 0 0;
-					 -o-transform-origin: 0 0;
-						transform-origin: 0 0;
-
-				-webkit-transform: scale(0.35);
-				   -moz-transform: scale(0.35);
-					-ms-transform: scale(0.35);
-					 -o-transform: scale(0.35);
-						transform: scale(0.35);
-			}
-
-			.slides {
-				position: relative;
-				margin-bottom: 10px;
-				border: 1px solid black;
-				border-radius: 2px;
-				background: rgb(28, 30, 32);
-			}
-
-			.slides span {
-				position: absolute;
-				top: 3px;
-				left: 3px;
-				font-weight: bold;
-				font-size: 14px;
-				color: rgba( 255, 255, 255, 0.9 );
-			}
-
-			.error {
-				font-weight: bold;
-				color: red;
-				font-size: 1.5em;
-				text-align: center;
-				margin-top: 10%;
-			}
-
-			.error code {
-				font-family: monospace;
-			}
-
-			.time {
-				width: 448px;
-				margin: 30px 0 0 10px;
-				float: left;
-				text-align: center;
-				opacity: 0;
-
-				-webkit-transition: opacity 0.4s;
-				   -moz-transition: opacity 0.4s;
-				     -o-transition: opacity 0.4s;
-				        transition: opacity 0.4s;
-			}
-
-			.elapsed,
-			.clock {
-				color: #333;
-				font-size: 2em;
-				text-align: center;
-				display: inline-block;
-				padding: 0.5em;
-				background-color: #eee;
-				border-radius: 10px;
-			}
-
-			.elapsed h2,
-			.clock h2 {
-				font-size: 0.8em;
-				line-height: 100%;
-				margin: 0;
-				color: #aaa;
-			}
-
-			.elapsed .mute {
-				color: #ddd;
-			}
-
-		</style>
-	</head>
-
-	<body>
-
-		<script>
-			function getNotesURL( controls ) {
-				return window.opener.location.protocol + '//' + window.opener.location.host + window.opener.location.pathname + '?receiver&controls='+ ( controls || 'false' ) +'&progress=false&overview=false' + window.opener.location.hash;
-			}
-			var notesCurrentSlideURL = getNotesURL( true );
-			var notesNextSlideURL = getNotesURL( false );
-		</script>
-
-		<div id="wrap-current-slide" class="slides">
-			<script>document.write( '<iframe width="1280" height="1024" id="current-slide" src="'+ notesCurrentSlideURL +'"></iframe>' );</script>
-		</div>
-
-		<div id="wrap-next-slide" class="slides">
-			<script>document.write( '<iframe width="640" height="512" id="next-slide" src="'+ notesNextSlideURL +'"></iframe>' );</script>
-			<span>UPCOMING:</span>
-		</div>
-
-		<div class="time">
-			<div class="clock">
-				<h2>Time</h2>
-				<span id="clock">0:00:00 AM</span>
-			</div>
-			<div class="elapsed">
-				<h2>Elapsed</h2>
-				<span id="hours">00</span><span id="minutes">:00</span><span id="seconds">:00</span>
-			</div>
-		</div>
-
-		<div id="notes"></div>
-
-		<script src="../../plugin/markdown/marked.js"></script>
-		<script>
-
-			window.addEventListener( 'load', function() {
-
-				if( window.opener && window.opener.location && window.opener.location.href ) {
-
-					var notes = document.getElementById( 'notes' ),
-						currentSlide = document.getElementById( 'current-slide' ),
-						nextSlide = document.getElementById( 'next-slide' ),
-						silenced = false;
-
-					window.addEventListener( 'message', function( event ) {
-						var data = JSON.parse( event.data );
-
-						// No need for updating the notes in case of fragment changes
-						if ( data.notes !== undefined) {
-							if( data.markdown ) {
-								notes.innerHTML = marked( data.notes );
-							}
-							else {
-								notes.innerHTML = data.notes;
-							}
-						}
-
-						silenced = true;
-
-						// Update the note slides
-						currentSlide.contentWindow.Reveal.slide( data.indexh, data.indexv, data.indexf );
-						nextSlide.contentWindow.Reveal.slide( data.nextindexh, data.nextindexv );
-
-						silenced = false;
-
-					}, false );
-
-					var start = new Date(),
-						timeEl = document.querySelector( '.time' ),
-						clockEl = document.getElementById( 'clock' ),
-						hoursEl = document.getElementById( 'hours' ),
-						minutesEl = document.getElementById( 'minutes' ),
-						secondsEl = document.getElementById( 'seconds' );
-
-					setInterval( function() {
-
-						timeEl.style.opacity = 1;
-
-						var diff, hours, minutes, seconds,
-							now = new Date();
-
-						diff = now.getTime() - start.getTime();
-						hours = Math.floor( diff / ( 1000 * 60 * 60 ) );
-						minutes = Math.floor( ( diff / ( 1000 * 60 ) ) % 60 );
-						seconds = Math.floor( ( diff / 1000 ) % 60 );
-
-						clockEl.innerHTML = now.toLocaleTimeString();
-						hoursEl.innerHTML = zeroPadInteger( hours );
-						hoursEl.className = hours > 0 ? "" : "mute";
-						minutesEl.innerHTML = ":" + zeroPadInteger( minutes );
-						minutesEl.className = minutes > 0 ? "" : "mute";
-						secondsEl.innerHTML = ":" + zeroPadInteger( seconds );
-
-					}, 1000 );
-
-					// Broadcasts the state of the notes window to synchronize
-					// the main window
-					function synchronizeMainWindow() {
-
-						if( !silenced ) {
-							var indices = currentSlide.contentWindow.Reveal.getIndices();
-							window.opener.Reveal.slide( indices.h, indices.v, indices.f );
-						}
-
-					}
-
-					// Navigate the main window when the notes slide changes
-					currentSlide.contentWindow.Reveal.addEventListener( 'slidechanged', synchronizeMainWindow );
-					currentSlide.contentWindow.Reveal.addEventListener( 'fragmentshown', synchronizeMainWindow );
-					currentSlide.contentWindow.Reveal.addEventListener( 'fragmenthidden', synchronizeMainWindow );
-
-				}
-				else {
-
-					document.body.innerHTML =  '<p class="error">Unable to access <code>window.opener.location</code>.<br>Make sure the presentation is running on a web server.</p>';
-
-				}
-
-
-			}, false );
-
-			function zeroPadInteger( num ) {
-				var str = "00" + parseInt( num );
-				return str.substring( str.length - 2 );
-			}
-
-		</script>
-	</body>
-</html>
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/notes/notes.js b/uflacs-merge-into-ffc/doc/roadmap/plugin/notes/notes.js
deleted file mode 100644
index 9a82c3c..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/notes/notes.js
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Handles opening of and synchronization with the reveal.js
- * notes window.
- */
-var RevealNotes = (function() {
-
-	function openNotes() {
-		var jsFileLocation = document.querySelector('script[src$="notes.js"]').src;  // this js file path
-		jsFileLocation = jsFileLocation.replace(/notes\.js(\?.*)?$/, '');   // the js folder path
-		var notesPopup = window.open( jsFileLocation + 'notes.html', 'reveal.js - Notes', 'width=1120,height=850' );
-
-		// Fires when slide is changed
-		Reveal.addEventListener( 'slidechanged', post );
-
-		// Fires when a fragment is shown
-		Reveal.addEventListener( 'fragmentshown', post );
-
-		// Fires when a fragment is hidden
-		Reveal.addEventListener( 'fragmenthidden', post );
-
-		/**
-		 * Posts the current slide data to the notes window
-		 */
-		function post() {
-			var slideElement = Reveal.getCurrentSlide(),
-				slideIndices = Reveal.getIndices(),
-				messageData;
-
-			var notes = slideElement.querySelector( 'aside.notes' ),
-				nextindexh,
-				nextindexv;
-
-			if( slideElement.nextElementSibling && slideElement.parentNode.nodeName == 'SECTION' ) {
-				nextindexh = slideIndices.h;
-				nextindexv = slideIndices.v + 1;
-			} else {
-				nextindexh = slideIndices.h + 1;
-				nextindexv = 0;
-			}
-
-			messageData = {
-				notes : notes ? notes.innerHTML : '',
-				indexh : slideIndices.h,
-				indexv : slideIndices.v,
-				indexf : slideIndices.f,
-				nextindexh : nextindexh,
-				nextindexv : nextindexv,
-				markdown : notes ? typeof notes.getAttribute( 'data-markdown' ) === 'string' : false
-			};
-
-			notesPopup.postMessage( JSON.stringify( messageData ), '*' );
-		}
-
-		// Navigate to the current slide when the notes are loaded
-		notesPopup.addEventListener( 'load', function( event ) {
-			post();
-		}, false );
-	}
-
-	// If the there's a 'notes' query set, open directly
-	if( window.location.search.match( /(\?|\&)notes/gi ) !== null ) {
-		openNotes();
-	}
-
-	// Open the notes when the 's' key is hit
-	document.addEventListener( 'keydown', function( event ) {
-		// Disregard the event if the target is editable or a
-		// modifier is present
-		if ( document.querySelector( ':focus' ) !== null || event.shiftKey || event.altKey || event.ctrlKey || event.metaKey ) return;
-
-		if( event.keyCode === 83 ) {
-			event.preventDefault();
-			openNotes();
-		}
-	}, false );
-
-	return { open: openNotes };
-})();
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/postmessage/example.html b/uflacs-merge-into-ffc/doc/roadmap/plugin/postmessage/example.html
deleted file mode 100644
index cc57a7b..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/postmessage/example.html
+++ /dev/null
@@ -1,39 +0,0 @@
-<html>
-	<body>
-
-		<iframe id="reveal" src="../../index.html" style="border: 0;" width="500" height="500"></iframe>
-
-		<div>
-			<input id="back" type="button" value="go back"/>
-			<input id="ahead" type="button" value="go ahead"/>
-			<input id="slideto" type="button" value="slideto 2-2"/>
-		</div>
-
-		<script>
-
-			(function (){
-
-				var back = document.getElementById( 'back' ),
-						ahead = document.getElementById( 'ahead' ),
-						slideto = document.getElementById( 'slideto' ),
-						reveal =  window.frames[0];
-
-					back.addEventListener( 'click', function () {
-						
-					reveal.postMessage( JSON.stringify({method: 'prev', args: []}), '*' );
-				}, false );
-
-				ahead.addEventListener( 'click', function (){
-					reveal.postMessage( JSON.stringify({method: 'next', args: []}), '*' );
-				}, false );
-
-				slideto.addEventListener( 'click', function (){
-					reveal.postMessage( JSON.stringify({method: 'slide', args: [2,2]}), '*' );
-				}, false );
-
-			}());
-
-		</script>
-
-	</body>
-</html>
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/postmessage/postmessage.js b/uflacs-merge-into-ffc/doc/roadmap/plugin/postmessage/postmessage.js
deleted file mode 100644
index d0f4140..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/postmessage/postmessage.js
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
-
-	simple postmessage plugin
-
-	Useful when a reveal slideshow is inside an iframe.
-	It allows to call reveal methods from outside.
-
-	Example:
-		 var reveal =  window.frames[0];
-
-		 // Reveal.prev(); 
-		 reveal.postMessage(JSON.stringify({method: 'prev', args: []}), '*');
-		 // Reveal.next(); 
-		 reveal.postMessage(JSON.stringify({method: 'next', args: []}), '*');
-		 // Reveal.slide(2, 2); 
-		 reveal.postMessage(JSON.stringify({method: 'slide', args: [2,2]}), '*');
-
-	Add to the slideshow:
-
-		dependencies: [
-			...
-			{ src: 'plugin/postmessage/postmessage.js', async: true, condition: function() { return !!document.body.classList; } }
-		]
-
-*/
-
-(function (){
-
-	window.addEventListener( "message", function ( event ) {
-		var data = JSON.parse( event.data ),
-				method = data.method,
-				args = data.args;
-
-		if( typeof Reveal[method] === 'function' ) {
-			Reveal[method].apply( Reveal, data.args );
-		}
-	}, false);
-
-}());
-
-
-
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/print-pdf/print-pdf.js b/uflacs-merge-into-ffc/doc/roadmap/plugin/print-pdf/print-pdf.js
deleted file mode 100644
index 6b6cad6..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/print-pdf/print-pdf.js
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * phantomjs script for printing presentations to PDF.
- *
- * Example:
- * phantomjs print-pdf.js "http://lab.hakim.se/reveal-js?print-pdf" reveal-demo.pdf
- *
- * By Manuel Bieh (https://github.com/manuelbieh)
- */
-
-// html2pdf.js
-var page = new WebPage();
-var system = require( 'system' );
-
-page.viewportSize  = {
-	width: 1024,
-	height: 768
-};
-
-page.paperSize = {
-	format: 'letter',
-	orientation: 'landscape',
-	margin: {
-		left: '0',
-		right: '0',
-		top: '0',
-		bottom: '0'
-	}
-};
-
-var revealFile = system.args[1] || 'index.html?print-pdf';
-var slideFile = system.args[2] || 'slides.pdf';
-
-if( slideFile.match( /\.pdf$/gi ) === null ) {
-	slideFile += '.pdf';
-}
-
-console.log( 'Printing PDF...' );
-
-page.open( revealFile, function( status ) {
-	console.log( 'Printed succesfully' );
-	page.render( slideFile );
-	phantom.exit();
-} );
-
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/remotes/remotes.js b/uflacs-merge-into-ffc/doc/roadmap/plugin/remotes/remotes.js
deleted file mode 100644
index ba0dbad..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/remotes/remotes.js
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Touch-based remote controller for your presentation courtesy 
- * of the folks at http://remotes.io
- */
-
-(function(window){
-
-    /**
-     * Detects if we are dealing with a touch enabled device (with some false positives)
-     * Borrowed from modernizr: https://github.com/Modernizr/Modernizr/blob/master/feature-detects/touch.js   
-     */
-    var hasTouch  = (function(){
-        return ('ontouchstart' in window) || window.DocumentTouch && document instanceof DocumentTouch;
-    })();
-
-    /**
-     * Detects if notes are enable and the current page is opened inside an /iframe
-     * this prevents loading Remotes.io several times
-     */
-    var isNotesAndIframe = (function(){
-        return window.RevealNotes && !(self == top);
-    })();
-
-    if(!hasTouch && !isNotesAndIframe){
-        head.ready( 'remotes.ne.min.js', function() {
-            new Remotes("preview")
-                .on("swipe-left", function(e){ Reveal.right(); })
-                .on("swipe-right", function(e){ Reveal.left(); })
-                .on("swipe-up", function(e){ Reveal.down(); })
-                .on("swipe-down", function(e){ Reveal.up(); })
-                .on("tap", function(e){ Reveal.next(); })
-                .on("zoom-out", function(e){ Reveal.toggleOverview(true); })
-                .on("zoom-in", function(e){ Reveal.toggleOverview(false); })
-            ;
-        } );
-
-        head.js('https://hakim-static.s3.amazonaws.com/reveal-js/remotes.ne.min.js');
-    }
-})(window);
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/search/search.js b/uflacs-merge-into-ffc/doc/roadmap/plugin/search/search.js
deleted file mode 100644
index ae6582e..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/search/search.js
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Handles finding a text string anywhere in the slides and showing the next occurrence to the user
- * by navigatating to that slide and highlighting it.
- *
- * By Jon Snyder <snyder.jon at gmail.com>, February 2013
- */
-
-var RevealSearch = (function() {
-
-	var matchedSlides;
-	var currentMatchedIndex;
-	var searchboxDirty;
-	var myHilitor;
-
-// Original JavaScript code by Chirp Internet: www.chirp.com.au
-// Please acknowledge use of this code by including this header.
-// 2/2013 jon: modified regex to display any match, not restricted to word boundaries.
-
-function Hilitor(id, tag)
-{
-
-  var targetNode = document.getElementById(id) || document.body;
-  var hiliteTag = tag || "EM";
-  var skipTags = new RegExp("^(?:" + hiliteTag + "|SCRIPT|FORM|SPAN)$");
-  var colors = ["#ff6", "#a0ffff", "#9f9", "#f99", "#f6f"];
-  var wordColor = [];
-  var colorIdx = 0;
-  var matchRegex = "";
-  var matchingSlides = [];
-
-  this.setRegex = function(input)
-  {
-    input = input.replace(/^[^\w]+|[^\w]+$/g, "").replace(/[^\w'-]+/g, "|");
-    matchRegex = new RegExp("(" + input + ")","i");
-  }
-
-  this.getRegex = function()
-  {
-    return matchRegex.toString().replace(/^\/\\b\(|\)\\b\/i$/g, "").replace(/\|/g, " ");
-  }
-
-  // recursively apply word highlighting
-  this.hiliteWords = function(node)
-  {
-    if(node == undefined || !node) return;
-    if(!matchRegex) return;
-    if(skipTags.test(node.nodeName)) return;
-
-    if(node.hasChildNodes()) {
-      for(var i=0; i < node.childNodes.length; i++)
-        this.hiliteWords(node.childNodes[i]);
-    }
-    if(node.nodeType == 3) { // NODE_TEXT
-      if((nv = node.nodeValue) && (regs = matchRegex.exec(nv))) {
-      	//find the slide's section element and save it in our list of matching slides
-      	var secnode = node.parentNode;
-      	while (secnode.nodeName != 'SECTION') {
-      		secnode = secnode.parentNode;
-      	}
-      	
-      	var slideIndex = Reveal.getIndices(secnode);
-      	var slidelen = matchingSlides.length;
-      	var alreadyAdded = false;
-      	for (var i=0; i < slidelen; i++) {
-      		if ( (matchingSlides[i].h === slideIndex.h) && (matchingSlides[i].v === slideIndex.v) ) {
-      			alreadyAdded = true;
-      		}
-      	}
-      	if (! alreadyAdded) {
-      		matchingSlides.push(slideIndex);
-      	}
-      	
-        if(!wordColor[regs[0].toLowerCase()]) {
-          wordColor[regs[0].toLowerCase()] = colors[colorIdx++ % colors.length];
-        }
-
-        var match = document.createElement(hiliteTag);
-        match.appendChild(document.createTextNode(regs[0]));
-        match.style.backgroundColor = wordColor[regs[0].toLowerCase()];
-        match.style.fontStyle = "inherit";
-        match.style.color = "#000";
-
-        var after = node.splitText(regs.index);
-        after.nodeValue = after.nodeValue.substring(regs[0].length);
-        node.parentNode.insertBefore(match, after);
-      }
-    }
-  };
-
-  // remove highlighting
-  this.remove = function()
-  {
-    var arr = document.getElementsByTagName(hiliteTag);
-    while(arr.length && (el = arr[0])) {
-      el.parentNode.replaceChild(el.firstChild, el);
-    }
-  };
-
-  // start highlighting at target node
-  this.apply = function(input)
-  {
-    if(input == undefined || !input) return;
-    this.remove();
-    this.setRegex(input);
-    this.hiliteWords(targetNode);
-    return matchingSlides;
-  };
-
-}
-
-	function openSearch() {
-		//ensure the search term input dialog is visible and has focus:
-		var inputbox = document.getElementById("searchinput");
-		inputbox.style.display = "inline";
-		inputbox.focus();
-		inputbox.select();
-	}
-
-	function toggleSearch() {
-		var inputbox = document.getElementById("searchinput");
-		if (inputbox.style.display !== "inline") {
-			openSearch();
-		}
-		else {
-			inputbox.style.display = "none";
-			myHilitor.remove();
-		}
-	}
-
-	function doSearch() {
-		//if there's been a change in the search term, perform a new search:
-		if (searchboxDirty) {
-			var searchstring = document.getElementById("searchinput").value;
-
-			//find the keyword amongst the slides
-			myHilitor = new Hilitor("slidecontent");
-			matchedSlides = myHilitor.apply(searchstring);
-			currentMatchedIndex = 0;
-		}
-
-		//navigate to the next slide that has the keyword, wrapping to the first if necessary
-		if (matchedSlides.length && (matchedSlides.length <= currentMatchedIndex)) {
-			currentMatchedIndex = 0;
-		}
-		if (matchedSlides.length > currentMatchedIndex) {
-			Reveal.slide(matchedSlides[currentMatchedIndex].h, matchedSlides[currentMatchedIndex].v);
-			currentMatchedIndex++;
-		}
-	}
-
-	var dom = {};
-	dom.wrapper = document.querySelector( '.reveal' );
-
-	if( !dom.wrapper.querySelector( '.searchbox' ) ) {
-			var searchElement = document.createElement( 'div' );
-			searchElement.id = "searchinputdiv";
-			searchElement.classList.add( 'searchdiv' );
-      searchElement.style.position = 'absolute';
-      searchElement.style.top = '10px';
-      searchElement.style.left = '10px';
-      //embedded base64 search icon Designed by Sketchdock - http://www.sketchdock.com/:
-			searchElement.innerHTML = '<span><input type="search" id="searchinput" class="searchinput" style="vertical-align: top;"/><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAABGdBTUEAAK/INwWK6QAAABl0RVh0U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAJiSURBVHjatFZNaxNBGH5md+Mmu92NVdKDRipSAyqCghgQD4L4cRe86UUtAQ+eFCxoa4/25EXBFi8eBE+eRPoDhB6KgiiixdAPCEkx2pjvTXadd9yNsflwuyUDD/O+u8PzzDPvzOwyx3EwyCZhwG3gAkp7MnpjgbopjsltcD4gjuXZZKeAR348MYLYTm3LzOs/y3j3JTfZxgXWXmTuwPHIc4Vm [...]
-			dom.wrapper.appendChild( searchElement );
-	}
-
-	document.getElementById("searchbutton").addEventListener( 'click', function(event) {
-		doSearch();
-	}, false );
-
-	document.getElementById("searchinput").addEventListener( 'keyup', function( event ) {
-		switch (event.keyCode) {
-			case 13:
-				event.preventDefault();
-				doSearch();
-				searchboxDirty = false;
-				break;
-			default:
-				searchboxDirty = true;
-		}
-	}, false );
-
-	// Open the search when the 's' key is hit (yes, this conflicts with the notes plugin, disabling for now)
-	/*
-	document.addEventListener( 'keydown', function( event ) {
-		// Disregard the event if the target is editable or a
-		// modifier is present
-		if ( document.querySelector( ':focus' ) !== null || event.shiftKey || event.altKey || event.ctrlKey || event.metaKey ) return;
-
-		if( event.keyCode === 83 ) {
-			event.preventDefault();
-			openSearch();
-		}
-	}, false );
-*/
-	return { open: openSearch };
-})();
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/tagcloud/tagcloud.js b/uflacs-merge-into-ffc/doc/roadmap/plugin/tagcloud/tagcloud.js
deleted file mode 100644
index 833683c..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/tagcloud/tagcloud.js
+++ /dev/null
@@ -1,21 +0,0 @@
-// tagcloud
-(function(){
-[].forEach.call( document.querySelectorAll('[tagcloud]'), function(cloud){
-    cloud.innerHTML = '<span>' + cloud.innerHTML.split(/\n/).join('</span> <span>') + '</span>';
-    [].forEach.call( cloud.querySelectorAll('span'), function(elem){
-        var prctnge = Math.random() * 150 + 50;
-        if (cloud.hasAttribute('large')) {
-            prctnge = prctnge * 1.2;
-        }
-        elem.style.fontSize = prctnge + '%';
-        if (cloud.hasAttribute('bw')) {
-            var col = Math.round(Math.random() * 155 + 100);
-            elem.style.color = 'rgb('+ col  +',' + col + ',' + col + ')'
-        } else {
-            elem.style.color = 'hsl('+ Math.random()*360 +', 40%, 50%)'
-        }
-        elem.classList.add('clouditem')
-    });
-});
-}
-)();
\ No newline at end of file
diff --git a/uflacs-merge-into-ffc/doc/roadmap/plugin/zoom-js/zoom.js b/uflacs-merge-into-ffc/doc/roadmap/plugin/zoom-js/zoom.js
deleted file mode 100644
index cd5b06f..0000000
--- a/uflacs-merge-into-ffc/doc/roadmap/plugin/zoom-js/zoom.js
+++ /dev/null
@@ -1,258 +0,0 @@
-// Custom reveal.js integration
-(function(){
-	var isEnabled = true;
-
-	document.querySelector( '.reveal' ).addEventListener( 'mousedown', function( event ) {
-		var modifier = ( Reveal.getConfig().zoomKey ? Reveal.getConfig().zoomKey : 'alt' ) + 'Key';
-
-		if( event[ modifier ] && isEnabled ) {
-			event.preventDefault();
-			zoom.to({ element: event.target, pan: false });
-		}
-	} );
-
-	Reveal.addEventListener( 'overviewshown', function() { isEnabled = false; } );
-	Reveal.addEventListener( 'overviewhidden', function() { isEnabled = true; } );
-})();
-
-/*!
- * zoom.js 0.2 (modified version for use with reveal.js)
- * http://lab.hakim.se/zoom-js
- * MIT licensed
- *
- * Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
- */
-var zoom = (function(){
-
-	// The current zoom level (scale)
-	var level = 1;
-
-	// The current mouse position, used for panning
-	var mouseX = 0,
-		mouseY = 0;
-
-	// Timeout before pan is activated
-	var panEngageTimeout = -1,
-		panUpdateInterval = -1;
-
-	var currentOptions = null;
-
-	// Check for transform support so that we can fallback otherwise
-	var supportsTransforms = 	'WebkitTransform' in document.body.style ||
-								'MozTransform' in document.body.style ||
-								'msTransform' in document.body.style ||
-								'OTransform' in document.body.style ||
-								'transform' in document.body.style;
-
-	if( supportsTransforms ) {
-		// The easing that will be applied when we zoom in/out
-		document.body.style.transition = 'transform 0.8s ease';
-		document.body.style.OTransition = '-o-transform 0.8s ease';
-		document.body.style.msTransition = '-ms-transform 0.8s ease';
-		document.body.style.MozTransition = '-moz-transform 0.8s ease';
-		document.body.style.WebkitTransition = '-webkit-transform 0.8s ease';
-	}
-
-	// Zoom out if the user hits escape
-	document.addEventListener( 'keyup', function( event ) {
-		if( level !== 1 && event.keyCode === 27 ) {
-			zoom.out();
-		}
-	}, false );
-
-	// Monitor mouse movement for panning
-	document.addEventListener( 'mousemove', function( event ) {
-		if( level !== 1 ) {
-			mouseX = event.clientX;
-			mouseY = event.clientY;
-		}
-	}, false );
-
-	/**
-	 * Applies the CSS required to zoom in, prioritizes use of CSS3
-	 * transforms but falls back on zoom for IE.
-	 *
-	 * @param {Number} pageOffsetX
-	 * @param {Number} pageOffsetY
-	 * @param {Number} elementOffsetX
-	 * @param {Number} elementOffsetY
-	 * @param {Number} scale
-	 */
-	function magnify( pageOffsetX, pageOffsetY, elementOffsetX, elementOffsetY, scale ) {
-
-		if( supportsTransforms ) {
-			var origin = pageOffsetX +'px '+ pageOffsetY +'px',
-				transform = 'translate('+ -elementOffsetX +'px,'+ -elementOffsetY +'px) scale('+ scale +')';
-
-			document.body.style.transformOrigin = origin;
-			document.body.style.OTransformOrigin = origin;
-			document.body.style.msTransformOrigin = origin;
-			document.body.style.MozTransformOrigin = origin;
-			document.body.style.WebkitTransformOrigin = origin;
-
-			document.body.style.transform = transform;
-			document.body.style.OTransform = transform;
-			document.body.style.msTransform = transform;
-			document.body.style.MozTransform = transform;
-			document.body.style.WebkitTransform = transform;
-		}
-		else {
-			// Reset all values
-			if( scale === 1 ) {
-				document.body.style.position = '';
-				document.body.style.left = '';
-				document.body.style.top = '';
-				document.body.style.width = '';
-				document.body.style.height = '';
-				document.body.style.zoom = '';
-			}
-			// Apply scale
-			else {
-				document.body.style.position = 'relative';
-				document.body.style.left = ( - ( pageOffsetX + elementOffsetX ) / scale ) + 'px';
-				document.body.style.top = ( - ( pageOffsetY + elementOffsetY ) / scale ) + 'px';
-				document.body.style.width = ( scale * 100 ) + '%';
-				document.body.style.height = ( scale * 100 ) + '%';
-				document.body.style.zoom = scale;
-			}
-		}
-
-		level = scale;
-
-		if( level !== 1 && document.documentElement.classList ) {
-			document.documentElement.classList.add( 'zoomed' );
-		}
-		else {
-			document.documentElement.classList.remove( 'zoomed' );
-		}
-	}
-
-	/**
-	 * Pan the document when the mosue cursor approaches the edges
-	 * of the window.
-	 */
-	function pan() {
-		var range = 0.12,
-			rangeX = window.innerWidth * range,
-			rangeY = window.innerHeight * range,
-			scrollOffset = getScrollOffset();
-
-		// Up
-		if( mouseY < rangeY ) {
-			window.scroll( scrollOffset.x, scrollOffset.y - ( 1 - ( mouseY / rangeY ) ) * ( 14 / level ) );
-		}
-		// Down
-		else if( mouseY > window.innerHeight - rangeY ) {
-			window.scroll( scrollOffset.x, scrollOffset.y + ( 1 - ( window.innerHeight - mouseY ) / rangeY ) * ( 14 / level ) );
-		}
-
-		// Left
-		if( mouseX < rangeX ) {
-			window.scroll( scrollOffset.x - ( 1 - ( mouseX / rangeX ) ) * ( 14 / level ), scrollOffset.y );
-		}
-		// Right
-		else if( mouseX > window.innerWidth - rangeX ) {
-			window.scroll( scrollOffset.x + ( 1 - ( window.innerWidth - mouseX ) / rangeX ) * ( 14 / level ), scrollOffset.y );
-		}
-	}
-
-	function getScrollOffset() {
-		return {
-			x: window.scrollX !== undefined ? window.scrollX : window.pageXOffset,
-			y: window.scrollY !== undefined ? window.scrollY : window.pageXYffset
-		}
-	}
-
-	return {
-		/**
-		 * Zooms in on either a rectangle or HTML element.
-		 *
-		 * @param {Object} options
-		 *   - element: HTML element to zoom in on
-		 *   OR
-		 *   - x/y: coordinates in non-transformed space to zoom in on
-		 *   - width/height: the portion of the screen to zoom in on
-		 *   - scale: can be used instead of width/height to explicitly set scale
-		 */
-		to: function( options ) {
-			// Due to an implementation limitation we can't zoom in
-			// to another element without zooming out first
-			if( level !== 1 ) {
-				zoom.out();
-			}
-			else {
-				options.x = options.x || 0;
-				options.y = options.y || 0;
-
-				// If an element is set, that takes precedence
-				if( !!options.element ) {
-					// Space around the zoomed in element to leave on screen
-					var padding = 20;
-
-					options.width = options.element.getBoundingClientRect().width + ( padding * 2 );
-					options.height = options.element.getBoundingClientRect().height + ( padding * 2 );
-					options.x = options.element.getBoundingClientRect().left - padding;
-					options.y = options.element.getBoundingClientRect().top - padding;
-				}
-
-				// If width/height values are set, calculate scale from those values
-				if( options.width !== undefined && options.height !== undefined ) {
-					options.scale = Math.max( Math.min( window.innerWidth / options.width, window.innerHeight / options.height ), 1 );
-				}
-
-				if( options.scale > 1 ) {
-					options.x *= options.scale;
-					options.y *= options.scale;
-
-					var scrollOffset = getScrollOffset();
-
-					if( options.element ) {
-						scrollOffset.x -= ( window.innerWidth - ( options.width * options.scale ) ) / 2;
-					}
-
-					magnify( scrollOffset.x, scrollOffset.y, options.x, options.y, options.scale );
-
-					if( options.pan !== false ) {
-
-						// Wait with engaging panning as it may conflict with the
-						// zoom transition
-						panEngageTimeout = setTimeout( function() {
-							panUpdateInterval = setInterval( pan, 1000 / 60 );
-						}, 800 );
-
-					}
-				}
-
-				currentOptions = options;
-			}
-		},
-
-		/**
-		 * Resets the document zoom state to its default.
-		 */
-		out: function() {
-			clearTimeout( panEngageTimeout );
-			clearInterval( panUpdateInterval );
-
-			var scrollOffset = getScrollOffset();
-
-			if( currentOptions && currentOptions.element ) {
-				scrollOffset.x -= ( window.innerWidth - ( currentOptions.width * currentOptions.scale ) ) / 2;
-			}
-
-			magnify( scrollOffset.x, scrollOffset.y, 0, 0, 1 );
-
-			level = 1;
-		},
-
-		// Alias
-		magnify: function( options ) { this.to( options ) },
-		reset: function() { this.out() },
-
-		zoomLevel: function() {
-			return level;
-		}
-	}
-
-})();
-
diff --git a/uflacs-merge-into-ffc/doc/sphinx/Makefile b/uflacs-merge-into-ffc/doc/sphinx/Makefile
deleted file mode 100644
index 8518512..0000000
--- a/uflacs-merge-into-ffc/doc/sphinx/Makefile
+++ /dev/null
@@ -1,177 +0,0 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS    =
-SPHINXBUILD   = sphinx-build
-PAPER         =
-BUILDDIR      = build
-
-# User-friendly check for sphinx-build
-ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
-$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
-endif
-
-# Internal variables.
-PAPEROPT_a4     = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
-
-help:
-	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  html       to make standalone HTML files"
-	@echo "  dirhtml    to make HTML files named index.html in directories"
-	@echo "  singlehtml to make a single large HTML file"
-	@echo "  pickle     to make pickle files"
-	@echo "  json       to make JSON files"
-	@echo "  htmlhelp   to make HTML files and a HTML help project"
-	@echo "  qthelp     to make HTML files and a qthelp project"
-	@echo "  devhelp    to make HTML files and a Devhelp project"
-	@echo "  epub       to make an epub"
-	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
-	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
-	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
-	@echo "  text       to make text files"
-	@echo "  man        to make manual pages"
-	@echo "  texinfo    to make Texinfo files"
-	@echo "  info       to make Texinfo files and run them through makeinfo"
-	@echo "  gettext    to make PO message catalogs"
-	@echo "  changes    to make an overview of all changed/added/deprecated items"
-	@echo "  xml        to make Docutils-native XML files"
-	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
-	@echo "  linkcheck  to check all external links for integrity"
-	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
-
-clean:
-	rm -rf $(BUILDDIR)/*
-
-html:
-	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
-	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-singlehtml:
-	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
-	@echo
-	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-pickle:
-	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
-	@echo
-	@echo "Build finished; now you can process the pickle files."
-
-json:
-	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
-	@echo
-	@echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
-	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
-	@echo
-	@echo "Build finished; now you can run HTML Help Workshop with the" \
-	      ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
-	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
-	@echo
-	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
-	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
-	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/UFLAnalyserandCompilerSystemUFLACS.qhcp"
-	@echo "To view the help file:"
-	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/UFLAnalyserandCompilerSystemUFLACS.qhc"
-
-devhelp:
-	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
-	@echo
-	@echo "Build finished."
-	@echo "To view the help file:"
-	@echo "# mkdir -p $$HOME/.local/share/devhelp/UFLAnalyserandCompilerSystemUFLACS"
-	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/UFLAnalyserandCompilerSystemUFLACS"
-	@echo "# devhelp"
-
-epub:
-	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
-	@echo
-	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-
-latex:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo
-	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
-	@echo "Run \`make' in that directory to run these through (pdf)latex" \
-	      "(use \`make latexpdf' here to do that automatically)."
-
-latexpdf:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo "Running LaTeX files through pdflatex..."
-	$(MAKE) -C $(BUILDDIR)/latex all-pdf
-	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-latexpdfja:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo "Running LaTeX files through platex and dvipdfmx..."
-	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
-	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-text:
-	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
-	@echo
-	@echo "Build finished. The text files are in $(BUILDDIR)/text."
-
-man:
-	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
-	@echo
-	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
-
-texinfo:
-	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
-	@echo
-	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
-	@echo "Run \`make' in that directory to run these through makeinfo" \
-	      "(use \`make info' here to do that automatically)."
-
-info:
-	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
-	@echo "Running Texinfo files through makeinfo..."
-	make -C $(BUILDDIR)/texinfo info
-	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
-
-gettext:
-	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
-	@echo
-	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
-
-changes:
-	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
-	@echo
-	@echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
-	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
-	@echo
-	@echo "Link check complete; look for any errors in the above output " \
-	      "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
-	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
-	@echo "Testing of doctests in the sources finished, look at the " \
-	      "results in $(BUILDDIR)/doctest/output.txt."
-
-xml:
-	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
-	@echo
-	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
-
-pseudoxml:
-	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
-	@echo
-	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/uflacs-merge-into-ffc/doc/sphinx/README b/uflacs-merge-into-ffc/doc/sphinx/README
deleted file mode 100644
index 5e9d338..0000000
--- a/uflacs-merge-into-ffc/doc/sphinx/README
+++ /dev/null
@@ -1,27 +0,0 @@
-====================
-Sphinx documentation
-====================
-
-UFLACS is documented using Sphinx and reStructured text. The
-documnentation is hosted at http://fenics-uflacs.readthedocs.org/. The
-online documentation is automatically updated upon pushes to the
-UFLACS master branch.
-
-
-Updating the API documentation
-==============================
-
-If the UFLACS API is changed, the script::
-
-    ./generate-apidoc
-
-must be run to update the autodoc file. The script can be run from any
-directory.
-
-
-Building the documentation locally
-==================================
-
-The HTML documentation can be built locally using::
-
-    make html
diff --git a/uflacs-merge-into-ffc/doc/sphinx/generate-apidoc b/uflacs-merge-into-ffc/doc/sphinx/generate-apidoc
deleted file mode 100755
index b4c319d..0000000
--- a/uflacs-merge-into-ffc/doc/sphinx/generate-apidoc
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (C) 2015 Garth N. Wells
-#
-# This file is part of UFLACS.
-#
-# UFL is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFL is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFL. If not, see <http://www.gnu.org/licenses/>.
-
-# This script calls sphinx-apidoc to generate files ready for autodoc
-
-echo ""
-echo "--- Generating UFLACS autodoc RST files"
-echo ""
-
-# Get location of Sphinx files
-SPHINX_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-SPHINX_SOURCE_DIR=$SPHINX_DIR/source
-
-# Generate .rst files ready for autodoc
-sphinx-apidoc -f -d 1 -o $SPHINX_SOURCE_DIR/api-doc $SPHINX_DIR/../../uflacs
diff --git a/uflacs-merge-into-ffc/doc/sphinx/requirements.txt b/uflacs-merge-into-ffc/doc/sphinx/requirements.txt
deleted file mode 100644
index 344e072..0000000
--- a/uflacs-merge-into-ffc/doc/sphinx/requirements.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-numpy
--e git+https://bitbucket.org/fenics-project/instant.git#egg=instant
--e git+https://bitbucket.org/fenics-project/fiat.git#egg=fiat
--e git+https://bitbucket.org/fenics-project/ufl.git#egg=ufl
--e git+https://bitbucket.org/fenics-project/ffc.git#egg=ffc
diff --git a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/modules.rst b/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/modules.rst
deleted file mode 100644
index 6bdfb3f..0000000
--- a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/modules.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-uflacs
-======
-
-.. toctree::
-   :maxdepth: 1
-
-   uflacs
diff --git a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.analysis.rst b/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.analysis.rst
deleted file mode 100644
index 3ff2854..0000000
--- a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.analysis.rst
+++ /dev/null
@@ -1,102 +0,0 @@
-uflacs.analysis package
-=======================
-
-Submodules
-----------
-
-uflacs.analysis.expr_shapes module
-----------------------------------
-
-.. automodule:: uflacs.analysis.expr_shapes
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.factorization module
-------------------------------------
-
-.. automodule:: uflacs.analysis.factorization
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.graph module
-----------------------------
-
-.. automodule:: uflacs.analysis.graph
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.graph_dependencies module
------------------------------------------
-
-.. automodule:: uflacs.analysis.graph_dependencies
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.graph_rebuild module
-------------------------------------
-
-.. automodule:: uflacs.analysis.graph_rebuild
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.graph_ssa module
---------------------------------
-
-.. automodule:: uflacs.analysis.graph_ssa
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.graph_symbols module
-------------------------------------
-
-.. automodule:: uflacs.analysis.graph_symbols
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.graph_vertices module
--------------------------------------
-
-.. automodule:: uflacs.analysis.graph_vertices
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.indexing module
--------------------------------
-
-.. automodule:: uflacs.analysis.indexing
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.modified_terminals module
------------------------------------------
-
-.. automodule:: uflacs.analysis.modified_terminals
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.analysis.valuenumbering module
--------------------------------------
-
-.. automodule:: uflacs.analysis.valuenumbering
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: uflacs.analysis
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.backends.ffc.rst b/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.backends.ffc.rst
deleted file mode 100644
index e92cefb..0000000
--- a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.backends.ffc.rst
+++ /dev/null
@@ -1,62 +0,0 @@
-uflacs.backends.ffc package
-===========================
-
-Submodules
-----------
-
-uflacs.backends.ffc.access module
----------------------------------
-
-.. automodule:: uflacs.backends.ffc.access
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ffc.common module
----------------------------------
-
-.. automodule:: uflacs.backends.ffc.common
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ffc.definitions module
---------------------------------------
-
-.. automodule:: uflacs.backends.ffc.definitions
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ffc.ffc_compiler module
----------------------------------------
-
-.. automodule:: uflacs.backends.ffc.ffc_compiler
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ffc.generation module
--------------------------------------
-
-.. automodule:: uflacs.backends.ffc.generation
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ffc.representation module
------------------------------------------
-
-.. automodule:: uflacs.backends.ffc.representation
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: uflacs.backends.ffc
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.backends.rst b/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.backends.rst
deleted file mode 100644
index 8553451..0000000
--- a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.backends.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-uflacs.backends package
-=======================
-
-Subpackages
------------
-
-.. toctree::
-
-    uflacs.backends.ffc
-    uflacs.backends.ufc
-
-Module contents
----------------
-
-.. automodule:: uflacs.backends
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.backends.ufc.rst b/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.backends.ufc.rst
deleted file mode 100644
index 865e061..0000000
--- a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.backends.ufc.rst
+++ /dev/null
@@ -1,94 +0,0 @@
-uflacs.backends.ufc package
-===========================
-
-Submodules
-----------
-
-uflacs.backends.ufc.coordinate_mapping module
----------------------------------------------
-
-.. automodule:: uflacs.backends.ufc.coordinate_mapping
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ufc.dofmap module
----------------------------------
-
-.. automodule:: uflacs.backends.ufc.dofmap
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ufc.evaluatebasis module
-----------------------------------------
-
-.. automodule:: uflacs.backends.ufc.evaluatebasis
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ufc.finite_element module
------------------------------------------
-
-.. automodule:: uflacs.backends.ufc.finite_element
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ufc.form module
--------------------------------
-
-.. automodule:: uflacs.backends.ufc.form
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ufc.generator module
-------------------------------------
-
-.. automodule:: uflacs.backends.ufc.generator
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ufc.generators module
--------------------------------------
-
-.. automodule:: uflacs.backends.ufc.generators
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ufc.integrals module
-------------------------------------
-
-.. automodule:: uflacs.backends.ufc.integrals
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ufc.templates module
-------------------------------------
-
-.. automodule:: uflacs.backends.ufc.templates
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.backends.ufc.utils module
---------------------------------
-
-.. automodule:: uflacs.backends.ufc.utils
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: uflacs.backends.ufc
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.datastructures.rst b/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.datastructures.rst
deleted file mode 100644
index 55554ed..0000000
--- a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.datastructures.rst
+++ /dev/null
@@ -1,38 +0,0 @@
-uflacs.datastructures package
-=============================
-
-Submodules
-----------
-
-uflacs.datastructures.arrays module
------------------------------------
-
-.. automodule:: uflacs.datastructures.arrays
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.datastructures.crs module
---------------------------------
-
-.. automodule:: uflacs.datastructures.crs
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.datastructures.types module
-----------------------------------
-
-.. automodule:: uflacs.datastructures.types
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: uflacs.datastructures
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.elementtables.rst b/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.elementtables.rst
deleted file mode 100644
index 71f3b11..0000000
--- a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.elementtables.rst
+++ /dev/null
@@ -1,30 +0,0 @@
-uflacs.elementtables package
-============================
-
-Submodules
-----------
-
-uflacs.elementtables.table_utils module
----------------------------------------
-
-.. automodule:: uflacs.elementtables.table_utils
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.elementtables.terminaltables module
-------------------------------------------
-
-.. automodule:: uflacs.elementtables.terminaltables
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: uflacs.elementtables
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.generation.rst b/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.generation.rst
deleted file mode 100644
index 4582d0c..0000000
--- a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.generation.rst
+++ /dev/null
@@ -1,22 +0,0 @@
-uflacs.generation package
-=========================
-
-Submodules
-----------
-
-uflacs.generation.integralgenerator module
-------------------------------------------
-
-.. automodule:: uflacs.generation.integralgenerator
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: uflacs.generation
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.language.rst b/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.language.rst
deleted file mode 100644
index 39dff6a..0000000
--- a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.language.rst
+++ /dev/null
@@ -1,62 +0,0 @@
-uflacs.language package
-=======================
-
-Submodules
-----------
-
-uflacs.language.cnodes module
------------------------------
-
-.. automodule:: uflacs.language.cnodes
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.language.format_lines module
------------------------------------
-
-.. automodule:: uflacs.language.format_lines
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.language.format_value module
------------------------------------
-
-.. automodule:: uflacs.language.format_value
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.language.precedence module
----------------------------------
-
-.. automodule:: uflacs.language.precedence
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.language.typenodes module
---------------------------------
-
-.. automodule:: uflacs.language.typenodes
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-uflacs.language.ufl_to_cnodes module
-------------------------------------
-
-.. automodule:: uflacs.language.ufl_to_cnodes
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: uflacs.language
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.representation.rst b/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.representation.rst
deleted file mode 100644
index 2708bb6..0000000
--- a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.representation.rst
+++ /dev/null
@@ -1,22 +0,0 @@
-uflacs.representation package
-=============================
-
-Submodules
-----------
-
-uflacs.representation.compute_expr_ir module
---------------------------------------------
-
-.. automodule:: uflacs.representation.compute_expr_ir
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: uflacs.representation
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.rst b/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.rst
deleted file mode 100644
index 4da0a02..0000000
--- a/uflacs-merge-into-ffc/doc/sphinx/source/api-doc/uflacs.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-uflacs package
-==============
-
-Subpackages
------------
-
-.. toctree::
-
-    uflacs.analysis
-    uflacs.backends
-    uflacs.datastructures
-    uflacs.elementtables
-    uflacs.generation
-    uflacs.language
-    uflacs.representation
-
-Submodules
-----------
-
-uflacs.params module
---------------------
-
-.. automodule:: uflacs.params
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: uflacs
-    :members:
-    :undoc-members:
-    :show-inheritance:
diff --git a/uflacs-merge-into-ffc/doc/sphinx/source/conf.py b/uflacs-merge-into-ffc/doc/sphinx/source/conf.py
deleted file mode 100644
index 8fa1957..0000000
--- a/uflacs-merge-into-ffc/doc/sphinx/source/conf.py
+++ /dev/null
@@ -1,272 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# UFL Analyser and Compiler System (UFLACS) documentation build configuration file, created by
-# sphinx-quickstart on Wed Nov  4 14:38:57 2015.
-#
-# This file is execfile()d with the current directory set to its
-# containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys
-import os
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
-
-# -- General configuration ------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-# ones.
-extensions = [
-    'sphinx.ext.autodoc',
-    'sphinx.ext.doctest',
-    'sphinx.ext.intersphinx',
-    'sphinx.ext.coverage',
-    'sphinx.ext.mathjax',
-    'sphinx.ext.viewcode',
-]
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'UFL Analyser and Compiler System (UFLACS)'
-copyright = u'2015, FEniCS Project'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-import uflacs
-uflacs_version = uflacs.__version__
-
-# The short X.Y version.
-version = uflacs_version
-# The full version, including alpha/beta/rc tags.
-release = uflacs_version
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = []
-
-# The reST default role (used for this markup: `text`) to use for all
-# documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-# If true, keep warnings as "system message" paragraphs in the built documents.
-#keep_warnings = False
-
-
-# -- Options for HTML output ----------------------------------------------
-
-# The theme to use for HTML and HTML Help pages.  See the documentation for
-# a list of builtin themes.
-html_theme = 'default'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further.  For a list of options available for each theme, see the
-# documentation.
-#html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents.  If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-
-# A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# Add any extra paths that contain custom files (such as robots.txt or
-# .htaccess) here, relative to this directory. These files are copied
-# directly to the root of the documentation.
-#html_extra_path = []
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_domain_indices = True
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it.  The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'UFLAnalyserandCompilerSystemUFLACSdoc'
-
-
-# -- Options for LaTeX output ---------------------------------------------
-
-latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title,
-#  author, documentclass [howto, manual, or own class]).
-latex_documents = [
-  ('index', 'UFLAnalyserandCompilerSystemUFLACS.tex', u'UFL Analyser and Compiler System (UFLACS) Documentation',
-   u'FEniCS Project', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# If true, show page references after internal links.
-#latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-#latex_show_urls = False
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_domain_indices = True
-
-
-# -- Options for manual page output ---------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
-    ('index', 'uflanalyserandcompilersystemuflacs', u'UFL Analyser and Compiler System (UFLACS) Documentation',
-     [u'FEniCS Project'], 1)
-]
-
-# If true, show URL addresses after external links.
-#man_show_urls = False
-
-
-# -- Options for Texinfo output -------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-#  dir menu entry, description, category)
-texinfo_documents = [
-  ('index', 'UFLAnalyserandCompilerSystemUFLACS', u'UFL Analyser and Compiler System (UFLACS) Documentation',
-   u'FEniCS Project', 'UFLAnalyserandCompilerSystemUFLACS', 'One line description of project.',
-   'Miscellaneous'),
-]
-
-# Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
-
-# If false, no module index is generated.
-#texinfo_domain_indices = True
-
-# How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
-
-# If true, do not generate a @detailmenu in the "Top" node's menu.
-#texinfo_no_detailmenu = False
-
-
-# Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/uflacs-merge-into-ffc/doc/sphinx/source/index.rst b/uflacs-merge-into-ffc/doc/sphinx/source/index.rst
deleted file mode 100644
index 081699a..0000000
--- a/uflacs-merge-into-ffc/doc/sphinx/source/index.rst
+++ /dev/null
@@ -1,56 +0,0 @@
-.. title:: UFL Analyser and Compiler System
-
-UFL Analyser and Compiler System (UFLACS)
-=========================================
-
-
-Uflacs, the UFL Analyser and Compiler System, is a collection of
-algorithms for processing symbolic UFL forms and expressions. The main
-feature is efficient translation of tensor intensive symbolic
-expressions into a low level expression representation and C++ code.
-It is part of the FEniCS Project (http://fenicsproject.org).
-
-
-Installing
-----------
-
-Either install to default Python location as root::
-
-  sudo python setup.py install
-
-Or install to your own Python path directory::
-
-  python setup.py install --prefix=/path/to/my/own/site-packages
-
-
-Help and support
-----------------
-
-Send help requests and questions to fenics-support at googlegroups.com,
-and send feature requests and questions to
-fenics-dev at googlegroups.com.
-
-
-Development and reporting bugs
-------------------------------
-
-The git source repository for UFLACS is located at
-https://bitbucket.org/fenics-project/uflacsa.  Bugs can be registered
-at https://bitbucket.org/fenics-project/uflacs/issues.
-
-For general UFLACS development questions and to make feature requests,
-use fenics at fenicsproject.org.
-
-
-Documentation
--------------
-
-.. toctree::
-   :titlesonly:
-
-   api-doc/uflacs
-   releases
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
diff --git a/uflacs-merge-into-ffc/release.conf b/uflacs-merge-into-ffc/release.conf
deleted file mode 100644
index 8b35b9e..0000000
--- a/uflacs-merge-into-ffc/release.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-# Configuration file for fenics-release
-
-PACKAGE="uflacs"
-BRANCH="master"
-FILES="ChangeLog uflacs/__init__.py setup.py"
-POST_FILES="ChangeLog uflacs/__init__.py setup.py"
diff --git a/uflacs-merge-into-ffc/setup.py b/uflacs-merge-into-ffc/setup.py
deleted file mode 100755
index ad8f196..0000000
--- a/uflacs-merge-into-ffc/setup.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env python
-
-try:
-    from setuptools import setup
-except ImportError:
-    from distutils.core import setup
-
-import sys
-import re
-
-if sys.version_info < (2, 7):
-    print("Python 2.7 or higher required, please upgrade.")
-    sys.exit(1)
-
-version = re.findall('__version__ = "(.*)"',
-                     open('uflacs/__init__.py', 'r').read())[0]
-
-packages = [
-    "uflacs",
-    "uflacs.language",
-    "uflacs.datastructures",
-    "uflacs.analysis",
-    "uflacs.elementtables",
-    "uflacs.generation",
-    "uflacs.representation",
-    "uflacs.backends",
-    "uflacs.backends.ffc",
-    "uflacs.backends.ufc",
-    ]
-
-
-CLASSIFIERS = """
-Development Status :: 3 - Alpha
-Environment :: Console
-Intended Audience :: Developers
-Intended Audience :: Science/Research
-Programming Language :: Python :: 2.7
-License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
-Topic :: Scientific/Engineering :: Mathematics
-Topic :: Software Development :: Compilers
-Topic :: Software Development :: Libraries :: Python Modules
-"""
-classifiers = CLASSIFIERS.split('\n')[1:-1]
-
-setup(name="uflacs",
-      version=version,
-      description="UFL Analyser and Compiler System",
-      author="Martin Sandve Alnaes",
-      author_email="martinal at simula.no",
-      url="http://bitbucket.com/fenics-project/uflacs",
-      classifiers=classifiers,
-      packages=packages,
-      package_dir={"uflacs": "uflacs"},
-      install_requires = ["numpy", "six", "ufl==1.7.0dev"],
-      #data_files=[(os.path.join("share", "man", "man1"),
-      #             [os.path.join("doc", "man", "man1", "uflacs.1.gz")])]
-    )
diff --git a/uflacs/analysis/indexing.py b/uflacs/analysis/indexing.py
deleted file mode 100644
index 0ab0ed8..0000000
--- a/uflacs/analysis/indexing.py
+++ /dev/null
@@ -1,292 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
-
-"""Algorithms for working with multiindices."""
-
-
-# FIXME: Clean up duplicates in this module, cover with tests and profile
-
-
-from six.moves import xrange as range
-
-from ufl import product
-from ufl.utils.sorting import sorted_by_count
-from ufl.permutation import compute_indices
-from ufl.utils.indexflattening import shape_to_strides, flatten_multiindex
-from ufl.classes import ComponentTensor
-from ufl.classes import FixedIndex
-from ufl.classes import Index
-from ufl.classes import Indexed
-
-
-def map_indexed_arg_components(indexed):  # FIXME: This is the one in use. Is it the best?
-    assert isinstance(indexed, Indexed)
-    e1 = indexed
-    e2, mi = e1.ufl_operands
-    d = _map_indexed_components(e2, e1, mi)
-    assert all(isinstance(x, int) for x in d)
-    assert len(set(d)) == len(d)
-    return d
-
-
-def _map_indexed_components(tensor, indexed, multiindex):
-    e2 = tensor
-    e1 = indexed  # e1 = e2[multiindex]
-
-    # Get tensor and index shape
-    sh1 = e1.ufl_shape
-    sh2 = e2.ufl_shape
-    fi1 = e1.ufl_free_indices
-    fi2 = e2.ufl_free_indices
-    fid1 = e1.ufl_index_dimensions
-    fid2 = e2.ufl_index_dimensions
-
-    # Compute regular and total shape
-    tsh1 = sh1 + fid1
-    tsh2 = sh2 + fid2
-    # r1 = len(tsh1)
-    r2 = len(tsh2)
-    # str1 = shape_to_strides(tsh1)
-    str2 = shape_to_strides(tsh2)
-    assert not sh1
-    assert sh2  # Must have shape to be indexed in the first place
-    assert product(tsh1) <= product(tsh2)
-
-    # Build map from fi2/fid2 position (-offset nmui) to fi1/fid1 position
-    ind2_to_ind1_map = [None] * len(fi2)
-    for k, i in enumerate(fi2):
-        ind2_to_ind1_map[k] = fi1.index(i)
-
-    # Build map from fi1/fid1 position to mi position
-    nmui = len(multiindex)
-    multiindex_to_ind1_map = [None] * nmui
-    for k, i in enumerate(multiindex):
-        if isinstance(i, Index):
-            multiindex_to_ind1_map[k] = fi1.index(i.count())
-
-    # Build map from flattened e1 component to flattened e2 component
-    perm1 = compute_indices(tsh1)
-    ni1 = product(tsh1)
-
-    # Situation: e1 = e2[mi]
-    d1 = [None] * ni1
-    p2 = [None] * r2
-    assert len(sh2) == nmui
-    #p2ks = set()
-    for k, i in enumerate(multiindex):
-        if isinstance(i, FixedIndex):
-            p2[k] = int(i)
-            #p2ks.add(k)
-    for c1, p1 in enumerate(perm1):
-        for k, i in enumerate(multiindex):
-            if isinstance(i, Index):
-                p2[k] = p1[multiindex_to_ind1_map[k]]
-                #p2ks.add(k)
-        for k, i in enumerate(ind2_to_ind1_map):
-            p2[nmui + k] = p1[i]
-            #p2ks.add(nmui + k)
-        c2 = flatten_multiindex(p2, str2)
-        d1[c1] = c2
-
-    return d1
-
-
-def map_component_tensor_arg_components(component_tensor):  # FIXME: This is the one in use. Is it the best?
-    assert isinstance(component_tensor, ComponentTensor)
-    e2 = component_tensor
-    e1, mi = e2.ufl_operands
-    d = _map_component_tensor_components(e2, e1, mi)
-    assert all(isinstance(x, int) for x in d)
-    assert len(set(d)) == len(d)
-    return d
-
-
-def _map_component_tensor_components(tensor, indexed, multiindex):
-    e1 = indexed
-    e2 = tensor  # e2 = as_tensor(e1, multiindex)
-    mi = [i for i in multiindex if isinstance(i, Index)]
-
-    # Get tensor and index shape
-    sh1 = e1.ufl_shape
-    sh2 = e2.ufl_shape
-    fi1 = e1.ufl_free_indices
-    fi2 = e2.ufl_free_indices
-    fid1 = e1.ufl_index_dimensions
-    fid2 = e2.ufl_index_dimensions
-
-    # Compute regular and total shape
-    tsh1 = sh1 + fid1
-    tsh2 = sh2 + fid2
-    r1 = len(tsh1)
-    r2 = len(tsh2)
-    str1 = shape_to_strides(tsh1)
-    # str2 = shape_to_strides(tsh2)
-    assert not sh1
-    assert sh2
-    assert len(mi) == len(multiindex)
-    assert product(tsh1) == product(tsh2)
-    assert fi1
-
-    assert all(i in fi1 for i in fi2)
-
-    nmui = len(multiindex)
-    assert nmui == len(sh2)
-
-    # Build map from fi2/fid2 position (-offset nmui) to fi1/fid1 position
-    p2_to_p1_map = [None] * r2
-    for k, i in enumerate(fi2):
-        p2_to_p1_map[k + nmui] = fi1.index(i)
-
-    # Build map from fi1/fid1 position to mi position
-    for k, i in enumerate(mi):
-        p2_to_p1_map[k] = fi1.index(mi[k].count())
-
-    # Build map from flattened e1 component to flattened e2 component
-    perm2 = compute_indices(tsh2)
-    ni2 = product(tsh2)
-
-    # Situation: e2 = as_tensor(e1, mi)
-    d2 = [None] * ni2
-    p1 = [None] * r1
-    for c2, p2 in enumerate(perm2):
-        for k2, k1 in enumerate(p2_to_p1_map):
-            p1[k1] = p2[k2]
-        c1 = flatten_multiindex(p1, str1)
-        d2[c2] = c1
-
-    return d2
-
-
-def __map_indexed_to_arg_components(indexed):
-    e1 = indexed
-    assert isinstance(e1, Indexed)
-    A1, mi1 = e1.ufl_operands
-    e2 = A1
-
-    # Get tensor and index shape
-    sh1 = e1.ufl_shape
-    sh2 = e2.ufl_shape
-    fi1 = e1.ufl_free_indices
-    fi2 = e2.ufl_free_indices
-    fid1 = e1.ufl_index_dimensions
-    fid2 = e2.ufl_index_dimensions
-
-    # Compute regular and total shape
-    tsh1 = sh1 + fid1
-    tsh2 = sh2 + fid2
-    str1 = shape_to_strides(tsh1)
-    str2 = shape_to_strides(tsh2)
-    assert product(tsh1) == product(tsh2)
-    assert (not sh1) and (fid1) and (sh2) and (not fid2)
-
-    sh_to_ind_map = [fi1.index(i.count()) for i in mi1 if isinstance(i, Index)]
-    comp1 = []
-    comp2 = []
-    for p2 in compute_indices(sh2):
-        p1 = [None] * len(p2)
-        for j, p in enumerate(p2):
-            p1[sh_to_ind_map[j]] = p
-        c1 = flatten_multiindex(p1, str1)
-        c2 = flatten_multiindex(p2, str2)
-        comp1.append(c1)
-        comp2.append(c2)
-    return tuple(comp1), tuple(comp2)
-
-
-def __map_indexed_arg_components4(indexed):
-    assert isinstance(indexed, Indexed)
-    e1 = indexed
-    e2, mi = e1.ufl_operands
-
-    # Get tensor and index shape
-    sh1 = e1.ufl_shape
-    sh2 = e2.ufl_shape
-    fi1 = e1.ufl_free_indices
-    fi2 = e2.ufl_free_indices
-    fid1 = e1.ufl_index_dimensions
-    fid2 = e2.ufl_index_dimensions
-
-    # Compute regular and total shape
-    tsh1 = sh1 + fid1
-    tsh2 = sh2 + fid2
-    str1 = shape_to_strides(tsh1)
-    # str2 = shape_to_strides(tsh2)
-    assert product(tsh1) == product(tsh2)
-    assert (not sh1) and (fid1) and (sh2) and (not fid2)
-
-    # Build map from fi1/fid1 position to mi position
-    mi = [i for i in mi if isinstance(i, Index)]
-    nmi = len(mi)
-    ind1_to_mi_map = [None] * nmi
-    for k in range(nmi):
-        ind1_to_mi_map[fi1.index(mi[k].count())] = k
-
-    # Build map from flattened e1 component to flattened e2 component
-    indices2 = compute_indices(sh2)
-    ni = len(indices2)
-    d1 = [None] * ni
-    d2 = [None] * ni
-    for c2, p2 in enumerate(indices2):
-        p1 = [p2[k] for k in ind1_to_mi_map]
-        c1 = flatten_multiindex(p1, str1)
-        d1[c1] = c2
-        d2[c2] = c1
-    assert d1 == d2
-    return d1
-
-
-def __map_component_tensor_arg_components4(component_tensor):
-    assert isinstance(component_tensor, ComponentTensor)
-    e2 = component_tensor
-    e1, mi = e2.ufl_operands
-
-    # Get tensor and index shape
-    sh1 = e1.ufl_shape
-    sh2 = e2.ufl_shape
-    fi1 = e1.ufl_free_indices
-    fi2 = e2.ufl_free_indices
-    fid1 = e1.ufl_index_dimensions
-    fid2 = e2.ufl_index_dimensions
-
-    # Compute regular and total shape
-    tsh1 = sh1 + fid1
-    tsh2 = sh2 + fid2
-    str1 = shape_to_strides(tsh1)
-    # str2 = shape_to_strides(tsh2)
-    assert product(tsh1) == product(tsh2)
-    assert (not sh1) and (fid1) and (sh2) and (not fid2)
-
-    # Build map from fi1/fid1 position to mi position
-    mi = [i for i in mi if isinstance(i, Index)]
-    nmi = len(mi)
-    ind1_to_mi_map = [None] * nmi
-    for k in range(nmi):
-        ind1_to_mi_map[fi1.index(mi[k].count())] = k
-
-    # Build map from flattened e1 component to flattened e2 component
-    indices2 = compute_indices(sh2)
-    ni = len(indices2)
-    d1 = [None] * ni
-    d2 = [None] * ni
-    for c2, p2 in enumerate(indices2):
-        p1 = [p2[k] for k in ind1_to_mi_map]
-        c1 = flatten_multiindex(p1, str1)
-        d1[c1] = c2
-        d2[c2] = c1
-    assert d1 == d2
-    return d2
diff --git a/uflacs/backends/ffc/common.py b/uflacs/backends/ffc/common.py
deleted file mode 100644
index 77101b2..0000000
--- a/uflacs/backends/ffc/common.py
+++ /dev/null
@@ -1,209 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>
-
-"""FFC specific utilities."""
-
-from six.moves import xrange as range
-
-
-# FIXME: Do something like this for shared symbol naming?
-class FFCBackendSymbols(object):
-    def __init__(self, language, coefficient_numbering):
-        self.L = language
-        self.S = self.L.Symbol
-        self.coefficient_numbering = coefficient_numbering
-
-        # Rules, make functions? (NB! Currently duplicated from names)
-        self.restriction_postfix = {"+": "_0", "-": "_1", None: ""}  # TODO: Use this wherever we need it?
-
-    # FIXME: Used in access: weights, points, ia, A, w, x, J
-
-    def entity(self, entitytype, restriction):
-        "Entity index."
-        return self.S(format_entity_name(entitytype, restriction))
-
-    def x(self, quadloop):
-        "Physical coordinates."
-        return self.S("x" + str(quadloop))
-
-    def xi(self, quadloop):
-        "Reference cell coordinates."
-        return self.S("xi" + str(quadloop))
-
-    def quadrature_loop_index(self):
-        "Reusing a single index name for all quadrature loops, assumed not to be nested."
-        # If we want to use num_points-specific names for any symbols, this need num_points as well (or some other scope id).
-        #return self.S("iq%d" % (num_points,))
-        return self.S("iq")
-
-    def coefficient_dof_sum_index(self):
-        "Reusing a single index name for all coefficient dof*basis sums, assumed to always be the innermost loop."
-        return self.S("ic")
-
-    def coefficient_value_access(self, coefficient,):
-        c = self.coefficient_numbering[coefficient] # coefficient.count()
-        # If we want to use num_points-specific names for any symbols, this need num_points as well (or some other scope id).
-        #return self.S("w%d_%d" % (c, num_points))
-        return self.S("w%d" % c)
-
-    def coefficient_dof_access(self, coefficient, dof_number):
-        # TODO: Add domain_number = self.ir["domain_numbering"][coefficient.ufl_domain().domain_key()]
-        # TODO: Flatten dofs array and use CRS lookup table.
-        # TODO: Apply integral specific renumbering.
-        c = self.coefficient_numbering[coefficient] # coefficient.count()
-        #return self.L.ArrayAccess(names.w, (c, dof_number))
-        return self.S("w")[c, dof_number]
-
-    def domain_dof_access(self, dof, component, gdim, num_scalar_dofs, restriction, interleaved_components):
-        # TODO: Add domain number as argument here, and {domain_offset} to array indexing:
-        # domain_offset = self.ir["domain_offsets"][domain_number]
-        vc = self.S("coordinate_dofs" + self.restriction_postfix[restriction])
-        if interleaved_components:
-            #return L.ArrayAccess(vc, L.Add(L.Mul(gdim, dof), component))
-            return vc[gdim*dof + component]
-        else:
-            #return L.ArrayAccess(vc, L.Add(L.Mul(num_scalar_dofs, component), dof))
-            return vc[num_scalar_dofs*component + dof]
-
-    def domain_dofs_access(self, gdim, num_scalar_dofs, restriction, interleaved_components):
-        # TODO: Add domain number as argument here, and {domain_offset} to array indexing:
-        # FIXME: Handle restriction here
-        # domain_offset = self.ir["domain_offsets"][domain_number]
-        return [self.domain_dof_access(dof, component, gdim, num_scalar_dofs, restriction, interleaved_components)
-                for component in range(gdim)
-                for dof in range(num_scalar_dofs)]
-
-
-# TODO: This is not used much anymore, integrate in backend class, and use L.Symbol
-class Names:
-
-    def __init__(self):
-        # Topology argument names
-        self.vertex = "vertex"
-        self.facet = "facet"
-
-        # Geometry names
-        self.coordinate_dofs = "coordinate_dofs"
-        self.xi = "xi"
-        self.x = "x"
-        self.J = "J"
-        self.K = "K"
-        self.detJ = "detJ"
-        self.det = "det"
-
-        # Quadrature rule
-        self.points = "points"
-        self.weights = "weights"
-
-        # Quadrature temps
-        self.qw = "qw"
-        self.D = "D"
-
-        # (Base)name for intermediate registers
-        self.s = "s"
-
-        # Element tensor
-        self.A = "A"
-
-        # Coefficient dofs array
-        self.w = "w"
-
-        # Basenames for function components
-        self.wbase = "w"
-        self.vbase = "v"
-        self.dwbase = "dw"
-        self.dvbase = "dv"
-
-        # Loop indices
-        self.iq = "iq"   # Quadrature loop
-        self.ic = "ic"   # Coefficient accumulation loop
-        self.ia = "ia"   # Argument dof loop
-        self.ild = "ild"  # Local derivative accumulation loop
-
-        # Rules, make functions?
-        self.restriction_postfix = {"+": "_0", "-": "_1", None: ""}  # TODO: Use this wherever we need it?
-
-names = Names()
-
-
-def format_entity_name(entitytype, r):
-    if entitytype == "cell":
-        entity = "0"  # None # TODO: Keep 3D tables and use entity 0 for cells or make tables 2D and use None?
-    elif entitytype == "facet":
-        entity = names.facet + names.restriction_postfix[r]
-    elif entitytype == "vertex":
-        entity = names.vertex
-    return entity
-
-def format_mt_der(mt):
-    # Expecting only local derivatives here
-    assert not mt.global_derivatives
-    # Add derivatives to name
-    if mt.local_derivatives:
-        der = "_d{0}".format(''.join(map(str, mt.local_derivatives)))
-    else:
-        der = ""
-    return der
-
-def format_mt_comp(mt):
-    # Add flattened component to name (TODO: this should be the local component?)
-    if mt.component:
-        comp = "_c{0}".format(mt.flat_component)
-    else:
-        comp = ""
-    return comp
-
-def format_mt_avg(mt):
-    # Add averaged state to name
-    if mt.averaged:
-        avg = "_a{0}".format(mt.averaged)
-    else:
-        avg = ""
-    return avg
-
-def format_mt_res(mt):
-    return names.restriction_postfix[mt.restriction].replace("_", "_r")
-
-def format_mt_name(basename, mt):
-    access = "{basename}{avg}{res}{der}{comp}".format(basename=basename,
-                                                      avg=format_mt_avg(mt),
-                                                      res=format_mt_res(mt),
-                                                      der=format_mt_der(mt),
-                                                      comp=format_mt_comp(mt))
-    return access
-
-def ufc_restriction_postfix(restriction):
-    # TODO: Get restriction postfix from somewhere central
-    if restriction == "+":
-        res = "_0"
-    elif restriction == "-":
-        res = "_1"
-    else:
-        res = ""
-    return res
-
-# from uflacs.backends.ffc.ffc_statement_formatter import format_element_table_access
-# from ufl.utils.derivativetuples import derivative_listing_to_counts
-# def generate_element_table_access(mt):
-#     FIXME: See  format_element_table_access  get_element_table_data
-#     entity = format_entity_name(self.ir["entitytype"], mt.restriction)
-#     return L.ArrayAccess(uname, (entity, names.iq, dof_number))
-#     return "FE[0]" # FIXME
-
-# def generate_geometry_table_access(mt):
-#     return "FJ[0]" # FIXME
diff --git a/uflacs/backends/ffc/ffc_compiler.py b/uflacs/backends/ffc/ffc_compiler.py
deleted file mode 100644
index edb06af..0000000
--- a/uflacs/backends/ffc/ffc_compiler.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
-
-"""The FFC specific backend to the UFLACS form compiler algorithms."""
-
-def compile_tabulate_tensor_code(form, optimize=True):
-    """This function is basically a mock controller which allows emulating the behaviour of ffc,
-    by joining compute_ir, optimize_ir, and generate_ir.
-    """
-    from ufl.algorithms import compute_form_data
-    from ffc.cpp import set_float_formatting
-    from ffc.uflacsrepr import compute_integral_ir, optimize_integral_ir, generate_integral_code
-
-    # Fake the initialization necessary to get this running through
-    set_float_formatting(8)
-    parameters = {"optimize": optimize, "restrict_keyword": ""}
-    prefix = "uflacs_testing"
-    form_id = 0
-
-    # Apply ufl preprocessing
-    form_data = compute_form_data(form,
-                                  do_apply_function_pullbacks=True,
-                                  do_apply_integral_scaling=True,
-                                  do_apply_geometry_lowering=True,
-                                  preserve_geometry_types=(),
-                                  do_apply_restrictions=True,
-                                  )
-
-    tt_codes = []
-    for itg_data in form_data.integral_data:
-        # Just make a fixed choice of cubic default quadrature rule for this test code
-        itg_data.metadata["quadrature_degree"] = itg_data.metadata.get("quadrature_degree", 3)
-        itg_data.metadata["quadrature_rule"] = itg_data.metadata.get("quadrature_rule", "default")
-
-        # Call uflacs representation functions from ffc, which again calls the matching uflacs functions
-        element_numbers = None # FIXME
-        ir = compute_integral_ir(itg_data, form_data, form_id, element_numbers, parameters)
-        if optimize:
-            ir = optimize_integral_ir(ir, parameters)
-        code = generate_integral_code(ir, prefix, parameters)
-
-        # Store just the tabulate tensor part generated by uflacs
-        tt_codes.append(code["tabulate_tensor"])
-
-    # Just joint the tabulate tensor bodies and return
-    code = ('\n' + '/' * 60 + '\n').join(tt_codes)
-    return code
diff --git a/uflacs/backends/ffc/generation.py b/uflacs/backends/ffc/generation.py
deleted file mode 100644
index 5bb8dd1..0000000
--- a/uflacs/backends/ffc/generation.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>
-
-"""FFC specific algorithms for the generation phase."""
-
-from uflacs.generation.integralgenerator import IntegralGenerator
-
-import uflacs.language.cnodes
-from uflacs.language.format_lines import format_indented_lines
-from uflacs.language.ufl_to_cnodes import UFL2CNodesTranslator
-from uflacs.backends.ffc.access import FFCAccessBackend
-from uflacs.backends.ffc.definitions import FFCDefinitionsBackend
-
-class FFCBackend(object):
-    "Class collecting all aspects of the FFC backend."
-    def __init__(self, ir, parameters):
-        self.language = uflacs.language.cnodes
-        self.ufl_to_language = UFL2CNodesTranslator(self.language)
-        self.definitions = FFCDefinitionsBackend(ir, self.language, parameters)
-        self.access = FFCAccessBackend(ir, self.language, parameters)
-
-def generate_tabulate_tensor_code(ir, prefix, parameters):
-
-    # Create FFC C++ backend
-    backend = FFCBackend(ir, parameters)
-
-    # Create code generator for integral body
-    ig = IntegralGenerator(ir, backend)
-
-    # Generate code ast for the tabulate_tensor body
-    parts = ig.generate()
-
-    # Format code AST as one string
-    body = format_indented_lines(parts.cs_format(), 1)
-    #import IPython; IPython.embed()
-
-    # Fetch includes
-    includes = set()
-    includes.update(ig.get_includes())
-    includes.update(backend.definitions.get_includes())
-
-    # Format uflacs specific code structures into a single
-    # string and place in dict before returning to ffc
-    code = {
-        "tabulate_tensor": body,
-        "additional_includes_set": includes,
-    }
-    return code
diff --git a/uflacs/backends/ffc/representation.py b/uflacs/backends/ffc/representation.py
deleted file mode 100644
index cfb7bc8..0000000
--- a/uflacs/backends/ffc/representation.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
-
-"""The FFC specific backend to the UFLACS form compiler algorithms."""
-
-from six import iteritems
-import numpy as np
-from ufl.algorithms import replace
-from ufl.utils.sorting import sorted_by_count
-
-from ffc.log import ffc_assert
-
-from uflacs.params import default_parameters
-from uflacs.datastructures.arrays import object_array
-from uflacs.analysis.modified_terminals import analyse_modified_terminal
-from uflacs.representation.compute_expr_ir import compute_expr_ir
-from uflacs.elementtables.terminaltables import build_element_tables, optimize_element_tables
-
-
-def compute_uflacs_integral_ir(psi_tables, entitytype,
-                               integrals_dict, form_data,
-                               parameters):
-    # TODO: Hack before we get default parameters properly into ffc
-    p = default_parameters()
-    p.update(parameters)
-    parameters = p
-    # FIXME: Should be epsilon from ffc parameters
-    from uflacs.language.format_value import get_float_threshold
-    epsilon = get_float_threshold()
-
-    uflacs_ir = {}
-    # uflacs_ir["name"] = form_data.name
-    # uflacs_ir["coefficient_names"] = form_data.coefficient_names
-    # uflacs_ir["argument_names"] = form_data.argument_names
-    # uflacs_ir["cell"] = form_data.integration_domains[0].ufl_cell()
-    # uflacs_ir["function_replace_map"] = form_data.function_replace_map
-
-    # Build coefficient numbering for UFC interface here, to avoid renumbering in UFL and application of replace mapping
-    uflacs_ir["coefficient_numbering"] = {}
-    #uflacs_ir["coefficient_element"] = {}
-    #uflacs_ir["coefficient_domain"] = {}
-    for i, f in enumerate(sorted_by_count(form_data.function_replace_map.keys())):
-        g = form_data.function_replace_map[f]
-        assert i == g.count()
-        uflacs_ir["coefficient_numbering"][g] = i # USING THIS ONE BECAUSE WE'RE CALLING REPLACE BELOW
-        #uflacs_ir["coefficient_numbering"][f] = i # If we make elements and domains well formed we can avoid replace below and use this line instead
-        #uflacs_ir["coefficient_element"][f] = g.ufl_element()
-        #uflacs_ir["coefficient_domain"][f] = g.ufl_domain()
-
-    # Build ir for each num_points/integrand
-    uflacs_ir["expr_ir"] = {}
-    for num_points in sorted(integrals_dict.keys()):
-        integral = integrals_dict[num_points]
-
-        # Get integrand
-        expr = integral.integrand()
-
-        # Replace coefficients so they all have proper element and domain for what's to come
-        # TODO: We can avoid this step when Expression is in place and
-        #       element/domain assignment removed from compute_form_data.
-        # TODO: Doesn't replace domain coefficient!!!
-        #       Merge replace functionality into change_to_reference_grad to fix?
-        #       When coordinate field coefficient is removed I guess this issue will disappear?
-        expr = replace(expr, form_data.function_replace_map) # FIXME: Still need to apply this mapping.
-
-        # Build the core uflacs ir of expressions
-        expr_ir = compute_expr_ir(expr, parameters)
-        uflacs_ir["expr_ir"][num_points] = expr_ir
-
-    for num_points in sorted(integrals_dict.keys()):
-        expr_ir = uflacs_ir["expr_ir"][num_points]
-
-        # Build set of modified terminal ufl expressions
-        V = expr_ir["V"]
-        modified_terminals = [analyse_modified_terminal(V[i])
-                              for i in expr_ir["modified_terminal_indices"]]
-
-        # Analyse modified terminals and store data about them
-        terminal_data = modified_terminals + expr_ir["modified_arguments"]
-
-        # Build tables needed by all modified terminals
-        # (currently build here means extract from ffc psi_tables)
-        #print '\n'.join([str(mt.expr) for mt in terminal_data])
-        tables, terminal_table_names = build_element_tables(psi_tables, num_points,
-                                                            entitytype, terminal_data,
-                                                            epsilon)
-
-        # Optimize tables and get table name and dofrange for each modified terminal
-        unique_tables, terminal_table_ranges = optimize_element_tables(tables, terminal_table_names, epsilon)
-        expr_ir["unique_tables"] = unique_tables
-
-        # Modify ranges for restricted form arguments (not geometry!)
-        # FIXME: Should not coordinate dofs get the same offset?
-        from ufl.classes import FormArgument
-        for i, mt in enumerate(terminal_data):
-            # TODO: Get the definition that - means added offset from somewhere
-            if mt.restriction == "-" and isinstance(mt.terminal, FormArgument):
-                # offset = number of dofs before table optimization
-                offset = int(tables[terminal_table_names[i]].shape[-1])
-                (unique_name, b, e) = terminal_table_ranges[i]
-                terminal_table_ranges[i] = (unique_name, b + offset, e + offset)
-
-        # Split into arguments and other terminals before storing in expr_ir
-        # TODO: Some tables are associated with num_points, some are not
-        #       (i.e. piecewise constant, averaged and x0)
-        n = len(expr_ir["modified_terminal_indices"])
-        m = len(expr_ir["modified_arguments"])
-        assert len(terminal_data) == n + m
-        assert len(terminal_table_ranges) == n + m
-        assert len(terminal_table_names) == n + m
-        expr_ir["modified_terminal_table_ranges"] = terminal_table_ranges[:n]
-        expr_ir["modified_argument_table_ranges"] = terminal_table_ranges[n:]
-
-        # Store table data in V indexing, this is used in integralgenerator
-        expr_ir["table_ranges"] = object_array(len(V))
-        expr_ir["table_ranges"][expr_ir["modified_terminal_indices"]] = \
-            expr_ir["modified_terminal_table_ranges"]
-
-    return uflacs_ir
diff --git a/uflacs/backends/ufc/utils.py b/uflacs/backends/ufc/utils.py
deleted file mode 100644
index acd7181..0000000
--- a/uflacs/backends/ufc/utils.py
+++ /dev/null
@@ -1,15 +0,0 @@
-
-# TODO: Move to uflacs.language utils?
-def generate_return_new_switch(L, i, classnames, args=None):
-    if classnames:
-        cases = []
-        if args is None:
-            args = list(range(len(classnames)))
-        for j, classname in zip(args, classnames):
-            if classname:
-                cases.append((j, L.Return(L.New(classname))))
-        code = [L.Switch(i, cases, autobreak=False, autoscope=False)]
-    else:
-        code = []
-    code.append(L.Return(L.Null()))
-    return L.StatementList(code)
diff --git a/uflacs/datastructures/__init__.py b/uflacs/datastructures/__init__.py
deleted file mode 100644
index bb44aa5..0000000
--- a/uflacs/datastructures/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>
-
-"""Basic data structures."""
diff --git a/uflacs/datastructures/arrays.py b/uflacs/datastructures/arrays.py
deleted file mode 100644
index 58fe8ec..0000000
--- a/uflacs/datastructures/arrays.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
-
-"""Basic data structures."""
-
-import numpy
-
-def int_array(size):
-    return numpy.zeros(size, dtype=int)
-
-def object_array(size):
-    return numpy.empty(size, dtype=object)
-
-def bool_array(size):
-    #return numpy.zeros(size, dtype=numpy.bool8)
-    return numpy.zeros(size, dtype=numpy.int8)
diff --git a/uflacs/datastructures/types.py b/uflacs/datastructures/types.py
deleted file mode 100644
index 0772049..0000000
--- a/uflacs/datastructures/types.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
-
-"""Basic data structures."""
-
-import numpy
-
-def sufficient_int_type(maxvalue):
-    if maxvalue < 2 ** 7:
-        dtype = numpy.int8
-    elif maxvalue < 2 ** 15:
-        dtype = numpy.int16
-    elif maxvalue < 2 ** 31:
-        dtype = numpy.int32
-    else:
-        dtype = numpy.int64
-    return dtype
-
-def sufficient_uint_type(maxvalue):
-    if maxvalue < 2 ** 8:
-        dtype = numpy.uint8
-    elif maxvalue < 2 ** 16:
-        dtype = numpy.uint16
-    elif maxvalue < 2 ** 32:
-        dtype = numpy.uint32
-    else:
-        dtype = numpy.uint64
-    return dtype
diff --git a/uflacs/elementtables/table_utils.py b/uflacs/elementtables/table_utils.py
deleted file mode 100644
index c350d8c..0000000
--- a/uflacs/elementtables/table_utils.py
+++ /dev/null
@@ -1,234 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>
-
-"""Utilities for precomputed table manipulation."""
-
-from __future__ import print_function # used in some debugging
-
-from six import itervalues, iterkeys
-from six import advance_iterator as next
-from six.moves import map
-from six.moves import xrange as range
-import numpy as np
-
-
-def equal_tables(a, b, eps):
-    "Compare tables to be equal within a tolerance."
-    a = np.asarray(a)
-    b = np.asarray(b)
-    if a.shape != b.shape:
-        return False
-    if len(a.shape) > 1:
-        return all(equal_tables(a[i], b[i], eps) for i in range(a.shape[0]))
-
-    def scalars_equal(x, y, eps):
-        return abs(x-y) < eps
-    return all(scalars_equal(a[i], b[i], eps) for i in range(a.shape[0]))
-
-
-def strip_table_zeros(table, eps):
-    "Strip zero columns from table. Returns column range (begin,end) and the new compact table."
-    # Get shape of table and number of columns, defined as the last axis
-    table = np.asarray(table)
-    sh = table.shape
-    nc = sh[-1]
-
-    # Find first nonzero column
-    begin = nc
-    for i in range(nc):
-        if np.linalg.norm(table[..., i]) > eps:
-            begin = i
-            break
-
-    # Find (one beyond) last nonzero column
-    end = begin
-    for i in range(nc-1, begin-1, -1):
-        if np.linalg.norm(table[..., i]) > eps:
-            end = i+1
-            break
-
-    # Make subtable by stripping first and last columns
-    return begin, end, table[..., begin:end]
-
-
-def build_unique_tables(tables, eps):
-    """Given a list or dict of tables, return a list of unique tables
-    and a dict of unique table indices for each input table key."""
-    unique = []
-    mapping = {}
-    if isinstance(tables, list):
-        keys = list(range(len(tables)))
-    elif isinstance(tables, dict):
-        keys = sorted(tables.keys())
-    for k in keys:
-        t = tables[k]
-        found = -1
-        for i, u in enumerate(unique):
-            if equal_tables(u, t, eps):
-                found = i
-                break
-        if found == -1:
-            i = len(unique)
-            unique.append(t)
-        mapping[k] = i
-    return unique, mapping
-
-
-def build_unique_tables2(tables, eps):
-    """Given a list or dict of tables, return a list of unique tables
-    and a dict of unique table indices for each input table key."""
-    unique = []
-    mapping = {}
-
-    if isinstance(tables, list):
-        keys = list(range(len(tables)))
-    elif isinstance(tables, dict):
-        keys = sorted(tables.keys())
-
-    for k in keys:
-        t = tables[k]
-        found = -1
-        for i, u in enumerate(unique):
-            if equal_tables(u, t, eps):
-                found = i
-                break
-        if found == -1:
-            i = len(unique)
-            unique.append(t)
-        mapping[k] = i
-
-    return unique, mapping
-
-
-def get_ffc_table_values(tables, entitytype, num_points, element, flat_component, derivative_counts, epsilon):
-    """Extract values from ffc element table.
-
-    Returns a 3D numpy array with axes
-    (entity number, quadrature point number, dof number)
-    """
-    # Get quadrule/element subtable
-    element_table = tables[num_points][element]
-
-    # Temporary fix for new table structure TODO: Handle avg properly
-    if len(element_table) != 1:
-        print()
-        print(element_table)
-    assert len(element_table) == 1
-    element_table = element_table[None]
-
-    # FFC property:
-    # element_counter = element_map[num_points][element]
-
-    # Figure out shape of final array by inspecting tables
-    num_entities = len(element_table)
-    tmp = next(itervalues(element_table)) # Pick subtable for arbitrary chosen cell entity
-    if derivative_counts is None: # Workaround for None vs (0,)*tdim
-        dc = next(iterkeys(tmp))
-        derivative_counts = (0,)*len(dc)
-    num_dofs = len(tmp[derivative_counts])
-
-    # Make 3D array for final result
-    shape = (num_entities, num_points, num_dofs)
-    res = np.zeros(shape)
-
-    # Loop over entities and fill table blockwise (each block = points x dofs)
-    sh = element.value_shape()
-    for entity in range(num_entities):
-        # Access subtable
-        entity_key = None if entitytype == "cell" else entity
-        tbl = element_table[entity_key][derivative_counts]
-
-        # Extract array for right component and order axes as (points, dofs)
-        if sh == ():
-            arr = np.transpose(tbl)
-        else:
-            arr = np.transpose(tbl[:, flat_component,:])
-
-        # Assign block of values for this entity
-        res[entity,:,:] = arr
-
-    # Clamp almost-zeros to zero
-    res[np.where(np.abs(res) < epsilon)] = 0.0
-    return res
-
-
-def generate_psi_table_name(element_counter, flat_component, derivative_counts, averaged, entitytype, num_quadrature_points):
-    """Generate a name for the psi table of the form:
-    FE#_C#_D###[_AC|_AF|][_F|V][_Q#], where '#' will be an integer value.
-
-    FE  - is a simple counter to distinguish the various bases, it will be
-          assigned in an arbitrary fashion.
-
-    C   - is the component number if any (this does not yet take into account
-          tensor valued functions)
-
-    D   - is the number of derivatives in each spatial direction if any.
-          If the element is defined in 3D, then D012 means d^3(*)/dydz^2.
-
-    AC  - marks that the element values are averaged over the cell
-
-    AF  - marks that the element values are averaged over the facet
-
-    F   - marks that the first array dimension enumerates facets on the cell
-
-    V   - marks that the first array dimension enumerates vertices on the cell
-
-    Q   - number of quadrature points, to distinguish between tables in a mixed quadrature degree setting
-
-    """
-
-    name = "FE%d" % element_counter
-
-    if isinstance(flat_component, int):
-        name += "_C%d" % flat_component
-    else:
-        assert flat_component is None
-
-    if derivative_counts and any(derivative_counts):
-        name += "_D" + "".join(map(str, derivative_counts))
-
-    if averaged == "cell":
-        name += "_AC"
-    elif averaged == "facet":
-        name += "_AF"
-
-    if entitytype == "cell":
-        pass
-    elif entitytype == "facet":
-        name += "_F"
-    elif entitytype == "vertex":
-        name += "_V"
-    else:
-        error("Unknown entity type %s." % entitytype)
-
-    if isinstance(num_quadrature_points, int):
-        name += "_Q%d" % num_quadrature_points
-    else:
-        assert num_quadrature_points is None
-
-    return name
-
-
-def _examples(tables):
-    eps = 1e-14
-    name = generate_psi_table_name(counter, flat_component, derivative_counts, averaged, entitytype, None)
-    values = get_ffc_table_values(tables, entitytype, num_points, element, flat_component, derivative_counts, eps)
-
-    begin, end, table = strip_table_zeros(table, eps)
-    all_zeros = table.shape[-1] == 0
-    all_ones = equal_tables(table, np.ones(table.shape), eps)
diff --git a/uflacs/elementtables/terminaltables.py b/uflacs/elementtables/terminaltables.py
deleted file mode 100644
index ee6e64f..0000000
--- a/uflacs/elementtables/terminaltables.py
+++ /dev/null
@@ -1,226 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
-
-"""Tools for precomputed tables of terminal values."""
-
-from six import iteritems, iterkeys
-from six.moves import xrange as range
-import numpy as np
-import ufl
-from ufl import product
-from ufl.utils.derivativetuples import derivative_listing_to_counts
-from ufl.classes import FormArgument, GeometricQuantity, SpatialCoordinate, Jacobian
-from ufl.algorithms.analysis import unique_tuple
-
-from ffc.log import ffc_assert
-
-from uflacs.datastructures.arrays import object_array
-from uflacs.elementtables.table_utils import (generate_psi_table_name,
-                                              get_ffc_table_values,
-                                              strip_table_zeros,
-                                              build_unique_tables)
-
-def extract_terminal_elements(terminal_data):
-    "Extract a list of unique elements from terminal data."
-    elements = []
-    xs = {}
-    for mt in terminal_data:
-        t = mt.terminal
-        if isinstance(t, FormArgument):
-            # Add element for function and its coordinates
-            elements.append(t.ufl_domain().ufl_coordinate_element())
-            elements.append(t.ufl_element())
-
-        elif isinstance(t, GeometricQuantity):
-            # Add element for coordinate field of domain
-            elements.append(t.ufl_domain().ufl_coordinate_element())
-
-    return unique_tuple(elements)
-
-
-def build_element_counter_map(elements):
-    "Given a sequence of elements, build a unique mapping: element->int."
-    element_counter_map = {}
-    for element in sorted(elements):  # TODO: Stable sorting?
-        if element not in element_counter_map:
-            element_counter_map[element] = len(element_counter_map)
-    return element_counter_map
-
-
-def build_element_tables(psi_tables, num_points, entitytype, terminal_data, epsilon):
-    """Build the element tables needed for a list of modified terminals.
-
-    Concepts:
-
-
-    Input:
-      psi_tables
-      entitytype
-      terminal_data
-
-    New output:
-      tables
-      terminal_table_names
-    """
-    element_counter_map = {}  # build_element_counter_map(extract_terminal_elements(terminal_data))
-    terminal_table_names = object_array(len(terminal_data))
-    tables = {}
-    for i, mt in enumerate(terminal_data):
-        t = mt.terminal
-        rv = mt.reference_value
-        gd = mt.global_derivatives
-        ld = mt.local_derivatives
-        gc = mt.component
-        fc = mt.flat_component
-
-        # Add to element tables for FormArguments and relevant GeometricQuantities
-        if isinstance(t, FormArgument):
-            if rv:
-                if gd:
-                    error("Global derivatives of reference values not defined.")
-            else:
-                if ld:
-                    error("Local derivatives of global values not defined.")
-            element = t.ufl_element()
-
-        elif isinstance(t, SpatialCoordinate):
-            if rv:
-                error("Not expecting reference value of x.")
-            if gd:
-                error("Not expecting global derivatives of x.")
-                
-            # TODO: Only need table for component element, does that matter?
-            element = t.ufl_domain().ufl_coordinate_element()
-
-            if ld:
-                # Actually the Jacobian, translate component gc to x element context
-                fc, ld = gc
-                ld = (ld,)
-
-        elif isinstance(t, Jacobian):
-            if rv:
-                error("Not expecting reference value of J.")
-            if gd:
-                error("Not expecting global derivatives of J.")
-
-            # TODO: Only need table for component element, does that matter?
-            element = t.ufl_domain().ufl_coordinate_element()
-
-            fc = gc[0]
-            ld = tuple(sorted((gc[1],) + ld))
-            #fc, ld = gc
-            #ld = (ld,)
-
-        else:
-            continue
-
-        # Count elements as we go
-        element_counter = element_counter_map.get(element)
-        if element_counter is None:
-            element_counter = len(element_counter_map)
-            element_counter_map[element] = element_counter
-
-        # Change derivatives format for table lookup
-        if gd:
-            gdim = t.ufl_domain().geometric_dimension()
-            global_derivatives = tuple(derivative_listing_to_counts(gd, gdim))
-        else:
-            global_derivatives = None
-        if ld:
-            tdim = t.ufl_domain().topological_dimension()
-            local_derivatives = tuple(derivative_listing_to_counts(ld, tdim))
-        else:
-            local_derivatives = None
-
-        # Build name for this particular table
-        name = generate_psi_table_name(element_counter, fc,
-                                     local_derivatives, mt.averaged, entitytype, num_points)
-
-        # Extract the values of the table from ffc table format
-        table = tables.get(name)
-        if table is None:
-            table = get_ffc_table_values(psi_tables, entitytype, num_points,
-                                         element, fc, local_derivatives, epsilon)
-            tables[name] = table
-
-        # Store table name with modified terminal
-        terminal_table_names[i] = name
-
-    return tables, terminal_table_names
-
-
-def optimize_element_tables(tables, terminal_table_names, eps):
-    """Optimize tables.
-
-    Input:
-      tables - a mapping from name to table values
-      terminal_table_names - a list of table names
-
-    Output:
-      unique_tables_dict - a new and mapping from name to table values with stripped zero columns
-      terminal_table_ranges - a list of (table name, begin, end) for each of the input table names
-    """
-
-    # Names here are a bit long and slightly messy...
-
-    # Apply zero stripping to all tables
-    stripped_tables = {}
-    table_ranges = {}
-    for name, table in iteritems(tables):
-        begin, end, stripped_table = strip_table_zeros(table, eps)
-        stripped_tables[name] = stripped_table
-        table_ranges[name] = (begin, end)
-
-    # Build unique table mapping
-    unique_tables_list, table_name_to_unique_index = build_unique_tables(stripped_tables, eps)
-
-    # Build mapping of constructed table names to unique names,
-    # pick first constructed name
-    unique_table_names = {}
-    for name in sorted(iterkeys(table_name_to_unique_index)):
-        unique_index = table_name_to_unique_index[name]
-        if unique_index in unique_table_names:
-            continue
-        unique_table_names[unique_index] = name
-
-    # Build mapping from unique table name to the table itself
-    unique_tables = dict((unique_table_names[unique_index], unique_tables_list[unique_index])
-                         for unique_index in range(len(unique_tables_list)))
-
-    # Build mapping from terminal data index to compacted table data:
-    # terminal data index -> (unique name, table range begin, table range end)
-    terminal_table_ranges = object_array(len(terminal_table_names))
-    for i, name in enumerate(terminal_table_names):
-        if name is not None:
-            unique_index = table_name_to_unique_index[name]
-            unique_name = unique_table_names[unique_index]
-            b, e = table_ranges[name]
-            terminal_table_ranges[i] = (unique_name, b, e)
-
-    return unique_tables, terminal_table_ranges
-
-# TODO: This seems to be unused, remove?
-def generate_element_table_definitions(L, tables):
-    "Format a dict of name->table into code."
-    code = []
-    for name in sorted(tables):
-        table = tables[name]
-        if product(table.shape) > 0:
-            code += [L.ArrayDecl("static const double",
-                                 name, table.shape, table)]
-    return code
diff --git a/uflacs/generation/integralgenerator.py b/uflacs/generation/integralgenerator.py
deleted file mode 100644
index 03c4bea..0000000
--- a/uflacs/generation/integralgenerator.py
+++ /dev/null
@@ -1,520 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>
-
-"""Controlling algorithm for building the tabulate_tensor source structure from factorized representation."""
-
-from six import iteritems, iterkeys
-from six.moves import zip
-from six.moves import xrange as range
-
-from ufl import product
-from ufl.classes import ConstantValue, Condition
-
-from ffc.log import error
-
-from uflacs.analysis.modified_terminals import analyse_modified_terminal, is_modified_terminal
-
-
-class IntegralGenerator(object):
-
-    def __init__(self, ir, backend):
-        # Store ir
-        self.ir = ir
-
-        # Consistency check on quadrature rules
-        nps1 = sorted(iterkeys(ir["uflacs"]["expr_ir"]))
-        nps2 = sorted(iterkeys(ir["quadrature_rules"]))
-        if nps1 != nps2:
-            uflacs_warning("Got different num_points for expression irs and quadrature rules:\n{0}\n{1}".format(
-                nps1, nps2))
-
-        # Compute shape of element tensor
-        if self.ir["integral_type"] == "interior_facet":
-            self._A_shape = [2 * n for n in self.ir["prim_idims"]]
-        else:
-            self._A_shape = self.ir["prim_idims"]
-
-        #self._using_names = set()
-        #self._includes = set()
-        self._ufl_names = set()
-
-        # Backend specific plugin with attributes
-        # - language: for translating ufl operators to target language
-        # - definitions: for defining backend specific variables
-        # - access: for accessing backend specific variables
-        self.backend = backend
-
-    def generate_using_statements(self):
-        L = self.backend.language
-        return []  # [L.Using(name) for name in sorted(self._using_names)]
-
-    def get_includes(self):
-        includes = set()  # self._includes)
-
-        includes.add("#include <cstring>")  # for using memset
-        #includes.add("#include <algorithm>")  # for using std::fill instead of memset
-
-        cmath_names = set((
-                "abs", "sign", "pow", "sqrt",
-                "exp", "ln",
-                "cos", "sin", "tan",
-                "acos", "asin", "atan", "atan_2",
-                "cosh", "sinh", "tanh",
-                "acosh", "asinh", "atanh",
-                "erf", "erfc",
-            ))
-
-        boost_math_names = set((
-            "bessel_j", "bessel_y", "bessel_i", "bessel_k",
-            ))
-
-        # Only return the necessary headers
-        if cmath_names & self._ufl_names:
-            includes.add("#include <cmath>")
-
-        if boost_math_names & self._ufl_names:
-            includes.add("#include <boost/math/special_functions.hpp>")
-
-        includes.update(self.backend.definitions.get_includes())
-
-        return sorted(includes)
-
-    def generate(self):
-        """Generate entire tabulate_tensor body.
-
-        Assumes that the code returned from here will be wrapped in a context
-        that matches a suitable version of the UFC tabulate_tensor signatures.
-        """
-        L = self.backend.language
-
-        parts = []
-        parts += self.generate_using_statements()
-        parts += self.backend.definitions.initial()
-        parts += self.generate_quadrature_tables()
-        parts += self.generate_element_tables()
-        parts += self.generate_tensor_reset()
-
-        # If we have integrals with different number of quadrature points,
-        # we wrap each integral in a separate scope, avoiding having to
-        # think about name clashes for now. This is a bit wasteful in that
-        # piecewise quantities are not shared, but at least it should work.
-        expr_irs = self.ir["uflacs"]["expr_ir"]
-        all_num_points = sorted(expr_irs)
-
-        # Reset variables, separate sets for quadrature loop
-        self.vaccesses = { num_points: {} for num_points in all_num_points }
-
-        for num_points in all_num_points:
-            pp = self.generate_piecewise_partition(num_points)
-            ql = self.generate_quadrature_loops(num_points)
-            if len(all_num_points) > 1:
-                # Wrapping in Scope to avoid thinking about scoping issues
-                parts += [L.Scope([pp, ql])]
-            else:
-                parts += [pp, ql]
-
-        parts += self.generate_finishing_statements()
-
-        return L.StatementList(parts)
-
-    def generate_quadrature_tables(self):
-        "Generate static tables of quadrature points and weights."
-        L = self.backend.language
-
-        parts = []
-
-        # No quadrature tables for custom (given argument) or point (evaluation in single vertex)
-        if self.ir["integral_type"] in ("custom", "vertex"):
-            return parts
-
-        qrs = self.ir["quadrature_rules"]
-        if qrs:
-            parts += [L.Comment("Section for quadrature weights and points")]
-
-        for num_points in sorted(qrs):
-            weights = qrs[num_points][0]
-            points = qrs[num_points][1]
-
-            # Size of quadrature points depends on context, assume this is correct:
-            pdim = len(points[0])
-
-            wname = self.backend.access.weights_array_name(num_points)
-            pname = self.backend.access.points_array_name(num_points)
-
-            parts += [L.ArrayDecl("static const double", wname, num_points, weights)]
-            if pdim > 0:
-                # Flatten array:
-                points = points.reshape(product(points.shape))
-                parts += [L.ArrayDecl("static const double", pname, num_points * pdim, points)]
-
-        return parts
-
-    def generate_element_tables(self):
-        """Generate static tables with precomputed element basis
-        function values in quadrature points."""
-
-        L = self.backend.language
-        parts = []
-        parts += [L.Comment("Section for precomputed element basis function values"),
-                  L.Comment("Table dimensions: num_entities, num_points, num_dofs")]
-        expr_irs = self.ir["uflacs"]["expr_ir"]
-        for num_points in sorted(expr_irs):
-            tables = expr_irs[num_points]["unique_tables"]
-
-            comment = "Definitions of {0} tables for {1} quadrature points".format(len(tables), num_points)
-            parts += [L.Comment(comment)]
-
-            for name in sorted(tables):
-                table = tables[name]
-                if product(table.shape) > 0:
-                    parts += [L.ArrayDecl("static const double", name, table.shape, table)]
-        return parts
-
-    def generate_tensor_reset(self):
-        "Generate statements for resetting the element tensor to zero."
-        L = self.backend.language
-
-        # Could move this to codeutils or backend
-        def memzero(ptrname, size): # FIXME: Make CStatement Memzero
-            tmp = "memset({ptrname}, 0, {size} * sizeof(*{ptrname}));"
-            code = tmp.format(ptrname=ptrname, size=size)
-            return L.VerbatimStatement(code)
-
-        # Compute tensor size
-        A_size = product(self._A_shape)
-        A = self.backend.access.element_tensor_name()
-
-        parts = []
-        parts += [L.Comment("Reset element tensor")]
-        parts += [memzero(A, A_size)]
-        return parts
-
-    def generate_quadrature_loops(self, num_points):
-        "Generate all quadrature loops."
-        L = self.backend.language
-        parts = []
-
-        body = self.generate_quadrature_body(num_points)
-        iq = self.backend.access.quadrature_loop_index()
-
-        if num_points == 1:
-            # Wrapping body in Scope to avoid thinking about scoping issues
-            # TODO: Specialize generated code with iq=0 instead of defining iq here.
-            parts += [L.Comment("Only 1 quadrature point, no loop"),
-                      L.VariableDecl("const int", iq, 0),
-                      L.Scope(body)]
-
-        else:
-            parts += [L.ForRange(iq, 0, num_points, body=body)]
-        return parts
-
-    def generate_quadrature_body(self, num_points):
-        """
-        """
-        parts = []
-        L = self.backend.language
-        parts += self.generate_varying_partition(num_points)
-        if parts:
-            parts = [L.Comment("Quadrature loop body setup (num_points={0})".format(num_points))] + parts
-
-        # Compute single argument partitions outside of the dofblock loops
-        for iarg in range(self.ir["rank"]):
-            for dofrange in []:  # TODO: Move f*arg0 out here
-                parts += self.generate_argument_partition(num_points, iarg, dofrange)
-
-        # Nested argument loops and accumulation into element tensor
-        parts += self.generate_quadrature_body_dofblocks(num_points)
-
-        return parts
-
-    def generate_quadrature_body_dofblocks(self, num_points, outer_dofblock=()):
-        parts = []
-        L = self.backend.language
-
-        # The loop level iarg here equals the argument count (in renumbered >= 0 format)
-        iarg = len(outer_dofblock)
-        if iarg == self.ir["rank"]:
-            # At the innermost argument loop level we accumulate into the element tensor
-            parts += [self.generate_integrand_accumulation(num_points, outer_dofblock)]
-            return parts
-        assert iarg < self.ir["rank"]
-
-        expr_ir = self.ir["uflacs"]["expr_ir"][num_points]
-        # tuple(modified_argument_indices) -> code_index
-        AF = expr_ir["argument_factorization"]
-
-        # modified_argument_index -> (tablename, dofbegin, dofend)
-        MATR = expr_ir["modified_argument_table_ranges"]
-
-        # Find dofranges at this loop level iarg starting with outer_dofblock
-        dofranges = set()
-        for mas in AF:
-            mas_full_dofblock = tuple(MATR[j][1:3] for j in mas)
-            if tuple(mas_full_dofblock[:iarg]) == tuple(outer_dofblock):
-                dofrange = mas_full_dofblock[iarg]
-                # Skip empty dofranges TODO: Possible to remove these and related code earlier?
-                if dofrange[0] != dofrange[1]:
-                    dofranges.add(dofrange)
-        dofranges = sorted(dofranges)
-
-        # Build loops for each dofrange
-        for dofrange in dofranges:
-            dofblock = outer_dofblock + (dofrange,)
-
-            # Generate nested inner loops (only triggers for forms with two or more arguments
-            body = self.generate_quadrature_body_dofblocks(num_points, dofblock)
-
-            # Wrap setup, subloops, and accumulation in a loop for this level
-            idof = self.backend.access.argument_loop_index(iarg)
-            parts += [L.ForRange(idof, dofrange[0], dofrange[1], body=body)]
-        return parts
-
-    def generate_partition(self, name, V, partition, table_ranges, num_points):
-        L = self.backend.language
-
-        definitions = []
-        intermediates = []
-
-        vaccesses = self.vaccesses[num_points]
-
-        partition_indices = [i for i, p in enumerate(partition) if p]
-
-        for i in partition_indices:
-            v = V[i]
-
-            if is_modified_terminal(v):
-                mt = analyse_modified_terminal(v)
-
-                # Backend specific modified terminal translation
-                vaccess = self.backend.access(mt.terminal, mt, table_ranges[i], num_points)
-                vdef = self.backend.definitions(mt.terminal, mt, table_ranges[i], vaccess)
-
-                # Store definitions of terminals in list
-                if vdef is not None:
-                    definitions.append(vdef)
-            else:
-                # Get previously visited operands (TODO: use edges of V instead of ufl_operands?)
-                vops = [vaccesses[op] for op in v.ufl_operands]
-
-                # Mapping UFL operator to target language
-                self._ufl_names.add(v._ufl_handler_name_)
-                vexpr = self.backend.ufl_to_language(v, *vops)
-
-                # TODO: Let optimized ir provide mapping of vertex indices to
-                # variable indices, marking which subexpressions to store in variables
-                # and in what order:
-                #j = variable_id[i]
-
-                # Currently instead creating a new intermediate for each subexpression except boolean conditions
-                if isinstance(v, Condition):
-                    # Inline the conditions x < y, condition values
-                    # 'x' and 'y' may still be stored in intermediates.
-                    # This removes the need to handle boolean intermediate variables.
-                    # With tensor-valued conditionals it may not be optimal but we
-                    # let the C++ compiler take responsibility for optimizing those cases.
-                    j = None
-                else:
-                    j = len(intermediates)
-
-                if j is not None:
-                    # Record assignment of vexpr to intermediate variable
-                    vaccess = L.ArrayAccess(name, j)
-                    intermediates.append(L.Assign(vaccess, vexpr))
-                else:
-                    # Access the inlined expression
-                    vaccess = vexpr
-
-            # Store access node for future reference
-            vaccesses[v] = vaccess
-
-        # Join terminal computation, array of intermediate expressions, and intermediate computations
-        parts = [definitions]
-        if intermediates:
-            parts += [L.ArrayDecl("double", name, len(intermediates))]
-            parts += intermediates
-        return parts
-
-    # TODO: Rather take list of vertices, not markers
-    # XXX FIXME: Fix up this function and use it instead!
-    def alternative_generate_partition(self, name, C, MT, partition, table_ranges, num_points):
-        L = self.backend.language
-
-        definitions = []
-        intermediates = []
-
-        # XXX FIXME: create these!
-        # C = input CRS representation of expression DAG
-        # MT = input list/dict of modified terminals
-
-        self.ast_variables = [None]*len(C) # FIXME: Create outside
-
-        # TODO: Get this as input instead of partition?
-        partition_indices = [i for i, p in enumerate(partition) if p]
-        for i in partition_indices:
-            row = C[i] # XXX FIXME: Get this as input
-            if len(row) == 1:
-                # Modified terminal
-                t, = row
-                mt = MT[t] # XXX FIXME: Get this as input
-
-                if isinstance(mt.terminal, ConstantValue):
-                    # Format literal value for the chosen language
-                    vaccess = modified_literal_to_ast_node[tc](mt) # XXX FIXME: Implement this mapping
-                    vdef = None
-                else:
-                    # Backend specific modified terminal formatting
-                    vaccess = self.backend.access(mt.terminal, mt, table_ranges[i], num_points)
-                    vdef = self.backend.definitions(mt.terminal, mt, table_ranges[i], vaccess)
-
-                # Store definitions of terminals in list
-                if vdef is not None:
-                    definitions.append(vdef)
-
-            else:
-                # Application of operator with typecode tc to operands with indices ops
-                tc = mt[0]
-                ops = mt[1:]
-
-                # Get operand AST nodes
-                opsaccess = [self.ast_variables[k] for k in ops]
-
-                # Generate expression for this operator application
-                vexpr = typecode2astnode[tc](opsaccess) # XXX FIXME: Implement this mapping
-
-                store_this_in_variable = True # TODO: Don't store all subexpressions
-                if store_this_in_variable:
-                    # Record assignment of vexpr to intermediate variable
-                    j = len(intermediates)
-                    vaccess = L.ArrayAccess(name, j)
-                    intermediates.append(L.Assign(vaccess, vexpr))
-                else:
-                    # Access the inlined expression
-                    vaccess = vexpr
-
-            # Store access string, either a variable symbol or an inlined expression
-            self.ast_variables[i] = vaccess
-
-        # Join terminal computation, array of intermediate expressions, and intermediate computations
-        parts = [definitions]
-        if intermediates:
-            parts += [L.ArrayDecl("double", name, len(intermediates))]
-            parts += intermediates
-        return parts
-
-    def generate_piecewise_partition(self, num_points):
-        """Generate statements prior to the quadrature loop.
-
-        This mostly includes computations involving piecewise constant geometry and coefficients.
-        """
-        L = self.backend.language
-        expr_ir = self.ir["uflacs"]["expr_ir"][num_points]
-        arrayname = "sp{0}".format(num_points)
-        parts = self.generate_partition(arrayname,
-                                        expr_ir["V"],
-                                        expr_ir["piecewise"],
-                                        expr_ir["table_ranges"],
-                                        num_points)
-        if parts:
-            parts.insert(0, L.Comment("Section for piecewise constant computations"))
-        return parts
-
-    def generate_varying_partition(self, num_points):
-        L = self.backend.language
-        expr_ir = self.ir["uflacs"]["expr_ir"][num_points]
-        arrayname = "sv{0}".format(num_points)
-        parts = self.generate_partition(arrayname,
-                                        expr_ir["V"],
-                                        expr_ir["varying"],
-                                        expr_ir["table_ranges"],
-                                        num_points)
-        if parts:
-            parts.insert(0, L.Comment("Section for geometrically varying computations"))
-        return parts
-
-    def generate_argument_partition(self, num_points, iarg, dofrange):
-        """Generate code for the partition corresponding to arguments 0..iarg within given dofblock."""
-        parts = []
-        # TODO: What do we want to do here? Define!
-        # Should this be a single loop over i0, i1 separately
-        # outside of the double loop over (i0,i1)?
-        return parts
-
-    def generate_integrand_accumulation(self, num_points, dofblock):
-        parts = []
-        L = self.backend.language
-
-        expr_ir = self.ir["uflacs"]["expr_ir"][num_points]
-        AF = expr_ir["argument_factorization"]
-        V = expr_ir["V"]
-        MATR = expr_ir["modified_argument_table_ranges"]
-        MA = expr_ir["modified_arguments"]
-
-        idofs = [self.backend.access.argument_loop_index(i) for i in range(self.ir["rank"])]
-
-        # Find the blocks to build: (TODO: This is rather awkward,
-        # having to rediscover these relations here)
-        arguments_and_factors = sorted(iteritems(expr_ir["argument_factorization"]),
-                                       key=lambda x: x[0])
-        for args, factor_index in arguments_and_factors:
-            if not all(tuple(dofblock[iarg]) == tuple(MATR[ma][1:3])
-                       for iarg, ma in enumerate(args)):
-                continue
-
-            factors = []
-
-            # Get factor expression
-            v = V[factor_index]
-            if v._ufl_is_literal_ and float(v) == 1.0:
-                # TODO: Nicer way to check for f=1?
-                pass
-            else:
-                fexpr = self.vaccesses[num_points][v]
-                factors.append(fexpr)
-
-            # Get table names
-            argfactors = []
-            for i, ma in enumerate(args):
-                access = self.backend.access(MA[ma].terminal, MA[ma], MATR[ma], num_points)
-                argfactors += [access]
-
-            factors.extend(argfactors)
-
-            # Format index access to A
-            A_access = self.backend.access.element_tensor_entry(idofs, self._A_shape)
-
-            # Emit assignment
-            parts += [L.AssignAdd(A_access, L.Product(factors))]
-
-        return parts
-
-    def generate_finishing_statements(self):
-        """Generate finishing statements.
-
-        This includes assigning to output array if there is no integration.
-        """
-        parts = []
-
-        if not self.ir["quadrature_rules"]:  # Rather check ir["integral_type"]?
-            # TODO: Implement for expression support
-            error("Expression generation not implemented yet.")
-            # TODO: If no integration, assuming we generate an expression, and assign results here
-            # Corresponding code from compiler.py:
-            # assign_to_variables = tfmt.output_variable_names(len(final_variable_names))
-            # parts += list(format_assignments(zip(assign_to_variables, final_variable_names)))
-
-        return parts
diff --git a/uflacs/language/typenodes.py b/uflacs/language/typenodes.py
deleted file mode 100644
index d7d5fe1..0000000
--- a/uflacs/language/typenodes.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
-
-"""FIXME: Translate these classes to the CNode hierarchy."""
-
-
-class TemplateArgumentList(ASTNode):
-    singlelineseparators = ('<', ', ', '>')
-    multilineseparators = ('<\n', ',\n', '\n>')
-
-    def __init__(self, args, multiline=True):
-        self.args = args
-        self.multiline = multiline
-
-    def format(self, level):
-        if self.multiline:
-            container = Indented
-            start, sep, end = self.multilineseparators
-        else:
-            container = tuple
-            start, sep, end = self.singlelineseparators
-            # Add space to avoid >> template issue
-            last = self.args[-1]
-            if isinstance(last, TemplateArgumentList) or (
-                    isinstance(last, Type) and last.template_arguments):
-                end = ' ' + end
-        code = [sep.join(format_code(arg) for arg in self.args)]
-        code = (start, container(code), end)
-        return format_code(code, level)
-
-
-class Type(ASTNode):
-    def __init__(self, name, template_arguments=None, multiline=False):
-        self.name = name
-        self.template_arguments = template_arguments
-        self.multiline = multiline
-
-    def format(self, level):
-        code = self.name
-        if self.template_arguments:
-            code = code, TemplateArgumentList(self.template_arguments, self.multiline)
-        return format_code(code, level)
-
-
-class TypeDef(ASTNode):
-    def __init__(self, type_, typedef):
-        self.type_ = type_
-        self.typedef = typedef
-
-    def format(self, level):
-        code = ('typedef ', self.type_, " %s;" % self.typedef)
-        return format_code(code, level)
-
-
-# TODO: Add variable access type with type checking to replace explicit str instances all over the place.
-
-
-class ArrayAccess(ASTOperator):
-    def __init__(self, arraydecl, indices):
-        if isinstance(arraydecl, ArrayDecl):
-            self.arrayname = arraydecl.name
-        else:
-            self.arrayname = arraydecl
-
-        if isinstance(indices, (list, tuple)):
-            self.indices = indices
-        else:
-            self.indices = (indices,)
-
-        # Early error checking of array dimensions
-        if any(isinstance(i, int) and i < 0 for i in self.indices):
-            raise ValueError("Index value < 0.")
-
-        # Additional checks possible if we get an ArrayDecl instead of just a name
-        if isinstance(arraydecl, ArrayDecl):
-            if len(self.indices) != len(arraydecl.sizes):
-                raise ValueError("Invalid number of indices.")
-            if any((isinstance(i, int) and isinstance(d, int) and i >= d)
-                   for i, d in zip(self.indices, arraydecl.sizes)):
-                raise ValueError("Index value >= array dimension.")
-
-    def format(self, level):
-        brackets = tuple(("[", n, "]") for n in self.indices)
-        code = (self.arrayname, brackets)
-        return format_code(code, level)
-
-
-class Class(ASTStatement):
-    def __init__(self, name, superclass=None, public_body=None,
-                 protected_body=None, private_body=None,
-                 template_arguments=None, template_multiline=False):
-        self.name = name
-        self.superclass = superclass
-        self.public_body = public_body
-        self.protected_body = protected_body
-        self.private_body = private_body
-        self.template_arguments = template_arguments
-        self.template_multiline = template_multiline
-
-    def format(self, level):
-        code = []
-        if self.template_arguments:
-            code += [('template', TemplateArgumentList(self.template_arguments,
-                                                       self.template_multiline))]
-        if self.superclass:
-            code += ['class %s: public %s' % (self.name, self.superclass)]
-        else:
-            code += ['class %s' % self.name]
-        code += ['{']
-        if self.public_body:
-            code += ['public:', Indented(self.public_body)]
-        if self.protected_body:
-            code += ['protected:', Indented(self.protected_body)]
-        if self.private_body:
-            code += ['private:', Indented(self.private_body)]
-        code += ['};']
-        return format_code(code, level)
diff --git a/uflacs/params.py b/uflacs/params.py
deleted file mode 100644
index 4b5dcdd..0000000
--- a/uflacs/params.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
-
-"""Collection of exposed parameters available to tune form compiler algorithms."""
-
-def default_parameters():
-    return {
-        "enable_profiling": False,
-        "enable_factorization": False,  # True, # Fails for hyperelasticity demo in dolfin, needs debugging
-        "max_registers": 1024,  # 8 B * 1024 = 8 KB # TODO: Tune this for something very complex
-        "score_threshold": 3,  # TODO: Scoring is work in progress and this will change meaning later
-    }
diff --git a/uflacs/representation/compute_expr_ir.py b/uflacs/representation/compute_expr_ir.py
deleted file mode 100644
index 140126c..0000000
--- a/uflacs/representation/compute_expr_ir.py
+++ /dev/null
@@ -1,201 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2015 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>
-
-"""Algorithms for the representation phase of the form compilation."""
-
-from ufl import product
-from ufl.checks import is_cellwise_constant
-from uflacs.analysis.modified_terminals import is_modified_terminal, analyse_modified_terminal
-
-from uflacs.analysis.graph import build_graph
-from uflacs.analysis.graph_vertices import build_scalar_graph_vertices
-from uflacs.analysis.graph_rebuild import rebuild_with_scalar_subexpressions
-from uflacs.analysis.graph_dependencies import (compute_dependencies,
-                                                mark_active, mark_image)
-from uflacs.analysis.graph_ssa import (compute_dependency_count,
-                                       invert_dependencies,
-                                       default_cache_score_policy,
-                                       compute_cache_scores,
-                                       allocate_registers)
-
-from uflacs.analysis.factorization import compute_argument_factorization
-
-
-def build_scalar_graph(expressions):
-    """Build list representation of expression graph covering the given expressions.
-
-    TODO: Renaming, refactoring and cleanup of the graph building algorithms used in here
-    """
-
-    # Build the initial coarse computational graph of the expression
-    G = build_graph(expressions)
-
-    assert len(expressions) == 1, "Multiple expressions in graph building needs more work from this point on."
-
-    # Build more fine grained computational graph of scalar subexpressions
-    # TODO: Make it so that
-    #   expressions[k] <-> NV[nvs[k][:]],
-    #   len(nvs[k]) == value_size(expressions[k])
-    scalar_expressions = rebuild_with_scalar_subexpressions(G)
-
-    assert len(scalar_expressions) == sum(product(expr.ufl_shape) for expr in expressions)
-
-    # Build new list representation of graph where all vertices of V represent single scalar operations
-    e2i, V, target_variables = build_scalar_graph_vertices(scalar_expressions)
-
-    return e2i, V, target_variables
-
-
-def compute_expr_ir(expressions, parameters):
-    """FIXME: Refactoring in progress!
-
-    TODO: assuming more symbolic preprocessing
-    - Make caller apply pullback mappings for vector element functions
-
-    TODO:
-    Work for later:
-    - Apply some suitable renumbering of vertices and corresponding arrays prior to returning
-    - Allocate separate registers for each partition
-      (but e.g. argument[iq][i0] may need to be accessible in other loops)
-    - Improve register allocation algorithm
-
-    - Take a list of expressions as input to compile several expressions in one joined graph
-      (e.g. to compile a,L,M together for nonlinear problems)
-    """
-    # Wrap in list if we only get one expression
-    if not isinstance(expressions, list):
-        expressions = [expressions]
-
-    # TODO: Can we merge these three calls to something more efficient overall?
-    # Build scalar list-based graph representation
-    e2i, V, target_variables = build_scalar_graph(expressions)
-
-    # Compute sparse dependency matrix
-    dependencies = compute_dependencies(e2i, V)
-
-    # Compute factorization of arguments
-    argument_factorization, modified_arguments, V, target_variables, dependencies = \
-        compute_argument_factorization(V, target_variables, dependencies)
-
-    # Store modified arguments in analysed form
-    for i in range(len(modified_arguments)):
-        modified_arguments[i] = analyse_modified_terminal(modified_arguments[i])
-
-    # --- Various dependency analysis ---
-
-    # Count the number of dependencies every subexpr has
-    depcount = compute_dependency_count(dependencies)
-
-    # Build the 'inverse' of the sparse dependency matrix
-    inverse_dependencies = invert_dependencies(dependencies, depcount)
-
-    # Mark subexpressions of V that are actually needed for final result
-    active, num_active = mark_active(dependencies, target_variables)
-
-    # Build set of modified_terminal indices into factorized_vertices
-    modified_terminal_indices = [i for i, v in enumerate(V)
-                                 if is_modified_terminal(v)]
-
-    # Build piecewise/varying markers for factorized_vertices
-    spatially_dependent_terminal_indices = [i for i in modified_terminal_indices
-                                            if not is_cellwise_constant(V[i])]
-    varying, num_spatial = mark_image(inverse_dependencies,
-                                      spatially_dependent_terminal_indices)
-    piecewise = 1 - varying
-    # Skip non-active things
-    varying *= active
-    piecewise *= active
-
-    # TODO: Skip literals in both varying and piecewise
-    # nonliteral = ...
-    # varying *= nonliteral
-    # piecewise *= nonliteral
-
-    # TODO: Inspection of varying shows that factorization is
-    # needed for effective loop invariant code motion w.r.t. quadrature loop as well.
-    # Postphoning that until everything is working fine again.
-    # Core ingredients for such factorization would be:
-    # - Flatten products of products somehow
-    # - Sorting flattened product factors by loop dependency then by canonical ordering
-    # Or to keep binary products:
-    # - Rebalancing product trees ((a*c)*(b*d) -> (a*b)*(c*d)) to make piecewise quantities 'float' to the top of the list
-
-    # rank = max(len(k) for k in argument_factorization.keys())
-    # for i,a in enumerate(modified_arguments):
-    #    iarg = a.number()
-    # ipart = a.part()
-
-    # Build IR for the given expressions
-    expr_ir = {}
-
-    # Core expression graph:
-    expr_ir["V"] = V                               # (array) V-index -> UFL subexpression
-    expr_ir["target_variables"] = target_variables  # (array) Flattened input expression component index -> V-index
-
-    # Result of factorization:
-    expr_ir["modified_arguments"] = modified_arguments         # (array) MA-index -> UFL expression of modified arguments
-    expr_ir["argument_factorization"] = argument_factorization  # (dict) tuple(MA-indices) -> V-index of monomial factor
-
-    # TODO: More structured MA organization?
-    #modified_arguments[rank][block][entry] -> UFL expression of modified argument
-    #dofranges[rank][block] -> (begin, end)
-    # or
-    #modified_arguments[rank][entry] -> UFL expression of modified argument
-    #dofrange[rank][entry] -> (begin, end)
-    #argument_factorization: (dict) tuple(MA-indices (only relevant ones!)) -> V-index of monomial factor
-    # becomes
-    #argument_factorization: (dict) tuple(entry for each(!) rank) -> V-index of monomial factor ## doesn't cover intermediate f*u in f*u*v!
-
-    # Dependency structure of graph:
-    expr_ir["modified_terminal_indices"] = modified_terminal_indices  # (array) list of V-indices to modified terminals
-    #expr_ir["dependencies"] = dependencies                           # (CRS) V-index -> direct dependency V-index list
-    #expr_ir["inverse_dependencies"] = inverse_dependencies           # (CRS) V-index -> direct dependee V-index list
-
-    # Metadata about each vertex
-    #expr_ir["active"] = active       # (array) V-index -> bool
-    expr_ir["piecewise"] = piecewise  # (array) V-index -> bool
-    expr_ir["varying"] = varying     # (array) V-index -> bool
-
-    return expr_ir
-
-"""
-def old_code_useful_for_optimization():
-
-    # Use heuristics to mark the usefulness of storing every subexpr in a variable
-    scores = compute_cache_scores(V,
-                                  active,
-                                  dependencies,
-                                  inverse_dependencies,
-                                  partitions,  # TODO: Rewrite in terms of something else, this doesn't exist anymore
-                                  cache_score_policy=default_cache_score_policy)
-
-    # Allocate variables to store subexpressions in
-    allocations = allocate_registers(active, partitions, target_variables,
-                                     scores, int(parameters["max_registers"]), int(parameters["score_threshold"]))
-    target_registers = [allocations[r] for r in target_variables]
-    num_registers = sum(1 if x >= 0 else 0 for x in allocations)
-    # TODO: If we renumber we can allocate registers separately for each partition, which is probably a good idea.
-
-    expr_oir = {}
-    expr_oir["num_registers"] = num_registers
-    expr_oir["partitions"] = partitions
-    expr_oir["allocations"] = allocations
-    expr_oir["target_registers"] = target_registers
-    return expr_oir
-"""
-

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/fenics/ffc.git



More information about the debian-science-commits mailing list