[python-bumps] 01/20: New upstream version 0.7.6

Drew Parsons dparsons at moszumanska.debian.org
Sun Oct 29 06:29:23 UTC 2017


This is an automated email from the git hooks/post-receive script.

dparsons pushed a commit to tag debian/0.7.6-1
in repository python-bumps.

commit 176f534464e842d7677171cd2e838986e841b90c
Author: Drew Parsons <dparsons at debian.org>
Date:   Thu Oct 26 20:22:02 2017 +0800

    New upstream version 0.7.6
---
 .gitignore                                         |   36 +
 LICENSE.txt                                        |   64 +
 MANIFEST.in                                        |   18 +
 README.rst                                         |  115 +
 bin/bumps                                          |   20 +
 bin/bumps_gui                                      |   20 +
 bin/bumps_serve                                    |    9 +
 bin/bumps_workerd                                  |   26 +
 bin/launch.bat                                     |    5 +
 bumps.iss                                          |  148 +
 bumps/__init__.py                                  |   45 +
 bumps/bounds.py                                    |  788 +++++
 bumps/bspline.py                                   |  402 +++
 bumps/cheby.py                                     |  153 +
 bumps/cli.py                                       |  574 ++++
 bumps/curve.py                                     |  296 ++
 bumps/data.py                                      |  226 ++
 bumps/dream/__init__.py                            |   25 +
 bumps/dream/acr.py                                 |   91 +
 bumps/dream/bounds.py                              |  207 ++
 bumps/dream/core.py                                |  432 +++
 bumps/dream/corrplot.py                            |  175 +
 bumps/dream/crossover.py                           |  230 ++
 bumps/dream/diffev.py                              |  164 +
 bumps/dream/entropy.py                             |  351 ++
 bumps/dream/exppow.py                              |   55 +
 bumps/dream/formatnum.py                           |  451 +++
 bumps/dream/gelman.py                              |   81 +
 bumps/dream/geweke.py                              |   63 +
 bumps/dream/initpop.py                             |   98 +
 bumps/dream/ksmirnov.py                            |   35 +
 bumps/dream/mahal.py                               |   52 +
 bumps/dream/matlab.py                              |  270 ++
 bumps/dream/metropolis.py                          |   92 +
 bumps/dream/model.py                               |  244 ++
 bumps/dream/outliers.py                            |  140 +
 bumps/dream/state.py                               | 1029 ++++++
 bumps/dream/stats.py                               |  225 ++
 bumps/dream/tile.py                                |   55 +
 bumps/dream/util.py                                |   90 +
 bumps/dream/views.py                               |  375 +++
 bumps/dream/walk.py                                |  118 +
 bumps/errplot.py                                   |  133 +
 bumps/fitproblem.py                                |  641 ++++
 bumps/fitservice.py                                |  107 +
 bumps/fitters.py                                   |  979 ++++++
 bumps/formatnum.py                                 |  450 +++
 bumps/gui/__init__.py                              |    0
 bumps/gui/about.py                                 |  195 ++
 bumps/gui/app_frame.py                             |  239 ++
 bumps/gui/app_panel.py                             |  602 ++++
 bumps/gui/console.py                               |  208 ++
 bumps/gui/convergence_view.py                      |   67 +
 bumps/gui/data_view.py                             |  232 ++
 bumps/gui/fit_dialog.py                            |  312 ++
 bumps/gui/fit_thread.py                            |  191 ++
 bumps/gui/fit_view.py                              |   56 +
 bumps/gui/gui_app.py                               |  307 ++
 bumps/gui/input_list.py                            | 1093 +++++++
 bumps/gui/log_view.py                              |   51 +
 bumps/gui/parameter_view.py                        |  283 ++
 bumps/gui/plot_view.py                             |  220 ++
 bumps/gui/resfiles.py                              |  157 +
 bumps/gui/resources/__init__.py                    |    9 +
 bumps/gui/resources/bumps.ico                      |  Bin 0 -> 353118 bytes
 bumps/gui/resources/bumps_splash.jpg               |  Bin 0 -> 73466 bytes
 bumps/gui/resources/done.wav                       |  Bin 0 -> 2196 bytes
 bumps/gui/resources/import_script.png              |  Bin 0 -> 1166 bytes
 bumps/gui/resources/reload.png                     |  Bin 0 -> 6235 bytes
 bumps/gui/resources/start_fit.png                  |  Bin 0 -> 1675 bytes
 bumps/gui/resources/stop_fit.png                   |  Bin 0 -> 1965 bytes
 bumps/gui/signal.py                                |   61 +
 bumps/gui/summary_view.py                          |  212 ++
 bumps/gui/uncertainty_view.py                      |  101 +
 bumps/gui/util.py                                  |  105 +
 bumps/gui/utilities.py                             |  468 +++
 bumps/history.py                                   |  283 ++
 bumps/initpop.py                                   |  310 ++
 bumps/lsqerror.py                                  |  326 ++
 bumps/mapper.py                                    |  239 ++
 bumps/monitor.py                                   |  123 +
 bumps/mono.py                                      |  115 +
 bumps/mystic/__init__.py                           |    0
 bumps/mystic/condition.py                          |  249 ++
 bumps/mystic/examples/__init__.py                  |   31 +
 bumps/mystic/examples/circle.py                    |   86 +
 bumps/mystic/examples/corana.py                    |   63 +
 bumps/mystic/examples/decay.py                     |   49 +
 bumps/mystic/examples/model.py                     |   77 +
 bumps/mystic/examples/simple.py                    |    2 +
 bumps/mystic/optimizer/__init__.py                 |    0
 bumps/mystic/optimizer/de.py                       |  263 ++
 bumps/mystic/optimizer/diffev_compat.py            |  144 +
 bumps/mystic/solver.py                             |  331 ++
 bumps/mystic/stop.py                               |  874 +++++
 bumps/mystic/util.py                               |   81 +
 bumps/names.py                                     |   46 +
 bumps/numdifftools/LICENSE.txt                     |   27 +
 bumps/numdifftools/README.txt                      |    3 +
 bumps/numdifftools/__init__.py                     |    5 +
 bumps/numdifftools/core.py                         | 1365 ++++++++
 bumps/numdifftools/extrapolation.py                |  346 ++
 bumps/numdifftools/info.py                         |   93 +
 bumps/numdifftools/multicomplex.py                 |  370 +++
 bumps/openmp_ext.py                                |   69 +
 bumps/options.py                                   |  432 +++
 bumps/parameter.py                                 |  907 ++++++
 bumps/partemp.py                                   |  343 ++
 bumps/pdfwrapper.py                                |  294 ++
 bumps/plotutil.py                                  |  184 ++
 bumps/plugin.py                                    |  112 +
 bumps/pmath.py                                     |   93 +
 bumps/pymcfit.py                                   |  153 +
 bumps/pytwalk.py                                   |  710 ++++
 bumps/quasinewton.py                               |  844 +++++
 bumps/random_lines.py                              |  298 ++
 bumps/simplex.py                                   |  352 ++
 bumps/util.py                                      |  290 ++
 bumps/wsolve.py                                    |  452 +++
 check_examples.py                                  |   54 +
 check_fitters.py                                   |  106 +
 doc/Makefile                                       |  139 +
 doc/_extensions/README.txt                         |    4 +
 doc/_extensions/dollarmath.py                      |   47 +
 doc/_extensions/png.py                             | 3445 ++++++++++++++++++++
 doc/_extensions/slink.py                           |   62 +
 doc/_extensions/wx_directive.py                    |  424 +++
 doc/_static/haiku-site.css                         |   20 +
 doc/_static/logo.png                               |  Bin 0 -> 6268 bytes
 doc/conf.py                                        |  285 ++
 doc/examples/constraints/model.py                  |   18 +
 doc/examples/curvefit/curve.py                     |   88 +
 doc/examples/curvefit/poisson.py                   |  198 ++
 doc/examples/curvefit/readme.rst                   |   22 +
 doc/examples/curvefit/sim.png                      |  Bin 0 -> 78845 bytes
 doc/examples/curvefit/sim.py                       |   93 +
 doc/examples/entropy/check_entropy.py              |   71 +
 doc/examples/entropy/peak.py                       |   50 +
 doc/examples/index.rst                             |   17 +
 doc/examples/peaks/XY_mesh2.txt                    |   23 +
 doc/examples/peaks/model.py                        |  113 +
 doc/examples/peaks/peaks.py                        |  111 +
 doc/examples/peaks/plot.py                         |   33 +
 doc/examples/peaks/readme.rst                      |    8 +
 doc/examples/pymc/disaster_model.py                |    3 +
 doc/examples/pymc/model.py                         |   11 +
 doc/examples/test_functions/anticor.py             |   40 +
 doc/examples/test_functions/bounded.py             |   37 +
 doc/examples/test_functions/cross.py               |   46 +
 doc/examples/test_functions/mixture.py             |   96 +
 doc/examples/test_functions/model.py               |  255 ++
 doc/examples/test_functions/readme.rst             |   15 +
 doc/genmods.py                                     |  225 ++
 doc/gentut.py                                      |  101 +
 doc/getting_started/contributing.rst               |   53 +
 doc/getting_started/index.rst                      |   33 +
 doc/getting_started/install.rst                    |  254 ++
 doc/getting_started/license.rst                    |  122 +
 doc/getting_started/server.rst                     |  177 +
 doc/guide/corr.png                                 |  Bin 0 -> 772618 bytes
 doc/guide/data.rst                                 |   10 +
 doc/guide/dream-complete.png                       |  Bin 0 -> 333301 bytes
 doc/guide/dream-incomplete.png                     |  Bin 0 -> 142566 bytes
 doc/guide/entropy-continuous.png                   |  Bin 0 -> 36944 bytes
 doc/guide/entropy-discrete.png                     |  Bin 0 -> 16694 bytes
 doc/guide/entropy.rst                              |  100 +
 doc/guide/error.png                                |  Bin 0 -> 185525 bytes
 doc/guide/experiment.rst                           |  256 ++
 doc/guide/fit-amoeba.png                           |  Bin 0 -> 27669 bytes
 doc/guide/fit-de.png                               |  Bin 0 -> 28469 bytes
 doc/guide/fit-dream.png                            |  Bin 0 -> 28281 bytes
 doc/guide/fit-lm.png                               |  Bin 0 -> 25423 bytes
 doc/guide/fit-newton.png                           |  Bin 0 -> 26656 bytes
 doc/guide/fitting.rst                              |  491 +++
 doc/guide/index.rst                                |   97 +
 doc/guide/intro.rst                                |   70 +
 doc/guide/optimizer.rst                            |  767 +++++
 doc/guide/options.rst                              |  535 +++
 doc/guide/parameter.rst                            |  180 +
 doc/guide/var.png                                  |  Bin 0 -> 272133 bytes
 doc/index.rst                                      |   19 +
 doc/make.bat                                       |  155 +
 doc/pylit.py                                       | 1869 +++++++++++
 doc/rst_prolog                                     |   15 +
 doc/sitedoc.py                                     |   64 +
 extra/amqp_map/USECASE.txt                         |   60 +
 extra/amqp_map/__init__.py                         |    0
 extra/amqp_map/config.py                           |   28 +
 extra/amqp_map/core.py                             |  207 ++
 extra/amqp_map/example/echo.py                     |   10 +
 extra/amqp_map/example/rpc_echo.py                 |   16 +
 extra/amqp_map/example/square.py                   |   26 +
 extra/amqp_map/example/square_worker.py            |   17 +
 extra/amqp_map/jsonrpc.txt                         |  197 ++
 extra/amqp_map/pmap.py                             |  217 ++
 extra/amqp_map/rpc.py                              |  163 +
 extra/amqp_map/threaded.py                         |  165 +
 extra/amqp_map/url.py                              |  139 +
 extra/appbin/bumps                                 |   19 +
 extra/appbin/ipython                               |   22 +
 extra/bumps.icns                                   |  Bin 0 -> 103949 bytes
 extra/dmgpack.sh                                   |   65 +
 extra/dream_examples/anticor.py                    |   53 +
 extra/dream_examples/banana.m                      |   51 +
 extra/dream_examples/banana.py                     |   28 +
 extra/dream_examples/mixture.py                    |   35 +
 extra/dream_examples/mixture2.py                   |   67 +
 extra/dream_examples/noisybanana.py                |   26 +
 extra/dream_examples/quadfit.py                    |   23 +
 extra/fit_functions/__init__.py                    |   55 +
 extra/fit_functions/br8.py                         |   40 +
 extra/fit_functions/circle.py                      |   66 +
 extra/fit_functions/corana.py                      |   46 +
 extra/fit_functions/dejong.py                      |   83 +
 extra/fit_functions/fosc3d.py                      |   22 +
 extra/fit_functions/griewangk.py                   |   34 +
 extra/fit_functions/lorentzian.py                  |   94 +
 extra/fit_functions/mogi.py                        |   32 +
 extra/fit_functions/poly.py                        |   70 +
 extra/fit_functions/wavy.py                        |   32 +
 extra/fit_functions/zimmermann.py                  |   31 +
 extra/installer-hooks/hook-bumps.py                |   11 +
 .../installer-hooks/hook-scipy.special._ufuncs.py  |    1 +
 extra/jobqueue/README                              |    5 +
 extra/jobqueue/__init__.py                         |    0
 extra/jobqueue/client.py                           |  147 +
 extra/jobqueue/daemon.py                           |  310 ++
 extra/jobqueue/db.py                               |  112 +
 extra/jobqueue/dispatcher.py                       |  189 ++
 extra/jobqueue/jobid.py                            |   25 +
 extra/jobqueue/mimedict.py                         |   57 +
 extra/jobqueue/notify.py                           |   64 +
 extra/jobqueue/resourcelimits.py                   |   43 +
 extra/jobqueue/rest.py                             |  176 +
 extra/jobqueue/runjob.py                           |   87 +
 extra/jobqueue/server.py                           |  322 ++
 extra/jobqueue/services.py                         |   29 +
 extra/jobqueue/simplequeue.py                      |   83 +
 extra/jobqueue/slurm.py                            |  184 ++
 extra/jobqueue/store.py                            |   53 +
 extra/jobqueue/templates/index.html                |    8 +
 extra/jobqueue/test/__init__.py                    |    0
 extra/jobqueue/test/test_crud.py                   |   49 +
 extra/jobqueue/test/test_db.py                     |  102 +
 extra/jobqueue/worker.py                           |  129 +
 extra/jobqueue/www/hello.html                      |    1 +
 extra/jobqueue/www/jobqueue.wsgi                   |   50 +
 extra/jobqueue/www/test.wsgi                       |    8 +
 extra/sasview/FitPage2.fitv                        |  393 +++
 extra/sasview/cyl_400_40.txt                       |   56 +
 extra/sasview/model.py                             |   51 +
 extra/sasview/modelcyl.py                          |   36 +
 extra/sasview/sasbumps.py                          |  247 ++
 extra/sasview/smodel.py                            |   27 +
 installer.spec                                     |   31 +
 master_builder.py                                  |  543 +++
 rtd-requirements                                   |    3 +
 run.py                                             |   65 +
 setup.py                                           |   57 +
 setup_py2app.py                                    |  113 +
 setup_py2exe.py                                    |  315 ++
 test.py                                            |   89 +
 unix_build.sh                                      |    9 +
 263 files changed, 45052 insertions(+)

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..3d37513
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,36 @@
+# Eclipse/pycharm settings files
+.idea
+.project
+.pydevproject
+
+# editor backup files
+*.swp
+*~
+*.bak
+
+# build/test
+.settings
+.coverage
+/build/
+/dist/
+/bumps.egg-info/
+bumps.iss-include
+iss-version
+
+# doc targets
+/doc/_build/
+/doc/api/
+/doc/tutorial/
+/doc/dream/
+
+# python droppings from running in place
+__pycache__/
+*.pyc
+*.pyo
+*.so
+*.pyd
+*.dll
+*.dyld
+
+# run in place sets .mplconfig
+.mplconfig
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100755
index 0000000..4e407cd
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,64 @@
+Bumps is in the public domain.
+
+Code in individual files has copyright and license set by individual authors.
+
+Bumps GUI
+---------
+
+Copyright (C) 2006-2011, University of Maryland
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+
+DREAM
+-----
+
+Copyright (c) 2008, Los Alamos National Security, LLC
+All rights reserved.
+
+Copyright 2008. Los Alamos National Security, LLC. This software was produced under U.S.
+Government contract DE-AC52-06NA25396 for Los Alamos National Laboratory (LANL), which is
+operated by Los Alamos National Security, LLC for the U.S. Department of Energy. The U.S.
+Government has rights to use, reproduce, and distribute this software.
+
+NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR
+IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE.  If software is modified to
+produce derivative works, such modified software should be clearly marked, so as not to
+confuse it with the version available from LANL.
+
+Additionally, redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright notice, this list of
+  conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice, this list of
+  conditions and the following disclaimer in the documentation and/or other materials
+  provided with the distribution.
+* Neither the name of Los Alamos National Security, LLC, Los Alamos National Laboratory, LANL
+  the U.S. Government, nor the names of its contributors may be used to endorse or promote
+  products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY LOS ALAMOS NATIONAL SECURITY, LLC AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LOS
+ALAMOS NATIONAL SECURITY, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100755
index 0000000..2d1764d
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,18 @@
+# The purpose of this file is to modify the list of files to include/exclude in
+# the source archive created by the 'python setup.py sdist' command.  Executing
+# setup.py in the top level directory creates a default list (or manifest) and
+# the directives in this file add or subtract files from the resulting MANIFEST
+# file that drives the creation of the archive.
+#
+# Note: apparently due to a bug in setup, you cannot include a file whose name
+# starts with 'build' as in 'build_everything.py'.
+
+# Add files to the archive in addition to those that are installed by running
+# 'python setup.py install'.  Typically these extra files are build related.
+include MANIFEST.in  # this file
+include master_builder.py
+include bumps.iss
+include setup_py2exe.py
+
+# Delete files
+#prune this that
diff --git a/README.rst b/README.rst
new file mode 100755
index 0000000..4893f9c
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,115 @@
+==============================================
+Bumps: data fitting and uncertainty estimation
+==============================================
+
+Bumps provides data fitting and Bayesian uncertainty modeling for inverse
+problems.  It has a variety of optimization algorithms available for locating
+the most likely value for function parameters given data, and for exploring
+the uncertainty around the minimum.
+
+Installation is with the usual python installation command::
+
+    pip install bumps
+
+Once the system is installed, you can verify that it is working with::
+
+    bumps doc/examples/peaks/model.py --chisq
+
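A minimal model file for that command might look like the sketch below (illustrative only: the straight-line function, data and parameter ranges are invented, and it assumes the Curve and FitProblem interface exported by bumps.names)::

    # example.py -- a made-up model, run with: bumps example.py --chisq
    import numpy as np
    from bumps.names import Curve, FitProblem

    def line(x, m, b):
        return m*x + b

    x = np.linspace(0, 10, 11)
    y = 2.5*x + 1.0 + np.random.randn(11)        # synthetic data
    M = Curve(line, x, y, dy=1.0, m=2.0, b=0.0)  # initial parameter values
    M.m.range(0, 5)
    M.b.range(-5, 5)
    problem = FitProblem(M)
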
+Documentation is available at `readthedocs <http://bumps.readthedocs.org>`_
+
+.. image:: https://zenodo.org/badge/18489/bumps/bumps.svg
+    :target: https://zenodo.org/badge/latestdoi/18489/bumps/bumps
+
+Release notes
+=============
+
+v0.7.6 2016-08-05
+-----------------
+
+* add --view option to command line which gets propagated to the model plotter
+* add support for probability p(x) for vector x using VectorPDF(f,x0)
+* rename DirectPDF to DirectProblem, and allow it to run in GUI
+* data reader supports multi-part files, with parts separated by blank lines
+* add gaussian mixture and laplace examples
+* bug fix: plots were failing if model name contains a '.'
+* miscellaneous code cleanup
+
+v0.7.5.10 2016-05-04
+--------------------
+
+* gui: undo code cleaning operation which broke the user interface
+
+v0.7.5.9 2016-04-22
+-------------------
+
+* population initializers allow indefinite bounds
+* use single precision criterion for levenberg-marquardt and bfgs
+* implement simple, faster, less accurate Hessian & Jacobian
+* compute uncertainty estimate from Jacobian if problem is sum of squares
+* gui: fit selection window acts like a dialog
+
+v0.7.5.8 2016-04-18
+-------------------
+
+* accept model.par output from a different model
+* show residuals with curve fit output
+* only show correlations for selected variables
+* show tics on correlations if small number
+* improve handling of uncertainty estimate from curvature
+* tweak dream algorithm -- maybe improve the acceptance ratio?
+* allow model to set visible variables in output
+* improve handling of arbitrary probability density functions
+* simplify loading of pymc models
+* update to numdifftools 0.9.14
+* bug fix: improved handling of ill-conditioned fits
+* bug fix: avoid copying mcmc chain during run
+* bug fix: more robust handling of --time limit
+* bug fix: support newer versions of matplotlib and numpy
+* miscellaneous tweaks and fixes
+
+v0.7.5.7 2015-09-21
+-------------------
+
+* add entropy calculator (still unreliable for high dimensional problems)
+* adjust scaling of likelihood (the green line) to match histogram area
+* use --samples to specify the number of samples from the distribution
+* mark this and future releases with a DOI at zenodo.org
+
+v0.7.5.6 2015-06-03
+-------------------
+
+* tweak uncertainty calculations so they don't fail on bad models
+
+v0.7.5.5 2015-05-07
+-------------------
+
+* documentation updates
+
+v0.7.5.4 2014-12-05
+-------------------
+
+* use relative rather than absolute noise in dream, which lets us fit target
+  values in the order of 1e-6 or less.
+* fix covariance population initializer
+
+v0.7.5.3 2014-11-21
+-------------------
+
+* use --time to stop after a given number of hours
+* Levenberg-Marquardt: fix "must be 1-d or 2-d" bug
+* improve curvefit interface
+
+v0.7.5.2 2014-09-26
+-------------------
+
+* pull numdifftools dependency into the repository
+
+v0.7.5.1 2014-09-25
+-------------------
+
+* improve the load_model interface
+
+v0.7.5 2014-09-10
+-----------------
+
+* Pure python release
diff --git a/bin/bumps b/bin/bumps
new file mode 100755
index 0000000..d9e84b1
--- /dev/null
+++ b/bin/bumps
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+# This program is in the public domain.
+# Authors: Paul Kienzle and James Krycka
+
+"""
+This script starts the command line interface of the Bumps
+Modeler application to process the command just entered.
+"""
+
+# ========================== Start program ==================================
+# Process the command line that has been entered.
+if __name__ == "__main__":
+    # This is necessary when running the application from a frozen image and
+    # using the --parallel option.  Note that freeze_support() has no effect
+    # when running from a python script (i.e., in a non-frozen environment).
+    import multiprocessing
+    multiprocessing.freeze_support()
+
+    import bumps.cli
+    bumps.cli.main()
diff --git a/bin/bumps_gui b/bin/bumps_gui
new file mode 100755
index 0000000..c9818ca
--- /dev/null
+++ b/bin/bumps_gui
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+# This program is in the public domain.
+# Authors: Paul Kienzle and James Krycka
+
+"""
+This script starts the graphical user interface of the Bumps
+Modeler application.
+"""
+
+# ========================== Start program ==================================
+# Process the command line that has been entered.
+if __name__ == "__main__":
+    # This is necessary when running the application from a frozen image and
+    # using the --parallel option.  Note that freeze_support() has no effect
+    # when running from a python script (i.e., in a non-frozen environment).
+    import multiprocessing
+    multiprocessing.freeze_support()
+
+    import bumps.gui.gui_app
+    bumps.gui.gui_app.main()
diff --git a/bin/bumps_serve b/bin/bumps_serve
new file mode 100755
index 0000000..9fca6d7
--- /dev/null
+++ b/bin/bumps_serve
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+"""
+Debug server for the bumps fit infrastructure.
+"""
+
+from jobqueue.serve import serve
+serve()
+
+
diff --git a/bin/bumps_workerd b/bin/bumps_workerd
new file mode 100755
index 0000000..407d13f
--- /dev/null
+++ b/bin/bumps_workerd
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+import os
+import sys
+import logging
+from jobqueue import daemon, worker
+
+LOGDIR='/var/log/bumps'
+LOGFILE=os.path.join(LOGDIR,'.bumps-worker.log')
+PIDFILE=os.path.join(LOGDIR,'.bumps-worker.pid')
+ERRFILE=os.path.join(LOGDIR,'.bumps-worker.out')
+
+def startup():
+    if not os.path.exists(LOGDIR): os.makedirs(LOGDIR)
+    if len(sys.argv) > 1 and sys.argv[1] == 'debug':
+        loglevel, logfile = logging.DEBUG, None
+    else:
+        loglevel, logfile = logging.ERROR, LOGFILE
+        daemon.startstop(pidfile=PIDFILE, stdout=ERRFILE)
+    logging.basicConfig(level=loglevel,
+                        format='%(asctime)s %(levelname)-8s %(message)s',
+                        datefmt = '%y-%m-%d %H:%M:%S',
+                        filename = logfile, filemode='a')
+    worker.main()
+
+if __name__ == "__main__": startup()
diff --git a/bin/launch.bat b/bin/launch.bat
new file mode 100755
index 0000000..36e62f8
--- /dev/null
+++ b/bin/launch.bat
@@ -0,0 +1,5 @@
+@echo off
+rem Add location of executing batch file to path for duration of command window.
+SET BATLOC=%~dp0
+PATH %BATLOC%;%PATH%
+cmd /k bumps --help
diff --git a/bumps.iss b/bumps.iss
new file mode 100755
index 0000000..511c6f5
--- /dev/null
+++ b/bumps.iss
@@ -0,0 +1,148 @@
+; -- bumps.iss -- an Inno Setup Script for Bumps
+; This script is used by the Inno Setup Compiler to build a Windows XP
+; installer/uninstaller.
+; The script is written to explicitly allow multiple versions of the
+; application to be installed simultaneously in separate subdirectories such
+; as "Bumps 0.5.0", "Bumps 0.7.2", and "Bumps 1.0" under a group directory.
+
+; NOTE: In order to support more than one version of the application
+; installed simultaneously, the AppName, Desktop shortcut name, and Quick
+; Start shortcut name must be unique among versions.  This is in addition to
+; having unique names (in the more obvious places) for DefaultDirName,
+; DefaultGroupName, and output file name.
+
+; By default, when installing:
+; - The destination folder will be "C:\Program Files\DANSE\Bumps x.y.z"
+; - A desktop icon will be created with the label "Bumps x.y.z"
+; - A quickstart icon is optional
+; - A start menu folder will be created with the name DANSE -> Bumps x.y.z
+; By default, when uninstalling Bumps x.y.z
+; - The uninstall can be initiated from either the:
+;   * Start menu via DANSE -> Bumps x.y.z -> Uninstall Bumps
+;   * Start menu via Control Panel - > Add or Remove Programs -> Bumps x.y.z
+; - It will not delete the C:\Program Files\DANSE\Bumps x.y.z folder if it
+;   contains any user created files
+; - It will delete any desktop or quickstart icons for Bumps that were
+;   created on installation
+
+; NOTE: The Quick Start Pack for the Inno Setup Compiler needs to be installed
+; with the Preprocessor add-on selected to support use of #define statements.
+#define MyAppName "Bumps"
+#define MyAppNameLowercase "bumps"
+#define MyGroupFolderName "DANSE"
+#define MyAppPublisher "NIST & University of Maryland"
+#define MyAppURL "http://www.reflectometry.org/danse/"
+; Use a batch file to launch bumps.exe to setup a custom environment.
+#define MyAppCLIFileName "launch.bat"
+#define MyAppGUIFileName "bumps.exe"
+#define MyIconFileName "bumps.ico"
+#define MyIconPath = "bumps-data/bumps.ico"
+#define MyReadmeFileName "README.txt"
+#define MyLicenseFileName "LICENSE.txt"
+#define Space " "
+; Use updated version string if present in the include file.  It is expected that the Bumps
+; build script will create this file using the application's internal version string to create
+; a define statement in the format shown below.
+#define MyAppVersion "0.0.0"
+#ifexist "iss-version"
+    #include "iss-version"
+#endif
+
+[Setup]
+; Make the AppName string unique so that other versions of the program can be installed simultaneously.
+; This is done by using the name and version of the application together as the AppName.
+AppName={#MyAppName}{#Space}{#MyAppVersion}
+AppVerName={#MyAppName}{#Space}{#MyAppVersion}
+AppPublisher={#MyAppPublisher}
+ChangesAssociations=yes
+; If you do not want a space in folder names, omit {#Space} or replace it with a hyphen char, etc.
+DefaultDirName={pf}\{#MyGroupFolderName}\{#MyAppName}{#Space}{#MyAppVersion}
+DefaultGroupName={#MyGroupFolderName}\{#MyAppName}{#Space}{#MyAppVersion}
+Compression=lzma/max
+SolidCompression=yes
+DisableProgramGroupPage=yes
+; A file extension of .exe will be appended to OutputBaseFilename.
+OutputBaseFilename={#MyAppNameLowercase}-{#MyAppVersion}-win32
+OutputManifestFile={#MyAppNameLowercase}-{#MyAppVersion}-win32-manifest.txt
+; Note that the icon file is in the bumps\gui\resources subdirectory, not in the top-level directory.
+SetupIconFile=bumps\gui\resources\{#MyIconFileName}
+LicenseFile={#MyLicenseFileName}
+SourceDir=.
+OutputDir=.
+PrivilegesRequired=none
+;;;InfoBeforeFile=display_before_install.txt
+;;;InfoAfterFile=display_after_install.txt
+
+; The App*URL directives are for display in the Add/Remove Programs control panel and are all optional
+AppPublisherURL={#MyAppURL}
+AppSupportURL={#MyAppURL}
+AppUpdatesURL={#MyAppURL}
+
+[Languages]
+Name: "english"; MessagesFile: "compiler:Default.isl"
+
+[Files]
+; This script assumes that the output from the previously run py2exe packaging process is in .\dist\...
+; NOTE: Don't use "Flags: ignoreversion" on any shared system files
+Source: "dist\*"; Excludes: "examples,doc"; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs
+;Source: "dist\examples\*"; DestDir: "{userdocs}\{#MyAppName}\examples"; Flags: ignoreversion recursesubdirs createallsubdirs
+Source: "doc\tutorial\*"; DestDir: "{userdocs}\{#MyAppName}\examples"; Flags: ignoreversion recursesubdirs createallsubdirs
+
+; The following Pascal function checks for the presence of the VC++ 2008 DLL folder on the target system
+; to determine if the VC++ 2008 Redistributable kit needs to be installed.
+[Code]
+function InstallVC90CRT(): Boolean;
+begin
+    Result := not DirExists('C:\WINDOWS\WinSxS\x86_Microsoft.VC90.CRT_1fc8b3b9a1e18e3b_9.0.21022.8_x-ww_d08d0375');
+end;
+
+[Tasks]
+Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}"
+Name: "quicklaunchicon"; Description: "{cm:CreateQuickLaunchIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked
+
+[Icons]
+; This section creates shortcuts.
+; - {group} refers to shortcuts in the Start Menu.
+; - {commondesktop} refers to shortcuts on the desktop.
+; - {userappdata} refers to shortcuts in the Quick Start menu on the tool bar.
+;
+; When running the application in command line mode, we want to keep the command window open when it
+; exits so that the user can run it again from the window.  Unfortunately, this section does not have
+; a flag for keeping the command window open on exit.  To accomplish this, a batch file is run that
+; creates the command window and starts the Windows command interpreter.  This provides the same
+; environment as starting a command window using the run dialog box from the Windows start menu and
+; entering a command such as "cmd" or "cmd /k <file-to-execute>".
+;
+; When running the application in GUI mode, we simply run the executable without a console window.
+Name: "{group}\Launch {#MyAppName} GUI"; Filename: "{app}\{#MyAppGUIFileName}"; IconFilename: "{app}\{#MyIconPath}"; WorkingDir: "{userdocs}\{#MyAppName}"
+Name: "{group}\Launch {#MyAppName} CLI"; Filename: "{app}\{#MyAppCLIFileName}"; IconFilename: "{app}\{#MyIconPath}"; WorkingDir: "{userdocs}\{#MyAppName}"; Flags: runmaximized
+Name: "{group}\{cm:ProgramOnTheWeb,{#MyAppName}}"; Filename: "{#MyAppURL}"
+Name: "{group}\{cm:UninstallProgram,{#MyAppName}}"; Filename: "{uninstallexe}"
+Name: "{commondesktop}\{#MyAppName} GUI{#Space}{#MyAppVersion}"; Filename: "{app}\{#MyAppGUIFileName}"; Tasks: desktopicon; WorkingDir: "{userdocs}\{#MyAppName}"; IconFilename: "{app}\{#MyIconPath}"
+Name: "{commondesktop}\{#MyAppName} CLI{#Space}{#MyAppVersion}"; Filename: "{app}\{#MyAppCLIFileName}"; Tasks: desktopicon; WorkingDir: "{userdocs}\{#MyAppName}"; IconFilename: "{app}\{#MyIconPath}"; Flags: runmaximized
+Name: "{userappdata}\Microsoft\Internet Explorer\Quick Launch\{#MyAppName} GUI{#Space}{#MyAppVersion}"; Filename: "{app}\{#MyAppGUIFileName}"; Tasks: quicklaunchicon; WorkingDir: "{userdocs}\{#MyAppName}"; IconFilename: "{app}\{#MyIconPath}"
+Name: "{userappdata}\Microsoft\Internet Explorer\Quick Launch\{#MyAppName} CLI{#Space}{#MyAppVersion}"; Filename: "{app}\{#MyAppCLIFileName}"; Tasks: quicklaunchicon; WorkingDir: "{userdocs}\{#MyAppName}"; IconFilename: "{app}\{#MyIconPath}"; Flags: runmaximized
+
+[Run]
+;;;Filename: "{app}\{#MyAppGUIFileName}"; Description: "{cm:LaunchProgram,{#MyAppName} GUI}"; WorkingDir: "{userdocs}\{#MyAppName}"; Flags: nowait postinstall skipifsilent
+;;;Filename: "{app}\{#MyAppCLIFileName}"; Description: "{cm:LaunchProgram,{#MyAppName} CLI}"; WorkingDir: "{userdocs}\{#MyAppName}"; Flags: nowait postinstall skipifsilent runmaximized unchecked
+Filename: "{app}\{#MyReadmeFileName}"; Description: "Read Release Notes"; Verb: "open"; Flags: shellexec skipifdoesntexist waituntilterminated postinstall skipifsilent unchecked
+; Install the Microsoft C++ DLL redistributable package if it is provided and the DLLs are not present on the target system.
+; Note that the redistributable package is included if the app was built using Python 2.6 or 2.7, but not with 2.5.
+; Parameter options:
+; - for silent install use: "/q"
+; - for silent install with progress bar use: "/qb"
+; - for silent install with progress bar but disallow cancellation of operation use: "/qb!"
+; Note that we do not use the postinstall flag as this would display a checkbox and thus require the user to decide what to do.
+Filename: "{app}\vcredist_x86.exe"; Parameters: "/qb!"; WorkingDir: "{tmp}"; StatusMsg: "Installing Microsoft Visual C++ 2008 Redistributable Package ..."; Check: InstallVC90CRT(); Flags: skipifdoesntexist waituntilterminated
+
+[UninstallDelete]
+; Delete directories and files that are dynamically created by the application (i.e. at runtime).
+Type: filesandordirs; Name: "{localappdata}\bumps-{#MyAppVersion}"
+Type: files; Name: "{app}\*.exe.log"
+; The following is a workaround for the case where the application is installed and uninstalled but the
+; {app} directory is not deleted because it has user files.  Then the application is installed into the
+; existing directory, user files are deleted, and the application is uninstalled again.  Without the
+; directive below, {app} will not be deleted because Inno Setup did not create it during the previous
+; installation.
+Type: dirifempty; Name: "{app}"
diff --git a/bumps/__init__.py b/bumps/__init__.py
new file mode 100644
index 0000000..ae6e0dc
--- /dev/null
+++ b/bumps/__init__.py
@@ -0,0 +1,45 @@
+# This program is in the public domain
+# Author: Paul Kienzle
+"""
+Bumps: curve fitter with uncertainty estimation
+
+This package provides tools for modeling parametric systems in a Bayesian
+context, with routines for finding the maximum likelihood and the
+posterior probability density function.
+
+A graphical interface allows direct manipulation of the model parameters.
+
+See http://www.reflectometry.org/danse/reflectometry for online manuals.
+"""
+
+__version__ = "0.7.6"
+
+
+def data_files():
+    """
+    Return the data files associated with the package for setup_py2exe.py.
+
+    The format is a list of (directory, [files...]) pairs which can be
+    used directly in the py2exe setup script as::
+
+        setup(...,
+              data_files=data_files(),
+              ...)
+    """
+    from .gui.utilities import data_files
+    return data_files()
+
+
+def package_data():
+    """
+    Return the data files associated with the package for setup.py.
+
+    The format is a dictionary of {'fully.qualified.module': [files...]}
+    used directly in the setup script as::
+
+        setup(...,
+              package_data=package_data(),
+              ...)
+    """
+    from .gui.utilities import package_data
+    return package_data()
diff --git a/bumps/bounds.py b/bumps/bounds.py
new file mode 100644
index 0000000..447601d
--- /dev/null
+++ b/bumps/bounds.py
@@ -0,0 +1,788 @@
+# This program is in the public domain
+# Author: Paul Kienzle
+"""
+Parameter bounds and prior probabilities.
+
+Parameter bounds encompass several features of our optimizers.
+
+First and most trivially they allow for bounded constraints on
+parameter values.
+
+Secondly, for parameter values known to follow some distribution,
+the bounds encode a penalty function as the value strays from
+its nominal value.  When the fit uses a negative log likelihood
+cost function, this penalty naturally contributes to the overall
+likelihood measure.
+
+Predefined bounds are::
+
+    Unbounded
+        range (-inf, inf)
+    BoundedBelow
+        range (base, inf)
+    BoundedAbove
+        range (-inf, base)
+    Bounded
+        range (low, high)
+    Normal
+        range (-inf, inf) with gaussian probability
+    BoundedNormal
+        range (low, high) with gaussian probability within
+    SoftBounded
+        range (low, high) with gaussian probability outside
+
+New bounds can be defined following the abstract base class
+interface defined in :class:`Bounds`, or using Distribution(rv)
+where rv is a scipy.stats continuous distribution.
+
+For generating bounds given a value, we provide a few helper
+functions::
+
+    v +/- d:  pm(x,dx) or pm(x,-dm,+dp) or pm(x,+dp,-dm)
+        return (x-dm,x+dp) limited to 2 significant digits
+    v +/- p%: pmp(x,p) or pmp(x,-pm,+pp) or pmp(x,+pp,-pm)
+        return (x-pm*x/100, x+pp*x/100) limited to 2 sig. digits
+    pm_raw(x,dx) or pm_raw(x,-dm,+dp) or pm_raw(x,+dp,-dm)
+        return (x-dm,x+dp)
+    pmp_raw(x,p) or pmp_raw(x,-pm,+pp) or pmp_raw(x,+pp,-pm)
+        return (x-pm*x/100, x+pp*x/100)
+    nice_range((lo,hi))
+        return (lo,hi) limited to 2 significant digits
+"""
+from __future__ import division
+__all__ = ['pm', 'pmp', 'pm_raw', 'pmp_raw', 'nice_range', 'init_bounds',
+           'Bounds', 'Unbounded', 'Bounded', 'BoundedAbove', 'BoundedBelow',
+           'Distribution', 'Normal', 'BoundedNormal', 'SoftBounded']
+
+import math
+from math import log, log10, sqrt, pi, ceil, floor
+
+from numpy import inf, isinf, isfinite, clip
+import numpy.random as RNG
+
+try:
+    from scipy.stats import norm as normal_distribution
+except ImportError:
+    def normal_distribution(*args, **kw):
+        raise RuntimeError("scipy.stats unavailable")
+
+
+def pm(v, *args):
+    """
+    Return the tuple (~v-dv,~v+dv), where ~expr is a 'nice' number near to
+    the value of expr.  For example::
+
+        >>> r = pm(0.78421, 0.0023145)
+        >>> print("%g - %g"%r)
+        0.7818 - 0.7866
+
+    If called as pm(value, +dp, -dm) or pm(value, -dm, +dp),
+    return (~v-dm, ~v+dp).
+    """
+    return nice_range(pm_raw(v, *args))
+
+
+def pmp(v, *args):
+    """
+    Return the tuple (~v-%v,~v+%v), where ~expr is a 'nice' number near to
+    the value of expr.  For example::
+
+        >>> r = pmp(0.78421, 10)
+        >>> print("%g - %g"%r)
+        0.7 - 0.87
+        >>> r = pmp(0.78421, 0.1)
+        >>> print("%g - %g"%r)
+        0.7834 - 0.785
+
+    If called as pmp(value, +pp, -pm) or pmp(value, -pm, +pp),
+    return (~v-pm%v, ~v+pp%v).
+    """
+    return nice_range(pmp_raw(v, *args))
+
+# Generate ranges using x +/- dx or x +/- p%*x
+
+
+def pm_raw(v, *args):
+    """
+    Return the tuple [v-dv,v+dv].
+
+    If called as pm_raw(value, +dp, -dm) or pm_raw(value, -dm, +dp),
+    return (v-dm, v+dp).
+    """
+    if len(args) == 1:
+        dv = args[0]
+        return v - dv, v + dv
+    elif len(args) == 2:
+        plus, minus = args
+        if plus < minus:
+            plus, minus = minus, plus
+        # if minus > 0 or plus < 0:
+        #    raise TypeError("pm(value, p1, p2) requires both + and - values")
+        return v + minus, v + plus
+    else:
+        raise TypeError("pm(value, delta) or pm(value, -p1, +p2)")
+
+
+def pmp_raw(v, *args):
+    """
+    Return the tuple [v-%v,v+%v]
+
+    If called as pmp_raw(value, +pp, -pm) or pmp_raw(value, -pm, +pp),
+    return (v-pm%v, v+pp%v).
+    """
+    if len(args) == 1:
+        percent = args[0]
+        b1, b2 = v * (1 - 0.01 * percent), v * (1 + 0.01 * percent)
+    elif len(args) == 2:
+        plus, minus = args
+        if plus < minus:
+            plus, minus = minus, plus
+        # if minus > 0 or plus < 0:
+        #    raise TypeError("pmp(value, p1, p2) requires both + and - values")
+        b1, b2 = v * (1 + 0.01 * minus), v * (1 + 0.01 * plus)
+    else:
+        raise TypeError("pmp(value, delta) or pmp(value, -p1, +p2)")
+
+    return (b1, b2) if v > 0 else (b2, b1)
+
+
+def nice_range(bounds):
+    """
+    Given a range, return an enclosing range accurate to two digits.
+    """
+    step = bounds[1] - bounds[0]
+    if step > 0:
+        d = 10 ** (floor(log10(step)) - 1)
+        return floor(bounds[0]/d)*d, ceil(bounds[1]/d)*d
+    else:
+        return bounds
+
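A small sketch of how the raw and rounded helpers above relate, assuming the package is importable as bumps.bounds; the values follow the pm doctest::

    from bumps.bounds import pm, pm_raw, nice_range

    raw = pm_raw(0.78421, 0.0023145)    # unrounded, about (0.7818955, 0.7865245)
    assert nice_range(raw) == pm(0.78421, 0.0023145)   # both give ~(0.7818, 0.7866)
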
+
+def init_bounds(v):
+    """
+    Returns a bounds object of the appropriate type given the arguments.
+
+    This is a helper factory to simplify the user interface to parameter
+    objects.
+    """
+    # if it is none, then it is unbounded
+    if v is None:
+        return Unbounded()
+
+    # if it isn't a tuple, assume it is a bounds type.
+    try:
+        lo, hi = v
+    except TypeError:
+        return v
+
+    # if it is a tuple, then determine what kind of bounds we have
+    if lo is None:
+        lo = -inf
+    if hi is None:
+        hi = inf
+    # TODO: consider issuing a warning instead of correcting reversed bounds
+    if lo >= hi:
+        lo, hi = hi, lo
+    if isinf(lo) and isinf(hi):
+        return Unbounded()
+    elif isinf(lo):
+        return BoundedAbove(hi)
+    elif isinf(hi):
+        return BoundedBelow(lo)
+    else:
+        return Bounded(lo, hi)
+
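A sketch of the factory behaviour described above (assumes bumps.bounds is importable; the numeric limits are arbitrary)::

    from bumps.bounds import (init_bounds, Unbounded, Bounded,
                              BoundedBelow, BoundedAbove)

    assert isinstance(init_bounds(None), Unbounded)
    assert isinstance(init_bounds((0, 1)), Bounded)
    assert isinstance(init_bounds((0, None)), BoundedBelow)
    assert isinstance(init_bounds((None, 0)), BoundedAbove)
    b = Bounded(0, 1)
    assert init_bounds(b) is b    # non-tuples are passed through unchanged
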
+
+class Bounds(object):
+
+    """
+    Bounds abstract base class.
+
+    A range is used for several purposes.  One is that it transforms parameters
+    between unbounded and bounded forms depending on the needs of the optimizer.
+
+    Another is that it generates random values in the range for stochastic
+    optimizers, and for initialization.
+
+    A third is that it returns the likelihood of seeing that particular value
+    for optimizers which use soft constraints.  If the cost function
+    being optimized is also a probability, this is an easy way to
+    incorporate information from other sorts of measurements into the model.
+    """
+    limits = (-inf, inf)
+    # TODO: need derivatives wrt bounds transforms
+
+    def get01(self, x):
+        """
+        Convert value into [0,1] for optimizers which are bounds constrained.
+
+        This can also be used as a scale bar to show approximately how close to
+        the end of the range the value is.
+        """
+
+    def put01(self, v):
+        """
+        Convert [0,1] into value for optimizers which are bounds constrained.
+        """
+
+    def getfull(self, x):
+        """
+        Convert value into (-inf,inf) for optimizers which are unconstrained.
+        """
+
+    def putfull(self, v):
+        """
+        Convert (-inf,inf) into value for optimizers which are unconstrained.
+        """
+
+    def random(self, n=1, target=1.0):
+        """
+        Return a randomly generated valid value.
+
+        *target* gives some scale independence to the random number
+        generator, allowing the initial value of the parameter to influence
+        the randomly generated value.  Otherwise fits without bounds have
+        too large a space to search through.
+        """
+
+    def nllf(self, value):
+        """
+        Return the negative log likelihood of seeing this value, with
+        likelihood scaled so that the maximum probability is one.
+
+        For uniform bounds, this either returns zero or inf.  For bounds
+        based on a probability distribution, this returns values between
+        zero and inf.  The scaling is necessary so that indefinite and
+        semi-definite ranges return a sensible value.  The scaling does
+        not affect the likelihood maximization process, though the resulting
+        likelihood is not easily interpreted.
+        """
+
+    def residual(self, value):
+        """
+        Return the parameter 'residual' in a way that is consistent with
+        residuals in the normal distribution.  The primary purpose is to
+        graphically display exceptional values in a way that is familiar
+        to the user.  For fitting, the scaled likelihood should be used.
+
+        To do this, we will match the cumulative density function value
+        with that for N(0,1) and find the corresponding percent point
+        function from the N(0,1) distribution.  In this way, for example,
+        a value to the right of 2.275% of the distribution would correspond
+        to a residual of -2, or 2 standard deviations below the mean.
+
+        For uniform distributions, with all values equally probable, we
+        use a value of +/-4 for values outside the range, and 0 for values
+        inside the range.
+        """
+
+    def start_value(self):
+        """
+        Return a default starting value if none given.
+        """
+        return self.put01(0.5)
+
+    def __contains__(self, v):
+        return self.limits[0] <= v <= self.limits[1]
+
+    def __str__(self):
+        limits = tuple(num_format(v) for v in self.limits)
+        return "(%s,%s)" % limits
+
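The residual convention in Bounds.residual above matches cumulative probabilities against the standard normal; a quick numeric check of the 2.275% example (assumes scipy is available)::

    from scipy.stats import norm

    # about 2.275% of a standard normal lies below -2
    assert abs(norm.ppf(0.02275) + 2.0) < 1e-3
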
+# CRUFT: python 2.5 doesn't format indefinite numbers properly on windows
+
+
+def num_format(v):
+    """
+    Number formatting which supports inf/nan on windows.
+    """
+    if isfinite(v):
+        return "%g" % v
+    elif isinf(v):
+        return "inf" if v > 0 else "-inf"
+    else:
+        return "NaN"
+
+
+class Unbounded(Bounds):
+
+    """
+    Unbounded parameter.
+
+    The random initial condition is assumed to be between 0 and 1.
+
+    The probability is uniformly 1/inf everywhere, which means the negative
+    log likelihood of P is inf everywhere.  A value of inf would interfere
+    with optimization routines, so we instead choose P == 1 everywhere.
+    """
+
+    def random(self, n=1, target=1.0):
+        scale = target + (target==0.)
+        return RNG.randn(n)*scale
+
+    def nllf(self, value):
+        return 0
+
+    def residual(self, value):
+        return 0
+
+    def get01(self, x):
+        return _get01_inf(x)
+
+    def put01(self, v):
+        return _put01_inf(v)
+
+    def getfull(self, x):
+        return x
+
+    def putfull(self, v):
+        return v
+
+
+class BoundedBelow(Bounds):
+
+    """
+    Semidefinite range bounded below.
+
+    The random initial condition is assumed to be within 1 of the lower bound.
+
+    [base,inf] <-> (-inf,inf) is direct above base+1, -1/(x-base) below
+    [base,inf] <-> [0,1] uses logarithmic compression.
+
+    Logarithmic compression works by converting sign*m*2^e+base to
+    sign*(e+1023+m), yielding a value in [0,2048]. This can then be
+    converted to a value in [0,1].
+
+    Note that the likelihood function is problematic: the true probability
+    of seeing any particular value in the range is infinitesimal, and that
+    is indistinguishable from values outside the range.  Instead we say
+    that P = 1 in range, and 0 outside.
+    """
+
+    def __init__(self, base):
+        self.limits = (base, inf)
+        self._base = base
+
+    def start_value(self):
+        return self._base + 1
+
+    def random(self, n=1, target=1.):
+        target = max(abs(target), abs(self._base))
+        scale = target + (target==0.)
+        return self._base + abs(RNG.randn(n)*scale)
+
+    def nllf(self, value):
+        return 0 if value >= self._base else inf
+
+    def residual(self, value):
+        return 0 if value >= self._base else -4
+
+    def get01(self, x):
+        m, e = math.frexp(x - self._base)
+        if m >= 0 and e <= _E_MAX:
+            v = (e + m) / (2. * _E_MAX)
+            return v
+        else:
+            return 0 if m < 0 else 1
+
+    def put01(self, v):
+        v = v * 2 * _E_MAX
+        e = int(v)
+        m = v - e
+        x = math.ldexp(m, e) + self._base
+        return x
+
+    def getfull(self, x):
+        v = x - self._base
+        return v if v >= 1 else 2 - 1. / v
+
+    def putfull(self, v):
+        x = v if v >= 1 else 1. / (2 - v)
+        return x + self._base
+
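A round-trip sketch of the compression described in the BoundedBelow docstring (assumes bumps.bounds is importable; the value 7.5 is arbitrary)::

    import math
    from bumps.bounds import BoundedBelow

    b = BoundedBelow(0)                     # parameter restricted to [0, inf)
    assert math.frexp(7.5) == (0.9375, 3)   # mantissa and exponent used by get01
    v = b.get01(7.5)                        # compressed into [0, 1]
    assert 0 < v < 1
    assert abs(b.put01(v) - 7.5) < 1e-12    # the transform inverts cleanly
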
+
+class BoundedAbove(Bounds):
+
+    """
+    Semidefinite range bounded above.
+
+    [-inf,base] <-> [0,1] uses logarithmic compression
+    [-inf,base] <-> (-inf,inf) is direct below base-1, 1/(base-x) above
+
+    Logarithmic compression works by converting sign*m*2^e+base to
+    sign*(e+1023+m), yielding a value in [0,2048].  This can then be
+    converted to a value in [0,1].
+
+    Note that the likelihood function is problematic: the true probability
+    of seeing any particular value in the range is infinitesimal, and that
+    is indistinguishable from values outside the range.  Instead we say
+    that P = 1 in range, and 0 outside.
+    """
+
+    def __init__(self, base):
+        self.limits = (-inf, base)
+        self._base = base
+
+    def start_value(self):
+        return self._base - 1
+
+    def random(self, n=1, target=1.0):
+        target = max(abs(self._base), abs(target))
+        scale = target + (target==0.)
+        return self._base - abs(RNG.randn(n)*scale)
+
+    def nllf(self, value):
+        return 0 if value <= self._base else inf
+
+    def residual(self, value):
+        return 0 if value <= self._base else 4
+
+    def get01(self, x):
+        m, e = math.frexp(self._base - x)
+        if m >= 0 and e <= _E_MAX:
+            v = (e + m) / (2. * _E_MAX)
+            return 1 - v
+        else:
+            return 1 if m < 0 else 0
+
+    def put01(self, v):
+        v = (1 - v) * 2 * _E_MAX
+        e = int(v)
+        m = v - e
+        x = -(math.ldexp(m, e) - self._base)
+        return x
+
+    def getfull(self, x):
+        v = x - self._base
+        return v if v <= -1 else -2 - 1. / v
+
+    def putfull(self, v):
+        x = v if v <= -1 else -1. / (v + 2)
+        return x + self._base
+
+
+class Bounded(Bounds):
+
+    """
+    Bounded range.
+
+    [lo,hi] <-> [0,1] scale is simple linear
+    [lo,hi] <-> (-inf,inf) scale uses exponential expansion
+
+    While technically the probability of seeing any value within the
+    range is 1/range, for consistency with the semi-infinite ranges
+    and for a more natural mapping between nllf and chisq, we instead
+    set the negative log likelihood to 0 (probability 1) within the
+    range.  This choice will not affect the fits.
+    """
+
+    def __init__(self, lo, hi):
+        self.limits = (lo, hi)
+        self._nllf_scale = log(hi - lo)
+
+    def random(self, n=1, target=1.0):
+        lo, hi = self.limits
+        #print("= uniform",lo,hi)
+        return RNG.uniform(lo, hi, size=n)
+
+    def nllf(self, value):
+        lo, hi = self.limits
+        return 0 if lo <= value <= hi else inf
+        # return self._nllf_scale if lo<=value<=hi else inf
+
+    def residual(self, value):
+        lo, hi = self.limits
+        return -4 if lo > value else (4 if hi < value else 0)
+
+    def get01(self, x):
+        lo, hi = self.limits
+        return float(x - lo) / (hi - lo) if hi - lo > 0 else 0
+
+    def put01(self, v):
+        lo, hi = self.limits
+        return (hi - lo) * v + lo
+
+    def getfull(self, x):
+        return _put01_inf(self.get01(x))
+
+    def putfull(self, v):
+        return self.put01(_get01_inf(v))
+
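A sketch of the 01-transform and the nllf/residual conventions for a finite range (assumes bumps.bounds is importable; the limits are arbitrary)::

    from numpy import inf
    from bumps.bounds import Bounded

    b = Bounded(0, 10)
    assert b.get01(2.5) == 0.25            # linear map from [0, 10] to [0, 1]
    assert b.put01(0.25) == 2.5
    assert b.nllf(5) == 0 and b.nllf(12) == inf
    assert b.residual(12) == 4 and b.residual(-1) == -4
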
+
+class Distribution(Bounds):
+
+    """
+    Parameter is pulled from a distribution.
+
+    *dist* must implement the distribution interface from scipy.stats.
+    In particular, it should define methods rvs, pdf, cdf and ppf and
+    attributes args and dist.name.
+    """
+
+    def __init__(self, dist):
+        self.dist = dist
+
+    def random(self, n=1, target=1.0):
+        return self.dist.rvs(n)
+
+    def nllf(self, value):
+        return -log(self.dist.pdf(value))
+
+    def residual(self, value):
+        return normal_distribution.ppf(self.dist.cdf(value))
+
+    def get01(self, x):
+        return self.dist.cdf(x)
+
+    def put01(self, v):
+        return self.dist.ppf(v)
+
+    def getfull(self, x):
+        return x
+
+    def putfull(self, v):
+        return v
+
+    def __getstate__(self):
+        # WARNING: does not preserve and restore seed
+        return self.dist.__class__, self.dist.args, self.dist.kwds
+
+    def __setstate__(self, state):
+        cls, args, kwds = state
+        self.dist = cls(*args, **kwds)
+
+    def __str__(self):
+        return "%s(%s)" % (self.dist.dist.name,
+                           ",".join(str(s) for s in self.dist.args))
+
+
+class Normal(Distribution):
+
+    """
+    Parameter is pulled from a normal distribution.
+
+    If you have measured a parameter value with some uncertainty (e.g., the
+    film thickness is 35+/-5 according to TEM), then you can use this
+    measurement to restrict the values given to the search, and to penalize
+    choices of this fitting parameter which are different from this value.
+
+    *mean* is the expected value of the parameter and *std* is the 1-sigma
+    standard deviation.
+    """
+
+    def __init__(self, mean=0, std=1):
+        Distribution.__init__(self, normal_distribution(mean, std))
+        self._nllf_scale = log(sqrt(2 * pi * std ** 2))
+
+    def nllf(self, value):
+        # P(v) = exp(-0.5*(v-mean)**2/std**2)/sqrt(2*pi*std**2)
+        # -log(P(v)) = -(-0.5*(v-mean)**2/std**2 - log( (2*pi*std**2) ** 0.5))
+        #            = 0.5*(v-mean)**2/std**2 + log(2*pi*std**2)/2
+        mean, std = self.dist.args
+        return 0.5 * ((value-mean)/std)**2 + self._nllf_scale
+
+    def residual(self, value):
+        mean, std = self.dist.args
+        return (value-mean)/std
+
+    def __getstate__(self):
+        return self.dist.args  # args is mean,std
+
+    def __setstate__(self, state):
+        mean, std = state
+        self.__init__(mean=mean, std=std)
+
+
+class BoundedNormal(Bounds):
+
+    """
+    Bounds from a normal distribution with mean *mu* and standard deviation
+    *sigma*, truncated to the interval *limits*.
+    """
+
+    def __init__(self, sigma=1, mu=0, limits=(-inf, inf)):
+        self.limits = limits
+        self.sigma, self.mu = sigma, mu
+
+        self._left = normal_distribution.cdf((limits[0]-mu)/sigma)
+        self._delta = normal_distribution.cdf((limits[1]-mu)/sigma) - self._left
+        self._nllf_scale = log(sqrt(2 * pi * sigma ** 2)) + log(self._delta)
+
+    def get01(self, x):
+        """
+        Convert value into [0,1] for optimizers which are bounds constrained.
+
+        This can also be used as a scale bar to show approximately how close to
+        the end of the range the value is.
+        """
+        v = ((normal_distribution.cdf((x-self.mu)/self.sigma) - self._left)
+             / self._delta)
+        return clip(v, 0, 1)
+
+    def put01(self, v):
+        """
+        Convert [0,1] into value for optimizers which are bounds constrained.
+        """
+        x = v * self._delta + self._left
+        return normal_distribution.ppf(x) * self.sigma + self.mu
+
+    def getfull(self, x):
+        """
+        Convert value into (-inf,inf) for optimizers which are unconstrained.
+        """
+        raise NotImplementedError
+
+    def putfull(self, v):
+        """
+        Convert (-inf,inf) into value for optimizers which are unconstrained.
+        """
+        raise NotImplementedError
+
+    def random(self, n=1, target=1.0):
+        """
+        Return a randomly generated valid value, or an array of values
+        """
+        # Inverse-CDF sampling: map uniform [0,1] draws back into the
+        # truncated range (put01 is the inverse of get01).
+        return self.put01(RNG.rand(n))
+
+    def nllf(self, value):
+        """
+        Return the negative log likelihood of seeing this value, with
+        likelihood scaled so that the maximum probability is one.
+        """
+        if value in self:
+            return 0.5 * ((value-self.mu)/self.sigma)**2 + self._nllf_scale
+        else:
+            return inf
+
+    def residual(self, value):
+        """
+        Return the parameter 'residual' in a way that is consistent with
+        residuals in the normal distribution.  The primary purpose is to
+        graphically display exceptional values in a way that is familiar
+        to the user.  For fitting, the scaled likelihood should be used.
+
+        For the truncated normal distribution, we can just use the normal
+        residuals.
+        """
+        return (value - self.mu) / self.sigma
+
+    def start_value(self):
+        """
+        Return a default starting value if none given.
+        """
+        return self.put01(0.5)
+
+    def __contains__(self, v):
+        return self.limits[0] <= v <= self.limits[1]
+
+    def __str__(self):
+        vals = (
+            self.limits[0], self.limits[1],
+            self.mu, self.sigma,
+        )
+        return "(%s,%s), norm(%s,%s)" % tuple(num_format(v) for v in vals)
+
+
+class SoftBounded(Bounds):
+
+    """
+    Parameter is pulled from a stretched normal distribution.
+
+    This is like a rectangular distribution, but with gaussian tails.
+
+    The intent of this distribution is for soft constraints on the values.
+    As such, the random generator will return values like the rectangular
+    distribution, but the likelihood will return finite values based on
+    the distance from the bounds rather than returning infinity.
+
+    Note that for bounds constrained optimizers which force the value
+    into the range [0,1] for each parameter we don't need to use soft
+    constraints, and this acts just like the rectangular distribution.
+    """
+
+    def __init__(self, lo, hi, std=None):
+        self._lo, self._hi, self._std = lo, hi, std
+        self._nllf_scale = log(hi - lo + sqrt(2 * pi * std ** 2))
+
+    def random(self, n=1, target=1.0):
+        return RNG.uniform(self._lo, self._hi, size=n)
+
+    def nllf(self, value):
+        # To turn f(x) = 1 if x in [lo,hi] else G(tail)
+        # into a probability p, we need to normalize by \int{f(x)dx},
+        # which is just hi-lo + sqrt(2*pi*std**2).
+        if value < self._lo:
+            z = self._lo - value
+        elif value > self._hi:
+            z = value - self._hi
+        else:
+            z = 0
+        return (z / self._std) ** 2 / 2 + self._nllf_scale
+
+    def residual(self, value):
+        if value < self._lo:
+            z = self._lo - value
+        elif value > self._hi:
+            z = value - self._hi
+        else:
+            z = 0
+        return z / self._std
+
+    def get01(self, x):
+        v = float(x - self._lo) / (self._hi - self._lo)
+        return v if 0 <= v <= 1 else (0 if v < 0 else 1)
+
+    def put01(self, v):
+        return v * (self._hi - self._lo) + self._lo
+
+    def getfull(self, x):
+        return x
+
+    def putfull(self, v):
+        return v
+
+    def __str__(self):
+        return "box_norm(%g,%g,sigma=%g)" % (self._lo, self._hi, self._std)
+
+
+_E_MIN = -1023
+_E_MAX = 1024
+
+def _get01_inf(x):
+    """
+    Convert a floating point number to a value in [0,1].
+
+    The value sign*m*2^e is mapped to sign*(e+1023+m), yielding a value in
+    [-2048,2048].  This can then be converted to a value in [0,1].
+
+    Sort order is preserved.  At least 14 bits of precision are lost from
+    the 53 bit mantissa.
+    """
+    # Arctan alternative
+    # Arctan is approximately linear in (-0.5, 0.5), but the
+    # transform is only useful up to (-10**15,10**15).
+    # return atan(x)/pi + 0.5
+    m, e = math.frexp(x)
+    s = math.copysign(1.0, m)
+    v = (e - _E_MIN + m * s) * s
+    v = v / (4 * _E_MAX) + 0.5
+    v = 0 if _E_MIN > e else (1 if _E_MAX < e else v)
+    return v
+
+
+def _put01_inf(v):
+    """
+    Convert a value in [0,1] to a full floating point number.
+
+    Sort order is preserved.  Reverses :func:`_get01_inf`, but with fewer
+    bits of precision.
+    """
+    # Arctan alternative
+    # return tan(pi*(v-0.5))
+
+    v = (v - 0.5) * 4 * _E_MAX
+    s = math.copysign(1., v)
+    v *= s
+    e = int(v)
+    m = v - e
+    x = math.ldexp(s * m, e + _E_MIN)
+    # print "< x,e,m,s,v",x,e+_e_min,s*m,s,v
+    return x
diff --git a/bumps/bspline.py b/bumps/bspline.py
new file mode 100644
index 0000000..f3c20fe
--- /dev/null
+++ b/bumps/bspline.py
@@ -0,0 +1,402 @@
+# This program is public domain
+"""
+BSpline calculator.
+
+Given a set of knots, compute the cubic B-spline interpolation.
+"""
+from __future__ import division, print_function
+
+__all__ = ['bspline', 'pbs']
+
+import numpy as np
+from numpy import maximum as max, minimum as min
+
+
+def pbs(x, y, t, clamp=True, parametric=True):
+    """
+    Evaluate the parametric B-spline px(t),py(t).
+
+    *x* and *y* are the control points, and *t* are the points
+    in [0,1] at which they are evaluated.   The *x* values are
+    sorted so that the spline describes a function.
+
+    The spline goes through the control points at the ends. If *clamp*
+    is True, the derivative of the spline at both ends is zero. If *clamp*
+    is False, the derivative at the ends is equal to the slope connecting
+    the final pair of control points.
+
+    If *parametric* is False, then parametric points t' are chosen such
+    that x(t') = *t*.
+
+    The B-spline knots are chosen to be equally spaced within [0,1].
+    """
+    x = list(sorted(x))
+    knot = np.hstack((0, 0, np.linspace(0, 1, len(y)), 1, 1))
+    cx = np.hstack((x[0], x[0], x[0], (2 * x[0] + x[1]) / 3,
+                    x[1:-1], (2 * x[-1] + x[-2]) / 3, x[-1]))
+    if clamp:
+        cy = np.hstack((y[0], y[0], y[0], y, y[-1]))
+    else:
+        cy = np.hstack((y[0], y[0], y[0],
+                        y[0] + (y[1] - y[0]) / 3,
+                        y[1:-1],
+                        y[-1] + (y[-2] - y[-1]) / 3,
+                        y[-1]))
+
+    if parametric:
+        return _bspline3(knot, cx, t), _bspline3(knot, cy, t)
+
+    # Find parametric points t' such that x(t') = t.
+    # First try a few Newton steps.
+    xt = np.interp(t, x, np.linspace(0, 1, len(x)))
+    with np.errstate(all='ignore'):
+        for _ in range(6):
+            pt, dpt = _bspline3(knot, cx, xt, nderiv=1)
+            xt -= (pt - t) / dpt
+        idx = np.isnan(xt) | (abs(_bspline3(knot, cx, xt) - t) > 1e-9)
+
+    # Use bisection when newton fails
+    if idx.any():
+        missing = t[idx]
+        # print missing
+        t_lo, t_hi = 0 * missing, 1 * missing
+        for _ in range(30):  # bisection with about 1e-9 tolerance
+            trial = (t_lo + t_hi) / 2
+            ptrial = _bspline3(knot, cx, trial)
+            tidx = ptrial < missing
+            t_lo[tidx] = trial[tidx]
+            t_hi[~tidx] = trial[~tidx]
+        xt[idx] = (t_lo + t_hi) / 2
+    # print "err",np.max(abs(_bspline3(knot,cx,t)-xt))
+
+    # Return y evaluated at the interpolation points
+    return _bspline3(knot, cx, xt), _bspline3(knot, cy, xt)
+
+
+def bspline(y, xt, clamp=True):
+    """
+    Evaluate the B-spline with control points *y* at positions *xt* in [0,1].
+
+    The spline goes through the control points at the ends.  If *clamp*
+    is True, the derivative of the spline at both ends is zero.  If *clamp*
+    is False, the derivative at the ends is equal to the slope connecting
+    the final pair of control points.
+
+    B-spline knots are chosen to be equally spaced within [0,1].
+    """
+    knot = np.hstack((0, 0, np.linspace(0, 1, len(y)), 1, 1))
+    if clamp:
+        cy = np.hstack(([y[0]] * 3, y, y[-1]))
+    else:
+        cy = np.hstack((y[0], y[0], y[0],
+                           y[0] + (y[1] - y[0]) / 3,
+                           y[1:-1],
+                           y[-1] + (y[-2] - y[-1]) / 3, y[-1]))
+    return _bspline3(knot, cy, xt)
+
+
+def _bspline3(knot, control, t, nderiv=0):
+    """
+    Evaluate the B-spline specified by the given *knot* sequence and
+    *control* values at the parametric points *t*.  *nderiv* selects
+    the function or derivative to evaluate.
+    """
+    knot, control, t = [np.asarray(v) for v in (knot, control, t)]
+
+    # Deal with values outside the range
+    valid = (t > knot[0]) & (t <= knot[-1])
+    tv = t[valid]
+    f = np.zeros(t.shape)
+    f[t <= knot[0]] = control[0]
+    f[t >= knot[-1]] = control[-1]
+
+    # Find B-Spline parameters for the individual segments
+    end = len(knot) - 1
+    segment = knot.searchsorted(tv) - 1
+    tm2 = knot[max(segment - 2, 0)]
+    tm1 = knot[max(segment - 1, 0)]
+    tm0 = knot[max(segment - 0, 0)]
+    tp1 = knot[min(segment + 1, end)]
+    tp2 = knot[min(segment + 2, end)]
+    tp3 = knot[min(segment + 3, end)]
+
+    p4 = control[min(segment + 3, end)]
+    p3 = control[min(segment + 2, end)]
+    p2 = control[min(segment + 1, end)]
+    p1 = control[min(segment + 0, end)]
+
+    # Compute second and third derivatives.
+    if nderiv > 1:
+        # Normally we require a recursion for Q, R and S to compute
+        # df, d2f and d3f respectively, however Q can be computed directly
+        # from intermediate values of P, S has a recursion of depth 0,
+        # which leaves only the R recursion of depth 1 in the calculation
+        # below.
+        q4 = (p4 - p3) * 3 / (tp3 - tm0)
+        q3 = (p3 - p2) * 3 / (tp2 - tm1)
+        q2 = (p2 - p1) * 3 / (tp1 - tm2)
+        r4 = (q4 - q3) * 2 / (tp2 - tm0)
+        r3 = (q3 - q2) * 2 / (tp1 - tm1)
+        if nderiv > 2:
+            s4 = (r4 - r3) / (tp1 - tm0)
+            d3f = np.zeros(t.shape)
+            d3f[valid] = s4
+        r4 = ((tv - tm0) * r4 + (tp1 - tv) * r3) / (tp1 - tm0)
+        d2f = np.zeros(t.shape)
+        d2f[valid] = r4
+
+    # Compute function value and first derivative
+    p4 = ((tv - tm0) * p4 + (tp3 - tv) * p3) / (tp3 - tm0)
+    p3 = ((tv - tm1) * p3 + (tp2 - tv) * p2) / (tp2 - tm1)
+    p2 = ((tv - tm2) * p2 + (tp1 - tv) * p1) / (tp1 - tm2)
+    p4 = ((tv - tm0) * p4 + (tp2 - tv) * p3) / (tp2 - tm0)
+    p3 = ((tv - tm1) * p3 + (tp1 - tv) * p2) / (tp1 - tm1)
+    if nderiv >= 1:
+        df = np.zeros(t.shape)
+        df[valid] = (p4 - p3) * 3 / (tp1 - tm0)
+    p4 = ((tv - tm0) * p4 + (tp1 - tv) * p3) / (tp1 - tm0)
+    f[valid] = p4
+
+    if nderiv == 0:
+        return f
+    elif nderiv == 1:
+        return f, df
+    elif nderiv == 2:
+        return f, df, d2f
+    else:
+        return f, df, d2f, d3f
+
+
+def bspline_control(y, clamp=True):
+    return _find_control(y, clamp=clamp)
+
+
+def pbs_control(x, y, clamp=True):
+    return _find_control(x, clamp=clamp), _find_control(y, clamp=clamp)
+
+
+def _find_control(v, clamp=True):
+    raise NotImplementedError("B-spline interpolation doesn't work yet")
+    from scipy.linalg import solve_banded
+    n = len(v)
+    udiag = np.hstack([0, 0, 0, [1 / 6] * (n - 3), 0.25, 0.3])
+    ldiag = np.hstack([-0.3, 0.25, [1 / 6] * (n - 3), 0, 0, 0])
+    mdiag = np.hstack([1, 0.3, 7 / 12, [2 / 3] * (n - 4), 7 / 12, -0.3, 1])
+    A = np.vstack([ldiag, mdiag, udiag])
+    if clamp:
+        # First derivative is zero at ends
+        bl, br = 0, 0
+    else:
+        # First derivative at ends follows line between final control points
+        bl, br = (v[1] - v[0]) * n, (v[-1] - v[-2]) * n
+    b = np.hstack([v[0], bl, v[1:n - 1], br, v[-1]])
+    x = solve_banded((1, 1), A, b)
+    return x  # x[1:-1]
+
+# ===========================================================================
+# test code
+
+def speed_check():
+    """
+    Print the time to evaluate 400 points on a 7 knot spline.
+    """
+    import time
+    x = np.linspace(0, 1, 7)
+    x[1], x[-2] = x[2], x[-3]
+    y = [9, 11, 2, 3, 8, 0, 2]
+    t = np.linspace(0, 1, 400)
+    t0 = time.time()
+    for _ in range(1000):
+        bspline(y, t, clamp=True)
+    print("bspline (ms)", (time.time() - t0) / 1000)
+
+
+def _check(expected, got, tol):
+    """
+    Check that value matches expected within tolerance.
+
+    If *expected* is never zero, use relative error for tolerance.
+    """
+    relative = (np.isscalar(expected) and expected != 0) \
+        or (not np.isscalar(expected) and all(expected != 0))
+    if relative:
+        norm = np.linalg.norm((expected - got) / expected)
+    else:
+        norm = np.linalg.norm(expected - got)
+    if norm >= tol:
+        msg = [
+            "expected %s"%str(expected),
+            "got %s"%str(got),
+            "tol %s norm %s"%(tol, norm),
+        ]
+        raise ValueError("\n".join(msg))
+
+
+def _derivs(x, y):
+    """
+    Compute numerical derivative for a function evaluated on a fine grid.
+    """
+    # difference formula
+    return (y[1] - y[0]) / (x[1] - x[0]), (y[-1] - y[-2]) / (x[-1] - x[-2])
+    # 5-point difference formula
+    #left = (y[0]-8*y[1]+8*y[3]-y[4]) / 12 / (x[1]-x[0])
+    #right = (y[-5]-8*y[-4]+8*y[-2]-y[-1]) / 12 / (x[-1]-x[-2])
+    # return left,right
+
+
+def test():
+    h = 1e-10
+    t = np.linspace(0, 1, 100)
+    dt = np.array([0, h, 2 * h, 3 * h, 4 * h,
+                      1 - 4 * h, 1 - 3 * h, 1 - 2 * h, 1 - h, 1])
+    y = [9, 11, 2, 3, 8, 0, 2]
+    n = len(y)
+    xeq = np.linspace(0, 1, n)
+    x = xeq + 0
+    x[0], x[-1] = (x[0] + x[1]) / 2, (x[-2] + x[-1]) / 2
+    dx = np.array([x[0], x[0] + h, x[0] + 2*h, x[0] + 3*h, x[0] + 4*h,
+                      x[-1] - 4*h, x[-1] - 3*h, x[-1] - 2*h, x[-1] - h, x[-1]])
+
+    # ==== Check that bspline matches pbs with equally spaced x
+
+    yt = bspline(y, t, clamp=True)
+    xtp, ytp = pbs(xeq, y, t, clamp=True, parametric=False)
+    _check(t, xtp, 1e-8)
+    _check(yt, ytp, 1e-8)
+
+    xtp, ytp = pbs(xeq, y, t, clamp=True, parametric=True)
+    _check(t, xtp, 1e-8)
+    _check(yt, ytp, 1e-8)
+
+    yt = bspline(y, t, clamp=False)
+    xtp, ytp = pbs(xeq, y, t, clamp=False, parametric=False)
+    _check(t, xtp, 1e-8)
+    _check(yt, ytp, 1e-8)
+
+    xtp, ytp = pbs(xeq, y, t, clamp=False, parametric=True)
+    _check(t, xtp, 1e-8)
+    _check(yt, ytp, 1e-8)
+
+    # ==== Check bspline f at end points
+
+    yt = bspline(y, t, clamp=True)
+    _check(y[0], yt[0], 1e-12)
+    _check(y[-1], yt[-1], 1e-12)
+
+    yt = bspline(y, t, clamp=False)
+    _check(y[0], yt[0], 1e-12)
+    _check(y[-1], yt[-1], 1e-12)
+
+    xt, yt = pbs(x, y, t, clamp=True, parametric=False)
+    _check(x[0], xt[0], 1e-8)
+    _check(x[-1], xt[-1], 1e-8)
+    _check(y[0], yt[0], 1e-8)
+    _check(y[-1], yt[-1], 1e-8)
+
+    xt, yt = pbs(x, y, t, clamp=True, parametric=True)
+    _check(x[0], xt[0], 1e-8)
+    _check(x[-1], xt[-1], 1e-8)
+    _check(y[0], yt[0], 1e-8)
+    _check(y[-1], yt[-1], 1e-8)
+
+    xt, yt = pbs(x, y, t, clamp=False, parametric=False)
+    _check(x[0], xt[0], 1e-8)
+    _check(x[-1], xt[-1], 1e-8)
+    _check(y[0], yt[0], 1e-8)
+    _check(y[-1], yt[-1], 1e-8)
+
+    xt, yt = pbs(x, y, t, clamp=False, parametric=True)
+    _check(x[0], xt[0], 1e-8)
+    _check(x[-1], xt[-1], 1e-8)
+    _check(y[0], yt[0], 1e-8)
+    _check(y[-1], yt[-1], 1e-8)
+
+    # ==== Check f' at end points
+    yt = bspline(y, dt, clamp=True)
+    left, right = _derivs(dt, yt)
+    _check(0, left, 1e-8)
+    _check(0, right, 1e-8)
+
+    xt, yt = pbs(x, y, dx, clamp=True, parametric=False)
+    left, right = _derivs(xt, yt)
+    _check(0, left, 1e-8)
+    _check(0, right, 1e-8)
+
+    xt, yt = pbs(x, y, dt, clamp=True, parametric=True)
+    left, right = _derivs(xt, yt)
+    _check(0, left, 1e-8)
+    _check(0, right, 1e-8)
+
+    yt = bspline(y, dt, clamp=False)
+    left, right = _derivs(dt, yt)
+    _check((y[1] - y[0]) * (n - 1), left, 5e-4)
+    _check((y[-1] - y[-2]) * (n - 1), right, 5e-4)
+
+    xt, yt = pbs(x, y, dx, clamp=False, parametric=False)
+    left, right = _derivs(xt, yt)
+    _check((y[1] - y[0]) / (x[1] - x[0]), left, 5e-4)
+    _check((y[-1] - y[-2]) / (x[-1] - x[-2]), right, 5e-4)
+
+    xt, yt = pbs(x, y, dt, clamp=False, parametric=True)
+    left, right = _derivs(xt, yt)
+    _check((y[1] - y[0]) / (x[1] - x[0]), left, 5e-4)
+    _check((y[-1] - y[-2]) / (x[-1] - x[-2]), right, 5e-4)
+
+    # ==== Check interpolator
+    #yc = bspline_control(y)
+    # print("y",y)
+    # print("p(yc)",bspline(yc,xeq))
+
+
+def demo():
+    from pylab import hold, linspace, subplot, plot, legend, show
+    hold(True)
+    #y = [9,6,1,3,8,4,2]
+    #y = [9,11,13,3,-2,0,2]
+    y = [9, 11, 2, 3, 8, 0]
+    #y = [9,9,1,3,8,2,2]
+    x = linspace(0, 1, len(y))
+    t = linspace(x[0], x[-1], 400)
+    subplot(211)
+    plot(t, bspline(y, t, clamp=False), '-.y',
+         label="unclamped bspline")  # bspline
+    # bspline
+    plot(t, bspline(y, t, clamp=True), '-y', label="clamped bspline")
+    plot(sorted(x), y, ':oy', label="control points")
+    legend()
+    #left, right = _derivs(t, bspline(y, t, clamp=False))
+    #print(left, (y[1] - y[0]) / (x[1] - x[0]))
+
+    subplot(212)
+    xt, yt = pbs(x, y, t, clamp=False)
+    plot(xt, yt, '-.b', label="unclamped pbs")  # pbs
+    xt, yt = pbs(x, y, t, clamp=True)
+    plot(xt, yt, '-b', label="clamped pbs")  # pbs
+    #xt,yt = pbs(x,y,t,clamp=True, parametric=True)
+    # plot(xt,yt,'-g') # pbs
+    plot(sorted(x), y, ':ob', label="control points")
+    legend()
+    show()
+
+
+def demo_interp():
+    # B-Spline control point inverse function is not yet implemented
+    from pylab import hold, linspace, plot, show
+    hold(True)
+    x = linspace(0, 1, 7)
+    y = [9, 11, 2, 3, 8, 0, 2]
+    t = linspace(0, 1, 400)
+    yc = bspline_control(y, clamp=True)
+    xc = linspace(x[0], x[-1], 9)
+    plot(xc, yc, ':oy', x, y, 'xg')
+    #knot = np.hstack((0, np.linspace(0,1,len(y)), 1))
+    #fy = _bspline3(knot,yc,t)
+    fy = bspline(yc, t, clamp=True)
+    plot(t, fy, '-.y')
+    show()
+
+if __name__ == "__main__":
+    # test()
+    demo()
+    # demo_interp()
+    # speed_check()
diff --git a/bumps/cheby.py b/bumps/cheby.py
new file mode 100644
index 0000000..25648a6
--- /dev/null
+++ b/bumps/cheby.py
@@ -0,0 +1,153 @@
+r"""
+Freeform modeling with Chebyshev polynomials.
+
+`Chebyshev polynomials <http://en.wikipedia.org/wiki/Chebyshev_polynomials>`_
+$T_k$ form a basis set for functions over $[-1,1]$.  The truncated
+interpolating polynomial $P_n$ is a weighted sum of Chebyshev polynomials
+up to degree $n$:
+
+.. math::
+
+    f(x) \approx P_n(x) = \sum_{k=0}^n c_k T_k(x)
+
+The interpolating polynomial exactly matches $f(x)$ at the Chebyshev
+nodes $z_k$ and is near the optimal polynomial approximation to $f$
+of degree $n$ under the maximum norm.  For well-behaved functions,
+the coefficients $c_k$ decrease rapidly, and furthermore are independent
+of the degree $n$ of the polynomial.
+
+The models can either be defined directly in terms of the Chebyshev
+coefficients $c_k$ with *method* = 'direct', or in terms of control
+points $(z_k, f(z_k))$ at the Chebyshev nodes :func:`cheby_points`
+with *method* = 'interp'.  Bounds on the parameters are easier to
+control using 'interp', but the function may oscillate wildly outside
+the bounds.  Bounds on the oscillation are easier to control using
+'direct', but the shape of the profile is difficult to control.
+"""
+# TODO: clipping volume fraction to [0,1] distorts parameter space
+# Option 0: clip to [0,1]
+# - Bayesian analysis: parameter values outside the domain will be equally
+#   probable out to infinity
+# - Newton methods: the fit space is flat outside the domain, which leads
+#   to a degenerate hessian.
+# - Direct methods: won't fail, but will be subject to random walk
+#   performance outside the domain.
+# - trivial to implement!
+# Option 1: compress (-inf,0.001] and [0.999,inf) into (0,0.001], [0.999,1)
+# - won't address any of the problems of clipping
+# Option 2: have chisq return inf for points outside the domain
+# - Bayesian analysis: correctly assigns probability zero
+# - Newton methods: degenerate Hessian outside domain
+# - Direct methods: random walk outside domain
+# - easy to implement
+# Option 3: clip outside domain but add penalty based on amount of clipping
+#   A profile based on clipping may have lower chisq than any profile that
+#   can be described by a valid model (e.g., by having a sharper transition
+#   than would be allowed by the model), leading to a minimum outside D.
+#   Adding a penalty constant outside D would help, but there is no constant
+#   that works everywhere.  We could use a constant greater than the worst
+#   chisq seen so far in D, which can guarantee an arbitrarily low P(x) and
+#   a global minimum within D, but for Newton methods, the boundary may still
+#   have spurious local minima and objective value now depends on history.
+#   Linear compression of profile to fit within the domain would avoid
+#   unreachable profile shapes (this is just a linear transform on chebyshev
+#   coefficients), and the addition of the penalty value would reduce
+#   parameter correlations that result from having transformed parameters
+#   resulting in identical profiles.  Returning T = ||A(x)|| from render,
+#   with A being a transform that brings the profile within [0,1], the
+#   objective function can return P'(x) = P(x)/(10*(1+sum(T_i)^4) for all
+#   slabs i, or P(x) if no slabs return a penalty value.  So long as T is
+#   monotonic with increasing badness, with value of 0 within D, and so long
+#   as no values of x outside D can generate models that cannot be
+#   expressed for any x within D, then any optimizer should return a valid
+#   result at the global minimum.  There may still be local minima outside
+#   the boundary, so information that the value is outside the domain
+#   still needs to pass through a local optimizer to the fitting program.
+#   This approach could be used to transform a box constrained
+#   problem to an unconstrained problem using clipping+penalty on the
+#   parameter values and removing the need for constrained Newton optimizers.
+# - Bayesian analysis: parameters outside D have incorrect probability, but
+#   with a sufficiently large penalty, P(x) ~ 0; if the penalty value is
+#   too low, details of the correlations outside D may leak into D.
+# - Newton methods: Hessian should point back to domain
+# - Direct methods: random walk should be biased toward the domain
+# - moderately complicated
+__all__ = ["profile", "cheby_approx",
+           "cheby_val", "cheby_points", "cheby_coeff"]
+
+import numpy as np
+from numpy import real, exp, pi, cos, arange, asarray
+from numpy.fft import fft
+
+
+def profile(c, t, method):
+    r"""
+    Evaluate the chebyshev approximation c at points x.
+
+    If method is 'direct' then $c_i$ are the coefficients for the chebyshev
+    polynomials $T_i$ yielding $P = \sum_i{c_i T_i(x)}$.
+
+    If method is 'interp' then $c_i$ are the values of the interpolated
+    function $f$ evaluated at the chebyshev points returned by
+    :func:`cheby_points`.
+    """
+    if method == 'interp':
+        c = cheby_coeff(c)
+    return cheby_val(c, t)
+
+
+def cheby_approx(n, f, range=(0, 1)):
+    """
+    Return the coefficients for the order n chebyshev approximation to
+    function f evaluated over the range [low,high].
+    """
+    fx = f(cheby_points(n, range=range))
+    return cheby_coeff(fx)
+
+
+def cheby_val(c, x):
+    r"""
+    Evaluate the chebyshev approximation c at points x.
+
+    The values $c_i$ are the coefficients for the chebyshev
+    polynomials $T_i$ yielding $p(x) = \sum_i{c_i T_i(x)}$.
+    """
+    c = np.asarray(c)
+    if len(c) == 0:
+        return 0 * x
+
+    # Clenshaw recursion from Numerical Recipes sec. 5.8
+    y = 4 * x - 2
+    d = dd = 0
+    for c_j in c[:0:-1]:
+        d, dd = y * d + (c_j - dd), d
+    return y * (0.5 * d) + (0.5 * c[0] - dd)
+
+
+def cheby_points(n, range=(0, 1)):
+    r"""
+    Return the points at which a function must be evaluated to
+    generate the order $n$ Chebyshev approximation function.
+
+    Over the range [-1,1], the points are $p_k = \cos(\pi(2 k + 1)/(2n))$.
+    Adjusting to the range $[x_L,x_R]$, the points become
+    $x_k = x_L + \tfrac{1}{2}(p_k + 1)(x_R - x_L)$.
+    """
+    return (0.5 * (cos(pi * (arange(n) + 0.5) / n) + 1)
+            * (range[1] - range[0]) + range[0])
+
+
+def cheby_coeff(fx):
+    """
+    Compute chebyshev coefficients for a polynomial of order n given
+    the function evaluated at the chebyshev points for order n.
+
+    This can be used as the basis of a direct interpolation method where
+    the n control points are positioned at cheby_points(n).
+    """
+    fx = asarray(fx)
+    n = len(fx)
+    w = exp((-0.5j * pi / n) * arange(n))
+    y = np.hstack((fx[0::2], fx[1::2][::-1]))
+    c = (2. / n) * real(fft(y) * w)
+    return c
diff --git a/bumps/cli.py b/bumps/cli.py
new file mode 100644
index 0000000..df7c052
--- /dev/null
+++ b/bumps/cli.py
@@ -0,0 +1,574 @@
+"""
+Bumps command line interface.
+
+The functions in this module are used by the bumps command to implement
+the command line interface.  Bumps plugin models can use them to create
+stand alone applications with a similar interface.  For example, the
+Refl1D application uses the following::
+
+    from . import fitplugin
+    import bumps.cli
+    bumps.cli.set_mplconfig(appdatadir='Refl1D')
+    bumps.cli.install_plugin(fitplugin)
+    bumps.cli.main()
+
+After completing a set of fits on related systems, a post-analysis script
+can use :func:`load_model` to load the problem definition and
+:func:`load_best` to load the best value  found in the fit.  This can
+be used for example in experiment design, where you look at the expected
+parameter uncertainty when fitting simulated data from a range of experimental
+systems.
+"""
+from __future__ import with_statement, print_function
+
+__all__ = ["main", "install_plugin", "set_mplconfig", "config_matplotlib",
+           "load_model", "preview", "load_best", "save_best", "resynth"]
+
+import sys
+import os
+import re
+import warnings
+import traceback
+
+import shutil
+try:
+    import dill as pickle
+except ImportError:
+    import pickle
+
+import numpy as np
+# np.seterr(all="raise")
+
+from . import fitters
+from .fitters import FitDriver, StepMonitor, ConsoleMonitor, nllf_scale
+from .mapper import MPMapper, AMQPMapper, MPIMapper, SerialMapper
+from .formatnum import format_uncertainty
+from . import util
+from . import initpop
+from . import __version__
+from . import plugin
+from . import options
+
+from .util import pushdir
+
+
+def install_plugin(p):
+    """
+    Replace symbols in :mod:`bumps.plugin` with application specific
+    methods.
+    """
+    for symbol in plugin.__all__:
+        if hasattr(p, symbol):
+            setattr(plugin, symbol, getattr(p, symbol))
+
+
+def load_model(path, model_options=None):
+    """
+    Load a model file.
+
+    *path* contains the path to the model file.
+
+    *model_options* are any additional arguments to the model.  The sys.argv
+    variable will be set such that *sys.argv[1:] == model_options*.
+    """
+    from .fitproblem import load_problem
+
+    # Change to the target path before loading model so that data files
+    # can be given as relative paths in the model file.  This should also
+    # allow imports as expected from the model file.
+    directory, filename = os.path.split(path)
+    with pushdir(directory):
+        # Try a specialized model loader
+        problem = plugin.load_model(filename)
+        if problem is None:
+            # print "loading",filename,"from",directory
+            if filename.endswith('pickle'):
+                # First see if it is a pickle
+                problem = pickle.load(open(filename, 'rb'))
+            else:
+                # Then see if it is a python model script
+                problem = load_problem(filename, options=model_options)
+
+    # Guard against the user changing parameters after defining the problem.
+    problem.model_reset()
+    problem.path = os.path.abspath(path)
+    if not hasattr(problem, 'title'):
+        problem.title = filename
+    problem.name, _ = os.path.splitext(filename)
+    problem.options = model_options
+    return problem
+
+
+def preview(problem, view=None):
+    """
+    Show the problem plots and parameters.
+    """
+    import pylab
+    problem.show()
+    problem.plot(view=view)
+    pylab.show()
+
+
+def save_best(fitdriver, problem, best, view=None):
+    """
+    Save the fit data, including parameter values, uncertainties and plots.
+
+    *fitdriver* is the fitter that was used to drive the fit.
+
+    *problem* is a FitProblem instance.
+
+    *best* is the parameter set to save.
+    """
+    # Make sure the problem contains the best value
+    # TODO: avoid recalculating if problem is already at best.
+    problem.setp(best)
+    # print "remembering best"
+    pardata = "".join("%s %.15g\n" % (name, value)
+                      for name, value in zip(problem.labels(), problem.getp()))
+    with open(problem.output_path + ".par", 'wt') as fid:
+        fid.write(pardata)
+
+    fitdriver.save(problem.output_path)
+    with util.redirect_console(problem.output_path + ".err"):
+        fitdriver.show()
+        fitdriver.plot(output_path=problem.output_path, view=view)
+    fitdriver.show()
+    # print "plotting"
+
+
+PARS_PATTERN = re.compile(r"^(?P<label>.*) (?P<value>[^ ]*)\n$")
+def load_best(problem, path):
+    """
+    Load parameter values from a file.
+    """
+    #targets = dict(zip(problem.labels(), problem.getp()))
+    targets = dict((name, np.NaN) for name in problem.labels())
+    with open(path, 'rt') as fid:
+        for line in fid:
+            m = PARS_PATTERN.match(line)
+            label, value = m.group('label'), float(m.group('value'))
+            if label in targets:
+                targets[label] = value
+    values = [targets[label] for label in problem.labels()]
+    problem.setp(np.asarray(values))
+#CRUFT
+recall_best = load_best
+
+
+def store_overwrite_query_gui(path):
+    """
+    Ask if store path should be overwritten.
+
+    Use this in a call to :func:`make_store` from a graphical user interface.
+    """
+    import wx
+    msg_dlg = wx.MessageDialog(
+        None,
+        path + " already exists. Press 'Yes' to overwrite, or 'No' to abort and restart with a new path.",
+        'Overwrite Directory',
+        wx.YES_NO | wx.ICON_QUESTION)
+    retCode = msg_dlg.ShowModal()
+    msg_dlg.Destroy()
+    if retCode != wx.ID_YES:
+        raise RuntimeError("Could not create path")
+
+
+def store_overwrite_query(path):
+    """
+    Ask if store path should be overwritten.
+
+    Use this in a call to :func:`make_store` from a command line interface.
+    """
+    print(path, "already exists.")
+    print(
+        "Press 'y' to overwrite, or 'n' to abort and restart with --store=newpath")
+    ans = input("Overwrite [y/n]? ")
+    if ans not in ("y", "Y", "yes"):
+        sys.exit(1)
+
+
+def make_store(problem, opts, exists_handler):
+    """
+    Create the store directory and populate it with the model definition file.
+    """
+    # Determine if command line override
+    if opts.store:
+        problem.store = opts.store
+    problem.output_path = os.path.join(problem.store, problem.name)
+
+    # Check if already exists
+    if not opts.overwrite and os.path.exists(problem.output_path + '.out'):
+        if opts.batch:
+            print(
+                problem.store + " already exists.  Use --overwrite to replace.", file=sys.stderr)
+            sys.exit(1)
+        exists_handler(problem.output_path)
+
+    # Create it and copy model
+    if not os.path.exists(problem.store):
+        os.mkdir(problem.store)
+    shutil.copy2(problem.path, problem.store)
+
+    # Redirect sys.stdout to capture progress
+    if opts.batch:
+        sys.stdout = open(problem.output_path + ".mon", "w")
+
+def run_profiler(problem, steps):
+    """
+    Model execution profiler.
+
+    Run the program with "--profiler --steps=N" to generate a function
+    profile chart breaking down the cost of evaluating N models.
+    """
+    # Here is the findings from one profiling session::
+    #   23 ms total
+    #    6 ms rendering model
+    #    8 ms abeles
+    #    4 ms convolution
+    #    1 ms setting parameters and computing nllf
+    from .util import profile
+    p = initpop.random_init(int(steps), None, problem)
+    # Note: map is an iterator in python 3
+    profile(lambda *args: list(map(*args)), problem.nllf, p)
+
+
+def run_timer(mapper, problem, steps):
+    """
+    Model execution timer.
+
+    Run the program with "--timer --steps=N" to determine the average
+    run time of the model.  If --parallel is included, then the model
+    will be run in parallel on separate cores.
+    """
+    import time
+    T0 = time.time()
+    p = initpop.random_init(int(steps), None, problem)
+    mapper(p)
+    print("time per model eval: %g ms" % (1000 * (time.time() - T0) / steps,))
+
+
+def start_remote_fit(problem, options, queue, notify):
+    """
+    Queue remote fit.
+    """
+    from jobqueue.client import connect
+
+    data = dict(package='bumps',
+                version=__version__,
+                problem=pickle.dumps(problem),
+                options=pickle.dumps(options))
+    request = dict(service='fitter',
+                   version=__version__,  # fitter service version
+                   notify=notify,
+                   name=problem.title,
+                   data=data)
+
+    server = connect(queue)
+    job = server.submit(request)
+    return job
+
+# ==== Main ====
+
+
+def initial_model(opts):
+    """
+    Load and initialize the model.
+
+    *opts* are the processed command line options.
+
+    If --pars is in opts, then load the parameters from a .par file.
+
+    If --simulate is in opts, then generate random data from the model.
+
+    If --simrandom is in opts, then generate random data from a random model.
+
+    If --shake is in opts, then use a random initial state for the fit.
+    """
+    if opts.seed is not None:
+        np.random.seed(opts.seed)
+
+    if opts.args:
+        problem = load_model(opts.args[0],opts.args[1:])
+        if opts.pars is not None:
+            load_best(problem, opts.pars)
+        if opts.simrandom:
+            problem.randomize()
+        if opts.simulate or opts.simrandom:
+            noise = None if opts.noise == "data" else float(opts.noise)
+            problem.simulate_data(noise=noise)
+            print("simulation parameters")
+            print(problem.summarize())
+            print("chisq at simulation", problem.chisq())
+        if opts.shake:
+            problem.randomize()
+    else:
+        problem = None
+    return problem
+
+
+def resynth(fitdriver, problem, mapper, opts):
+    """
+    Generate maximum likelihood fits to resynthesized data sets.
+
+    *fitdriver* is a :class:`bumps.fitters.FitDriver` object with a fitter
+    already chosen.
+
+    *problem* is a :func:`bumps.fitproblem.FitProblem` object.  It should
+    be initialized with optimal values for the parameters.
+
+    *mapper* is one of the available :mod:`bumps.mapper` classes.
+
+    *opts* is a :class:`bumps.cli.BumpsOpts` object representing the command
+    line parameters.
+    """
+    make_store(problem, opts, exists_handler=store_overwrite_query)
+    fid = open(problem.output_path + ".rsy", 'at')
+    fitdriver.mapper = mapper.start_mapper(problem, opts.args)
+    for i in range(opts.resynth):
+        problem.resynth_data()
+        best, fbest = fitdriver.fit()
+        scale, err = nllf_scale(problem)
+        print("step %d chisq %g" % (i, scale * fbest))
+        fid.write('%.15g ' % (scale * fbest))
+        fid.write(' '.join('%.15g' % v for v in best))
+        fid.write('\n')
+    problem.restore_data()
+    fid.close()
+
+
+def set_mplconfig(appdatadir):
+    r"""
+    Point the matplotlib config dir to %LOCALAPPDATA%\{appdatadir}\mplconfig.
+    """
+    if hasattr(sys, 'frozen'):
+        if os.name == 'nt':
+            mplconfigdir = os.path.join(
+                os.environ['LOCALAPPDATA'], appdatadir, 'mplconfig')
+        elif sys.platform == 'darwin':
+            mplconfigdir = os.path.join(
+                os.path.expanduser('~/Library/Caches'), appdatadir, 'mplconfig')
+        else:
+            return  # do nothing on linux
+        mplconfigdir = os.environ.setdefault('MPLCONFIGDIR', mplconfigdir)
+        if not os.path.exists(mplconfigdir):
+            os.makedirs(mplconfigdir)
+
+
+def config_matplotlib(backend=None):
+    """
+    Setup matplotlib to use a particular backend.
+
+    The backend should be 'WXAgg' for interactive use, or 'Agg' for batch.
+    This distinction allows us to run in environments such as cluster computers
+    which do not have wx installed on the compute nodes.
+
+    This function must be called before any imports to pylab.  To allow
+    this, modules should not import pylab at the module level, but instead
+    import it for each function/method that uses it.  Exceptions can be made
+    for modules which are completely dedicated to plotting, but these modules
+    should never be imported at the module level.
+    """
+
+    # When running from a frozen environment created by py2exe, we will not
+    # have a range of backends available, and must set the default to WXAgg.
+    # With a full matplotlib distribution we can use whatever the user prefers.
+    if hasattr(sys, 'frozen'):
+        if 'MPLCONFIGDIR' not in os.environ:
+            raise RuntimeError(
+                "MPLCONFIGDIR should be set to e.g., %LOCALAPPDATA%\YourApp\mplconfig")
+        if backend is None:
+            backend = 'WXAgg'
+
+    import matplotlib
+
+    # Specify the backend to use for plotting and import backend dependent
+    # classes. Note that this must be done before importing pyplot to have an
+    # effect.  If no backend is given, let pyplot use the default.
+    if backend is not None:
+        matplotlib.use(backend)
+
+    # Disable interactive mode so that plots are only updated on show() or
+    # draw(). Note that the interactive function must be called before
+    # selecting a backend or importing pyplot, otherwise it will have no
+    # effect.
+
+    matplotlib.interactive(False)
+
+
+def beep():
+    """
+    Audio signal that fit is complete.
+    """
+    if sys.platform == "win32":
+        import winsound
+        winsound.MessageBeep(winsound.MB_ICONEXCLAMATION)
+    else:
+        print("\a", file=sys.__stdout__)
+
+
+def run_command(c):
+    """
+    Run an arbitrary python command.
+    """
+    exec(c, globals())
+
+
+def setup_logging():
+    import logging
+    logging.basicConfig(level=logging.INFO)
+
+# from http://stackoverflow.com/questions/22373927/get-traceback-of-warnings
+# answered by mgab (2014-03-13)
+# edited by Gareth Rees (2015-11-28)
+def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
+    traceback.print_stack()
+    log = file if hasattr(file, 'write') else sys.stderr
+    log.write(warnings.formatwarning(message, category, filename, lineno, line))
+
+def main():
+    """
+    Run the bumps program with the command line interface.
+
+    Input parameters are taken from sys.argv.
+    """
+    # add full traceback to warnings
+    #warnings.showwarning = warn_with_traceback
+
+    if len(sys.argv) == 1:
+        sys.argv.append("-?")
+        print("\nNo modelfile parameter was specified.\n")
+
+    # run command with bumps in the environment
+    if sys.argv[1] == '-m':
+        import runpy
+        sys.argv = sys.argv[2:]
+        runpy.run_module(sys.argv[0], run_name="__main__")
+        sys.exit()
+    elif sys.argv[1] == '-p':
+        import runpy
+        sys.argv = sys.argv[2:]
+        runpy.run_path(sys.argv[0], run_name="__main__")
+        sys.exit()
+    elif sys.argv[1] == '-c':
+        run_command(sys.argv[2])
+        sys.exit()
+    elif sys.argv[1] == '-i':
+        sys.argv = ["ipython", "--pylab"]
+        from IPython import start_ipython
+        sys.exit(start_ipython())
+
+    opts = options.getopts()
+    setup_logging()
+
+    if opts.edit:
+        from .gui.gui_app import main as gui
+        gui()
+        return
+
+    # Set up the matplotlib backend to minimize the wx/gui dependency.
+    # If no GUI specified and not editing, then use the default mpl
+    # backend for the python version.
+    if opts.batch or opts.remote or opts.noshow:  # no interactivity
+        config_matplotlib(backend='Agg')
+    else:  # let preview use default graphs
+        config_matplotlib()
+
+    problem = initial_model(opts)
+
+    # TODO: AMQP mapper as implemented requires workers started up with
+    # the particular problem; need to be able to transport the problem
+    # to the worker instead.  Until that happens, the GUI shouldn't use
+    # the AMQP mapper.
+    if opts.mpi:
+        MPIMapper.start_worker(problem)
+        mapper = MPIMapper
+    elif opts.parallel or opts.worker:
+        if opts.transport == 'amqp':
+            mapper = AMQPMapper
+        elif opts.transport == 'mp':
+            mapper = MPMapper
+        elif opts.transport == 'celery':
+            # Note: CeleryMapper is not imported above; selecting the
+            # 'celery' transport will raise NameError here.
+            mapper = CeleryMapper
+        else:
+            raise ValueError("unknown mapper")
+    else:
+        mapper = SerialMapper
+    if opts.worker:
+        mapper.start_worker(problem)
+        return
+
+    if np.isfinite(float(opts.time)):
+        import time
+        start_time = time.time()
+        stop_time = start_time + float(opts.time)*3600
+        abort_test=lambda: time.time() >= stop_time
+    else:
+        abort_test=lambda: False
+
+    fitdriver = FitDriver(
+        opts.fit_config.selected_fitter, problem=problem, abort_test=abort_test,
+        **opts.fit_config.selected_values)
+
+    if opts.time_model:
+        run_timer(mapper.start_mapper(problem, opts.args),
+                  problem, steps=int(opts.steps))
+    elif opts.profile:
+        run_profiler(problem, steps=int(opts.steps))
+    elif opts.chisq:
+        if opts.cov:
+            print(problem.cov())
+        print("chisq", problem.chisq_str())
+    elif opts.preview:
+        if opts.cov:
+            print(problem.cov())
+        preview(problem, view=opts.view)
+    elif opts.resynth > 0:
+        resynth(fitdriver, problem, mapper, opts)
+
+    elif opts.remote:
+
+        # Check that problem runs before submitting it remotely
+        chisq = problem()
+        print("initial chisq:", chisq)
+        job = start_remote_fit(problem, opts,
+                               queue=opts.queue, notify=opts.notify)
+        print("remote job:", job['id'])
+
+    else:
+        if opts.resume:
+            resume_path = os.path.join(opts.resume, problem.name)
+        else:
+            resume_path = None
+
+        make_store(problem, opts, exists_handler=store_overwrite_query)
+
+        # Show command line arguments and initial model
+        print("#", " ".join(sys.argv))
+        problem.show()
+        if opts.stepmon:
+            fid = open(problem.output_path + '.log', 'w')
+            fitdriver.monitors = [ConsoleMonitor(problem),
+                                  StepMonitor(problem, fid, fields=['step', 'value'])]
+
+        #import time; t0=time.clock()
+        fitdriver.mapper = mapper.start_mapper(problem, opts.args)
+        best, fbest = fitdriver.fit(resume=resume_path)
+        # print("time=%g"%(time.clock()-t0),file=sys.__stdout__)
+        save_best(fitdriver, problem, best, view=opts.view)
+        if opts.err or opts.cov:
+            fitdriver.show_err()
+        if opts.cov:
+            np.set_printoptions(linewidth=1000000)
+            print("=== Covariance matrix ===")
+            print(problem.cov())
+            print("=========================")
+        if opts.entropy:
+            print("Calculating entropy...")
+            S, dS = fitdriver.entropy()
+            print("Entropy: %s bits" % format_uncertainty(S, dS))
+        mapper.stop_mapper(fitdriver.mapper)
+        if not opts.batch and not opts.mpi and not opts.noshow:
+            beep()
+            import pylab
+            pylab.show()
+
+
+# Allow  "$python -m bumps.cli args" calling pattern
+if __name__ == "__main__":
+    main()
diff --git a/bumps/curve.py b/bumps/curve.py
new file mode 100644
index 0000000..7904a3b
--- /dev/null
+++ b/bumps/curve.py
@@ -0,0 +1,296 @@
+"""
+Build a bumps model from a function and data.
+
+Example
+-------
+
+Given a function *sin_model* which computes a sine wave at times *t*::
+
+    from numpy import sin, pi
+    def sin_model(t, freq, phase):
+        return sin(2*pi*(freq*t + phase))
+
+and given data *(y,dy)* measured at times *t*, we can define the fit
+problem as follows::
+
+    from bumps.names import *
+    M = Curve(sin_model, t, y, dy, freq=20)
+
+The *freq* and *phase* keywords are optional initial values for the model
+parameters which otherwise default to zero.  The model parameters can be
+accessed as attributes on the model to set fit range::
+
+    M.freq.range(2, 100)
+    M.phase.range(0, 1)
+
+As usual, you can initialize or assign parameter expressions to the
+parameters if you want to tie parameters together within or between models.
+
+Note: there is sometimes difficulty getting bumps to recognize the function
+during fits, which can be addressed by putting the definition in a separate
+file on the python path.  With the windows binary distribution of bumps,
+this can be done in the problem definition file with the following code::
+
+    import os
+    from bumps.names import *
+    sys.path.insert(0, os.getcwd())
+
+The model function can then be imported from the external module as usual::
+
+    from sin_model import sin_model
+"""
+__all__ = ["Curve", "PoissonCurve", "plot_err"]
+
+import inspect
+
+import numpy as np
+from numpy import log, pi, sqrt
+
+from .parameter import Parameter
+
+
+class Curve(object):
+    """
+    Model a measurement with a user defined function.
+
+    The function *fn(x,p1,p2,...)* should return the expected value *y* for
+    each point *x* given the parameters *p1*, *p2*, etc.  *dy* is the uncertainty
+    for each measured value *y*.  If not specified, it defaults to 1.
+    Initial values for the parameters can be set as *p=value* arguments to *Curve*.
+    If no value is set, then the initial value will be taken from the default
+    value given in the definition of *fn*, or set to 0 if the parameter is not
+    defined with an initial value.  Arbitrary non-fittable data can be passed
+    to the function as parameters, but only if the parameter is given a default
+    value of *None* in the function definition, and has the initial value set
+    as an argument to *Curve*.  Defining *state=dict(key=value, ...)* before
+    *Curve*, and calling *Curve* as *Curve(..., \*\*state)* works pretty well.
+
+    *Curve* takes two special keyword arguments: *name* and *plot*.
+    *name* is added to each parameter name when the parameter is defined.
+    The filename for the data is a good choice, since this allows you to keep
+    the parameters straight when fitting multiple datasets simultaneously.
+
+    Plotting defaults to a 1-D plot with error bars for the data, and a line
+    for the function value.  You can assign your own plot function with
+    the *plot* keyword.  The function should be defined as *plot(x,y,dy,fy,\*\*kw)*.
+    The keyword arguments will be filled with the values of the parameters
+    used to compute *fy*.  It will be easiest to list the parameters you
+    need to make your plot as positional arguments after *x,y,dy,fy* in the
+    plot function declaration.  For example, *plot(x,y,dy,fy,p3,\*\*kw)*
+    will make the value of parameter *p3* available as a variable in your
+    function.  The special keyword *view* will be a string containing
+    *linear*, *log*, *logx* or *loglog*.
+
+    The data uncertainty is assumed to follow a Gaussian distribution.
+    If the measurements are drawn from some other uncertainty distribution,
+    then subclass Curve and replace nllf with the correct probability given
+    the residuals.  See the implementation of :class:`PoissonCurve` for an
+    example.
+    """
+    def __init__(self, fn, x, y, dy=None, name="", plot=None, **fnkw):
+        self.x, self.y = np.asarray(x), np.asarray(y)
+        if dy is None:
+            self.dy = 1
+        else:
+            self.dy = np.asarray(dy)
+            if (self.dy <= 0).any():
+                raise ValueError("measurement uncertainty must be positive")
+
+        self.fn = fn
+        self.name = name # if name else fn.__name__ + " "
+
+        # Make every name a parameter; initialize the parameters
+        # with the default value if function is defined with keyword
+        # initializers; override the initializers with any keyword
+        # arguments specified in the fit function constructor.
+        pnames, vararg, varkw, pvalues = inspect.getargspec(fn)
+        if vararg or varkw:
+            raise TypeError(
+                "Function cannot have *args or **kwargs in declaration")
+
+        # TODO: need "self" handling for passed methods
+        # assume the first argument is x
+        pnames = pnames[1:]
+
+        # Parameters default to zero
+        init = dict((p, 0) for p in pnames)
+        # If the function provides default values, use those
+        if pvalues:
+            # ignore default value for "x" parameter
+            if len(pvalues) > len(pnames):
+                pvalues = pvalues[1:]
+            init.update(zip(pnames[-len(pvalues):], pvalues))
+        # Non-fittable parameters need to be sent in as None
+        state_vars = set(p for p,v in init.items() if v is None)
+        # Regardless, use any values specified in the constructor, but first
+        # check that they exist as function parameters.
+        invalid = set(fnkw.keys()) - set(pnames)
+        if invalid:
+            raise TypeError("Invalid initializers: %s" %
+                            ", ".join(sorted(invalid)))
+        init.update(fnkw)
+
+        # Build parameters out of ranges and initial values
+        # maybe:  name=(p+name if name.startswith('_') else name+p)
+        pars = dict((p, Parameter.default(init[p], name=name + p))
+                    for p in pnames if p not in state_vars)
+
+        # Make parameters accessible as model attributes
+        for k, v in pars.items():
+            if hasattr(self, k):
+                raise TypeError("Parameter cannot be named %s" % k)
+            setattr(self, k, v)
+
+        # Remember the function, parameters, and number of parameters
+        self._function = fn
+        self._pnames = [p for p in pnames if not (p in state_vars)]
+        self._cached_theory = None
+        self._plot = plot if plot is not None else plot_err
+        self._state = dict((p,v) for p,v in init.items() if p in state_vars)
+
+    def update(self):
+        self._cached_theory = None
+
+    def parameters(self):
+        return dict((p, getattr(self, p)) for p in self._pnames)
+
+    def numpoints(self):
+        return np.prod(self.y.shape)
+
+    def theory(self, x=None):
+        if self._cached_theory is None:
+            if x is None:
+                x = self.x
+            kw = dict((p, getattr(self, p).value) for p in self._pnames)
+            kw.update(self._state)
+            self._cached_theory = self._function(x, **kw)
+        return self._cached_theory
+
+    def simulate_data(self, noise=None):
+        theory = self.theory()
+        if noise is not None:
+            if noise == 'data':
+                pass
+            elif noise < 0:
+                self.dy = -theory*noise*0.01
+            else:
+                self.dy = noise
+        self.y = theory + np.random.randn(*theory.shape)*self.dy
+
+    def residuals(self):
+        return (self.theory() - self.y) / self.dy
+
+    def nllf(self):
+        r = self.residuals()
+        return 0.5 * np.sum(r ** 2)
+
+    def save(self, basename):
+        # TODO: need header line with state vars as json
+        # TODO: need to support nD x,y,dy
+        data = np.vstack((self.x, self.y, self.dy, self.theory()))
+        np.savetxt(basename + '.dat', data.T)
+
+    def plot(self, view=None):
+        import pylab
+        kw = dict((p, getattr(self, p).value) for p in self._pnames)
+        kw.update(self._state)
+        #print "kw_plot",kw
+        if view == 'residual':
+            plot_resid(self.x, self.residuals())
+        else:
+            plot_ratio = 4
+            h = pylab.subplot2grid((plot_ratio,1), (0,0), rowspan=plot_ratio-1)
+            self._plot(self.x, self.y, self.dy, self.theory(), view=view, **kw)
+            for tick_label in pylab.gca().get_xticklabels():
+                tick_label.set_visible(False)
+            #pylab.gca().xaxis.set_visible(False)
+            #pylab.gca().spines['bottom'].set_visible(False)
+            #pylab.gca().set_xticks([])
+            pylab.subplot2grid((plot_ratio,1), (plot_ratio-1,0), sharex=h)
+            plot_resid(self.x, self.residuals())
+
+def plot_resid(x, resid):
+    import pylab
+    pylab.plot(x, resid, '.')
+    pylab.gca().locator_params(axis='y', tight=True, nbins=4)
+    pylab.axhline(y=1, ls='dotted')
+    pylab.axhline(y=-1, ls='dotted')
+    pylab.ylabel("Residuals")
+
+def plot_err(x, y, dy, fy, view=None, **kw):
+    """
+    Plot data *y* and error *dy* against *x*.
+
+    *view* is one of linear, log, logx or loglog.
+    """
+    import pylab
+    pylab.errorbar(x, y, yerr=dy, fmt='.')
+    pylab.plot(x, fy, '-')
+    if view == 'log':
+        pylab.xscale('linear')
+        pylab.yscale('log')
+    elif view == 'logx':
+        pylab.xscale('log')
+        pylab.yscale('linear')
+    elif view == 'loglog':
+        pylab.xscale('log')
+        pylab.yscale('log')
+    else: # view == 'linear'
+        pylab.xscale('linear')
+        pylab.yscale('linear')
+
+_LOGFACTORIAL = np.array([log(np.prod(np.arange(1., k + 1)))
+                             for k in range(21)])
+
+
+def logfactorial(n):
+    """Compute the log factorial for each element of an array"""
+    result = np.empty(n.shape, dtype='double')
+    idx = (n <= 20)
+    result[idx] = _LOGFACTORIAL[np.asarray(n[idx], 'int32')]
+    n = n[~idx]
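+    # For n > 20, use Ramanujan's approximation to log(n!)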
+    result[~idx] = n * \
+        log(n) - n + log(n * (1 + 4 * n * (1 + 2 * n))) / 6 + log(pi) / 2
+    return result
+
+
+class PoissonCurve(Curve):
+    r"""
+    Model a measurement with Poisson uncertainty.
+
+    The nllf is calculated using Poisson probabilities, but the curve itself
+    is displayed using the approximation that $\sigma_y \approx \sqrt{y}$.
+
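+    Concretely, for observed counts $y_i$ and theory values $\lambda_i$,
+    the negative log likelihood computed by :meth:`nllf` is
+    $\sum_i \lambda_i - y_i \log \lambda_i + \log y_i!$.
+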
+    See :class:`Curve` for details.
+    """
+    def __init__(self, fn, x, y, name="", **fnkw):
+        dy = sqrt(y) + (y==0) if y is not None else None
+        Curve.__init__(self, fn, x, y, dy, name=name, **fnkw)
+        self._logfacty = logfactorial(y) if y is not None else None
+        self._logfactysum = np.sum(self._logfacty)
+
+    ## Assume gaussian residuals for now
+    #def residuals(self):
+    #    # TODO: provide individual probabilities as residuals
+    #    # or perhaps the square roots --- whatever gives a better feel for
+    #    # which points are driving the fit
+    #    theory = self.theory()
+    #    return np.sqrt(self.y * log(theory) - theory - self._logfacty)
+
+    def nllf(self):
+        theory = self.theory()
+        if (theory <= 0).any():
+            return 1e308
+        return -sum(self.y * log(theory) - theory) + self._logfactysum
+
+    def simulate_data(self, noise=None):
+        theory = self.theory()
+        self.y = np.random.poisson(theory)
+        self.dy = sqrt(self.y) + (self.y==0)
+        self._logfacty = logfactorial(self.y)
+        self._logfactysum = np.sum(self._logfacty)
+
+    def save(self, basename):
+        # TODO: need header line with state vars as json
+        # TODO: need to support nD x,y,dy
+        data = np.vstack((self.x, self.y, self.theory()))
+        np.savetxt(basename + '.dat', data.T)
diff --git a/bumps/data.py b/bumps/data.py
new file mode 100644
index 0000000..8b41cee
--- /dev/null
+++ b/bumps/data.py
@@ -0,0 +1,226 @@
+"""
+Data handling utilities.
+"""
+from __future__ import division
+
+from contextlib import contextmanager
+
+import numpy as np
+from numpy import inf, nan
+
+__all__ = ["indfloat", "parse_file"]
+
+
+def parse_multi(file, keysep=None, sep=None, comment='#'):
+    """
+    Parse a multi-part file.
+
+    Return a list of (header, data) pairs, where header is a key: value
+    dictionary and data is a numpy array.
+
+    The header section is a list of key-value pairs, with the *comment* character
+    at the start of each line.  Key and value will be separated by *keysep*,
+    or by spaces if *keysep = None*.  The data section is a sequence of
+    floating point numbers separated by *sep*, or by spaces if *sep* is None.
+    inf and nan are parsed as inf and nan.  Comments at the end of the data
+    line will be ignored.  Data points can be commented out by including
+    a comment character at the start of the data line, provided the next
+    character is a digit, a sign, or a decimal separator.
+
+    Quotes around keys are removed, but not around values.  Use
+    :func:`strip_quotes` to remove them if they are present.  This is different
+    from the :func:`parse_file` interface, which strips quotes around values.
+    The new interface allows *json.loads()* calls on values if values are
+    stored as *key: json.dumps(value)*.
+
+    Special hack for binned data: if the first column contains bin edges, then
+    the last row will only have the bin edge.  To make the array rectangular,
+    we replace the bin edges with bin centers.  The original bins can be
+    found in the header using the 'bins' key (unless that key already exists
+    in the header, in which case the key will be ignored).
+    """
+    parts = []
+    with maybe_open(file) as fh:
+        while True:
+            header, data, bins = _read_part(fh, comment=comment, multi_part=True,
+                                            col_sep=sep, key_sep=keysep)
+            if header is None:
+                break
+            if bins is not None:
+                header.setdefault('bins', bins)
+            parts.append((header, data))
+    return parts
+
+def parse_file(file, keysep=None, sep=None, comment='#'):
+    """
+    Parse a file into a header and data.
+
+    Return a (header, data) pair, where header is a key: value
+    dictionary and data is a numpy array.
+
+    The header section is a list of key-value pairs, with the *comment* character
+    at the start of each line.  Key and value will be separated by *keysep*,
+    or by spaces if *keysep = None*.  The data section is a sequence of
+    floating point numbers separated by *sep*, or by spaces if *sep* is None.
+    inf and nan are parsed as inf and nan.  Comments at the end of the data
+    line will be ignored.  Data points can be commented out by including
+    a comment character at the start of the data line, provided the next
+    character is a digit, a sign, or a decimal separator.
+
+    Quotes around keys are removed.  For compatibility with the old interface,
+    quotes around values are removed as well.
+
+    Special hack for binned data: if the first column contains bin edges, then
+    the last row will only have the bin edge.  To make the array rectangular,
+    we replace the bin edges with bin centers.  The original bins can be
+    found in the header using the 'bins' key (unless that key already exists
+    in the header, in which case the key will be ignored).
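+
+    A minimal usage sketch (the file name and contents are illustrative)::
+
+        header, data = parse_file('sample.dat')   # hypothetical two-column file
+        x, y = data[0], data[1]                    # data rows are file columns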
+    """
+    with maybe_open(file) as fh:
+        header, data, bins = _read_part(fh, comment=comment, multi_part=False,
+                                        col_sep=sep, key_sep=keysep)
+    if header is None:
+        raise IOError("data file is empty")
+    # compatibility: strip quotes from values in key-value pairs
+    header = dict((k, strip_quotes(v)) for k, v in header.items())
+    if bins is not None:
+        header.setdefault('bins', bins)
+    return header, data
+
+def _read_part(fh, key_sep=None, col_sep=None, comment="#", multi_part=False):
+    header = {}
+    data = []
+    iseof = True
+    for line in fh:
+        # Blank lines indicate a section break.
+        if not line.strip():
+            # Skip blank lines if we are parsing the data as a single part file
+            if not multi_part: continue
+            # If we are at the beginning of a section, then iseof is True and
+            # continuing to the next loop iteration will skip them. If we have
+            # already consumed some non-blank lines, then iseof will be false,
+            # and we need to break this section of the data.  If we have blank
+            # lines at the end of the file, we will never set iseof to False
+            # and they will be ignored.
+            if iseof: continue
+            break
+
+        # Line is not blank, so process it.
+        columns, key, value = _parse_line(line, comment=comment,
+                                          col_sep=col_sep, key_sep=key_sep)
+        if columns:
+            data.append([indfloat(v) for v in columns])
+        if key is not None:
+            if key in header:
+                header[key] = "\n".join((header[key], value))
+            else:
+                header[key] = value
+
+        # We have processed some data, so this is not the end of the file.
+        iseof = False
+
+    if iseof:
+        return None, None, None
+
+    # print data
+    # print "\n".join(k+":"+v for k,v in header.items())
+    if len(data) and len(data[-1]) == 1:
+        # For TOF data, the first column is the bin edge, which has one
+        # more row than the remaining columns; fill those columns with
+        # bin centers instead
+        last_edge = data[-1][0]
+        data = np.array(data[:-1]).T
+        edges = np.hstack((data[0],last_edge))
+        data[0] = 0.5*(edges[:-1] + edges[1:])
+        bins = edges
+    else:
+        data = np.array(data).T
+        bins = None
+
+    return header, data, bins
+
+@contextmanager
+def maybe_open(file_or_path):
+    if hasattr(file_or_path, 'readline'):
+        fh = file_or_path
+    elif not string_like(file_or_path):
+        raise ValueError('file must be a name or a file handle')
+    elif file_or_path.endswith('.gz'):
+        import gzip
+        fh = gzip.open(file_or_path)
+    else:
+        fh = open(file_or_path)
+    yield fh
+    if fh is not file_or_path:
+        fh.close()
+
+def string_like(s):
+    """
+    Return True if s operates like a string.
+    """
+    try:
+        s + ''
+    except Exception:
+        return False
+    return True
+
+
+def _parse_line(line, key_sep=None, col_sep=None, comment='#'):
+    # Find location of the comment character on the line
+    idx = line.find(comment)
+
+    # If the line does not contain a comment character or if the comment
+    # character is not in the first column, then this is a data line which
+    # should be returned as a sequence of text columns separated by spaces.
+    # The caller can turn the columns into numbers or leave them as strings.
+    # Data on the line after the comment character is ignored.
+    # TODO: allow quoted strings or backslash escaped spaces for text columns
+    if idx != 0:
+        if idx > 0:
+            return line[:idx].split(col_sep), None, ''
+        else:
+            return line.split(col_sep), None, ''
+
+    # Split line on key separator
+    parts = [p.strip() for p in line[1:].split(key_sep, 1)]
+    key, value = parts if len(parts) > 1 else (parts[0], '')
+    key = strip_quotes(key)
+
+    # If key is a number assume it is simply a commented out data point
+    if len(key) and (key[0] in '.-+0123456789' or key=='inf' or key=='nan'):
+        return [], None, None
+
+    return [], key, value
+
+
+def strip_quotes(s):
+    return s[1:-1] if len(s) and s[0] in "'\"" and s[0] == s[-1] else s
+
+INF_VALUES = set(('inf', '1/0', '1.#inf', 'infinity'))
+NAN_VALUES = set(('nan', '0/0', '1.#qnan', 'na', 'n/a'))
+def indfloat(s):
+    """
+    Convert string to float, with support for inf and nan.
+
+    Example::
+
+        >>> from numpy import isinf, isnan
+        >>> print(isinf(indfloat('inf')))
+        True
+        >>> print(isinf(indfloat('-inf')))
+        True
+        >>> print(isnan(indfloat('nan')))
+        True
+    """
+    try:
+        return float(s)
+    except Exception:
+        s = s.lower()
+        if s in INF_VALUES:
+            return inf
+        elif s and s[0]=='-' and s[1:] in INF_VALUES:
+            return -inf
+        elif s in NAN_VALUES:
+            return nan
+        raise
+
diff --git a/bumps/dream/__init__.py b/bumps/dream/__init__.py
new file mode 100644
index 0000000..518ab91
--- /dev/null
+++ b/bumps/dream/__init__.py
@@ -0,0 +1,25 @@
+"""
+
+Notes on random numbers
+=======================
+
+Uses dream.util.rng as the random number generator.
+
+You can set the seed using::
+
+    dream.util.rng = numpy.random.RandomState(seed)
+
+This interface doesn't feel right, since one instance of DREAM may
+influence another if they are running within one another.  Putting
+the rng on the dream class may be a better option.
+"""
+
+from .model import MCMCModel
+from .core import Dream
+#from .core import dream
+#from .initpop import *  # cov_init, lhs_init
+#from .model import *    #
+#from .state import *    # load_state, save_state
+#from .views import *    # plotting routines
+#from .util import console
+#from .stats import *
diff --git a/bumps/dream/acr.py b/bumps/dream/acr.py
new file mode 100644
index 0000000..f3eb7b1
--- /dev/null
+++ b/bumps/dream/acr.py
@@ -0,0 +1,91 @@
+"""
+ACR upper percentiles critical value for test of single multivariate
+normal outlier.
+
+From the method given by Wilks (1963) and approaching to a F distribution
+function by the Yang and Lee (1987) formulation, we compute the critical
+value of the maximum squared Mahalanobis distance to detect outliers from
+a normal multivariate sample.
+
+We can generate all the critical values of the maximum squared Mahalanobis
+distance presented on the Table XXXII of by Barnett and Lewis (1978) and
+Table A.6 of Rencher (2002). Also with any given significance level (alpha).
+
+Example::
+
+    >>> print("%.4f"%ACR(3, 25, 0.01))
+    13.1753
+
+Created by::
+
+    A. Trujillo-Ortiz, R. Hernandez-Walls, A. Castro-Perez and K. Barba-Rojo
+    Facultad de Ciencias Marinas
+    Universidad Autonoma de Baja California
+    Apdo. Postal 453
+    Ensenada, Baja California
+    Mexico.
+    atrujo at uabc.mx
+
+Copyright. August 20, 2006.
+
+To cite this file, this would be an appropriate format::
+
+    Trujillo-Ortiz, A., R. Hernandez-Walls, A. Castro-Perez and K. Barba-Rojo.
+    (2006). *ACR:Upper percentiles critical value for test of single
+    multivariate  normal outlier.* A MATLAB file. [WWW document].  URL
+    http://www.mathworks.com/matlabcentral/fileexchange/loadFile.do?objectId=12161
+
+The function's name is given in honour of Dr. Alvin C. Rencher for his
+invaluable contribution to multivariate statistics with his text 'Methods of
+Multivariate Analysis'.
+
+References:
+
+[1] Barnett, V. and Lewis, T. (1978), Outliers on Statistical Data.
+     New-York:John Wiley & Sons.
+[2] Rencher, A. C. (2002), Methods of Multivariate Analysis. 2nd. ed.
+     New-Jersey:John Wiley & Sons. Chapter 13 (pp. 408-450).
+[3] Wilks, S. S. (1963), Multivariate Statistical Outliers. Sankhya,
+     Series A, 25: 407-426.
+[4] Yang, S. S. and Lee, Y. (1987), Identification of a Multivariate
+     Outlier. Presented at the Annual  Meeting of the American
+     Statistical Association, San Francisco, August 1987.
+"""
+
+from __future__ import division
+
+__all__ = ["ACR"]
+
+from scipy.stats import f
+finv = f.ppf
+
+
+def ACR(p, n, alpha=0.05):
+    """
+    Return critical value for test of single multivariate normal outlier
+    using the Mahalanobis distance metric.
+
+    *p* is the number of independent variables,
+    *n* is the number of samples, and
+    *alpha* is the significance level cutoff (default=0.05).
+    """
+
+    if alpha <= 0 or alpha >= 1:
+        raise ValueError("significance level must be between 0 and 1")
+
+    a = alpha
+    # F distribution critical value with p and n-p-1 degrees of freedom
+    # using the Bonferroni correction.
+    fc = finv(1-a/n, p, n-p-1)
+    result = (p*(n-1)**2*fc) / (n*(n-p-1)+(n*p*fc))
+    # = ((-1*((1/(1+(fc*p/(n-p-1))))-1))*((n-1)^2))/n;
+
+    return result
+
+
+def test():
+    assert abs(ACR(3, 25, 0.01) - 13.1753251622586) < 1e-13
+
+
+if __name__ == "__main__":
+    test()
diff --git a/bumps/dream/bounds.py b/bumps/dream/bounds.py
new file mode 100644
index 0000000..34efdca
--- /dev/null
+++ b/bumps/dream/bounds.py
@@ -0,0 +1,207 @@
+"""
+Bounds handling.
+
+Use make_bounds_handler((low, high), style) to create a bounds handling
+object.  The returned object transforms a point x so that all dimensions
+lie within the bounds.  Options are available, including reflecting,
+wrapping, clipping or randomizing the point, or ignoring the bounds.
+
+The returned bounds object has an apply(x) method which forces the
+point *x* into the bounds and returns it.
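+
+Example (a sketch with illustrative bounds and point values)::
+
+    import numpy as np
+    handler = make_bounds_handler((np.array([0., 0.]), np.array([1., 1.])),
+                                  style='reflect')
+    y = handler.apply(np.array([1.2, -0.3]))  # both values now lie in [0, 1]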
+"""
+__all__ = ["make_bounds_handler", "ReflectBounds", "ClipBounds",
+           "FoldBounds", "RandomBounds", "IgnoreBounds"]
+
+from numpy import inf, isinf, asarray
+from . import util
+
+
+def make_bounds_handler(bounds, style='reflect'):
+    """
+    Return a bounds object which can update the bounds.
+
+    Bounds handling *style* name is one of::
+
+        reflect:   reflect off the boundary
+        clip:      stop at the boundary
+        fold:      wrap values to the other side of the boundary
+        randomize: move to a random point in the bounds
+        none:      ignore the bounds
+
+    With semi-infinite intervals folding and randomizing aren't well
+    defined, and reflection is used instead.
+
+    With finite intervals the reflected or folded point may still be
+    outside the bounds (which can happen if the step size is too large),
+    and a random uniform value is used instead.
+    """
+    if bounds is None:
+        return IgnoreBounds()
+
+    low, high = bounds
+
+    # Do boundary handling -- what to do when points fall outside bound
+    style = style.lower()
+    if style == 'reflect':
+        f = ReflectBounds(low, high)
+    elif style == 'clip':
+        f = ClipBounds(low, high)
+    elif style == 'fold':
+        f = FoldBounds(low, high)
+    elif style == 'randomize':
+        f = RandomBounds(low, high)
+    elif style == 'none' or style is None:
+        f = IgnoreBounds()
+    else:
+        raise ValueError("bounds style %s is not valid" % style)
+    return f
+
+
+class Bounds(object):
+    def apply(self, x):
+        raise NotImplementedError
+
+    def __call__(self, pop):
+        for x in pop:
+            self.apply(x)
+        return pop
+
+
+class ReflectBounds(Bounds):
+    """
+    Reflect parameter values into bounded region
+    """
+    def __init__(self, low, high):
+        self.low, self.high = [asarray(v, 'd') for v in (low, high)]
+
+    def apply(self, y):
+        """
+        Update *y* so all values lie within bounds.
+
+        Returns *y* for convenience.  E.g., y = bounds.apply(x+0)
+        """
+        minn, maxn = self.low, self.high
+
+        # Reflect points which are out of bounds
+        idx = y < minn
+        y[idx] = 2*minn[idx] - y[idx]
+        idx = y > maxn
+        y[idx] = 2*maxn[idx] - y[idx]
+
+        # Randomize points which are still out of bounds
+        idx = (y < minn) | (y > maxn)
+        y[idx] = minn[idx] + util.rng.rand(sum(idx))*(maxn[idx]-minn[idx])
+        return y
+
+
+class ClipBounds(Bounds):
+    """
+    Clip values to bounded region
+    """
+    def __init__(self, low, high):
+        self.low, self.high = [asarray(v, 'd') for v in (low, high)]
+
+    def apply(self, y):
+        minn, maxn = self.low, self.high
+        idx = y < minn
+        y[idx] = minn[idx]
+        idx = y > maxn
+        y[idx] = maxn[idx]
+
+        return y
+
+
+class FoldBounds(Bounds):
+    """
+    Wrap values into the bounded region
+    """
+    def __init__(self, low, high):
+        self.low, self.high = [asarray(v, 'd') for v in (low, high)]
+
+    def apply(self, y):
+        minn, maxn = self.low, self.high
+
+        # Deal with semi-infinite cases using reflection
+        idx = (y < minn) & isinf(maxn)
+        y[idx] = 2*minn[idx] - y[idx]
+        idx = (y > maxn) & isinf(minn)
+        y[idx] = 2*maxn[idx] - y[idx]
+
+        # Wrap points which are out of bounds
+        idx = y < minn
+        y[idx] = maxn[idx] - (minn[idx] - y[idx])
+        idx = y > maxn
+        y[idx] = minn[idx] + (y[idx] - maxn[idx])
+
+        # Randomize points which are still out of bounds
+        idx = (y < minn) | (y > maxn)
+        y[idx] = minn[idx] + util.rng.rand(sum(idx))*(maxn[idx]-minn[idx])
+
+        return y
+
+
+class RandomBounds(Bounds):
+    """
+    Randomize values into the bounded region
+    """
+    def __init__(self, low, high):
+        self.low, self.high = [asarray(v, 'd') for v in (low, high)]
+
+    def apply(self, y):
+        minn, maxn = self.low, self.high
+
+        # Deal with semi-infinite cases using reflection
+        idx = (y < minn) & isinf(maxn)
+        y[idx] = 2*minn[idx] - y[idx]
+        idx = (y > maxn) & isinf(minn)
+        y[idx] = 2*maxn[idx] - y[idx]
+
+        # The remainder are selected uniformly from the bounded region
+        idx = (y < minn) | (y > maxn)
+        y[idx] = minn[idx] + util.rng.rand(sum(idx))*(maxn[idx]-minn[idx])
+
+        return y
+
+
+class IgnoreBounds(Bounds):
+    """
+    Leave values outside the bounded region
+    """
+    def __init__(self, low=None, high=None):
+        self.low, self.high = [asarray(v, 'd') for v in (low, high)]
+
+    def apply(self, y):
+        return y
+
+
+def test():
+    from numpy.linalg import norm
+    from numpy import array
+
+    bounds = list(zip([5, 10], [-inf, -10], [-5, inf], [-inf, inf]))
+    v = asarray([6, -12, 6, -12], 'd')
+    for t in 'none', 'reflect', 'clip', 'fold', 'randomize':
+        assert norm(make_bounds_handler(bounds, t).apply(v+0) - v) == 0
+    v = asarray([12, 12, -12, -12], 'd')
+    for t in 'none', 'reflect', 'clip', 'fold':
+        w = make_bounds_handler(bounds, t)
+        assert norm(w(array([v, v, v])) - w.apply(v+0)) == 0
+    assert norm(make_bounds_handler(bounds, 'none').apply(v+0) - v) == 0
+    assert norm(make_bounds_handler(bounds, 'reflect').apply(v+0)
+                - [8, -32, 2, -12]) == 0
+    assert norm(make_bounds_handler(bounds, 'clip').apply(v+0)
+                - [10, -10, -5, -12]) == 0
+    assert norm(make_bounds_handler(bounds, 'fold').apply(v+0)
+                - [7, -32, 2, -12]) == 0
+    w = make_bounds_handler(bounds, 'randomize').apply(v+0)
+    assert 5 <= w[0] <= 10 and w[1] == -32 and w[2] == 2 and w[3] == -12
+    v = asarray([20, 1, 1, 1], 'd')
+    w = make_bounds_handler(bounds, 'reflect').apply(v+0)
+    assert 5 <= w[0] <= 10
+    v = asarray([20, 1, 1, 1], 'd')
+    w = make_bounds_handler(bounds, 'fold').apply(v+0)
+    assert 5 <= w[0] <= 10
+
+
+if __name__ == "__main__":
+    test()
diff --git a/bumps/dream/core.py b/bumps/dream/core.py
new file mode 100644
index 0000000..8962b5e
--- /dev/null
+++ b/bumps/dream/core.py
@@ -0,0 +1,432 @@
+"""
+DiffeRential Evolution Adaptive Metropolis algorithm
+
+DREAM runs multiple different chains simultaneously for global exploration,
+and automatically tunes the scale and orientation of the proposal
+distribution using differential evolution.  The algorithm maintains
+detailed balance and ergodicity and works well and efficiently for a wide
+range of problems, especially in the presence of high-dimensionality and
+multimodality.
+
+DREAM developed by Jasper A. Vrugt and Cajo ter Braak
+
+This algorithm has been described in:
+
+   Vrugt, J.A., C.J.F. ter Braak, M.P. Clark, J.M. Hyman, and B.A. Robinson,
+      *Treatment of input uncertainty in hydrologic modeling: Doing hydrology
+      backward with Markov chain Monte Carlo simulation*,
+      Water Resources Research, 44, W00B09, 2008.
+      `doi:10.1029/2007WR006720 <http://dx.doi.org/10.1029/2007WR006720>`_
+
+   Vrugt, J.A., C.J.F. ter Braak, C.G.H. Diks, D. Higdon, B.A. Robinson,
+       and J.M. Hyman,
+       *Accelerating Markov chain Monte Carlo simulation by differential
+       evolution with self-adaptive randomized subspace sampling*,
+       International Journal of Nonlinear Sciences and Numerical Simulation,
+       10(3), 271-288, 2009.
+
+   Vrugt, J.A., C.J.F. ter Braak, H.V. Gupta, and B.A. Robinson,
+       *Equifinality of formal (DREAM) and informal (GLUE) Bayesian approaches
+       in hydrologic modeling*,
+       Stochastic Environmental Research and Risk Assessment,
+       1-16, 2009, In Press.
+       `doi:10.1007/s00477-008-0274-y
+       <http://dx.doi.org/10.1007/s00477-008-0274-y>`_
+
+For more information please read:
+
+   Ter Braak, C.J.F.,
+       *A Markov Chain Monte Carlo version of the genetic algorithm Differential
+       Evolution: easy Bayesian computing for real parameter spaces*,
+       Stat. Comput., 16, 239 - 249, 2006.
+       `doi:10.1007/s11222-006-8769-1
+       <http://dx.doi.org/10.1007/s11222-006-8769-1>`_
+
+   Vrugt, J.A., H.V. Gupta, W. Bouten and S. Sorooshian,
+       *A Shuffled Complex Evolution Metropolis algorithm for optimization
+       and uncertainty assessment of hydrologic model parameters*,
+       Water Resour. Res., 39 (8), 1201, 2003.
+       `doi:10.1029/2002WR001642 <http://dx.doi.org/10.1029/2002WR001642>`_
+
+   Ter Braak, C.J.F., and J.A. Vrugt,
+       *Differential Evolution Markov Chain with snooker updater
+       and fewer chains*,
+       Statistics and Computing, 2008.
+       `doi:10.1007/s11222-008-9104-9
+       <http://dx.doi.org/10.1007/s11222-008-9104-9>`_
+
+   Vrugt, J.A., C.J.F. ter Braak, and J.M. Hyman,
+       *Differential evolution adaptive Metropolis with snooker update and
+       sampling from past states*,
+       SIAM journal on Optimization, 2009.
+
+   Vrugt, J.A., C.J.F. ter Braak, and J.M. Hyman,
+       *Parallel Markov chain Monte Carlo simulation on distributed computing
+       networks using multi-try Metropolis with sampling from past states*,
+       SIAM journal on Scientific Computing, 2009.
+
+   G. Schoups, and J.A. Vrugt,
+       *A formal likelihood function for Bayesian inference of hydrologic
+       models with correlated, heteroscedastic and non-Gaussian errors*,
+       Water Resources Research, 2010, In Press.
+
+   G. Schoups, J.A. Vrugt, F. Fenicia, and N.C. van de Giesen,
+       *Inaccurate numerical solution of hydrologic models corrupts efficiency
+       and robustness of MCMC simulation*,
+       Water Resources Research, 2010, In Press.
+
+Copyright (c) 2008, Los Alamos National Security, LLC
+All rights reserved.
+
+Copyright 2008. Los Alamos National Security, LLC. This software was produced
+under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National
+Laboratory (LANL), which is operated by Los Alamos National Security, LLC
+for the U.S. Department of Energy. The U.S. Government has rights to use,
+reproduce, and distribute this software.
+
+NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
+WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
+THIS SOFTWARE.  If software is modified to produce derivative works, such
+modified software should be clearly marked, so as not to confuse it with
+the version available from LANL.
+
+Additionally, redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice,
+  this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of Los Alamos National Security, LLC, Los Alamos National
+  Laboratory, LANL the U.S. Government, nor the names of its contributors
+  may be used to endorse or promote products derived from this software
+  without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY LOS ALAMOS NATIONAL SECURITY, LLC AND
+CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LOS ALAMOS NATIONAL
+SECURITY, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+MATLAB code written by Jasper A. Vrugt, Center for NonLinear Studies (CNLS)
+
+Written by Jasper A. Vrugt: vrugt at lanl.gov
+
+Version 0.5: June 2008
+Version 1.0: October 2008  Adaption updated and generalized CR implementation
+
+
+
+2010-04-20 Paul Kienzle
+* Convert to python
+"""
+from __future__ import division, print_function
+
+__all__ = ["Dream", "run_dream"]
+
+import sys
+import time
+
+import numpy as np
+
+from .state import MCMCDraw
+from .metropolis import metropolis, metropolis_dr, dr_step
+from .gelman import gelman
+from .crossover import AdaptiveCrossover, LogAdaptiveCrossover
+from .diffev import de_step
+from .bounds import make_bounds_handler
+
+# Everything should be available in state, but let's be lazy for now
+LAST_TIME = 0
+
+
+def console_monitor(state, pop, logp):
+    global LAST_TIME
+    if state.generation == 1:
+        print("#gen", "logp(x)",
+              " ".join("<%s>" % par for par in state.labels))
+        LAST_TIME = time.time()
+
+    current_time = time.time()
+    if current_time >= LAST_TIME + 1:
+        LAST_TIME = current_time
+        print(state.generation, state._best_logp,
+              " ".join("%.15g" % v for v in state._best_x))
+        sys.stdout.flush()
+
+
+class Dream(object):
+    """
+    Data structure containing the details of the running DREAM analysis code.
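+
+    Typical use (a sketch; *model* must provide *bounds*, *labels* and a
+    *map(pop)* method returning the log posterior for each point, and
+    *population* is an (n_gen, n_chain, n_var) array of starting points)::
+
+        sampler = Dream(model=model, population=population, draws=20000)
+        state = sampler.sample()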
+    """
+    model = None
+    # Sampling parameters
+    burn = 0
+    draws = 100000
+    thinning = 1
+    outlier_test = "IQR"
+    population = None
+    # DE parameters
+    DE_steps = 10
+    DE_pairs = 3
+    DE_eps = 0.05
+    DE_snooker_rate = 0.1
+    DE_noise = 1e-6
+    bounds_style = 'reflect'
+    # Crossover parameters
+    CR = None
+    CR_spacing = 'linear'  # 'log' or 'linear'
+    # Delay rejection parameters
+    use_delayed_rejection = False
+    DR_scale = 1  # 1-sigma step size using cov of population
+    # Local optimizer best fit injection.  The optimizer has
+    # the following interface:
+    #    x, fx = goalseek(mapper, bounds_handler, pop, fpop)
+    # where:
+    #    x, fx are the local optimum point and its value
+    #    pop is the starting population
+    #    fpop is the nllf for each point in pop
+    #    mapper is a function which takes pop and returns fpop
+    #    bounds_handler takes pop and forces all points into the range
+    goalseek_optimizer = None
+    goalseek_interval = 1e100  # close enough to never
+    goalseek_minburn = 1000
+
+    def __init__(self, **kw):
+        self.monitor = console_monitor
+        for k, v in kw.items():
+            if hasattr(self, k):
+                setattr(self, k, v)
+            else:
+                raise TypeError("Unknown attribute "+k)
+
+        self._initialized = False
+
+    def sample(self, state=None, abort_test=lambda: False):
+        """
+        Pull the requisite number of samples from the distribution
+        """
+        if not self._initialized:
+            self._initialized = True
+        self.state = state
+        try:
+            run_dream(self, abort_test=abort_test)
+        except KeyboardInterrupt:
+            pass
+        return self.state
+
+
+def run_dream(dream, abort_test=lambda: False):
+
+    # Step 1: Sample s points in the parameter space
+    # [PAK] I moved this out of dream so that the user can use whatever
+    # complicated sampling scheme they want.  Unfortunately, this also means
+    # the user is responsible for choosing a suitable sampling scheme.
+    if dream.population is None:
+        raise ValueError("initial population not defined")
+
+    # Remember the problem dimensions
+    n_gen, n_chain, n_var = dream.population.shape
+    n_pop = n_gen*n_chain
+
+    if dream.CR is None:
+        if dream.CR_spacing == 'log':
+            dream.CR = LogAdaptiveCrossover(n_var)
+        else:  # linear
+            dream.CR = AdaptiveCrossover(3)
+
+    # Step 2: Calculate posterior density associated with each value in x
+    apply_bounds = make_bounds_handler(dream.model.bounds,
+                                       style=dream.bounds_style)
+
+    # Record initial state
+    allocate_state(dream)
+    state = dream.state
+    state.labels = dream.model.labels
+    previous_draws = state.draws
+    if previous_draws:
+        x, logp = state._last_gen()
+    else:
+        # No initial state, so evaluate initial population
+        for x in dream.population:
+            apply_bounds(x)
+# ********************** MAP *****************************
+            logp = dream.model.map(x)
+            state._generation(new_draws=n_chain, x=x,
+                              logp=logp, accept=n_chain,
+                              force_keep=True)
+            dream.monitor(state, x, logp)
+
+    # Skip r_stat and pCR until we have some data to analyze
+    state._update(R_stat=-2, CR_weight=dream.CR.weight)
+
+    # Now start drawing samples
+    #print "previous draws", previous_draws, "new draws", dream.draws+dream.burn
+    last_goalseek = (dream.draws + dream.burn)/n_pop - dream.goalseek_minburn
+    next_goalseek = state.generation + dream.goalseek_interval \
+        if dream.goalseek_optimizer else 1e100
+
+    #need_outliers_removed = True
+    scale = 1.0
+    #serial_time = parallel_time = 0.
+    #last_time = time.time()
+    while state.draws < dream.draws + dream.burn:
+
+        # Age the population using differential evolution
+        dream.CR.reset(Nsteps=dream.DE_steps, Npop=n_chain)
+        for gen in range(dream.DE_steps):
+
+            # Define the current locations and associated posterior densities
+            xold, logp_old = x, logp
+            pop = state._draw_pop()
+
+            # Generate candidates for each sequence
+            xtry, step_alpha, used \
+                = de_step(n_chain, pop, dream.CR[gen],
+                          max_pairs=dream.DE_pairs,
+                          eps=dream.DE_eps,
+                          snooker_rate=dream.DE_snooker_rate,
+                          noise=dream.DE_noise,
+                          scale=scale)
+
+            # PAK: Try a local optimizer every N generations
+            if next_goalseek <= state.generation <= last_goalseek:
+                best, logp_best = dream.goalseek_optimizer(
+                    dream.model.map, apply_bounds, xold, logp_old)
+                xtry[0] = best
+                # Note: it is slightly inefficient to throw away logp_best,
+                # but it makes the code cleaner if we do
+                next_goalseek += dream.goalseek_interval
+
+            # Compute the likelihood of the candidates
+            apply_bounds(xtry)
+# ********************** MAP *****************************
+            #next_time = time.time()
+            #serial_time += next_time - last_time
+            #last_time = next_time
+            logp_try = dream.model.map(xtry)
+            #next_time = time.time()
+            #parallel_time  += next_time - last_time
+            #last_time = next_time
+            draws = len(logp_try)
+
+            # Apply the metropolis acceptance/rejection rule
+            x, logp, alpha, accept \
+                = metropolis(xtry, logp_try,
+                             xold, logp_old,
+                             step_alpha)
+
+            # Process delayed rejection
+            # PAK NOTE: this updates according to the covariance matrix of the
+            # current sample, which may be useful on unimodal systems, but
+            # doesn't seem to be of any value in general; the DREAM papers
+            # found that the acceptance rate did indeed improve with delayed
+            # rejection, but the overall performance did not.  Worse, this
+            # requires a linear system solution O(nPop^3) which can be near
+            # singular for complex posterior distributions.
+            if dream.use_delayed_rejection and not accept.all():
+                # Generate alternate candidates using the covariance of xold
+                xdr, r = dr_step(x=xold, scale=dream.DR_scale)
+
+                # Compute the likelihood of the new candidates
+                reject = ~accept
+                apply_bounds(xdr)
+# ********************** MAP *****************************
+                logp_xdr = dream.model.map(xdr[reject])
+                draws += len(logp_xdr)
+
+                # Apply the metropolis delayed rejection rule.
+                x[reject], logp[reject], alpha[reject], accept[reject] = \
+                    metropolis_dr(xtry[reject], logp_try[reject],
+                                  x[reject], logp[reject],
+                                  xold[reject], logp_old[reject],
+                                  alpha[reject], r)
+
+            # els = zip(logp_old, logp_try, logp, x[:, -2], x[:, -1], accept)
+            #print "pop", "\n ".join((("%12.3e "*(len(el)-1))%el[:-1])
+            #                       +("T " if el[-3]<=el[-2] else "  ")
+            #                       +("accept" if el[-1] else "")
+            #                       for el in els)
+
+            # Update Sequences with the new population.
+            state._generation(draws, x, logp, accept)
+# ********************** NOTIFY **************************
+            dream.monitor(state, x, logp)
+            #print state.generation, ":", state._best_logp
+
+            # Keep track of which CR ratios were successful
+            if state.draws <= dream.burn:
+                dream.CR.update(gen, xold, x, used)
+            
+            if abort_test():
+                break
+
+        #print("serial&parallel",serial_time,parallel_time)
+        # End of differential evolution aging
+        # ---------------------------------------------------------------------
+
+        # Calculate Gelman and Rubin convergence diagnostic
+        #_, points, _ = state.chains()
+        #r_stat = gelman(points, portion=0.5)
+        r_stat = 0.  # Suppress for now since it is broken, and it costs to unroll
+
+        #if state.draws <= 0.1 * dream.draws:
+        if state.draws <= dream.burn:
+            # Adapt the crossover ratio, but only during burn-in.
+            dream.CR.adapt()
+        # See whether there are any outlier chains, and remove
+        # them to current best value of X
+        #if need_outliers_removed and state.draws > 0.5*dream.burn:
+        #    state.remove_outliers(x, logp, test=dream.outlier_test)
+        #    need_outliers_removed = False
+
+        if False:
+            # Suppress scale update until we have a chance to verify that it
+            # doesn't skew the resulting statistics.
+            _, r = state.acceptance_rate()
+            ravg = np.mean(r[-dream.DE_steps:])
+            if ravg > 0.4:
+                scale *= 1.01
+            elif ravg < 0.2:
+                scale /= 1.01
+
+
+
+        # Save update information
+        state._update(R_stat=r_stat, CR_weight=dream.CR.weight)
+        
+        if abort_test():
+            break
+
+
+def allocate_state(dream):
+    """
+    Estimate the size of the output
+    """
+    # Determine problem dimensions from the initial population
+    n_pop, n_chain, n_var = dream.population.shape
+    steps = dream.DE_steps
+    thinning = dream.thinning
+    n_cr = len(dream.CR.CR)
+    draws = dream.draws
+
+    n_update = int(draws/(steps*n_chain)) + 1
+    n_gen = n_update * steps
+    n_thin = int(n_gen/thinning) + 1
+    #print n_gen, n_thin, n_update, draws, steps, Npop, n_var
+
+    if dream.state is not None:
+        dream.state.resize(
+            n_gen, n_thin, n_update, n_var, n_chain, n_cr, thinning)
+    else:
+        dream.state = MCMCDraw(
+            n_gen, n_thin, n_update, n_var, n_chain, n_cr, thinning)
diff --git a/bumps/dream/corrplot.py b/bumps/dream/corrplot.py
new file mode 100644
index 0000000..5ae979e
--- /dev/null
+++ b/bumps/dream/corrplot.py
@@ -0,0 +1,175 @@
+# This program is public domain
+# Author Paul Kienzle
+"""
+2-D correlation histograms
+
+Generate 2-D correlation histograms and display them in a figure.
+
+Uses false color plots of density.
+"""
+__all__ = ['Corr2d']
+
+import numpy as np
+from numpy import inf
+
+from matplotlib import cm, colors, image, artist
+from matplotlib.font_manager import FontProperties
+from matplotlib.ticker import MaxNLocator
+
+try:
+    COLORMAP = colors.LinearSegmentedColormap.from_list(
+        'density', ('w', 'y', 'g', 'b', 'r'))
+except Exception:
+    COLORMAP = cm.gist_earth_r
+
+
+class Corr2d:
+    """
+    Generate and manage 2D correlation histograms.
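+
+    Example (a sketch with illustrative random data)::
+
+        import numpy as np
+        data = np.random.rand(3, 1000)   # 3 variables, 1000 samples each
+        Corr2d(data, labels=['a', 'b', 'c'], bins=50).plot()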
+    """
+    def __init__(self, data, labels=None, **kw):
+        if labels is None:
+            labels = ["P"+str(i+1) for i, _ in enumerate(data)]
+        self.N = len(data)
+        self.labels = labels
+        self.data = data
+        self.hists = _hists(data, **kw)
+        #for k, v in self.hists.items():
+        #    print k, (v[1][0], v[1][-1]), (v[2][0], v[2][-1])
+        self.ax = None  # will be set on plot
+
+    def R(self):
+        return np.corrcoef(self.data)
+
+    def __getitem__(self, key):
+        """
+        Retrieve correlation histogram for data[i] X data[j], using self[i, j].
+
+        Returns bin i edges, bin j edges, and histogram
+        """
+        return self.hists[key]
+
+    def plot(self, title=None):
+        """
+        Plot the correlation histograms on the specified figure
+        """
+        import pylab
+
+        pylab.clf()
+        fig = pylab.gcf()
+        if title is not None:
+            fig.text(0.5, 0.95, title,
+                     horizontalalignment='center',
+                     fontproperties=FontProperties(size=16))
+        self.ax = _plot(fig, self.hists, self.labels, self.N)
+
+
+def _hists(data, ranges=None, **kw):
+    """
+    Generate pair-wise correlation histograms
+    """
+    n = len(data)
+    if ranges is None:
+        low, high = np.min(data, axis=1), np.max(data, axis=1)
+        ranges = [(l, h) for l, h in zip(low, high)]
+    return dict(((i, j), np.histogram2d(data[i], data[j],
+                                        range=[ranges[i], ranges[j]], **kw))
+                for i in range(0, n)
+                for j in range(i+1, n))
+
+
+def _plot(fig, hists, labels, n, show_ticks=None):
+    """
+    Plot pair-wise correlation histograms
+    """
+    if n <= 1:
+        fig.text(0.5, 0.5, "No correlation plots when only one variable",
+                 ha="center", va="center")
+        return
+    vmin, vmax = inf, -inf
+    for data, _, _ in hists.values():
+        positive = data[data > 0]
+        if len(positive) > 0:
+            vmin = min(vmin, np.amin(positive))
+            vmax = max(vmax, np.amax(positive))
+    norm = colors.LogNorm(vmin=vmin, vmax=vmax, clip=False)
+    #norm = colors.Normalize(vmin=vmin, vmax=vmax)
+    mapper = image.FigureImage(fig)
+    mapper.set_array(np.zeros(0))
+    mapper.set_cmap(cmap=COLORMAP)
+    mapper.set_norm(norm)
+
+    if show_ticks is None:
+        show_ticks = n < 3
+    ax = {}
+    Nr = Nc = n-1
+    for i in range(0, n-1):
+        for j in range(i+1, n):
+            sharex = ax.get((0, j), None)
+            sharey = ax.get((i, i+1), None)
+            a = fig.add_subplot(Nr, Nc, (Nr-i-1)*Nc + j,
+                                sharex=sharex, sharey=sharey)
+            ax[(i, j)] = a
+            a.xaxis.set_major_locator(MaxNLocator(4, steps=[1, 2, 4, 5, 10]))
+            a.yaxis.set_major_locator(MaxNLocator(4, steps=[1, 2, 4, 5, 10]))
+            data, x, y = hists[(i, j)]
+            data = np.clip(data, vmin, vmax)
+            a.pcolorfast(y, x, data, cmap=COLORMAP, norm=norm)
+            # Show labels or hide ticks
+            if i != 0:
+                artist.setp(a.get_xticklabels(), visible=False)
+            if i == n-2 and j == n-1:
+                a.set_xlabel(labels[j])
+                #a.xaxis.set_label_position("top")
+                #a.xaxis.set_offset_position("top")
+            if not show_ticks:
+                a.xaxis.set_ticks([])
+            if j == i+1:
+                a.set_ylabel(labels[i])
+            else:
+                artist.setp(a.get_yticklabels(), visible=False)
+            if not show_ticks:
+                a.yaxis.set_ticks([])
+
+            a.zoomable = True
+
+    # Adjust subplots and add the colorbar
+    fig.subplots_adjust(left=0.07, bottom=0.07, top=0.9, right=0.85,
+                        wspace=0.0, hspace=0.0)
+    cax = fig.add_axes([0.88, 0.2, 0.04, 0.6])
+    fig.colorbar(mapper, cax=cax, orientation='vertical')
+    return ax
+
+
+def zoom(event, step):
+    ax = event.inaxes
+    if not hasattr(ax, 'zoomable'):
+        return
+
+    # TODO: test logscale
+    step *= 3
+
+    if ax.zoomable is not True and 'mapper' in ax.zoomable:
+        mapper = ax.zoomable['mapper']
+        if event.ydata is not None:
+            lo, hi = mapper.get_clim()
+            pt = event.ydata*(hi-lo)+lo
+            lo, hi = _rescale(lo, hi, pt, step)
+            mapper.set_clim((lo, hi))
+    if ax.zoomable is True and event.xdata is not None:
+        lo, hi = ax.get_xlim()
+        lo, hi = _rescale(lo, hi, event.xdata, step)
+        ax.set_xlim((lo, hi))
+    if ax.zoomable is True and event.ydata is not None:
+        lo, hi = ax.get_ylim()
+        lo, hi = _rescale(lo, hi, event.ydata, step)
+        ax.set_ylim((lo, hi))
+    ax.figure.canvas.draw_idle()
+
+
+def _rescale(lo, hi, pt, step):
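+    # Stretch (step > 0) or shrink (step < 0) the interval [lo, hi] by
+    # roughly step percent while keeping the relative position of pt fixed.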
+    scale = float(hi-lo)*step/(100 if step > 0 else 100-step)
+    bal = float(pt-lo)/(hi-lo)
+    new_lo = lo - bal*scale
+    new_hi = hi + (1-bal)*scale
+    return new_lo, new_hi
diff --git a/bumps/dream/crossover.py b/bumps/dream/crossover.py
new file mode 100644
index 0000000..14e09d5
--- /dev/null
+++ b/bumps/dream/crossover.py
@@ -0,0 +1,230 @@
+"""
+Crossover ratios
+
+The crossover ratio (CR) determines what percentage of parameters in the
+target vector are updated with a difference vector selected from the
+population.  In traditional differential evolution a CR value is chosen
+somewhere in [0, 1] at the start of the search and stays constant throughout.
+DREAM extends this by allowing multiple CRs at the same time with different
+probabilities.  Adaptive crossover adjusts the relative weights of the CRs
+based on the average distance of the steps taken when that CR was used.  This
+distance will be zero for unsuccessful metropolis steps, and so the relative
+weights on those CRs which generate many unsuccessful steps will be reduced.
+
+Usage
+-----
+
+1. Traditional differential evolution::
+
+    crossover = Crossover(CR=CR)
+
+2. Weighted crossover ratios::
+
+    crossover = Crossover(CR=[CR1, CR2, ...], weight=[weight1, weight2, ...])
+
+The weights are normalized to one, and default to equally weighted CRs.
+
+3. Adaptive weighted crossover ratios::
+
+    crossover = AdaptiveCrossover(N)
+
+The CRs are set to *[1/N, 2/N, ... 1]*, and start out equally weighted.  The
+weights are adapted during burn-in (10% of the runs) and fixed for the
+remainder of the analysis.
+
+Compatibility Notes
+-------------------
+
+For *Extra.pCR == 'Update'* in the matlab interface use::
+
+    CR = AdaptiveCrossover(Ncr=MCMCPar.nCR)
+
+For *Extra.pCR != 'Update'* in the matlab interface use::
+
+    CR = Crossover(CR=[1./Ncr], weight=[1])
+
+"""
+from __future__ import division
+
+__all__ = ["Crossover", "AdaptiveCrossover", "LogAdaptiveCrossover"]
+
+from numpy import hstack, empty, ones, zeros, cumsum, arange, \
+    reshape, array, isscalar, asarray, std, sum, trunc, log10, logspace
+
+from . import util
+
+
+class Crossover(object):
+    """
+    Fixed weight crossover ratios.
+
+    *CR* is a scalar if there is a single crossover ratio, or a vector of
+    numbers in (0, 1].
+
+    *weight* is the relative weighting of each CR, or None for equal weights.
+    """
+    def __init__(self, CR, weight=None):
+        if isscalar(CR):
+            CR, weight = [CR], [1]
+        if weight is None:
+            # default to equal weights when no weights are given
+            weight = ones(len(CR))
+        CR, weight = [asarray(v, 'd') for v in (CR, weight)]
+        self.CR, self.weight = CR, weight/sum(weight)
+
+    def reset(self, Nsteps, Npop):
+        """
+        Generate CR samples for the next Nsteps over a population of size Npop.
+        """
+        self._CR_samples = gen_CR(self.CR, self.weight, Nsteps, Npop)
+
+    def __getitem__(self, N):
+        """
+        Return CR samples for step N since reset.
+        """
+        return self._CR_samples[N]
+
+    def update(self, N, xold, xnew, used):
+        """
+        Gather adaptation data on *xold*, *xnew* for each CR that was
+        *used* in step *N*.
+        """
+        pass
+
+    def adapt(self):
+        """
+        Update CR weights based on the available adaptation data.
+        """
+        pass
+
+
+class BaseAdaptiveCrossover(object):
+    """
+    Adapted weight crossover ratios.
+    """
+    def _set_CRs(self, CR):
+        self.CR = CR
+        N = len(CR)
+        self.weight = ones(N) / N  # Start with all CRs equally probable
+        self._count = zeros(N)     # No statistics for adaptation
+        self._distance = zeros(N)
+
+    def reset(self, Nsteps, Npop):
+        """
+        Generate CR samples for the next Nsteps over a population of size Npop.
+        """
+        self._CR_samples = gen_CR(self.CR, self.weight, Nsteps, Npop)
+
+    def __getitem__(self, step):
+        """
+        Return CR samples for step N since reset.
+        """
+        return self._CR_samples[step]
+
+    def update(self, N, xold, xnew, used):
+        """
+        Gather adaptation data on *xold*, *xnew* for each CR that was
+        *used* in step *N*.
+        """
+        # Calculate the standard deviation of each dimension of X
+        r = std(xnew, ddof=1, axis=0)
+        # Compute the Euclidean distance between new X and old X
+        d = sum(((xold - xnew)/r)**2, axis=1)
+        # Use this information to update sum_p2 to update N_CR
+        N, Sd = distance_per_CR(self.CR, d, self._CR_samples[N], used)
+        self._distance += Sd
+        self._count += N
+
+    def adapt(self):
+        """
+        Update CR weights based on the available adaptation data.
+        """
+        Npop = self._CR_samples.shape[1]
+        self.weight = (self._distance/self._count) * (Npop/sum(self._distance))
+        self.weight /= sum(self.weight)
+
+
+class AdaptiveCrossover(BaseAdaptiveCrossover):
+    """
+    Adapted weight crossover ratios.
+
+    *N* is the number of CRs to use.  CR is set to [1/N, 2/N, ..., 1], with
+    initial weights [1/N, 1/N, ..., 1/N].
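+
+    For example, AdaptiveCrossover(4) uses CRs [0.25, 0.5, 0.75, 1.0], each
+    starting with weight 0.25.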
+    """
+    def __init__(self, N):
+        if N < 2:
+            raise ValueError("Need more than one CR for AdaptiveCrossover")
+        self._set_CRs((arange(N)+1)/N)  # Equally spaced CRs
+
+
+# [PAK] Add log spaced adaptive cross-over for high dimensional tightly
+# constrained problems.
+class LogAdaptiveCrossover(BaseAdaptiveCrossover):
+    """
+    Adapted weight crossover ratios, log-spaced.
+
+    *dim* is the number of dimensions in the problem.
+    *N* is the number of CRs to use per decade.
+
+    CR is set to [k/dim] where k is log-spaced from 1 to dim.
+    The CRs start equally weighted as [1, ..., 1]/len(CR).
+
+    *N* should be around 4.5.  This gives good low end density, with 1, 2, 3,
+    and 5 parameters changed at a time, and proceeds up to 60% and 100% of
+    parameters each time.  Lower values of *N* give too few high density CRs,
+    and higher values give too many low density CRs.
+    """
+    def __init__(self, dim, N=4.5):
+        # Log spaced CR from 1/dim to dim/dim
+        self._set_CRs(logspace(0, log10(dim), int(trunc(N*log10(dim)+1)))/dim)
+
+
+def gen_CR(CR, weight, Nsteps, Npop):
+    """
+    Generates CR samples for Nsteps generations of size Npop.
+
+    The frequency and value of the samples are based on the CR values and weights.
+    """
+    if len(CR) == 1:
+        return CR[0] * ones( (Nsteps, Npop) )
+
+    # Determine how many of each CR to use based on the weights
+    L = util.rng.multinomial(Nsteps * Npop, weight)
+
+    # Turn this into index boundaries within a CR location vector
+    L = hstack((0, cumsum(L)))
+
+    # Generate a random location vector for each CR in the sample
+    r = util.rng.permutation(Nsteps * Npop)
+
+    # Fill each location in the sample with the correct CR.
+    sample = empty(r.shape)
+    for i, v in enumerate(CR):
+        # Select a range of elements in r
+        idx = r[L[i]:L[i+1]]
+
+        # Fill those elements with crossover ratio v
+        sample[idx] = v
+
+    # Now reshape CR
+    sample = reshape(sample, (Nsteps, Npop) )
+
+    return sample
+
+
+def distance_per_CR(available_CRs, distances, CRs, used):
+    """
+    Accumulate normalized Euclidean distance for each crossover value
+
+    Returns the number of times each available CR was used and the total
+    distance for that CR.
+    """
+    total = array([sum(distances[(CRs==p)&used]) for p in available_CRs])
+    count = array([sum((CRs==p)&used) for p in available_CRs])
+    return count, total
+
+
+def demo():
+    CR, weight = array([.25, .5, .75, .1]), array([.1, .6, .2, .1])
+    print(gen_CR(CR, weight, 5, 4))
+
+if __name__ == "__main__":
+    demo()
+    # TODO: needs actual tests
diff --git a/bumps/dream/diffev.py b/bumps/dream/diffev.py
new file mode 100644
index 0000000..76b9276
--- /dev/null
+++ b/bumps/dream/diffev.py
@@ -0,0 +1,164 @@
+"""
+Differential evolution MCMC stepper.
+"""
+from __future__ import division
+
+__all__ = ["de_step"]
+
+from numpy import zeros, ones, dot, cov, eye, sqrt, sum, all
+from numpy import where, select
+from numpy.linalg import norm, cholesky, LinAlgError
+from .util import draw, rng
+
+EPS = 1e-6
+_SNOOKER, _DE, _DIRECT = 0, 1, 2
+
+
+def de_step(Nchain, pop, CR, max_pairs=2, eps=0.05,
+            snooker_rate=0.1, noise=1e-6, scale=1.0):
+    """
+    Generates offspring for a Metropolis-Hastings Markov chain Monte Carlo update
+
+    The number of chains may be smaller than the population size if the
+    population is selected from both the current generation and the
+    ancestors.
+    """
+    Npop, Nvar = pop.shape
+
+    # Initialize the delta update to zero
+    delta_x = zeros((Nchain, Nvar))
+    step_alpha = ones(Nchain)
+
+    # Choose snooker, de or direct according to snooker_rate, and 80:20
+    # ratio of de to direct.
+    u = rng.rand(Nchain)
+    de_rate = 0.8 * (1-snooker_rate)
+    alg = select([u < snooker_rate, u < snooker_rate+de_rate],
+                 [_SNOOKER, _DE], default=_DIRECT)
+    use_de_step = (alg == _DE)
+
+    # Chains evolve using information from other chains to create offspring
+    for qq in range(Nchain):
+
+        if alg[qq] == _DE:  # Use DE with cross-over ratio
+
+            # Select the number of vector pair differences to use in the update
+            # using k ~ discrete U[1, max pairs]
+            k = rng.randint(max_pairs)+1
+            # [PAK: same as k = DEversion[qq, 1] in matlab version]
+
+            # Select 2*k members at random different from the current member
+            perm = draw(2*k, Npop-1)
+            perm[perm >= qq] += 1
+            r1, r2 = perm[:k], perm[k:2*k]
+
+            # Select the dims to update based on the crossover ratio, making
+            # sure at least one dim is selected
+            vars = where(rng.rand(Nvar) > CR[qq])[0]
+            if len(vars) == 0:
+                vars = [rng.randint(Nvar)]
+
+            # Weight the size of the jump inversely proportional to the
+            # number of contributions, both from the parameters being
+            # updated and from the population defining the step direction.
+            gamma = 2.38/sqrt(2 * len(vars) * k)
+            # [PAK: same as F=Table_JumpRate[len(vars), k] in matlab version]
+
+            # Find and average step from the selected pairs
+            step = sum(pop[r1]-pop[r2], axis=0)
+
+            # Apply that step with F scaling and noise
+            jiggle = 1 + eps * (2 * rng.rand(*step.shape) - 1)
+            delta_x[qq, vars] = (jiggle*gamma*step)[vars]
+
+        elif alg[qq] == _SNOOKER:  # Use snooker update
+
+            # Select current and three others
+            perm = draw(3, Npop-1)
+            perm[perm >= qq] += 1
+            xi = pop[qq]
+            z, R1, R2 = [pop[i] for i in perm]
+
+            # Find the step direction and scale it to the length of the
+            # projection of R1-R2 onto the step direction.
+            # TODO: population sometimes not unique!
+            step = xi - z
+            denom = sum(step**2)
+            if denom == 0:  # identical points; should be extremely rare
+                step = EPS*rng.randn(*step.shape)
+                denom = sum(step**2)
+            step_scale = sum((R1-R2)*step) / denom
+
+            # Step using gamma of 2.38/sqrt(2) + U(-0.5, 0.5)
+            gamma = 1.2 + rng.rand()
+            delta_x[qq] = gamma * step_scale * step
+
+            # Scale Metropolis probability by (||xi* - z||/||xi - z||)^(d-1)
+            step_alpha[qq] = (norm(delta_x[qq]+step)/norm(step))**((Nvar-1)/2)
+
+        elif alg[qq] == _DIRECT:  # Use one pair and all dimensions
+
+            # Note that there is no F scaling, dimension selection or noise
+            perm = draw(2, Npop-1)
+            perm[perm >= qq] += 1
+            delta_x[qq, :] = pop[perm[0], :] - pop[perm[1], :]
+
+        else:
+            raise RuntimeError("Select failed...should never happen")
+
+        # If no step was specified (exceedingly unlikely!), then
+        # select a delta at random from a gaussian approximation to the
+        # current population
+        if all(delta_x[qq] == 0):
+            try:
+                #print "No step"
+                # Compute the Cholesky Decomposition of x_old
+                R = (2.38/sqrt(Nvar)) * cholesky(cov(pop.T) + EPS*eye(Nvar))
+                # Generate jump using multinormal distribution
+                delta_x[qq] = dot(rng.randn(*(1, Nvar)), R)
+            except LinAlgError:
+                print("Bad cholesky")
+                delta_x[qq] = rng.randn(Nvar)
+
+    # Update x_old with delta_x and noise
+    delta_x *= scale
+
+    # [PAK] The noise term needs to depend on the fitting range
+    # of the parameter rather than using a fixed noise value for all
+    # parameters.  The  current parameter value is a pretty good proxy
+    # in most cases (i.e., relative noise), but it breaks down if the
+    # parameter is zero, or if the range is something like 1 +/- eps.
+
+    # absolute noise
+    #x_new = pop[:Nchain] + delta_x + scale*noise*rng.randn(Nchain, Nvar)
+
+    # relative noise
+    x_new = pop[:Nchain] * (1 + scale*noise*rng.randn(Nchain, Nvar)) + delta_x
+
+    # no noise
+    #x_new = pop[:Nchain] + delta_x
+
+    return x_new, step_alpha, use_de_step
+
+
+def _check():
+    from numpy import arange
+    nchain, npop, nvar = 4, 10, 3
+
+    pop = 100*arange(npop*nvar).reshape((npop, nvar))
+    pop += rng.rand(*pop.shape)*1e-6
+    cr = 1./(rng.randint(4, size=nchain)+1)
+    x_new, _step_alpha, used = de_step(nchain, pop, cr, max_pairs=2, eps=0.05)
+    print("""\
+The following table shows the expected portion of the dimensions that
+are changed and the rounded value of the change for each point in the
+population.
+""")
+    for r, i, u in zip(cr, range(8), used):
+        rstr = ("%3d%% " % (r*100)) if u else "full "
+        vstr = " ".join("%4d" % (int(v/100+0.5)) for v in x_new[i]-pop[i])
+        print(rstr+vstr)
+
+
+if __name__ == "__main__":
+    _check()
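
A minimal usage sketch for de_step (assuming the bumps package is installed;
the population size, chain count and CR values below are illustrative only)::

    import numpy as np
    from bumps.dream.diffev import de_step

    nchain, nvar = 5, 3
    pop = np.random.rand(nchain, nvar)    # current generation, one row per chain
    CR = np.full(nchain, 0.5)             # crossover ratio for each chain
    x_new, step_alpha, used_de = de_step(nchain, pop, CR, max_pairs=2, eps=0.05)
    print(x_new.shape, step_alpha.shape, int(used_de.sum()), "chains took a DE step")
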
diff --git a/bumps/dream/entropy.py b/bumps/dream/entropy.py
new file mode 100644
index 0000000..b3209e4
--- /dev/null
+++ b/bumps/dream/entropy.py
@@ -0,0 +1,351 @@
+"""
+Estimate entropy after a fit.
+
+The :func:`entropy` method computes the entropy directly from a set of
+MCMC samples, normalized by a scale factor computed from the kernel density
+estimate at a subset of the points.\ [#Kramer]_
+
+The :func:`cov_entropy` method computes the entropy associated with the
+covariance matrix.  This covariance matrix can be estimated during the
+fitting procedure (BFGS updates an estimate of the Hessian matrix for example),
+or computed by estimating derivatives when the fit is complete.
+
+The :class:`MVNEntropy` estimates the covariance from an MCMC sample and
+uses this covariance to estimate the entropy.  This gives a better
+estimate of the entropy than the equivalent direct calculation, which requires
+many more samples for a good kernel density estimate.  The *reject_normal*
+attribute is *True* if the MCMC sample is significantly different from normal.
+
+.. [#Kramer]
+    Kramer, A., Hasenauer, J., Allgower, F., Radde, N., 2010.
+    Computation of the posterior entropy in a Bayesian framework
+    for parameter estimation in biological networks,
+    in: 2010 IEEE International Conference on Control Applications (CCA).
+    Presented at the 2010 IEEE International Conference on
+    Control Applications (CCA), pp. 493-498.
+    doi:10.1109/CCA.2010.5611198
+
+
+.. [#Trujillo-Ortiz]
+    Trujillo-Ortiz, A. and R. Hernandez-Walls. (2003). Mskekur: Mardia's
+        multivariate skewness and kurtosis coefficients and its hypotheses
+        testing. A MATLAB file. [WWW document].
+        `<http://www.mathworks.com/matlabcentral/fileexchange/loadFile.do?objectId=3519>`_
+
+.. [#Mardia1970]
+    Mardia, K. V. (1970), Measures of multivariate skewness and kurtosis with
+        applications. Biometrika, 57(3):519-530.
+
+.. [#Mardia1974]
+    Mardia, K. V. (1974), Applications of some measures of multivariate skewness
+        and kurtosis for testing normality and robustness studies. Sankhya A,
+        36:115-128
+
+.. [#Stevens]
+    Stevens, J. (1992), Applied Multivariate Statistics for Social Sciences.
+        2nd ed. New Jersey: Lawrence Erlbaum Associates Publishers. pp. 247-248.
+
+"""
+from __future__ import division
+
+__all__ = ["entropy"]
+
+import numpy as np
+from numpy import mean, std, exp, log, max, sqrt, log2, pi, e
+from numpy.random import permutation
+from scipy.stats import norm, chi2
+LN2 = log(2)
+
+
+def scipy_stats_density(sample_points, evaluation_points):  # pragma: no cover
+    """
+    Estimate the probability density function from which a set of sample
+    points was drawn and return the estimated density at the evaluation points.
+    """
+    from scipy.stats import gaussian_kde
+
+    ## standardize data so that we can use uniform bandwidth
+    ## Note: this didn't help with singular matrix
+    #mu, sigma = mean(data, axis=0), std(data, axis=0)
+    #data,points = (data - mu)/sigma, (points - mu)/sigma
+
+    kde = gaussian_kde(sample_points)
+    return kde(evaluation_points)
+
+
+def sklearn_density(sample_points, evaluation_points):
+    """
+    Estimate the probability density function from which a set of sample
+    points was drawn and return the estimated density at the evaluation points.
+    """
+    from sklearn.neighbors import KernelDensity
+
+    # Silverman bandwidth estimator
+    n, d = sample_points.shape
+    bandwidth = (n * (d + 2) / 4.)**(-1. / (d + 4))
+
+    # Standardize data so that we can use uniform bandwidth.
+    # Note that we will need to scale the resulting density by sigma to
+    # correct the area.
+    mu, sigma = mean(sample_points, axis=0), std(sample_points, axis=0)
+    data, points = (sample_points - mu)/sigma, (evaluation_points - mu)/sigma
+
+    #print("starting grid search for bandwidth over %d points"%n)
+    #from sklearn.grid_search import GridSearchCV
+    #from numpy import logspace
+    #params = {'bandwidth': logspace(-1, 1, 20)}
+    #fitter = GridSearchCV(KernelDensity(), params)
+    #fitter.fit(data)
+    #kde = fitter.best_estimator_
+    #print("best bandwidth: {0}".format(kde.bandwidth))
+    #import time; T0 = time.time()
+    kde = KernelDensity(kernel='gaussian', bandwidth=bandwidth,
+                        rtol=1e-6, atol=1e-6)
+    #print("T:%6.3f   fitting"%(time.time()-T0))
+    kde.fit(data)
+    #print("T:%6.3f   estimating"%(time.time()-T0))
+    log_pdf = kde.score_samples(points)
+    #print("T:%6.3f   done"%(time.time()-T0))
+    return exp(log_pdf)/np.prod(sigma)  # undo the x scaling on the data points
+
+
+# scipy kde fails with singular matrix, so we will use scikit.learn
+#density = scipy_stats_density
+density = sklearn_density
+
+
+def entropy(points, logp, N_entropy=10000, N_norm=2500):
+    r"""
+    Return entropy estimate and uncertainty from a random sample.
+
+    *points* is a set of draws from an underlying distribution, as returned
+    by a Markov chain Monte Carlo process for example.
+
+    *logp* is the log-likelihood for each draw.
+
+    *N_norm* is the number of points $k$ to use to estimate the posterior
+    density normalization factor $P(D) = \hat N$, converting
+    from $\log( P(D|M) P(M) )$ to $\log( P(D|M)P(M)/P(D) )$. The relative
+    uncertainty $\Delta\hat S/\hat S$ scales as $1/\sqrt{k}$, with the
+    default *N_norm=2500* corresponding to 2% relative uncertainty.
+    Computation cost is $O(nk)$ where $n$ is number of points in the draw.
+
+    *N_entropy* is the number of points used to estimate the entropy
+    $\hat S = - \int P(M|D) \log P(M|D)$ from the normalized log likelihood
+    values.
+    """
+
+    # Use a random subset to estimate density
+    if N_norm >= len(logp):
+        norm_points = points
+    else:
+        idx = permutation(len(points))[:N_norm]
+        norm_points = points[idx]
+
+    # Use a different subset to estimate the scale factor between density
+    # and logp.
+    if N_entropy >= len(logp):
+        entropy_points, eval_logp = points, logp
+    else:
+        idx = permutation(len(points))[:N_entropy]
+        entropy_points, eval_logp = points[idx], logp[idx]
+
+    """
+    # Try again, just using the points from the high probability regions
+    # to determine the scale factor
+    N_norm = min(len(logp), 5000)
+    N_entropy = int(0.8*N_norm)
+    idx = np.argsort(logp)
+    norm_points = points[idx[-N_norm:]]
+    entropy_points = points[idx[-N_entropy:]]
+    eval_logp = logp[idx[-N_entropy:]]
+    """
+
+    # Normalize p to a peak probability of 1 so that exp() doesn't underflow.
+    #
+    # This should be okay since for the normalizing constant C:
+    #
+    #      u' = e^(ln u + ln C) = e^(ln u)e^(ln C) = C u
+    #
+    # Using eq. 11 with u' substituted for u:
+    #
+    #      N_est = < u'/p > = < C u/p > = C < u/p >
+    #
+    #      S_est = - < ln q >
+    #            = - < ln (u'/N_est) >
+    #            = - < ln C + ln u - ln (C <u/p>) >
+    #            = - < ln u + ln C - ln C  - ln <u/p> >
+    #            = - < ln u - ln <u/p> >
+    #            = - < ln u > + ln <u/p>
+    #
+    # Uncertainty comes from eq. 13:
+    #
+    #      N_err^2 = 1/(k-1) sum( (u'/p - <u'/p>)^2 )
+    #              = 1/(k-1) sum( (C u/p - <C u/p>)^2 )
+    #              = C^2 std(u/p)^2
+    #      S_err = std(u'/p) / <u'/p> = (C std(u/p))/(C <u/p>) = std(u/p)/<u/p>
+    #
+    # So even though the constant C shows up in N_est, N_err, it cancels
+    # again when S_est, S_err is formed.
+    log_scale = max(eval_logp)
+    # print("max log sample: %g"%log_scale)
+    eval_logp -= log_scale
+
+    # Compute entropy and uncertainty in nats
+    rho = density(norm_points, entropy_points)
+    frac = exp(eval_logp)/rho
+    n_est, n_err = mean(frac), std(frac)
+    s_est = log(n_est) - mean(eval_logp)
+    s_err = n_err/n_est
+    #print(n_est, n_err, s_est/LN2, s_err/LN2)
+    ##print(np.median(frac), log(np.median(frac))/LN2, log(n_est)/LN2)
+    if False:
+        import pylab
+        idx = pylab.argsort(entropy_points[:,0])
+        pylab.figure()
+        pylab.subplot(221)
+        pylab.hist(points[:,0], bins=50, normed=True, log=True)
+        pylab.plot(entropy_points[idx,0], rho[idx], label='density')
+        pylab.plot(entropy_points[idx,0], exp(eval_logp+log_scale)[idx], label='p')
+        pylab.ylabel("p(x)")
+        pylab.legend()
+        pylab.subplot(222)
+        pylab.hist(points[:,0], bins=50, normed=True, log=False)
+        pylab.plot(entropy_points[idx,0], rho[idx], label='density')
+        pylab.plot(entropy_points[idx,0], exp(eval_logp+log_scale)[idx], label='p')
+        pylab.ylabel("p(x)")
+        pylab.legend()
+        pylab.subplot(212)
+        pylab.plot(entropy_points[idx,0], frac[idx], '.')
+        pylab.xlabel("P[0] value")
+        pylab.ylabel("p(x)/kernel density")
+
+    # return entropy and uncertainty in bits
+    return s_est/LN2, s_err/LN2
+
+
+class MVNEntropy(object):
+    """
+    Multivariate normal entropy approximation.
+
+    Uses Mardia's multivariate skewness and kurtosis test to estimate normality.
+
+    *x* is a set of points
+
+    *alpha* is the cutoff for the normality test.
+
+    *max_points* is the maximum number of points to use when computing the
+    entropy.  Since the normality test is $O(n^2)$ in memory and time,
+    where $n$ is the number of points, *max_points* defaults to 1000.
+
+    The returned object has the following attributes:
+
+        *p_kurtosis* is the p-value for the kurtosis normality test
+
+        *p_skewness* is the p-value for the skewness normality test
+
+        *reject_normal* is True if either the kurtosis or the skew test
+        fails
+
+        *entropy* is the estimated entropy of the best normal approximation
+        to the distribution
+
+    """
+    def __init__(self, x, alpha=0.05, max_points=1000):
+        # compute Mardia test coefficient
+        n, p = x.shape   # num points, num dimensions
+        mu = np.mean(x, axis=0)
+        C = np.cov(x.T, bias=1) if p>1 else np.array([[np.var(x.T, ddof=1)]])
+        # squared Mahalanobis distance matrix
+        # Note: this forms a full n x n matrix of distances, so will
+        # fail for a large number of points.  Kurtosis only requires
+        # the diagonal elements so can be computed cheaply.  If there
+        # is no order to the points, skew could be estimated using only
+        # the block diagonal
+        dx = (x - mu[None,:])[:max_points]
+        D = np.dot(dx, np.linalg.solve(C, dx.T))
+        kurtosis = np.sum(np.diag(D)**2)/n
+        skewness = np.sum(D**3)/n**2
+
+        kurtosis_stat = (kurtosis - p*(p+2)) / sqrt(8*p*(p+2)/n)
+        raw_skewness_stat = n*skewness/6
+        # Small sample correction converges to 1 as n increases, so it is
+        # always safe to apply it
+        small_sample_correction = (p+1)*(n+1)*(n+3)/((p+1)*(n+1)*n - n*6)
+        skewness_stat = raw_skewness_stat * small_sample_correction
+        dof = (p*(p+1)*(p+2))/6   # degrees of freedom for chisq test
+
+        self.p_kurtosis = 2*(1 - norm.cdf(abs(kurtosis_stat)))
+        self.p_skewness = 1 - chi2.cdf(skewness_stat, dof)
+        self.reject_normal = self.p_kurtosis < alpha or self.p_skewness < alpha
+        #print("kurtosis", kurtosis, kurtosis_stat, self.p_kurtosis)
+        #print("skewness", skewness, skewness_stat, self.p_skewness)
+        # compute entropy
+        self.entropy = cov_entropy(C)
+
+    def __str__(self):
+        return "H=%.1f bits%s"%(self.entropy, " (not normal)" if self.reject_normal else "")
+
+def cov_entropy(C):
+    """
+    Entropy estimate from covariance matrix C
+    """
+    return 0.5 * (len(C) * log2(2*pi*e) + log2(abs(np.linalg.det(C))))
+
+def mvn_entropy_test():
+    # Test against results from the R MVN package (using the web version)
+    # and the matlab Mskekur program (using Octave), both of which produce
+    # the same value.  Note that MVNEntropy uses the small sample correction
+    # for the skewness stat since it converges to the large sample value for
+    # large n.
+    x = np.array([
+        [2.4, 2.1, 2.4],
+        [4.5, 4.9, 5.7],
+        [3.5, 1.8, 3.9],
+        [3.9, 4.7, 4.7],
+        [6.7, 3.6, 5.9],
+        [4.0, 3.6, 2.9],
+        [5.3, 3.3, 6.1],
+        [5.7, 5.5, 6.2],
+        [5.2, 4.1, 6.4],
+        [2.4, 2.9, 3.2],
+        [3.2, 2.7, 4.0],
+        [2.7, 2.6, 4.1],
+    ])
+    M = MVNEntropy(x)
+    #print M
+    #print "%.15g %.15g %.15g"%(M.p_kurtosis, M.p_skewness, M.entropy)
+    assert abs(M.p_kurtosis - 0.265317890462476) <= 1e-10
+    assert abs(M.p_skewness - 0.773508066109368) <= 1e-10
+    assert abs(M.entropy - 5.7920040570988) <= 1e-10
+
+
+def _check_entropy(D, seed=1, N=10000, N_entropy=10000, N_norm=2500):
+    """
+    Check if entropy from a random draw matches analytic entropy.
+    """
+    state = np.random.get_state()
+    np.random.seed(seed)
+    try:
+        theta = D.rvs(size=N)
+        logp_theta = D.logpdf(theta)
+        logp_theta += 27  # result should be independent of scale factor
+        if getattr(D, 'dim', 1) == 1:
+            theta = theta.reshape(N, 1)
+        S, Serr = entropy(theta, logp_theta, N_entropy=N_entropy, N_norm=N_norm)
+    finally:
+        np.random.set_state(state)
+    #print "entropy", S, Serr, "target", D.entropy()/LN2
+    assert Serr < 0.05*S
+    assert abs(S - D.entropy()/LN2) < Serr
+
+def test():
+    """check entropy estimates from known distributions"""
+    from scipy import stats
+    _check_entropy(stats.norm(100,8), N=2000)
+    _check_entropy(stats.norm(100,8), N=12000)
+    _check_entropy(stats.multivariate_normal(cov=np.diag([1,12**2,0.2**2])))
+
+if __name__ == "__main__":  # pragma: no cover
+    test()
+    mvn_entropy_test()
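
As a quick cross-check of cov_entropy, the entropy of a multivariate normal
with covariance C is 0.5*log2((2*pi*e)^d * |C|) bits, which matches scipy's
analytic value converted from nats to bits (illustrative sketch, assuming
scipy and bumps are installed)::

    import numpy as np
    from scipy import stats
    from bumps.dream.entropy import cov_entropy

    C = np.diag([1.0, 12.0**2, 0.2**2])
    analytic_bits = stats.multivariate_normal(cov=C).entropy() / np.log(2)
    print(cov_entropy(C), analytic_bits)   # the two values should agree closely
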
diff --git a/bumps/dream/exppow.py b/bumps/dream/exppow.py
new file mode 100644
index 0000000..a59b9c7
--- /dev/null
+++ b/bumps/dream/exppow.py
@@ -0,0 +1,55 @@
+"""
+Exponential power density parameter calculator.
+"""
+
+from __future__ import division
+
+__all__ = ["exppow_pars"]
+
+from scipy.special import gamma
+from math import sqrt
+
+
+def exppow_pars(B):
+    r"""
+    Return c(B) and w(B) for the exponential power density:
+
+    .. math::
+
+        p(v|S,B) = \frac{w(B)}{S} \exp\left(-c(B) |v/S|^{2/(1+B)}\right)
+
+    *B* in (-1,1] is a measure of kurtosis::
+
+        B = 1: double exponential
+        B = 0: normal
+        B -> -1: uniform
+
+    [1] Thiemann, M., M. Trosser, H. Gupta, and S. Sorooshian (2001).
+    *Bayesian recursive parameter estimation for hydrologic models*,
+    Water Resour. Res. 37(10) 2521-2535.
+    """
+
+    # First calculate some dummy variables
+    A1 = gamma(3*(1+B)/2)
+    A2 = gamma((1+B)/2)
+    # And use these to derive Cb and Wb
+    cB = (A1/A2)**(1/(1+B))
+    wB = sqrt(A1/A2**3)/(1+B)
+
+    return cB, wB
+
+
+def test():
+    import math
+    cB, wB = exppow_pars(13)
+    assert abs(cB - 12.8587702619708) < 1e-13
+    assert abs(wB - 5766.80847609837) < 1e-11
+    # Check that beta=0 yields a normal distribution
+    cB, wB = exppow_pars(0)
+    assert abs(2*math.pi*wB**2 - 1) < 1e-14
+    assert abs(cB - 0.5) < 1e-14
+
+
+if __name__ == "__main__":
+    #print calc_CbWb(13)
+    test()
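
A sketch of how the returned parameters plug into the density above
(illustrative values; assumes bumps is installed).  For B=0 the result
reduces to the normal density with sigma=S::

    import math
    from bumps.dream.exppow import exppow_pars

    B, S, v = 0.0, 2.0, 0.5
    cB, wB = exppow_pars(B)
    p = (wB/S) * math.exp(-cB * abs(v/S)**(2.0/(1.0 + B)))
    print(p)   # for B=0 this equals the normal density with sigma=S at v
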
diff --git a/bumps/dream/formatnum.py b/bumps/dream/formatnum.py
new file mode 100644
index 0000000..d33d0d0
--- /dev/null
+++ b/bumps/dream/formatnum.py
@@ -0,0 +1,451 @@
+# This program is public domain
+# Author: Paul Kienzle
+"""
+Format values and uncertainties nicely for printing.
+
+The formatted value uses only the number of digits warranted by
+the uncertainty in the measurement.
+
+:func:`format_value` shows the value without the uncertainty.
+
+:func:`format_uncertainty_pm` shows the expanded format v +/- err.
+
+:func:`format_uncertainty_compact` shows the compact format v(##),
+where the number in parentheses is the uncertainty in the last two digits of v.
+
+:func:`format_uncertainty` uses the compact format by default, but this
+can be changed to use the expanded +/- format by setting
+format_uncertainty.compact to False.  This is a global setting which should
+be considered a user preference.  Any library code that depends on a specific
+format style should use the corresponding formatting function.
+
+If the uncertainty is 0 or not otherwise provided, the simple
+%g floating point format option is used.
+
+Infinite and indefinite numbers are represented as inf and NaN.
+
+Example::
+
+    >>> v,dv = 757.2356,0.01032
+    >>> print(format_uncertainty_pm(v,dv))
+    757.236 +/- 0.010
+    >>> print(format_uncertainty_compact(v,dv))
+    757.236(10)
+    >>> print(format_uncertainty(v,dv))
+    757.236(10)
+    >>> format_uncertainty.compact = False
+    >>> print(format_uncertainty(v,dv))
+    757.236 +/- 0.010
+"""
+from __future__ import division
+import math
+
+from numpy import isinf, isnan, inf, NaN
+
+__all__ = ['format_value', 'format_uncertainty',
+           'format_uncertainty_compact', 'format_uncertainty_pm',
+           ]
+
+
+# Coordinating scales across a set of numbers is not supported.  For easy
+# comparison a set of numbers should be shown in the same scale.  One could
+# force this from the outside by adding scale parameter (either 10**n, n, or
+# a string representing the desired SI prefix) and having a separate routine
+# which computes the scale given a set of values.
+
+# Coordinating scales with units offers its own problems.  Again, the user
+# may want to force particular units.  This can be done outside of the
+# formatting routines by scaling the numbers to the appropriate units then
+# forcing them to print with scale 10**0.  If this is a common operation,
+# however, it may want to happen inside.
+
+# The value e<n> is currently formatted into the number.  Alternatively this
+# scale factor could be returned so that the user can choose the appropriate
+# SI prefix when printing the units.  This gets tricky when talking about
+# composite units such as 2.3e-3 m**2 -> 2300 mm**2, and with volumes
+# such as 1 g/cm**3 -> 1 kg/L.
+
+
+def format_value(value, uncertainty):
+    """
+    Given *value* v and *uncertainty* dv, return a string v which is
+    the value formatted with the appropriate number of digits.
+    """
+    return _format_uncertainty(value, uncertainty, compact=None)
+
+
+def format_uncertainty_pm(value, uncertainty):
+    """
+    Given *value* v and *uncertainty* dv, return a string v +/- dv.
+    """
+    return _format_uncertainty(value, uncertainty, compact=False)
+
+
+def format_uncertainty_compact(value, uncertainty):
+    """
+    Given *value* v and *uncertainty* dv, return the compact
+    representation v(##), where ## are the first two digits of
+    the uncertainty.
+    """
+    return _format_uncertainty(value, uncertainty, compact=True)
+
+
+def format_uncertainty(value, uncertainty):
+    """
+    Value and uncertainty formatter.
+
+    Either the expanded v +/- dv form or the compact v(##) form will be
+    used depending on whether *format_uncertainty.compact* is True or False.
+    The default is True.
+    """
+    return _format_uncertainty(value, uncertainty, format_uncertainty.compact)
+format_uncertainty.compact = True
+
+
+def _format_uncertainty(value, uncertainty, compact):
+    """
+    Implementation of both the compact and the +/- formats.
+    """
+    # Handle indefinite value
+    if isinf(value):
+        return "inf" if value > 0 else "-inf"
+    if isnan(value):
+        return "NaN"
+
+    # Handle indefinite uncertainty
+    if uncertainty is None or uncertainty <= 0 or isnan(uncertainty):
+        return "%g" % value
+    if isinf(uncertainty):
+        if compact is None:
+            return "%.2g" % value
+        elif compact:
+            return "%.2g(inf)" % value
+        else:
+            return "%.2g +/- inf" % value
+
+    # Handle zero and negative values
+    sign = "-" if value < 0 else ""
+    value = abs(value)
+
+    # Determine scale of value and error
+    err_place = int(math.floor(math.log10(uncertainty)))
+    if value == 0:
+        val_place = err_place - 1
+    else:
+        val_place = int(math.floor(math.log10(value)))
+
+    if err_place > val_place:
+        # Degenerate case: error bigger than value
+        # The mantissa is 0.#(##)e#, 0.0#(##)e# or 0.00#(##)e#
+        val_place = err_place + 2
+    elif err_place == val_place:
+        # Degenerate case: error and value the same order of magnitude
+        # The value is ##(##)e#, #.#(##)e# or 0.##(##)e#
+        val_place = err_place + 1
+    elif err_place <= 1 and val_place >= -3:
+        # Normal case: nice numbers and errors
+        # The value is ###.###(##)
+        val_place = 0
+    else:
+        # Extreme cases: zeros before value or after error
+        # The value is ###.###(##)e#, ##.####(##)e# or #.#####(##)e#
+        pass
+
+    # Force engineering notation, with exponent a multiple of 3
+    val_place = int(math.floor(val_place / 3.)) * 3
+
+    # Format the result
+    digits_after_decimal = abs(val_place - err_place + 1)
+    # Only use one digit of uncertainty if no precision included in result
+    #if compact is None: digits_after_decimal -= 1
+    val_str = "%.*f" % (digits_after_decimal, value / 10. ** val_place)
+    exp_str = "e%d" % val_place if val_place != 0 else ""
+    if compact is None:
+        result = "".join((sign, val_str, exp_str))
+    elif compact:
+        err_str = "(%2d)" % int(uncertainty / 10. ** (err_place - 1) + 0.5)
+        result = "".join((sign, val_str, err_str, exp_str))
+    else:
+        err_str = "%.*f" % (digits_after_decimal,
+                            uncertainty / 10. ** val_place)
+        result = "".join((sign, val_str, exp_str + " +/- ", err_str, exp_str))
+    # print sign,value, uncertainty, "=>", result
+    return result
+
+
+def test_compact():
+    # Oops... renamed function after writing tests
+    value_str = format_uncertainty_compact
+
+    # val_place > err_place
+    assert value_str(1235670, 766000) == "1.24(77)e6"
+    assert value_str(123567., 76600) == "124(77)e3"
+    assert value_str(12356.7, 7660) == "12.4(77)e3"
+    assert value_str(1235.67, 766) == "1.24(77)e3"
+    assert value_str(123.567, 76.6) == "124(77)"
+    assert value_str(12.3567, 7.66) == "12.4(77)"
+    assert value_str(1.23567, .766) == "1.24(77)"
+    assert value_str(.123567, .0766) == "0.124(77)"
+    assert value_str(.0123567, .00766) == "0.0124(77)"
+    assert value_str(.00123567, .000766) == "0.00124(77)"
+    assert value_str(.000123567, .0000766) == "124(77)e-6"
+    assert value_str(.0000123567, .00000766) == "12.4(77)e-6"
+    assert value_str(.00000123567, .000000766) == "1.24(77)e-6"
+    assert value_str(.000000123567, .0000000766) == "124(77)e-9"
+    assert value_str(.00000123567, .0000000766) == "1.236(77)e-6"
+    assert value_str(.0000123567, .0000000766) == "12.357(77)e-6"
+    assert value_str(.000123567, .0000000766) == "123.567(77)e-6"
+    assert value_str(.00123567, .000000766) == "0.00123567(77)"
+    assert value_str(.0123567, .00000766) == "0.0123567(77)"
+    assert value_str(.123567, .0000766) == "0.123567(77)"
+    assert value_str(1.23567, .000766) == "1.23567(77)"
+    assert value_str(12.3567, .00766) == "12.3567(77)"
+    assert value_str(123.567, .0764) == "123.567(76)"
+    assert value_str(1235.67, .764) == "1235.67(76)"
+    assert value_str(12356.7, 7.64) == "12356.7(76)"
+    assert value_str(123567, 76.4) == "123567(76)"
+    assert value_str(1235670, 764) == "1.23567(76)e6"
+    assert value_str(12356700, 764) == "12.35670(76)e6"
+    assert value_str(123567000, 764) == "123.56700(76)e6"
+    assert value_str(123567000, 7640) == "123.5670(76)e6"
+    assert value_str(1235670000, 76400) == "1.235670(76)e9"
+
+    # val_place == err_place
+    assert value_str(123567, 764000) == "0.12(76)e6"
+    assert value_str(12356.7, 76400) == "12(76)e3"
+    assert value_str(1235.67, 7640) == "1.2(76)e3"
+    assert value_str(123.567, 764) == "0.12(76)e3"
+    assert value_str(12.3567, 76.4) == "12(76)"
+    assert value_str(1.23567, 7.64) == "1.2(76)"
+    assert value_str(.123567, .764) == "0.12(76)"
+    assert value_str(.0123567, .0764) == "12(76)e-3"
+    assert value_str(.00123567, .00764) == "1.2(76)e-3"
+    assert value_str(.000123567, .000764) == "0.12(76)e-3"
+
+    # val_place == err_place-1
+    assert value_str(123567, 7640000) == "0.1(76)e6"
+    assert value_str(12356.7, 764000) == "0.01(76)e6"
+    assert value_str(1235.67, 76400) == "0.001(76)e6"
+    assert value_str(123.567, 7640) == "0.1(76)e3"
+    assert value_str(12.3567, 764) == "0.01(76)e3"
+    assert value_str(1.23567, 76.4) == "0.001(76)e3"
+    assert value_str(.123567, 7.64) == "0.1(76)"
+    assert value_str(.0123567, .764) == "0.01(76)"
+    assert value_str(.00123567, .0764) == "0.001(76)"
+    assert value_str(.000123567, .00764) == "0.1(76)e-3"
+
+    # val_place == err_place-2
+    assert value_str(12356700, 7640000000) == "0.0(76)e9"
+    assert value_str(1235670, 764000000) == "0.00(76)e9"
+    assert value_str(123567, 76400000) == "0.000(76)e9"
+    assert value_str(12356, 7640000) == "0.0(76)e6"
+    assert value_str(1235, 764000) == "0.00(76)e6"
+    assert value_str(123, 76400) == "0.000(76)e6"
+    assert value_str(12, 7640) == "0.0(76)e3"
+    assert value_str(1, 764) == "0.00(76)e3"
+    assert value_str(0.1, 76.4) == "0.000(76)e3"
+    assert value_str(0.01, 7.64) == "0.0(76)"
+    assert value_str(0.001, 0.764) == "0.00(76)"
+    assert value_str(0.0001, 0.0764) == "0.000(76)"
+    assert value_str(0.00001, 0.00764) == "0.0(76)e-3"
+
+    # val_place == err_place-3
+    assert value_str(12356700, 76400000000) == "0.000(76)e12"
+    assert value_str(1235670, 7640000000) == "0.0(76)e9"
+    assert value_str(123567, 764000000) == "0.00(76)e9"
+    assert value_str(12356, 76400000) == "0.000(76)e9"
+    assert value_str(1235, 7640000) == "0.0(76)e6"
+    assert value_str(123, 764000) == "0.00(76)e6"
+    assert value_str(12, 76400) == "0.000(76)e6"
+    assert value_str(1, 7640) == "0.0(76)e3"
+    assert value_str(0.1, 764) == "0.00(76)e3"
+    assert value_str(0.01, 76.4) == "0.000(76)e3"
+    assert value_str(0.001, 7.64) == "0.0(76)"
+    assert value_str(0.0001, 0.764) == "0.00(76)"
+    assert value_str(0.00001, 0.0764) == "0.000(76)"
+    assert value_str(0.000001, 0.00764) == "0.0(76)e-3"
+
+    # Zero values
+    assert value_str(0, 7640000) == "0.0(76)e6"
+    assert value_str(0, 764000) == "0.00(76)e6"
+    assert value_str(0,  76400) == "0.000(76)e6"
+    assert value_str(0,   7640) == "0.0(76)e3"
+    assert value_str(0,    764) == "0.00(76)e3"
+    assert value_str(0,     76.4) == "0.000(76)e3"
+    assert value_str(0,      7.64) == "0.0(76)"
+    assert value_str(0,      0.764) == "0.00(76)"
+    assert value_str(0,      0.0764) == "0.000(76)"
+    assert value_str(0,      0.00764) == "0.0(76)e-3"
+    assert value_str(0,      0.000764) == "0.00(76)e-3"
+    assert value_str(0,      0.0000764) == "0.000(76)e-3"
+
+    # negative values
+    assert value_str(-1235670, 765000) == "-1.24(77)e6"
+    assert value_str(-1.23567, .766) == "-1.24(77)"
+    assert value_str(-.00000123567, .0000000766) == "-1.236(77)e-6"
+    assert value_str(-12356.7, 7.64) == "-12356.7(76)"
+    assert value_str(-123.567, 764) == "-0.12(76)e3"
+    assert value_str(-1235.67, 76400) == "-0.001(76)e6"
+    assert value_str(-.000123567, .00764) == "-0.1(76)e-3"
+    assert value_str(-12356, 7640000) == "-0.0(76)e6"
+    assert value_str(-12, 76400) == "-0.000(76)e6"
+    assert value_str(-0.0001, 0.764) == "-0.00(76)"
+
+    # non-finite values
+    assert value_str(-inf, None) == "-inf"
+    assert value_str(inf, None) == "inf"
+    assert value_str(NaN, None) == "NaN"
+
+    # bad or missing uncertainty
+    assert value_str(-1.23567, NaN) == "-1.23567"
+    assert value_str(-1.23567, -inf) == "-1.23567"
+    assert value_str(-1.23567, -0.1) == "-1.23567"
+    assert value_str(-1.23567, 0) == "-1.23567"
+    assert value_str(-1.23567, None) == "-1.23567"
+    assert value_str(-1.23567, inf) == "-1.2(inf)"
+
+
+def test_pm():
+    # Oops... renamed function after writing tests
+    value_str = format_uncertainty_pm
+
+    # val_place > err_place
+    assert value_str(1235670, 766000) == "1.24e6 +/- 0.77e6"
+    assert value_str(123567., 76600) == "124e3 +/- 77e3"
+    assert value_str(12356.7,  7660) == "12.4e3 +/- 7.7e3"
+    assert value_str(1235.67,   766) == "1.24e3 +/- 0.77e3"
+    assert value_str(123.567,    76.6) == "124 +/- 77"
+    assert value_str(12.3567,     7.66) == "12.4 +/- 7.7"
+    assert value_str(1.23567,      .766) == "1.24 +/- 0.77"
+    assert value_str(.123567,      .0766) == "0.124 +/- 0.077"
+    assert value_str(.0123567,     .00766) == "0.0124 +/- 0.0077"
+    assert value_str(.00123567,    .000766) == "0.00124 +/- 0.00077"
+    assert value_str(.000123567,   .0000766) == "124e-6 +/- 77e-6"
+    assert value_str(.0000123567,  .00000766) == "12.4e-6 +/- 7.7e-6"
+    assert value_str(.00000123567, .000000766) == "1.24e-6 +/- 0.77e-6"
+    assert value_str(.000000123567, .0000000766) == "124e-9 +/- 77e-9"
+    assert value_str(.00000123567, .0000000766) == "1.236e-6 +/- 0.077e-6"
+    assert value_str(.0000123567,  .0000000766) == "12.357e-6 +/- 0.077e-6"
+    assert value_str(.000123567,   .0000000766) == "123.567e-6 +/- 0.077e-6"
+    assert value_str(.00123567,    .000000766) == "0.00123567 +/- 0.00000077"
+    assert value_str(.0123567,     .00000766) == "0.0123567 +/- 0.0000077"
+    assert value_str(.123567,      .0000766) == "0.123567 +/- 0.000077"
+    assert value_str(1.23567,      .000766) == "1.23567 +/- 0.00077"
+    assert value_str(12.3567,      .00766) == "12.3567 +/- 0.0077"
+    assert value_str(123.567,      .0764) == "123.567 +/- 0.076"
+    assert value_str(1235.67,      .764) == "1235.67 +/- 0.76"
+    assert value_str(12356.7,     7.64) == "12356.7 +/- 7.6"
+    assert value_str(123567,     76.4) == "123567 +/- 76"
+    assert value_str(1235670,   764) == "1.23567e6 +/- 0.00076e6"
+    assert value_str(12356700,  764) == "12.35670e6 +/- 0.00076e6"
+    assert value_str(123567000, 764) == "123.56700e6 +/- 0.00076e6"
+    assert value_str(123567000, 7640) == "123.5670e6 +/- 0.0076e6"
+    assert value_str(1235670000, 76400) == "1.235670e9 +/- 0.000076e9"
+
+    # val_place == err_place
+    assert value_str(123567, 764000) == "0.12e6 +/- 0.76e6"
+    assert value_str(12356.7, 76400) == "12e3 +/- 76e3"
+    assert value_str(1235.67, 7640) == "1.2e3 +/- 7.6e3"
+    assert value_str(123.567, 764) == "0.12e3 +/- 0.76e3"
+    assert value_str(12.3567, 76.4) == "12 +/- 76"
+    assert value_str(1.23567, 7.64) == "1.2 +/- 7.6"
+    assert value_str(.123567, .764) == "0.12 +/- 0.76"
+    assert value_str(.0123567, .0764) == "12e-3 +/- 76e-3"
+    assert value_str(.00123567, .00764) == "1.2e-3 +/- 7.6e-3"
+    assert value_str(.000123567, .000764) == "0.12e-3 +/- 0.76e-3"
+
+    # val_place == err_place-1
+    assert value_str(123567, 7640000) == "0.1e6 +/- 7.6e6"
+    assert value_str(12356.7, 764000) == "0.01e6 +/- 0.76e6"
+    assert value_str(1235.67, 76400) == "0.001e6 +/- 0.076e6"
+    assert value_str(123.567, 7640) == "0.1e3 +/- 7.6e3"
+    assert value_str(12.3567, 764) == "0.01e3 +/- 0.76e3"
+    assert value_str(1.23567, 76.4) == "0.001e3 +/- 0.076e3"
+    assert value_str(.123567, 7.64) == "0.1 +/- 7.6"
+    assert value_str(.0123567, .764) == "0.01 +/- 0.76"
+    assert value_str(.00123567, .0764) == "0.001 +/- 0.076"
+    assert value_str(.000123567, .00764) == "0.1e-3 +/- 7.6e-3"
+
+    # val_place == err_place-2
+    assert value_str(12356700, 7640000000) == "0.0e9 +/- 7.6e9"
+    assert value_str(1235670, 764000000) == "0.00e9 +/- 0.76e9"
+    assert value_str(123567, 76400000) == "0.000e9 +/- 0.076e9"
+    assert value_str(12356, 7640000) == "0.0e6 +/- 7.6e6"
+    assert value_str(1235, 764000) == "0.00e6 +/- 0.76e6"
+    assert value_str(123, 76400) == "0.000e6 +/- 0.076e6"
+    assert value_str(12, 7640) == "0.0e3 +/- 7.6e3"
+    assert value_str(1, 764) == "0.00e3 +/- 0.76e3"
+    assert value_str(0.1, 76.4) == "0.000e3 +/- 0.076e3"
+    assert value_str(0.01, 7.64) == "0.0 +/- 7.6"
+    assert value_str(0.001, 0.764) == "0.00 +/- 0.76"
+    assert value_str(0.0001, 0.0764) == "0.000 +/- 0.076"
+    assert value_str(0.00001, 0.00764) == "0.0e-3 +/- 7.6e-3"
+
+    # val_place == err_place-3
+    assert value_str(12356700, 76400000000) == "0.000e12 +/- 0.076e12"
+    assert value_str(1235670, 7640000000) == "0.0e9 +/- 7.6e9"
+    assert value_str(123567, 764000000) == "0.00e9 +/- 0.76e9"
+    assert value_str(12356, 76400000) == "0.000e9 +/- 0.076e9"
+    assert value_str(1235, 7640000) == "0.0e6 +/- 7.6e6"
+    assert value_str(123, 764000) == "0.00e6 +/- 0.76e6"
+    assert value_str(12, 76400) == "0.000e6 +/- 0.076e6"
+    assert value_str(1, 7640) == "0.0e3 +/- 7.6e3"
+    assert value_str(0.1, 764) == "0.00e3 +/- 0.76e3"
+    assert value_str(0.01, 76.4) == "0.000e3 +/- 0.076e3"
+    assert value_str(0.001, 7.64) == "0.0 +/- 7.6"
+    assert value_str(0.0001, 0.764) == "0.00 +/- 0.76"
+    assert value_str(0.00001, 0.0764) == "0.000 +/- 0.076"
+    assert value_str(0.000001, 0.00764) == "0.0e-3 +/- 7.6e-3"
+
+    # Zero values
+    assert value_str(0, 7640000) == "0.0e6 +/- 7.6e6"
+    assert value_str(0, 764000) == "0.00e6 +/- 0.76e6"
+    assert value_str(0,  76400) == "0.000e6 +/- 0.076e6"
+    assert value_str(0,   7640) == "0.0e3 +/- 7.6e3"
+    assert value_str(0,    764) == "0.00e3 +/- 0.76e3"
+    assert value_str(0,     76.4) == "0.000e3 +/- 0.076e3"
+    assert value_str(0,      7.64) == "0.0 +/- 7.6"
+    assert value_str(0,      0.764) == "0.00 +/- 0.76"
+    assert value_str(0,      0.0764) == "0.000 +/- 0.076"
+    assert value_str(0,      0.00764) == "0.0e-3 +/- 7.6e-3"
+    assert value_str(0,      0.000764) == "0.00e-3 +/- 0.76e-3"
+    assert value_str(0,      0.0000764) == "0.000e-3 +/- 0.076e-3"
+
+    # negative values
+    assert value_str(-1235670, 766000) == "-1.24e6 +/- 0.77e6"
+    assert value_str(-1.23567, .766) == "-1.24 +/- 0.77"
+    assert value_str(-.00000123567, .0000000766) == "-1.236e-6 +/- 0.077e-6"
+    assert value_str(-12356.7, 7.64) == "-12356.7 +/- 7.6"
+    assert value_str(-123.567, 764) == "-0.12e3 +/- 0.76e3"
+    assert value_str(-1235.67, 76400) == "-0.001e6 +/- 0.076e6"
+    assert value_str(-.000123567, .00764) == "-0.1e-3 +/- 7.6e-3"
+    assert value_str(-12356, 7640000) == "-0.0e6 +/- 7.6e6"
+    assert value_str(-12, 76400) == "-0.000e6 +/- 0.076e6"
+    assert value_str(-0.0001, 0.764) == "-0.00 +/- 0.76"
+
+    # non-finite values
+    assert value_str(-inf, None) == "-inf"
+    assert value_str(inf, None) == "inf"
+    assert value_str(NaN, None) == "NaN"
+
+    # bad or missing uncertainty
+    assert value_str(-1.23567, NaN) == "-1.23567"
+    assert value_str(-1.23567, -inf) == "-1.23567"
+    assert value_str(-1.23567, -0.1) == "-1.23567"
+    assert value_str(-1.23567, 0) == "-1.23567"
+    assert value_str(-1.23567, None) == "-1.23567"
+    assert value_str(-1.23567, inf) == "-1.2 +/- inf"
+
+
+def test():
+    # Check compact and plus/minus formats
+    test_compact()
+    test_pm()
+    # Check that the default is the compact format
+    assert format_uncertainty(-1.23567, 0.766) == "-1.24(77)"
+
+    import doctest
+    doctest.testmod()
+
+if __name__ == "__main__":
+    test()
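
Two further illustrative calls showing the engineering-notation behaviour
described above (exponents forced to multiples of 3); the expected outputs
are taken from the tests in this file (assumes bumps is installed)::

    from bumps.dream.formatnum import format_value, format_uncertainty_compact

    print(format_value(1235670, 766000))                        # 1.24e6
    print(format_uncertainty_compact(0.000123567, 0.0000766))   # 124(77)e-6
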
diff --git a/bumps/dream/gelman.py b/bumps/dream/gelman.py
new file mode 100644
index 0000000..984a978
--- /dev/null
+++ b/bumps/dream/gelman.py
@@ -0,0 +1,81 @@
+
+"""
+Convergence test statistic from Gelman and Rubin, 1992.
+"""
+
+from __future__ import division
+
+__all__ = ["gelman"]
+
+from numpy import var, mean, ones, sqrt
+
+
+def gelman(sequences, portion=0.5):
+    """
+    Calculates the R-statistic convergence diagnostic
+
+    For more information please refer to: Gelman, A. and D.B. Rubin, 1992.
+    Inference from Iterative Simulation Using Multiple Sequences,
+    Statistical Science, Volume 7, Issue 4, 457-472.
+    doi:10.1214/ss/1177011136
+    """
+
+    # Find the size of the sample
+    chain_len, nchains, nvar = sequences.shape
+    #print sequences[:20, 0, 0]
+
+    # Only use the last portion of the sample
+    chain_len = int(chain_len*portion)
+    sequences = sequences[-chain_len:]
+
+    if chain_len < 2:
+        # Set the R-statistic to a large value
+        r_stat = -2 * ones(nvar)
+    else:
+        # Step 1: Determine the sequence means
+        mean_seq = mean(sequences, axis=0)
+
+        # Step 1: Determine the variance between the sequence means
+        b = chain_len * var(mean_seq, axis=0, ddof=1)
+
+        # Step 2: Compute the variance of the various sequences
+        var_seq = var(sequences, axis=0, ddof=1)
+
+        # Step 2: Calculate the average of the within sequence variances
+        w = mean(var_seq, axis=0)
+
+        # Step 3: Estimate the target mean
+        #mu = mean(mean_seq)
+
+        # Step 4: Estimate the target variance (Eq. 3)
+        sigma2 = ((chain_len - 1)/chain_len) * w + (1/chain_len) * b
+
+        # Step 5: Compute the R-statistic
+        r_stat = sqrt((nchains + 1)/nchains * sigma2 / w
+                      - (chain_len-1)/nchains/chain_len)
+        #par=2
+        #print chain_len,b[par],var_seq[...,par],w[par],r_stat[par]
+
+    return r_stat
+
+
+def test():
+    from numpy import reshape, arange, transpose
+    from numpy.linalg import norm
+    # Target values computed from octave:
+    #    format long
+    #    s = reshape([1:15*6*7],[15,6,7]);
+    #    r = gelman(s,struct('n',6,'seq',7))
+    s = reshape(arange(1.0, 15*6*7+1)**-2, (15, 6, 7), order='F')
+    s = transpose(s, [0, 2, 1])
+    target = [1.06169861367116, 2.75325774624905, 4.46256647696399,
+              6.12792266170178, 7.74538715553575, 9.31276519155232]
+    r = gelman(s, portion=1)
+    #print r
+    #print "target", array(target), "\nactual", r
+    assert norm(r-target) < 1e-14
+    r = gelman(s, portion=0.1)
+    assert norm(r - [-2, -2, -2, -2, -2, -2]) == 0
+
+if __name__ == "__main__":
+    test()
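
An illustrative call on synthetic, well-mixed chains, where the R-statistic
should be close to 1 (assumes bumps is installed; array sizes are arbitrary)::

    import numpy as np
    from bumps.dream.gelman import gelman

    chain_len, nchains, nvar = 500, 6, 3
    sequences = np.random.randn(chain_len, nchains, nvar)   # identical stationary chains
    print(gelman(sequences, portion=0.5))                    # values near 1.0
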
diff --git a/bumps/dream/geweke.py b/bumps/dream/geweke.py
new file mode 100644
index 0000000..1965d60
--- /dev/null
+++ b/bumps/dream/geweke.py
@@ -0,0 +1,63 @@
+"""
+Convergence test statistic from Geweke, 1992.
+"""
+
+from __future__ import division
+
+__all__ = ["geweke"]
+
+from numpy import var, mean, ones, sqrt, reshape, log10, abs
+
+
+def geweke(sequences, portion=0.25):
+    """
+    Calculates the Geweke convergence diagnostic
+
+    Refer to:
+
+        pymc-devs.github.com/pymc/modelchecking.html#informal-methods
+        support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introbayes_sect008.html
+    
+    """
+
+    # Find the size of the sample
+    chain_len, nchains, nvar = sequences.shape
+    z_stat = -2*ones(nvar)
+    if chain_len >= 2:
+        # Only use the last portion of the sample
+        try:
+            front_portion, back_portion = portion
+        except TypeError:
+            front_portion = back_portion = portion
+        front_len = int(chain_len*front_portion)
+        back_len = int(chain_len*back_portion)
+        #print "STARTING SHAPE", sequences.shape
+        seq1 = reshape(sequences[:front_len, :, :], (front_len*nchains, nvar))
+        seq2 = reshape(sequences[-back_len:, :, :], (back_len*nchains, nvar))
+        #print "SEQ1", seq1.shape, 'SEQ2', seq2.shape
+        # Step 1: Determine the sequence means
+        meanseq1 = mean(seq1, axis=0)
+        meanseq2 = mean(seq2, axis=0)
+        #print "SHAPEs", meanseq1.shape, meanseq2.shape
+        var1 = var(seq1, axis=0)
+        var2 = var(seq2, axis=0)
+        denom = sqrt(var1+var2)
+        z_stat[denom>0] = (meanseq1 - meanseq2)[denom>0]/denom[denom>0]
+        
+        # z_stat now holds a Z score for each parameter, pooled over chains.
+        
+        # To make it easier to look at, return the average for the vars.
+        if 0:
+            avg_z = mean(z_stat, axis=0)
+            lavg_z = log10(abs(avg_z))
+            return lavg_z.tolist()
+        if 0:
+            avg_z = z_stat
+            lavg_z = log10(abs(avg_z))
+            return lavg_z.flatten().tolist()
+        else:
+            return z_stat.flatten().tolist()
+
+    # chain_len < 2: not enough samples to compare; return the sentinel values
+    return z_stat.tolist()
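
An illustrative call on synthetic stationary chains, where the scores should
be near zero (assumes bumps is installed; array sizes are arbitrary)::

    import numpy as np
    from bumps.dream.geweke import geweke

    sequences = np.random.randn(1000, 4, 2)    # (chain_len, nchains, nvar)
    print(geweke(sequences, portion=0.25))     # near-zero scores for stationary chains
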
diff --git a/bumps/dream/initpop.py b/bumps/dream/initpop.py
new file mode 100644
index 0000000..d2c0165
--- /dev/null
+++ b/bumps/dream/initpop.py
@@ -0,0 +1,98 @@
+"""
+Population initialization routines.
+
+To start the analysis an initial population is required.  This will be
+an array of size N x M, where N is the number of Markov chains and M is
+the number of dimensions in the fitting problem.
+
+Two functions are provided:
+
+1. lhs_init(N, bounds) returns a latin hypercube sampling, which tests every
+parameter at each of N levels.
+
+2. cov_init(N, x, cov) returns a Gaussian sample along the ellipse
+defined by the covariance matrix, cov.  Covariance defaults to
+diag(dx) if dx is provided as a parameter, or to I if it is not.
+
+Additional options are random box: rand(M, N) or random scatter: randn(M, N).
+"""
+
+from __future__ import division, print_function
+
+__all__ = ['lhs_init', 'cov_init']
+
+from numpy import eye, diag, asarray, empty
+from . import util
+
+
+def lhs_init(N, bounds):
+    """
+    Latin Hypercube Sampling
+
+    Returns an array whose columns each have *N* samples from equally spaced
+    bins between *bounds=(xmin, xmax)* for the column.  DREAM bounds
+    objects, with bounds.low and bounds.high can be used as well.
+
+    Note: Indefinite ranges are not supported.
+    """
+    try:
+        xmin, xmax = bounds.low, bounds.high
+    except AttributeError:
+        xmin, xmax = bounds
+
+    # Define the size of xmin
+    nvar = len(xmin)
+    # Initialize array ran with random numbers
+    ran = util.rng.rand(N, nvar)
+
+    # Initialize array s with zeros
+    s = empty((N, nvar))
+
+    # Now fill s
+    for j in range(nvar):
+        # Random permutation
+        idx = util.rng.permutation(N)+1
+        p = (idx-ran[:, j])/N
+        s[:, j] = xmin[j] + p*(xmax[j]-xmin[j])
+
+    return s
+
+
+def cov_init(N, x, cov=None, dx=None):
+    """
+    Initialize *N* sets of random variables from a gaussian model.
+
+    The center is at *x* with an uncertainty ellipse specified by the
+    1-sigma independent uncertainty values *dx* or the full covariance
+    matrix uncertainty *cov*.
+
+    For example, create an initial population for 20 sequences for a
+    model with local minimum x with covariance matrix C::
+
+        pop = cov_init(cov=C, x=x, N=20)
+    """
+    #return mean + dot(util.rng.randn(N, len(mean)), chol(cov))
+    if cov is None and dx is None:
+        cov = eye(len(x))
+    elif cov is None:
+        cov = diag(asarray(dx)**2)
+    return util.rng.multivariate_normal(mean=x, cov=cov, size=N)
+
+
+def demo():
+    from numpy import arange
+    print("Three ways of calling cov_init:")
+    print("with cov", cov_init(N=4, x=[5, 6], cov=diag([0.1, 0.001])))
+    print("with dx", cov_init(N=4, x=[5, 6], dx=[0.1, 0.001]))
+    print("with nothing", cov_init(N=4, x=[5, 6]))
+    print("""
+The following array should have four columns.  Column 1 should have the
+numbers from 10 to 19, column 2 from 20 to 29, etc.  The columns are in
+random order with a random fractional part.
+""")
+    pop = lhs_init(N=10, bounds=(arange(1, 5), arange(2, 6)))*10
+    print(pop)
+
+
+if __name__ == "__main__":
+    demo()
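
A sketch of the bounds-object form of lhs_init mentioned above, using a
hypothetical stand-in with low/high attributes (assumes bumps is installed)::

    import numpy as np
    from bumps.dream.initpop import lhs_init

    class Bounds(object):            # hypothetical stand-in for a DREAM bounds object
        low = np.array([0.0, -1.0])
        high = np.array([1.0, 1.0])

    pop = lhs_init(N=8, bounds=Bounds())
    print(pop.shape)                 # (8, 2): one row per chain, one column per parameter
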
diff --git a/bumps/dream/ksmirnov.py b/bumps/dream/ksmirnov.py
new file mode 100644
index 0000000..29d932f
--- /dev/null
+++ b/bumps/dream/ksmirnov.py
@@ -0,0 +1,35 @@
+"""
+Kolmogorov-Smirnov test for MCMC convergence.
+
+Use the K-S tests to compare the distribution of values at the front of
+the chain to that at the end of the chain.  If the distributions are
+significantly different, then the MCMC chain has not converged.
+"""
+
+__all__ = ["ksmirnov"]
+
+from numpy import reshape, apply_along_axis
+from scipy.stats import ks_2samp
+
+
+def ksmirnov(seq, portion=0.25, filter_order=15):
+    """
+    Kolmogorov-Smirnov test of similarity between the empirical distribution
+    at the start and at the end of the chain.  Apply a median
+    filter (*filter_order*=15) on neighbouring K-S values to reduce variation in
+    the test statistic value.
+    """
+    chlen, nchains, nvars = seq.shape
+    count = int(portion*chlen*nchains)
+    n = filter_order
+    ks, p = apply_along_axis(lambda chain: _ksm(chain, n, count),
+                             0, reshape(seq, (chlen*nchains, nvars)))
+    return ks, p
+
+
+def _ksm(chain, n, count):
+    #return ks_2samp(chain[:count], chain[-count:])
+    ks, p = zip(*[ks_2samp(chain[i:count+i], chain[-count-n+i:-n+i])
+                  for i in range(n)])
+    return sorted(ks)[(n-1)//2], sorted(p)[(n-1)//2]
+
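
An illustrative call on synthetic chains, relying on the integer sample count
above (assumes bumps and scipy are installed; sizes are arbitrary)::

    import numpy as np
    from bumps.dream.ksmirnov import ksmirnov

    seq = np.random.randn(2000, 4, 3)    # (chain_len, nchains, nvars)
    ks, p = ksmirnov(seq, portion=0.25, filter_order=15)
    print(ks, p)                         # small p-values would flag non-convergence
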
diff --git a/bumps/dream/mahal.py b/bumps/dream/mahal.py
new file mode 100644
index 0000000..0220b9d
--- /dev/null
+++ b/bumps/dream/mahal.py
@@ -0,0 +1,52 @@
+# This program is public domain
+# Author: Paul Kienzle, April 2010
+"""
+Mahalanobis distance calculator
+
+Compute the
+`Mahalanobis distance <https://en.wikipedia.org/wiki/Mahalanobis_distance>`_
+between observations and a reference set.  The principal components of the
+reference set define the basis of the space for the observations.  The simple
+Euclidean distance is used within this space.
+"""
+
+__all__ = ["mahalanobis"]
+
+from numpy import dot, mean, sum
+from numpy.linalg import svd
+
+def mahalanobis(Y, X):
+    """
+    Returns the distances of the observations from a reference set.
+
+    Observations are stored in rows *Y* and the reference set in *X*.
+    """
+
+    M = mean(X, axis=0)                 # mean
+    Xc = X - mean(X, axis=0)            # center the reference
+    W = dot(Xc.T, Xc)/(Xc.shape[0] - 1) # covariance of reference
+    Yc = Y - M                          # center the observations
+    # Distance is diag(Yc * inv(W) * Yc.H)
+    # solve Wb = Yc.H using singular value decomposition because it is
+    # the most accurate with numpy; QR decomposition does not use pivoting,
+    # and is less accurate.  The built-in solve routine is between the two.
+    u, s, vh = svd(W, 0)
+    SVinv = vh.T.conj()/s
+    Uy = dot(u.T.conj(), Yc.T.conj())
+    b = dot(SVinv, Uy)
+
+    D = sum(Yc.T * b, axis=0)  # compute distance
+    return D
+
+
+def test():
+    from numpy import array
+    from numpy.linalg import norm
+
+    d = mahalanobis(array([[2, 3, 4], [2, 3, 4]]),
+                    array([[1, 0, 0], [2, 1, 0], [1, 1, 0], [2, 0, 1]]))
+    assert norm(d-[290.25, 290.25]) < 1e-12, "diff=%s" % str(d-[290.25, 290.25])
+
+
+if __name__ == "__main__":
+    test()
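
A sketch cross-checking mahalanobis() against the direct diag(Yc inv(W) Yc')
formula described above (illustrative random data; assumes bumps is installed)::

    import numpy as np
    from bumps.dream.mahal import mahalanobis

    X = np.random.randn(50, 3)      # reference set
    Y = np.random.randn(5, 3)       # observations
    Yc = Y - X.mean(axis=0)
    W = np.cov(X.T)                 # reference covariance (ddof=1)
    direct = np.einsum('ij,jk,ik->i', Yc, np.linalg.inv(W), Yc)
    print(np.allclose(mahalanobis(Y, X), direct))   # True
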
diff --git a/bumps/dream/matlab.py b/bumps/dream/matlab.py
new file mode 100644
index 0000000..c4bff0f
--- /dev/null
+++ b/bumps/dream/matlab.py
@@ -0,0 +1,270 @@
+"""
+Interface compatible with matlab dream.
+
+Usage
+-----
+
+This interface is identical to dream in matlab::
+
+    [Sequences, Reduced_Seq, X, output, hist_logp] = \
+      dream(MCMCPar, ParRange, Measurement, ModelName, Extra, option)
+
+With care, you will be able to use the same model definition file
+on both platforms, with only minor edits required to switch between them.
+Clearly, you won't be able to use if statements and loops since the
+syntaxes of python and matlab are incompatible.  Similarly, you will
+have to be careful about indentation and line breaks.  And create your
+model without comments.
+
+Python requires that structures be defined before you assign values to
+their fields, so you will also need the following lines.  Note however,
+that they are safe to use in matlab as well::
+
+    MCMCPar = struct()
+    Extra = struct()
+    Measurement = struct()
+    ParRange = struct()
+
+
+Another challenge is *ModelName*.  In matlab this is the name of the
+m-file that contains the model definition.  Following that convention,
+we will try using "from <ModelName> import <ModelName>", and if this
+fails, assume that *ModelName* is actually the function itself.  For
+this to work you will need to translate the function in ModelName.m
+to the equivalent function in ModelName.py.
+
+*option* is the same option number as before
+
+IPython usage
+-------------
+
+Within ipython you can interact with your models something like
+you do in matlab.  For example::
+
+    $ ipython -pylab
+    In [1]: from dream.matlab import *
+    In [2]: from dream import *
+    In [3]: %run example.m
+
+
+You can now use various dream visualization tools or use the matlab-like
+plotting functions from pylab::
+
+    In [4]: out.state.save('modeloutput')
+    In [5]: out.state.plot_state()
+
+Command line usage
+------------------
+
+You can also run a suitable m-file example from the command line.  This will
+place you at an ipython command line with all the variables from your
+m-file available.  For example::
+
+    python -m dream.matlab example.m
+    In [1]: out.state.save('modeloutput')
+    In [2]: out.state.plot_state()
+
+Script usage
+------------
+
+You can create a driver script which calls the m-file example and
+uses pylab commands to plot the results.  For example::
+
+    -- example.py --
+    #!/usr/bin/env python
+    from pylab import *
+    from dream.matlab import *
+    execfile('example.m')
+
+    from dream import *
+    out.state.save('modeloutput')
+    plot_state(out.state)
+
+This can be run from the command prompt::
+
+    $ python example.py
+
+"""
+
+__all__ = ['struct', 'dream', 'setup', 'convert_output']
+
+import numpy as np
+
+from .core import Dream
+from .model import Density, LogDensity, Simulation
+from .initpop import cov_init, lhs_init
+from .crossover import Crossover, AdaptiveCrossover
+
+
+class struct:
+    """
+    Matlab compatible structure creation.
+    """
+    def __init__(self, *pairs, **kw):
+        for k, v in zip(pairs[::2], pairs[1::2]):
+            setattr(self, k, v)
+        for k, v in kw.items():
+            setattr(self, k, v)
+
+    def __getattr__(self, k):
+        return None
+
+
+def dream(MCMCPar, ParRange, Measurement, ModelName, Extra, option):
+    """
+    Emulate the matlab dream call.
+    """
+    dreamer = setup(MCMCPar, ParRange, Measurement, ModelName, Extra, option)
+    dreamer.sample()
+    return convert_state(dreamer.state)
+
+
+def setup(MCMCPar, ParRange, Measurement, ModelName, Extra, option):
+    """
+    Convert matlab dream models to a python Dream object.
+    """
+    dreamer = Dream()
+
+    # Problem specification
+    bounds = ParRange.minn, ParRange.maxn
+    dreamer.bounds_style = Extra.BoundHandling
+    if ModelName == 'Banshp':
+        # specific properties of the banana function
+        # Extra.imat is computed from cmat
+        # MCMCPar.n is implicit in Extra.cmat
+        f = Banana(mu=Extra.mu.flatten(), bpar=Extra.bpar, cmat=Extra.cmat)
+        option = 4
+    else:
+        try:
+            f = None  # keep lint happy
+            # Try matlab style of having the function in the same named file.
+            exec("from "+ModelName+" import "+ModelName+" as f")
+        except ImportError:
+            # The import failed; hope the caller supplied a function instead.
+            f = ModelName
+
+    if option == 1:
+        model = Density(f, bounds=bounds)
+    elif option == 4:
+        model = LogDensity(f, bounds=bounds)
+    elif option in [2, 3, 5]:
+        # Measurement.N is implicit in Measurement.MeasData
+        model = Simulation(f, data=Measurement.MeasData, bounds=bounds,
+                           sigma=Measurement.Sigma, gamma=MCMCPar.Gamma)
+    else:
+        raise ValueError("option should be in 1 to 5")
+    dreamer.model = model
+
+    # Sampling parameters
+    if Extra.save_in_memory == 'Yes':
+        thinning = 1
+    elif Extra.reduced_sample_collection == 'Yes':
+        thinning = Extra.T
+    else:
+        thinning = 1
+    dreamer.thinning = thinning
+    dreamer.draws = MCMCPar.ndraw
+
+    # Outlier detection
+    T = MCMCPar.outlierTest
+    if T.endswith('_test'):
+        T = T[:-5]
+    dreamer.outlier_test = T
+
+    # DE parameters
+    dreamer.DE_steps = MCMCPar.steps
+    dreamer.DE_pairs = MCMCPar.DEpairs
+    dreamer.DE_eps = MCMCPar.eps
+
+    # Initial population
+    if Extra.InitPopulation == 'COV_BASED':
+        pop = cov_init(N=MCMCPar.seq, x=Extra.muX.flatten(), cov=Extra.qcov)
+    elif Extra.InitPopulation == 'LHS_BASED':
+        pop = lhs_init(N=MCMCPar.seq, bounds=(ParRange.minn, ParRange.maxn))
+    else:
+        raise ValueError("Extra.InitPopulation must be COV_BASED or LHS_BASED")
+    dreamer.population = pop
+
+    # Crossover parameters
+    if Extra.pCR == 'Update':
+        CR = AdaptiveCrossover(MCMCPar.nCR)
+    else:
+        CR = Crossover(1./MCMCPar.nCR)
+    dreamer.CR = CR
+
+    # Delayed rejection parameters
+    dreamer.use_delayed_rejection = (Extra.DR == 'Yes')
+    dreamer.DR_scale = Extra.DRscale
+
+    return dreamer
+
+
+def convert_state(state):
+    """
+    Convert a completed dreamer run into a form compatible with the
+    matlab dream interface::
+
+        Sequences, Reduced_Seq, X, out, hist_logp
+
+    The original state is stored in out.state
+    """
+
+    _, points, logp = state.sample()
+    logp = logp[:, :, None]
+    Sequences = np.concatenate((points, np.exp(logp), logp), axis=2)
+    X = Sequences[-1, :, :]
+
+    draws, logp = state.logp()
+    hist_logp = np.concatenate((draws[:, None], logp), axis=1)
+
+    out = struct()
+    draws, R = state.R_stat()
+    out.R_stat = np.concatenate((draws[:, None], R), axis=1)
+    draws, AR = state.acceptance_rate()
+    out.AR = np.concatenate((draws[:, None], AR[:, None]), axis=1)
+    draws, w = state.CR_weight()
+    out.CR = np.concatenate((draws[:, None], w), axis=1)
+    out.outlier = state.outliers()[:, :2]
+
+    # save the dreamer state data structure  as well
+    out.state = state
+
+    return Sequences, Sequences, X, out, hist_logp
+
+
+def run_script(filename):
+    exec(compile(open(filename).read(), filename, 'exec'))
+
+
+class Banana:
+    """
+    Banana shaped function.
+
+    Note that this is not one of the N dimensional Rosenbrock variants
+    documented on wikipedia as it only operates "banana-like" in
+    the x0-x1 plane.
+    """
+    def __init__(self, mu, bpar, cmat):
+        self.mu, self.bpar, self.cmat = mu, bpar, cmat
+        self.imat = np.linalg.inv(cmat)
+
+    def __call__(self, x):
+        x = x+0 # make a copy
+        x[1] += self.bpar*(x[0]**2 - 100)
+        ret = -0.5*np.dot(np.dot(x[None, :], self.imat), x[:, None])
+        return ret[0, 0]
+
+
+def main():
+    import sys
+    if len(sys.argv) == 2:
+        import pylab
+        run_script(sys.argv[1])
+        user_ns = pylab.__dict__.copy()
+        user_ns.update(locals())
+        import IPython
+        IPython.Shell.IPShell(user_ns=user_ns).mainloop()
+    else:
+        print("usage: python -m dream.matlab model.m")
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/bumps/dream/metropolis.py b/bumps/dream/metropolis.py
new file mode 100644
index 0000000..5a7c618
--- /dev/null
+++ b/bumps/dream/metropolis.py
@@ -0,0 +1,92 @@
+"""
+MCMC step acceptance test.
+"""
+from __future__ import with_statement
+
+__all__ = ["metropolis", "metropolis_dr"]
+
+from numpy import exp, sqrt, minimum, where, cov, eye, array, dot, errstate
+from numpy.linalg import norm, cholesky, inv
+from . import util
+
+
+def paccept(logp_old, logp_try):
+    """
+    Returns the probability of taking a metropolis step given two
+    log density values.
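+
+    For example (illustrative values)::
+
+        paccept(logp_old=-2.0, logp_try=-1.0)   # uphill move: returns 1.0
+        paccept(logp_old=-1.0, logp_try=-2.0)   # downhill move: returns exp(-1)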
+    """
+    return exp(minimum(logp_try-logp_old, 0))
+
+
+def metropolis(xtry, logp_try, xold, logp_old, step_alpha):
+    """
+    Metropolis rule for acceptance or rejection
+
+    Generates the next generation, *newgen*, from::
+
+        x_new[k] = x_try[k] if U[k] < alpha[k]
+                 = x_old[k] otherwise
+
+    where alpha is min(1, p_try/p_old) scaled by *step_alpha*, U is drawn
+    uniformly from [0, 1), and accept is the boolean vector alpha > U.
+
+    Returns x_new, logp_new, alpha, accept
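+
+    A minimal illustrative call for a two-chain, one-parameter population
+    (all values below are made up)::
+
+        import numpy as np
+        xtry = np.array([[1.0], [2.0]])
+        xold = np.array([[0.5], [1.5]])
+        logp_try = np.array([-1.0, -5.0])
+        logp_old = np.array([-2.0, -1.0])
+        xnew, logp_new, alpha, accept = metropolis(
+            xtry, logp_try, xold, logp_old, step_alpha=1.0)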
+    """
+    with errstate(under='ignore'):
+        alpha = paccept(logp_try=logp_try, logp_old=logp_old)
+        alpha *= step_alpha
+    accept = alpha > util.rng.rand(*alpha.shape)
+    logp_new = where(accept, logp_try, logp_old)
+    ## The following only works for vectors:
+    # xnew = where(accept, xtry, xold)
+    xnew = xtry+0
+    for i, a in enumerate(accept):
+        if not a:
+            xnew[i] = xold[i]
+
+    return xnew, logp_new, alpha, accept
+
+
+def dr_step(x, scale):
+    """
+    Delayed rejection step.
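+
+    Illustrative usage (population shape and scale are made up); the step is
+    proposed from the covariance of the current population::
+
+        import numpy as np
+        x = np.random.rand(10, 3)          # 10 chains, 3 parameters
+        x_new, r = dr_step(x, scale=2.0)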
+    """
+
+    # Compute the Cholesky Decomposition of X
+    nchains, npars = x.shape
+    r = (2.38/sqrt(npars)) * cholesky(cov(x.T) + 1e-5*eye(npars))
+
+    # Now do a delayed rejection step for each chain
+    delta_x = dot(util.rng.randn(*x.shape), r)/scale
+
+    # Generate ergodicity term
+    eps = 1e-6 * util.rng.randn(*x.shape)
+
+    # Update x_old with delta_x and eps;
+    return x + delta_x + eps, r
+
+
+def metropolis_dr(xtry, logp_try, x, logp, xold, logp_old, alpha12, R):
+    """
+    Delayed rejection metropolis
+    """
+
+    # Compute alpha32 (note we turned x and xtry around!)
+    alpha32 = paccept(logp_try=logp, logp_old=logp_try)
+
+    # Calculate alpha for each chain
+    l2 = paccept(logp_try=logp_try, logp_old=logp_old)
+    iR = inv(R)
+    q1 = array([exp(-0.5*(norm(dot(x2-x1, iR))**2 - norm(dot(x1-x0, iR))**2))
+                for x0, x1, x2 in zip(xold, x, xtry)])
+    alpha13 = l2*q1*(1-alpha32)/(1-alpha12)
+
+    accept = alpha13 > util.rng.rand(*alpha13.shape)
+    logp_new = where(accept, logp_try, logp)
+    ## The following only works for vectors:
+    # xnew = where(accept, xtry, x)
+    xnew = xtry+0
+    for i, a in enumerate(accept):
+        if not a:
+            xnew[i] = x[i]
+
+    return xnew, logp_new, alpha13, accept
diff --git a/bumps/dream/model.py b/bumps/dream/model.py
new file mode 100644
index 0000000..f20967d
--- /dev/null
+++ b/bumps/dream/model.py
@@ -0,0 +1,244 @@
+"""
+MCMC model types
+
+Usage
+-----
+
+First create a :mod:`bounds` object.  This stores the ranges available
+on the parameters, and controls how values outside the range are handled::
+
+    M_bounds = bounds(minx, maxx, style='reflect|clip|fold|randomize|none')
+
+For simple functions you can use one of the existing models.
+
+If your model *f* computes the probability density, use :class:`Density`::
+
+    M = Density(f, bounds=M_bounds)
+
+If your model *f* computes the log probability density,
+use :class:`LogDensity`::
+
+    M = LogDensity(f, bounds=M_bounds)
+
+If your model *f* computes a simulation which returns a vector, and you
+have *data* associated with the simulation, use :class:`Simulation`::
+
+    M = Simulation(f, data=data, bounds=M_bounds)
+
+The measurement *data* can have a 1-sigma uncertainty associated with it, as
+well as a *gamma* factor if the uncertainty distribution has non-Gaussian
+kurtosis associated with it.
+
+
+Multivariate normal distribution::
+
+    M = MVNormal(mu, sigma)
+
+Mixture models::
+
+    M = Mixture(M1, w1, M2, w2, ...)
+
+
+For more complex functions, you can subclass MCMCModel::
+
+    class Model(MCMCModel):
+        def __init__(self, ..., bounds=None, ...):
+            ...
+            self.bounds = bounds
+            ...
+        def nllf(self, x):
+            "Return the negative log likelihood of seeing x"
+            p = probability of seeing x
+            return -log(p)
+
+    M = Model(..., bounds=M_bounds, ...)
+
+The MCMC program uses only two methods from the model::
+
+    apply_bounds(pop)
+    log_density(pop)
+
+If your model provides these methods, you will not need to subclass MCMCModel
+in order to interact with DREAM.
+
+
+Compatibility with matlab DREAM
+-------------------------------
+
+First generate a bounds handling function::
+
+    M_bounds = bounds(ParRange.minn, ParRange.maxn)
+
+Then generate a model, depending on what kind of function you have.
+
+Option 1. Model directly computes posterior density::
+
+    model = Density(f, bounds=M_bounds)
+
+Option 2. Model computes simulation, data has known 1-sigma uncertainty::
+
+    model = Simulation(f, data=Measurement.MeasData, bounds=M_bounds,
+                       sigma=Measurement.Sigma, gamma = MCMCPar.Gamma)
+
+Option 3. Model computes simulation, data has unknown 1-sigma uncertainty::
+
+    model = Simulation(f, data=Measurement.MeasData, bounds=M_bounds,
+                       gamma = MCMCPar.Gamma)
+
+
+Option 4. Model directly computes log posterior density::
+
+    model = LogDensity(f, bounds=M_bounds)
+
+Option 5 is like option 2 but the reported likelihoods do not take the
+1-sigma uncertainty into account.  The metropolis steps are still based
+on the 1-sigma uncertainty, so use the style given in option 2 for this case.
+
+"""
+from __future__ import division
+
+__all__ = ['MCMCModel', 'Density', 'LogDensity', 'Simulation',
+           'MVNormal', 'Mixture']
+
+import numpy as np
+from numpy import diag, log, exp, pi
+from numpy.linalg import cholesky, inv
+
+from . import exppow
+
+
+class MCMCModel(object):
+    """
+    MCMC model abstract base class.
+
+    Each model must have a negative log likelihood function which operates
+    on a point x, returning the negative log likelihood, or inf if the point
+    is outside the domain.
+    """
+    labels = None
+    bounds = None
+
+    def nllf(self, x):
+        raise NotImplementedError()
+
+    def log_density(self, x):
+        return -self.nllf(x)
+
+    def plot(self, x):
+        pass
+
+    def map(self, pop):
+        return np.array([self.nllf(x) for x in pop])
+
+
+class Density(MCMCModel):
+    """
+    Construct an MCMC model from a probability density function.
+
+    *f* is the density function
+    """
+    def __init__(self, f, bounds=None, labels=None):
+        self.f, self.bounds, self.labels = f, bounds, labels
+
+    def nllf(self, x):
+        return -log(self.f(x))
+
+
+class LogDensity(MCMCModel):
+    """
+    Construct an MCMC model from a log probability density function.
+
+    *f* is the log density function
+    """
+    def __init__(self, f, bounds=None, labels=None):
+        self.f, self.bounds, self.labels = f, bounds, labels
+
+    def nllf(self, x):
+        return -self.f(x)
+
+
+class Simulation(MCMCModel):
+    """
+    Construct an MCMC model from a simulation function.
+
+    *f* is the function which simulates the data
+    *data* is the measurement(s) to compare it to
+    *sigma* is the 1-sigma uncertainty of the measurement(s).
+    *gamma* in (-1, 1] represents kurtosis on the data measurement uncertainty.
+
+    Data is assumed to come from an exponential power density::
+
+        p(v|S, G) = w(G)/S exp(-c(G) |v/S|^(2/(1+G)))
+
+    where S is *sigma* and G is *gamma*.
+
+    The values of *sigma* and *gamma* can be uniform or can vary with the
+    individual measurement points.
+
+    Certain values of *gamma* select particular distributions::
+
+        G = 0: normal
+        G = 1: double exponential
+        G -> -1: uniform
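+
+    For example, a sketch with an identity simulation and made-up data
+    (gamma=0 gives Gaussian measurement uncertainty)::
+
+        import numpy as np
+        M = Simulation(f=lambda p: p, data=np.array([1.1, 1.9]),
+                       sigma=0.1, gamma=0,
+                       bounds=(np.array([-5., -5.]), np.array([5., 5.])))
+        M.nllf(np.array([1.0, 2.0]))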
+    """
+    def __init__(self, f=None, bounds=None, data=None, sigma=1, gamma=0,
+                 labels=None):
+        self.f, self.bounds, self.labels = f, bounds, labels
+        self.data, self.sigma, self.gamma = data, sigma, gamma
+        cb, wb = exppow.exppow_pars(gamma)
+        self._offset = np.sum(log(wb/sigma * np.ones_like(data)))
+        self._cb = cb
+        self._pow = 2/(1+gamma)
+        #print "cb", cb, "sqrt(2pi)*wb", sqrt(2*pi)*wb
+        #print "offset", self._offset
+
+    def nllf(self, x):
+        err = self.f(x) - self.data
+        log_p = self._offset - np.sum(self._cb * abs(err/self.sigma)**self._pow)
+        return -log_p  # negative log likelihood
+
+    def plot(self, x):
+        import pylab
+        v = pylab.arange(len(self.data))
+        pylab.plot(v, self.data, 'x', v, self.f(x), '-')
+
+
+class MVNormal(MCMCModel):
+    """
+    multivariate normal negative log likelihood function
+    """
+    def __init__(self, mu, sigma):
+        self.mu, self.sigma = np.asarray(mu), np.asarray(sigma)
+        # Precompute sigma contributions
+        r = cholesky(sigma)
+        self._rinv = inv(r)
+        self._c = 0.5*len(mu)*log(2*pi) + 0.5*np.sum(diag(r))
+
+    def nllf(self, x):
+        mu, c, rinv = self.mu, self._c, self._rinv
+        y = c + 0.5*np.sum(np.dot(x-mu, rinv)**2)
+        return y
+
+
+class Mixture(MCMCModel):
+    """
+    Create a mixture model from a list of weighted density models.
+
+    Mixture(M1, w1, M2, w2, ...)
+
+    Models M1, M2, ... are MCMC models with M.nllf(x) returning the negative
+    log likelihood of x.  Weights w1, w2, ... are arbitrary scalars.
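+
+    For example, an equal-weight mixture of two normals (illustrative)::
+
+        M = Mixture(MVNormal([-1.0], [[1.0]]), 1,
+                    MVNormal([+1.0], [[1.0]]), 1)
+        M.nllf([0.0])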
+    """
+    def __init__(self, *args):
+
+        models = args[::2]
+        weights = args[1::2]
+        if (len(args) % 2 != 0
+                or not all(hasattr(M, 'nllf') for M in models)
+                or not all(np.isscalar(w) for w in weights)):
+            raise TypeError("Expected MixtureModel(M1, w1, M2, w2, ...)")
+        self.pairs = list(zip(models, weights))
+        self.weight = sum(weights)
+
+    def nllf(self, x):
+        p = [w*exp(-M.nllf(x)) for M, w in self.pairs]
+        return -log(np.sum(p)/self.weight)
diff --git a/bumps/dream/outliers.py b/bumps/dream/outliers.py
new file mode 100644
index 0000000..a4ce1be
--- /dev/null
+++ b/bumps/dream/outliers.py
@@ -0,0 +1,140 @@
+"""
+Chain outlier tests.
+"""
+
+__all__ = ["identify_outliers"]
+
+from numpy import mean, std, sqrt, where, argmin, arange, array
+from numpy import sort
+from scipy.stats import t as student_t
+from scipy.stats import scoreatpercentile
+
+from .mahal import mahalanobis
+from .acr import ACR
+
+tinv = student_t.ppf
+# from scipy.stats import scoreatpercentile as prctile
+# CRUFT: scoreatpercentile not accepting array arguments in older scipy
+
+
+def prctile(v, Q):
+    v = sort(v)
+    return [scoreatpercentile(v, Qi) for Qi in Q]
+
+
+def identify_outliers(test, chains, x):
+    """
+    Determine which chains have converged on a local maximum much lower than
+    the maximum likelihood.
+
+    *test* is the name of the test to use (one of IQR, Grubbs, Mahal or none).
+    *chains* is a set of log likelihood values of shape (chain len, num chains)
+    *x* is the current population of shape (num chains, num vars)
+
+    Returns an integer array of outlier indices.
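+
+    A small illustrative call with randomly generated values (see also
+    :func:`test_outliers` below)::
+
+        import numpy as np
+        chains = np.random.randn(100, 6) - 50   # log likelihoods, 6 chains
+        chains[:, 0] -= 40                      # make chain 0 much worse
+        x = np.random.randn(6, 3)               # current population
+        identify_outliers('IQR', chains, x)     # usually flags chain 0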
+    """
+    # Determine the mean log density of the active chains
+    v = mean(chains, axis=0)
+
+    # Check whether any of these active chains are outlier chains
+    test = test.lower()
+    if test == 'iqr':
+        # Derive the upper and lower quartile of the chain averages
+        q1, q3 = prctile(v, [25., 75.])
+        # Derive the Inter Quartile Range (IQR)
+        iqr = q3 - q1
+        # See whether there are any outlier chains
+        outliers = where(v < q1 - 2*iqr)[0]
+
+    elif test == 'grubbs':
+        # Compute zscore for chain averages
+        zscore = (mean(v) - v) / std(v, ddof=1)
+        # Determine t-value of one-sided interval
+        n = len(v)
+        t2 = tinv(1 - 0.01/n, n-2)**2  # 95% interval
+        # Determine the critical value
+        gcrit = ((n - 1)/sqrt(n)) * sqrt(t2/(n-2 + t2))
+        # Then check against this
+        outliers = where(zscore > gcrit)[0]
+
+    elif test == 'mahal':
+        # Use the Mahalanobis distance to find outliers in the population
+        alpha = 0.01
+        npop, nvar = x.shape
+        gcrit = ACR(nvar, npop-1, alpha)
+        #print "alpha", alpha, "nvar", nvar, "npop", npop, "gcrit", gcrit
+        # Find which chain has minimum log_density
+        minidx = argmin(v)
+        # check the Mahalanobis distance of the current point to other chains
+        d1 = mahalanobis(x[minidx, :], x[minidx != arange(npop), :])
+        #print "d1", d1, "minidx", minidx
+        # and see if it is an outlier
+        outliers = array([minidx]) if d1 > gcrit else array([])
+
+    elif test == 'none':
+        outliers = array([])
+
+    else:
+        raise ValueError("Unknown outlier test "+test)
+
+    return outliers
+
+
+def test_outliers():
+    from .walk import walk
+    from numpy.random import multivariate_normal, seed
+    from numpy import vstack, ones, eye
+    seed(2)  # Remove uncertainty on tests
+    # Set a number of good and bad chains
+    ngood, nbad = 25, 2
+
+    # Make chains mean-reverting chains with widely separated values for
+    # bad and good; put bad chains first.
+    chains = walk(1000, mu=[1]*nbad+[5]*ngood, sigma=0.45, alpha=0.1)
+
+    # Check IQR and Grubbs
+    assert (identify_outliers('IQR', chains, None) == arange(nbad)).all()
+    assert (identify_outliers('Grubbs', chains, None) == arange(nbad)).all()
+
+    # Put points for 'bad' chains at [-1,...,-1] and 'good' chains at [1,...,1]
+    x = vstack((multivariate_normal(-ones(4), 0.1*eye(4), size=nbad),
+                multivariate_normal(ones(4), 0.1*eye(4), size=ngood)))
+    assert identify_outliers('Mahal', chains, x)[0] in range(nbad)
+
+    # Put points for _all_ chains at [1,...,1] and check that mahal return []
+    xsame = multivariate_normal(ones(4), 0.2*eye(4), size=ngood+nbad)
+    assert len(identify_outliers('Mahal', chains, xsame)) == 0
+
+    # Check again with large variance
+    x = vstack((multivariate_normal(-3*ones(4), eye(4), size=nbad),
+                multivariate_normal(ones(4), 10*eye(4), size=ngood)))
+    assert len(identify_outliers('Mahal', chains, x)) == 0
+
+    # =====================================================================
+    # Test replacement
+
+    # Construct a state object
+    from numpy.linalg import norm
+    from .state import MCMCDraw
+    ngen, npop = chains.shape
+    npop, nvar = x.shape
+    state = MCMCDraw(Ngen=ngen, Nthin=ngen, Nupdate=0,
+                     Nvar=nvar, Npop=npop, Ncr=0, thinning=0)
+    # Fill it with chains
+    for i in range(ngen):
+        state._generation(new_draws=npop, x=x, logp=chains[i], accept=npop)
+
+    # Make a copy of the current state so we can check it was updated
+    nx, nlogp = x+0, chains[-1]+0
+    # Remove outliers
+    state.remove_outliers(nx, nlogp, test='IQR', portion=0.5)
+    # Check that the outliers were removed
+    outliers = state.outliers()
+    assert outliers.shape[0] == nbad
+    for i in range(nbad):
+        assert nlogp[outliers[i, 1]] == chains[-1][outliers[i, 2]]
+        assert norm(nx[outliers[i, 1], :] - x[outliers[i, 2], :]) == 0
+
+
+if __name__ == "__main__":
+    test_outliers()
diff --git a/bumps/dream/state.py b/bumps/dream/state.py
new file mode 100644
index 0000000..803f400
--- /dev/null
+++ b/bumps/dream/state.py
@@ -0,0 +1,1029 @@
+"""
+Sampling history for MCMC.
+
+MCMC keeps track of a number of things during sampling.
+
+The results may be queried as follows::
+
+    draws, generation, thinning
+    sample(condition) returns draws, points, logp
+    logp()            returns draws, logp
+    acceptance_rate() returns draws, AR
+    chains()          returns draws, chains, logp
+    R_stat()          returns draws, R
+    CR_weight()       returns draws, CR_weight
+    best()            returns best_x, best_logp
+    outliers()        returns outliers
+    show()/save(file)/load(file)
+
+Data is stored in circular arrays, which keeps the last N generations and
+throws the rest away.
+
+draws is the total number of draws from the sampler.
+
+generation is the total number of generations.
+
+thinning is the number of generations per stored sample.
+
+draws[i] is the number of draws including those required to produce the
+information in the corresponding return vector.  Note that draw numbers
+need not be linearly spaced, since techniques like delayed rejection
+will result in a varying number of samples per generation.
+
+logp[i] is the set of log likelihoods, one for each member of the population.
+The logp() method returns the complete set, and the sample() method returns
+a thinned set, with one element of logp[i] for each vector point[i, :].
+
+AR[i] is the acceptance rate at generation i, showing the proportion of
+proposed points which are accepted into the population.
+
+chains[i, :, :] is the set of points in the differential evolution population
+at thinned generation i.  Ideally, the thinning rate of the MCMC process
+is chosen so that thinned generations i and i+1 are independent samples
+from the posterior distribution, though there is a chance that this may
+not be the case, and indeed, some points in generation i+1 may be identical
+to those in generation i.  Actual generation number is i*thinning.
+
+points[i, :] is the ith point in a returned sample.  The i is just a place
+holder; there is no inherent ordering to the sample once they have been
+extracted from the chains.  Note that the sample may be from a marginal
+distribution.
+
+R[i] is the Gelman R statistic measuring convergence of the Markov chain.
+
+CR_weight[i] is the set of weights used for selecting between the crossover
+ratios available to the candidate generation process of differential
+evolution.  These will be fixed early in the sampling, even when adaptive
+differential evolution is selected.
+
+outliers[i] is a vector containing the thinned generation number at which
+an outlier chain was removed, the id of the chain that was removed and
+the id of the chain that replaced it.  We leave it to the reader to decide
+if the cloned samples, point[:generation, :, removed_id], should be included
+in further analysis.
+
+best_logp is the highest log likelihood observed during the analysis and
+best_x is the corresponding point at which it was observed.
+
+generation is the last generation number
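+
+For example, assuming *state* is the MCMCDraw instance produced by a run,
+a typical post-fit inspection might look like (illustrative)::
+
+    state.mark_outliers()              # drop chains that did not converge
+    draws, logp = state.logp()         # convergence trace per chain
+    drawn = state.draw(portion=0.5)    # posterior sample from the last half
+    best_x, best_logp = state.best()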
+"""
+#TODO: state should be collected in files as we go
+from __future__ import division, print_function
+
+__all__ = ['MCMCDraw', 'load_state', 'save_state']
+
+import re
+import gzip
+
+import numpy as np
+from numpy import empty, sum, asarray, inf, argmax, hstack, dstack
+from numpy import savetxt, reshape
+
+from .outliers import identify_outliers
+from .util import draw, rng
+
+#EXT = ".mc.gz"
+#CREATE = gzip.open
+EXT = ".mc"
+CREATE = open
+
+# CRUFT: python 2.x needs to convert unicode to bytes when writing to file
+try:
+    # python 2.x
+    unicode
+    def write(fid, s):
+        fid.write(s)
+except NameError:
+    # python 3.x
+    def write(fid, s):
+        fid.write(s.encode('utf-8') if isinstance(s, str) else s)
+
+class NoTrace:
+    def write(self, data):
+        pass
+
+    def flush(self):
+        pass
+
+    def close(self):
+        pass
+
+
+def save_state(state, filename):
+    trace = NoTrace()
+    #trace = open(filename+"-trace.mc", "w")
+
+    write(trace, "starting trace\n")
+    # Build 2-D data structures
+    write(trace, "extracting draws, logp\n")
+    draws, logp = state.logp(full=True)
+    write(trace, "extracting acceptance rate\n")
+    _, AR = state.acceptance_rate()
+    write(trace, "building chain from draws, AR and logp\n")
+    chain = hstack((draws[:, None], AR[:, None], logp))
+
+    write(trace, "extracting point, logp\n")
+    _, point, logp = state.chains()
+    Nthin, Npop, Nvar = point.shape
+    write(trace, "shape is %d,%d,%d\n" % (Nthin, Npop, Nvar))
+    write(trace, "adding logp to point\n")
+    point = dstack((logp[:, :, None], point))
+    write(trace, "collapsing to draws x point\n")
+    point = reshape(point, (point.shape[0]*point.shape[1], point.shape[2]))
+
+    write(trace, "extracting R_stat\n")
+    draws, R_stat = state.R_stat()
+    write(trace, "extracting CR_weight\n")
+    _, CR_weight = state.CR_weight()
+    _, Ncr = CR_weight.shape
+    write(trace, "building stats\n")
+    stats = hstack((draws[:, None], R_stat, CR_weight))
+
+    #TODO: missing _outliers from save_state
+
+    # Write convergence info
+    write(trace, "writing chain\n")
+    fid = CREATE(filename+'-chain'+EXT, 'wb')
+    write(fid, '# draws acceptance_rate %d*logp\n' % Npop)
+    savetxt(fid, chain)
+    fid.close()
+
+    # Write point info
+    write(trace, "writing point\n")
+    fid = CREATE(filename+'-point'+EXT, 'wb')
+    write(fid, '# logp point (Nthin x Npop x Nvar = [%d,%d,%d])\n'
+               % (Nthin, Npop, Nvar))
+    savetxt(fid, point)
+    fid.close()
+
+    # Write stats
+    write(trace, "writing stats\n")
+    fid = CREATE(filename+'-stats'+EXT, 'wb')
+    write(fid, '# draws %d*R-stat %d*CR_weight\n' % (Nvar, Ncr))
+    savetxt(fid, stats)
+    fid.close()
+    write(trace, "done state save\n")
+    trace.close()
+
+
+IND_PAT = re.compile('-1#IND')
+INF_PAT = re.compile('1#INF')
+
+
+def loadtxt(file, report=0):
+    """
+    Like numpy loadtxt, but adapted for windows non-finite numbers.
+    """
+    if not hasattr(file, 'readline'):
+        if file.endswith('.gz'):
+            #print("opening with gzip")
+            fh = gzip.open(file, 'r')
+        else:
+            fh = open(file, 'r')
+    else:
+        fh = file
+    res = []
+    section = 0
+    lineno = 0
+    for line in fh:
+        lineno += 1
+        if report and lineno%report==0:
+            print("read", section*report)
+            section += 1
+        line = IND_PAT.sub('nan', line)
+        line = INF_PAT.sub('inf', line)
+        line = line.split('#')[0].strip()
+        values = line.split()
+        if len(values) > 0:
+            try:
+                res.append([float(v) for v in values])
+            except ValueError:
+                print("Parse error:", values)
+    if fh != file:
+        fh.close()
+    return asarray(res)
+
+
+def load_state(filename, skip=0, report=0):
+    # Read chain file
+    chain = loadtxt(filename+'-chain'+EXT)
+
+    # Read point file
+    fid = open(filename+'-point'+EXT, 'r')
+    line = fid.readline()
+    point_dims = line[line.find('[')+1:line.find(']')]
+    Nthin, Npop, Nvar = eval(point_dims)
+    for _ in range(skip*Npop):
+        fid.readline()
+    point = loadtxt(fid, report=report*Npop)
+    fid.close()
+
+    # Read stats file
+    stats = loadtxt(filename+'-stats'+EXT)
+
+    # Guess dimensions
+    Ngen = chain.shape[0]
+    thinning = 1
+    Nthin -= skip
+    Nupdate = stats.shape[0]
+    #Ncr = stats.shape[1] - Nvar - 1
+
+    # Create empty draw and fill it with loaded data
+    state = MCMCDraw(0, 0, 0, 0, 0, 0, thinning)
+    #print("gen, var, pop", Ngen, Nvar, Npop)
+    state.draws = Ngen * Npop
+    state.generation = Ngen
+    state._gen_index = 0
+    state._gen_draws = chain[:, 0]
+    state._gen_acceptance_rate = chain[:, 1]
+    state._gen_logp = chain[:, 2:]
+    state.thinning = thinning
+    state._thin_count = Ngen//thinning
+    state._thin_index = 0
+    state._thin_draws = state._gen_draws[(skip+1)*thinning-1::thinning]
+    state._thin_logp = point[:, 0].reshape((Nthin, Npop))
+    state._thin_point = reshape(point[:, 1:], (Nthin, Npop, Nvar))
+    state._gen_current = state._thin_point[-1]
+    state._update_count = Nupdate
+    state._update_index = 0
+    state._update_draws = stats[:, 0]
+    state._update_R_stat = stats[:, 1:Nvar+1]
+    state._update_CR_weight = stats[:, Nvar+1:]
+    state._outliers = []
+
+    bestidx = np.argmax(point[:, 0])
+    state._best_logp = point[bestidx, 0]
+    state._best_x = point[bestidx, 1:]
+
+    return state
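+
+# Illustrative round trip (the basename below is made up): save_state writes
+# "<basename>-chain.mc", "<basename>-point.mc" and "<basename>-stats.mc", and
+# load_state reads them back into an MCMCDraw:
+#
+#     save_state(state, "/tmp/example")
+#     state = load_state("/tmp/example")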
+
+
+class MCMCDraw(object):
+    """
+    Sampling history for an MCMC run, stored in circular buffers.
+    """
+    _labels = None
+    title = None
+    @property
+    def Nvar(self):
+        """Number of parameters in the fit"""
+        return self._thin_point.shape[2]
+
+    def __init__(self, Ngen, Nthin, Nupdate, Nvar, Npop, Ncr, thinning):
+        # Total number of draws so far
+        self.draws = 0
+
+        # Maximum observed likelihood
+        self._best_x = None
+        self._best_logp = -inf
+
+        # Per generation iteration
+        self.generation = 0
+        self._gen_index = 0
+        self._gen_draws = empty(Ngen, 'i')
+        self._gen_logp = empty( (Ngen, Npop) )
+        self._gen_acceptance_rate = empty(Ngen)
+
+        # If we are thinning, we need to keep the current generation
+        # separately. [Note: don't remember why we need both the _gen_*
+        # and _thin_*]  [Note: the caller x vector is assigned to
+        # _gen_current; this may lead to unexpected behaviour if x is
+        # changed by the caller.]
+        self._gen_current = None
+
+        # Per thinned generation iteration
+        self.thinning = thinning
+        self._thin_index = 0
+        self._thin_count = 0
+        self._thin_timer = 0
+        self._thin_draws = empty(Nthin, 'i')
+        self._thin_point = empty((Nthin, Npop, Nvar))
+        self._thin_logp = empty((Nthin, Npop))
+
+        # Per update iteration
+        self._update_index = 0
+        self._update_count = 0
+        self._update_draws = empty(Nupdate, 'i')
+        self._update_R_stat = empty((Nupdate, Nvar) )
+        self._update_CR_weight = empty((Nupdate, Ncr))
+
+        self._outliers = []
+
+        # Query functions will not return outlier chains; initially, all
+        # chains are marked as good.  Call mark_outliers to remove
+        # outlier chains from the set.
+        self._good_chains = slice(None, None)
+
+    @property
+    def Ngen(self):
+        return self._gen_draws.shape[0]
+
+    @property
+    def Nthin(self):
+        return self._thin_draws.shape[0]
+
+    @property
+    def Nupdate(self):
+        return self._update_draws.shape[0]
+
+    @property
+    def Npop(self):
+        return self._gen_logp.shape[1]
+
+    @property
+    def Ncr(self):
+        return self._update_CR_weight.shape[1]
+
+    def resize(self, Ngen, Nthin, Nupdate, Nvar, Npop, Ncr, thinning):
+        if self.Nvar != Nvar or self.Npop != Npop or self.Ncr != Ncr:
+            raise ValueError("Cannot change Nvar, Npop or Ncr on resize")
+
+        # For now, only handle the case where the we have one complete
+        # frame of data, such as on reloading the state vector
+        assert (self._gen_index == 0
+                and self._update_index == 0
+                and self._thin_index == 0)
+        assert (self.generation == self.Ngen
+                and self._update_count == self.Nupdate
+                and self._thin_count == self.Nthin)
+
+        self.thinning = thinning
+
+        if Ngen > self.Ngen:
+            self._gen_index = self.Ngen  # must happen before resize!!
+            self._gen_draws = np.resize(self._gen_draws, Ngen)
+            self._gen_logp = np.resize(self._gen_logp,  (Ngen, Npop))
+            self._gen_acceptance_rate \
+                = np.resize(self._gen_acceptance_rate, Ngen)
+        elif Ngen < self.Ngen:
+            self._gen_draws = self._gen_draws[-Ngen:].copy()
+            self._gen_logp = self._gen_logp[-Ngen:, :].copy()
+            self._gen_acceptance_rate \
+                = self._gen_acceptance_rate[-Ngen:].copy()
+
+        if Nthin > self.Nthin:
+            self._thin_index = self.Nthin  # must happen before resize!!
+            self._thin_draws = np.resize(self._thin_draws, Nthin)
+            self._thin_point = np.resize(self._thin_point,  (Nthin, Npop, Nvar))
+            self._thin_logp = np.resize(self._thin_logp,  (Nthin, Npop))
+        elif Nthin < self.Nthin:
+            self._thin_draws = self._thin_draws[-Nthin:].copy()
+            self._thin_point = self._thin_point[-Nthin:, :, :].copy()
+            self._thin_logp = self._thin_logp[-Nthin:, :].copy()
+
+        if Nupdate > self.Nupdate:
+            self._update_count = self.Nupdate  # must happen before resize!!
+            self._update_draws = np.resize(self._update_draws, Nupdate)
+            self._update_R_stat \
+                = np.resize(self._update_R_stat,  (Nupdate, Nvar))
+            self._update_CR_weight \
+                = np.resize(self._update_CR_weight,  (Nupdate, Ncr))
+        elif Nupdate < self.Nupdate:
+            self._update_draws = self._update_draws[-Nupdate:].copy()
+            self._update_R_stat = self._update_R_stat[-Nupdate:, :].copy()
+            self._update_CR_weight = self._update_CR_weight[-Nupdate:, :].copy()
+
+    def save(self, filename):
+        save_state(self, filename)
+
+    def show(self, portion=1.0, figfile=None):
+        from .views import plot_all
+        plot_all(self, portion=portion, figfile=figfile)
+
+    def _last_gen(self):
+        """
+        Returns x, logp for most recent generation to dream.py.
+        """
+        # Note: if generation number has wrapped and _gen_index is 0
+        # (the usual case when this function is called to resume an
+        # existing chain), then this returns the last row in the array.
+        return (self._thin_point[self._thin_index-1],
+                self._thin_logp[self._thin_index-1])
+
+    def _generation(self, new_draws, x, logp, accept, force_keep=False):
+        """
+        Called from dream.py after each generation is completed with
+        a set of accepted points and their values.
+        """
+        # Keep track of the total number of draws
+        # Note: this is first so that we tag the record with the number of
+        # draws taken so far, including the current draw.
+        self.draws += new_draws
+        self.generation += 1
+
+        # Record if this is the best so far
+        maxid = argmax(logp)
+        if logp[maxid] > self._best_logp:
+            self._best_logp = logp[maxid]
+            self._best_x = x[maxid, :]+0 # Force a copy
+
+        # Record acceptance rate and cost
+        i = self._gen_index
+        #print("generation", i, self.draws, "\n x", x, "\n logp", logp, "\n accept", accept)
+        self._gen_draws[i] = self.draws
+        self._gen_acceptance_rate[i] = 100*sum(accept)/new_draws
+        self._gen_logp[i] = logp
+        i = i+1
+        if i == len(self._gen_draws):
+            i = 0
+        self._gen_index = i
+
+        # Keep every nth iteration
+        self._thin_timer += 1
+        if self._thin_timer == self.thinning or force_keep:
+            self._thin_timer = 0
+            self._thin_count += 1
+            i = self._thin_index
+            self._thin_draws[i] = self.draws
+            self._thin_point[i] = x
+            self._thin_logp[i] = logp
+            i = i+1
+            if i == len(self._thin_draws): i = 0
+            self._thin_index = i
+            self._gen_current = x+0 # force a copy
+        else:
+            self._gen_current = x+0 # force a copy
+
+    def _update(self, R_stat, CR_weight):
+        """
+        Called from dream.py when a series of DE steps is completed and
+        summary statistics/adaptations are ready to be stored.
+        """
+        self._update_count += 1
+        i = self._update_index
+        #print("update", i, self.draws, "\n Rstat", R_stat, "\n CR weight", CR_weight)
+        self._update_draws[i] = self.draws
+        self._update_R_stat[i] = R_stat
+        self._update_CR_weight[i] = CR_weight
+        i = i+1
+        if i == len(self._update_draws): i = 0
+        self._update_index = i
+
+    def _replace_outlier(self, old, new):
+        """
+        Called from outliers.py when a chain is replaced by the
+        clone of another.
+        """
+        self._outliers.append((self._thin_index, old, new))
+
+        self._gen_logp[:, old] = self._gen_logp[:, new]
+        self._thin_logp[:, old] = self._thin_logp[:, new]
+        self._thin_point[:, old, :] = self._thin_point[:, new, :]
+        # PAK: shouldn't we reduce the total number of draws since we
+        # are throwing away an entire chain?
+
+    @property
+    def labels(self):
+        if self._labels is None:
+            return ["P%d"%i for i in range(self._thin_point.shape[2])]
+        else:
+            return self._labels
+
+    @labels.setter
+    def labels(self, v):
+        self._labels = v
+
+    def _draw_pop(self):
+        """
+        Return the current population.
+        """
+        return self._gen_current
+
+    def _draw_large_pop(self, Npop):
+        _, chains, _ = self.chains()
+        Ngen, Nchain, Nvar = chains.shape
+        points = reshape(chains, (Ngen*Nchain, Nvar))
+
+        # There are two complications with the history buffer:
+        # (1) due to thinning, not every generation is stored
+        # (2) because it is circular, the cursor may be in the middle
+        # If the current generation isn't in the buffer (but is instead
+        # stored separately as _gen_current), then the entire buffer
+        # becomes the history pool.
+        # otherwise we need to exclude the current generation from
+        # the pool.  If (2) happens, we need to increment everything
+        # above the cursor by the number of chains.
+        if self._gen_current is not None:
+            pool_size = Ngen*Nchain
+            cursor = pool_size  # infinite
+        else:
+            pool_size = (Ngen-1)*Nchain
+            k = len(self._thin_draws)
+            cursor = Nchain*((k+self._thin_index-1)%k)
+
+        # Make a return population and fill it with the current generation
+        pop = empty((Npop, Nvar), 'd')
+        if self._gen_current is not None:
+            pop[:Nchain] = self._gen_current
+        else:
+            #print(pop.shape, points.shape, chains.shape)
+            pop[:Nchain] = points[cursor:cursor+Nchain]
+
+        if Npop > Nchain:
+            # Find the remainder with unique ancestors.
+            # Again, because this is a circular buffer, there may be random
+            # numbers generated at or above the cursor.  All of these must
+            # be shifted by Nchain to avoid the cursor.
+            perm = draw(Npop-Nchain, pool_size)
+            perm[perm>=cursor] += Nchain
+            #print("perm", perm; raw_input('wait'))
+            pop[Nchain:] = points[perm]
+
+        return pop
+
+    def _unroll(self):
+        """
+        Unroll the circular queue so that data access can be done inplace.
+
+        Call this when done stepping, and before plotting.  Calls to
+        logp, sample, etc. assume the data is already unrolled.
+        """
+        if self.generation > self._gen_index > 0:
+            self._gen_draws[:] = np.roll(self._gen_draws,
+                                         -self._gen_index, axis=0)
+            self._gen_logp[:] = np.roll(self._gen_logp,
+                                        -self._gen_index, axis=0)
+            self._gen_acceptance_rate[:] = np.roll(self._gen_acceptance_rate,
+                                                   -self._gen_index, axis=0)
+            self._gen_index = 0
+
+        if self._thin_count > self._thin_index > 0:
+            self._thin_draws[:] = np.roll(self._thin_draws,
+                                          -self._thin_index, axis=0)
+            self._thin_point[:] = np.roll(self._thin_point,
+                                          -self._thin_index, axis=0)
+            self._thin_logp[:] = np.roll(self._thin_logp,
+                                         -self._thin_index, axis=0)
+            self._thin_index = 0
+
+        if self._update_count > self._update_index > 0:
+            self._update_draws[:] = np.roll(self._update_draws,
+                                            -self._update_index, axis=0)
+            self._update_R_stat[:] = np.roll(self._update_R_stat,
+                                             -self._update_index, axis=0)
+            self._update_CR_weight[:] = np.roll(self._update_CR_weight,
+                                                -self._update_index, axis=0)
+            self._update_index = 0
+
+    def remove_outliers(self, x, logp, test='IQR', portion=0.5):
+        """
+        Replace outlier chains with clones of good ones.  This should happen
+        early in the sampling processes so the clones have an opportunity
+        to evolve their own identity.
+
+        *state* contains the chains, with log likelihood for each point.
+
+        *x*, *logp* are the current population and the corresponding
+        log likelihoods
+
+        *test* is the name of the test to use (one of IQR, Grubbs, Mahal
+        or none).
+
+        *portion* in (0, 1] is the amount of the chain to use
+
+        Updates *state*, *x* and *logp* to reflect the changes.
+
+        See :mod:`outliers` for details.
+        """
+        # Grab the last part of the chain histories
+        _, chains = self.logp()
+        chain_len, Nchains = chains.shape
+        outliers = identify_outliers(test, chains[-chain_len:], x)
+
+        # Loop over each outlier chain, replacing each with another
+        for old in outliers:
+            # Draw another chain at random, with replacement
+            while True:
+                new = rng.randint(Nchains)
+                if new not in outliers:
+                    break
+            # Update the saved state and current population
+            self._replace_outlier(old=old, new=new)
+            x[old, :] = x[new, :]
+            logp[old] = logp[new]
+
+    def mark_outliers(self, test='IQR', portion=1.0):
+        """
+        Mark some chains as outliers but don't remove them.  This can happen
+        after drawing is complete, so that chains that did not converge are
+        not included in the statistics.
+
+        *test* is 'IQR', 'Mahal' or 'none'.
+
+        *portion* indicates what portion of the samples should be included
+        in the outlier test.  The default is to include all of them.
+        """
+        _, chains, logp = self.chains()
+
+        if test == 'none':
+            self._good_chains = slice(None, None)
+        else:
+            Ngen = chains.shape[0]
+            start = int(Ngen*(1-portion)) if portion else 0
+            outliers = identify_outliers(test, logp[start:], chains[-1])
+            #print("outliers", outliers)
+            #print(logp.shape, chains.shape)
+            if len(outliers) > 0:
+                self._good_chains = np.array([i
+                                              for i in range(logp.shape[1])
+                                              if i not in outliers])
+            else:
+                self._good_chains = slice(None, None)
+            #print(self._good_chains)
+
+    def logp(self, full=False):
+        """
+        Return the iteration number and the log likelihood for each point in
+        the individual sequences in that iteration.
+
+        For example, to plot the convergence of each sequence::
+
+            draw, logp = state.logp()
+            plot(draw, logp)
+
+        Note that draw[i] represents the total number of samples taken,
+        including those for the samples in logp[i].
+
+        If full is True, then return all chains, not just good chains.
+        """
+        self._unroll()
+        retval = self._gen_draws, self._gen_logp
+        if self.generation == self._gen_index:
+            retval = [v[:self.generation] for v in retval]
+        draws, logp = retval
+        return draws, (logp if full else logp[:, self._good_chains])
+
+    def acceptance_rate(self):
+        """
+        Return the iteration number and the acceptance rate for that iteration.
+
+        For example, to plot the acceptance rate over time::
+
+            draw, AR = state.acceptance_rate()
+            plot(draw, AR)
+
+        """
+        retval = self._gen_draws, self._gen_acceptance_rate
+        if self.generation == self._gen_index:
+            retval = [v[:self.generation] for v in retval]
+        elif self._gen_index > 0:
+            retval = [np.roll(v, -self._gen_index, axis=0) for v in retval]
+        return retval
+
+    def chains(self):
+        """
+        Returns the observed Markov chains and the corresponding likelihoods.
+
+        The return value is a tuple (*draws*, *chains*, *logp*).
+
+        *draws* is the number of samples taken up to and including the samples
+        for the current generation.
+
+        *chains* is a three dimensional array of generations X chains X vars
+        giving the set of points observed for each chain in every generation.
+        Only the thinned samples are returned.
+
+        *logp* is a two dimensional array of generation X population giving
+        the log likelihood of observing the set of variable values given in
+        chains.
+        """
+        self._unroll()
+        retval = self._thin_draws, self._thin_point, self._thin_logp
+        if self._thin_count == self._thin_index:
+            retval = [v[:self._thin_count] for v in retval]
+        return retval
+
+    def R_stat(self):
+        """
+        Return the Gelman R convergence statistic for each variable.
+
+        For example, to plot the convergence of all variables over time::
+
+            draw, R = state.R_stat()
+            plot(draw, R)
+
+        See :mod:`dream.gelman` and references detailed therein.
+        """
+        self._unroll()
+        retval = self._update_draws, self._update_R_stat
+        if self._update_count == self._update_index:
+            retval = [v[:self._update_count] for v in retval]
+        return retval
+
+    def CR_weight(self):
+        """
+        Return the crossover ratio weights to be used in the next generation.
+
+        For example, to see if the adaptive CR is stable use::
+
+            draw, weight = state.CR_weight()
+            plot(draw, weight)
+
+        See :mod:`dream.crossover` for details.
+        """
+        self._unroll()
+        retval = self._update_draws, self._update_CR_weight
+        if self._update_count == self._update_index:
+            retval = [v[:self._update_count] for v in retval]
+        return retval
+
+    def outliers(self):
+        """
+        Return a list of outlier removal operations.
+
+        Each outlier operation is a tuple giving the thinned generation
+        in which it occurred, the old chain id and the new chain id.
+
+        The chains themselves have already been updated to reflect the
+        removal.
+
+        Curiously, it is possible for the maximum likelihood seen so far
+        to be removed by this operation.
+        """
+        return asarray(self._outliers, 'i')
+
+    def best(self):
+        """
+        Return the best point seen and its log likelihood.
+        """
+        return self._best_x, self._best_logp
+
+    def keep_best(self):
+        """
+        Place the best point at the end of the final good chain.
+
+        Good chains are defined by mark_outliers.
+
+        Because the Markov chain is designed to wander the parameter
+        space, the best individual seen during the random walk may have
+        been observed during the burn-in period, and may no longer be
+        present in the chain.  If this is the case, replace the final
+        point with the best, otherwise swap the positions of the final
+        and the best.
+        """
+
+        # Get state as a 1D array
+        _, chains, logp = self.chains()
+        Ngen, Npop, Nvar = chains.shape
+        points = reshape(chains, (Ngen*Npop, Nvar))
+        logp = reshape(logp, Ngen*Npop)
+
+        # Set the final position to the end of the last good chain.  If
+        # mark_outliers has not been called, then _good_chains will
+        # just be slice(None, None)
+        if isinstance(self._good_chains, slice):
+            final = -1
+        else:
+            final = self._good_chains[-1] - Npop
+
+        # Find the location of the best point if it exists and swap with
+        # the final position
+        idx = np.where(logp==self._best_logp)[0]
+        if len(idx) == 0:
+            logp[final] = self._best_logp
+            points[final, :] = self._best_x
+        else:
+            idx = idx[0]
+            logp[final], logp[idx] = logp[idx], logp[final]
+            points[final, :], points[idx, :] = points[idx, :], points[final, :]
+        # For multiple minima, arbitrarily choose one of them
+        # TODO: this will lead to possible confusion when the best value
+        # spontaneously changes when the fit is complete.
+        self._best_x = points[final]
+        self._best_logp = logp[final]
+
+    def sample(self, **kw):
+        """
+        Return a sample from the posterior distribution.
+
+        **Deprecated** use :meth:`draw` instead.
+        """
+        drawn = self.draw(**kw)
+        return drawn.points, drawn.logp
+
+    def entropy(self, **kw):
+        """
+        Return entropy estimate and uncertainty from an MCMC draw.
+
+        See :func:`entropy.entropy` for details.
+        """
+        from .entropy import entropy, MVNEntropy
+
+        # Get the sample from the state
+        drawn = self.draw()
+
+        M = MVNEntropy(drawn.points)
+        print("Entropy from MVN: %s"%str(M))
+        if M.reject_normal:
+            return entropy(drawn.points, drawn.logp, **kw)
+        else:
+            return M.entropy, 0
+
+
+    def draw(self, portion=1, vars=None, selection=None):
+        """
+        Return a sample from the posterior distribution.
+
+        *portion* is the portion of each chain to use
+        *vars* is a list of variables to return for each point
+        *selection* sets the range for the returned marginal distribution
+
+        *selection* is a dictionary of {variable: (low, high)} to set the
+        range on each variable.  Missing variables default to the full
+        range.
+
+        To plot the distribution for parameter p1::
+
+            draw = state.draw()
+            hist(draw.points[:, 0])
+
+        To plot the interdependence of p1 and p2::
+
+            draw = state.draw()
+            plot(draw.points[:, 0], draw.points[:, 1], '.')
+        """
+        vars = vars if vars is not None else getattr(self, '_shown', None)
+        return Draw(self, portion=portion, vars=vars, selection=selection)
+
+    def set_visible_vars(self, labels):
+        self._shown = [self.labels.index(v) for v in labels]
+        #print("\n".join(str(pair) for pair in enumerate(self.labels)))
+        #print(labels)
+        #print(self._shown)
+
+    def integer_vars(self, labels):
+        """
+        Set variables to integer variables by rounding their values to the
+        nearest integer.
+
+        Note that this cannot be done ahead of time unless DREAM gets an
+        integer stepper, but it can be done before generating statistics.
+        DREAM on the OpenBUGS Asia model does not return the same results
+        as OpenBUGS, so the analysis of integer parameters should not yet
+        be trusted.  No other tests have been done to this point.
+        """
+        for var in labels:
+            idx = self.labels.index(var)
+            self._thin_point[:,:,idx] = np.round(self._thin_point[:,:,idx])
+
+    def derive_vars(self, fn, labels=None):
+        """
+        Generate derived variables from the current sample, adding columns
+        for the derived variables to each sample of every chain.
+
+        The new columns are treated as part of the sample.
+
+        *fn* is a function taking points p[:, k] for k in 0 ... samples and
+        returning a set of derived variables pj[k] for each sample k.  The
+        variables can be returned as any kind of sequence including an
+        array or a tuple with one entry per variable.  The caller uses
+        asarray to convert the returned variables into a vars X samples array.
+        For convenience, a single variable can be returned by itself.
+
+        *labels* are the labels to use for the derived variables.
+
+        The following example adds the new variable x+y = P[0] + P[1]::
+
+            state.derive_vars(lambda p: p[0]+p[1], labels=["x+y"])
+        """
+        # Grab all samples as a set of points
+        _, chains, _ = self.chains()
+        Ngen, Npop, Nvar = chains.shape
+        points = reshape(chains, (Ngen*Npop, Nvar))
+
+        # Compute new variables from the points
+        newvars = asarray(fn(points.T)).T
+        Nnew = newvars.shape[1] if len(newvars.shape) == 2 else 1
+        newvars = newvars.reshape((Ngen, Npop, Nnew))
+
+        # Extend new variables to be the same length as the stored selection
+        Nthin = self._thin_point.shape[0]
+        newvars = np.resize(newvars, (Nthin, Npop, Nnew))
+
+        # Add new variables to the points
+        self._thin_point = dstack((self._thin_point, newvars))
+
+        # Add labels for the new variables, if available.
+        if labels is not None:
+            self.labels = self.labels + labels
+        elif self._labels is not None:
+            labels = ["P%d" % i for i in range(Nvar, Nvar+Nnew)]
+            self.labels = self.labels + labels
+        else: # no labels specified, old or new
+            pass
+
+
+class Draw(object):
+    def __init__(self, state, vars=None, portion=None, selection=None):
+        self.state = state
+        self.vars = vars
+        self.portion = portion
+        self.selection = selection
+        self.points, self.logp \
+            = _sample(state, portion=portion, vars=vars, selection=selection)
+        self.labels \
+            = state.labels if vars is None else [state.labels[v] for v in vars]
+        self._stats = None
+        self.weights = None
+        self.num_vars = len(self.labels)
+
+
+def _sample(state, portion, vars, selection):
+    """
+    Return a sample from a set of chains.
+    """
+    draw, chains, logp = state.chains()
+    start = int((1-portion)*len(draw)) if portion else 0
+
+    # Collect the subset we are interested in
+    chains = chains[start:, state._good_chains, :]
+    logp = logp[start:, state._good_chains]
+
+    Ngen, Npop, Nvar = chains.shape
+    points = reshape(chains, (Ngen*Npop, Nvar))
+    logp = reshape(logp, (Ngen*Npop))
+    if selection not in [None, {}]:
+        idx = True
+        for v, r in selection.items():
+            if v == 'logp':
+                idx = idx & (logp>=r[0]) & (logp<=r[1])
+            else:
+                idx = idx & (points[:, v]>=r[0]) & (points[:, v]<=r[1])
+        points = points[idx, :]
+        logp = logp[idx]
+    if vars is not None:
+        points = points[:, vars]
+    return points, logp
+
+
+def test():
+    from numpy.linalg import norm
+    from numpy.random import rand
+    from numpy import arange
+
+    # Make some fake data
+    Nupdate, Nstep = 3, 5
+    Ngen = Nupdate*Nstep
+    Nvar, Npop, Ncr = 3, 6, 2
+    xin = rand(Ngen, Npop, Nvar)
+    pin = rand(Ngen, Npop)
+    accept = rand(Ngen, Npop) < 0.8
+    CRin = rand(Nupdate, Ncr)
+    Rin = rand(Nupdate, 1)
+    #thinning = 2
+    #Nthin = int(Ngen/thinning)
+
+    # Put it into a state
+    thinning = 2
+    Nthin = int(Ngen/thinning)
+    state = MCMCDraw(Ngen=Ngen, Nthin=Nthin, Nupdate=Nupdate,
+                     Nvar=Nvar, Npop=Npop, Ncr=Ncr, thinning=thinning)
+    for i in range(Nupdate):
+        state._update(R_stat=Rin[i], CR_weight=CRin[i])
+        for j in range(Nstep):
+            gen = i*Nstep+j
+            state._generation(new_draws=Npop, x=xin[gen],
+                              logp=pin[gen], accept=accept[gen])
+
+    # Check that it got there
+    draws, logp = state.logp()
+    assert norm(draws - Npop*arange(1, Ngen+1)) == 0
+    assert norm(logp - pin) == 0
+    draws, AR = state.acceptance_rate()
+    assert norm(draws - Npop*arange(1, Ngen+1)) == 0
+    assert norm(AR - 100*sum(accept, axis=1)/Npop) == 0
+    draws, logp = state.sample()
+    #assert norm(draws - thinning*Npop*arange(1, Nthin+1)) == 0
+    #assert norm(sample - xin[thinning-1::thinning]) == 0
+    #assert norm(logp - pin[thinning-1::thinning]) == 0
+    draws, R = state.R_stat()
+    assert norm(draws - Npop*Nstep*arange(Nupdate)) == 0
+    assert norm(R-Rin) == 0
+    draws, CR = state.CR_weight()
+    assert norm(draws - Npop*Nstep*arange(Nupdate)) == 0
+    assert norm(CR - CRin) == 0
+    x, p = state.best()
+    bestid = argmax(pin)
+    i, j = bestid//Npop, bestid%Npop
+    assert pin[i, j] == p
+    assert norm(xin[i, j, :]-x) == 0
+
+    # Check that outlier updates properly
+    state._replace_outlier(1, 2)
+    outliers = state.outliers()
+    draws, logp = state.sample()
+    assert norm(outliers -  asarray([[state._thin_index, 1, 2]])) == 0
+    #assert norm(sample[:, 1, :] - xin[thinning-1::thinning, 2, :]) == 0
+    #assert norm(sample[:, 2, :] - xin[thinning-1::thinning, 2, :]) == 0
+    #assert norm(logp[:, 1] - pin[thinning-1::thinning, 2]) == 0
+    #assert norm(logp[:, 2] - pin[thinning-1::thinning, 2]) == 0
+
+    from .stats import var_stats, format_vars
+    vstats = var_stats(state.draw())
+    print (format_vars(vstats))
+
+if __name__ == "__main__":
+    test()
diff --git a/bumps/dream/stats.py b/bumps/dream/stats.py
new file mode 100644
index 0000000..9541875
--- /dev/null
+++ b/bumps/dream/stats.py
@@ -0,0 +1,225 @@
+"""
+Statistics helper functions.
+"""
+
+__all__ = ["VarStats", "var_stats", "format_vars", "parse_var",
+           "stats", "credible_intervals"]
+
+import re
+
+import numpy as np
+
+from .formatnum import format_uncertainty
+
+
+class VarStats(object):
+    def __init__(self, **kw):
+        self.__dict__ = kw
+
+
+def var_stats(draw, vars=None):
+    if vars is None:
+        vars = range(draw.points.shape[1])
+    return [_var_stats_one(draw, v) for v in vars]
+
+
+ONE_SIGMA = 1 - 2*0.15865525393145705
+
+
+def _var_stats_one(draw, var):
+    weights, values = draw.weights, draw.points[:, var].flatten()
+
+    best_idx = np.argmax(draw.logp)
+    best = values[best_idx]
+
+    # Choose the interval for the histogram
+    #credible_interval = shortest_credible_interval
+    p95, p68, p0 = credible_intervals(x=values, weights=weights,
+                                      ci=[0.95, ONE_SIGMA, 0.0])
+    #open('/tmp/out','a').write(
+    #     "in vstats: p68=%s, p95=%s, p0=%s, value range=%s\n"
+    #     % (p68,p95,p0,(min(values),max(values))))
+    #if p0[0] != p0[1]: raise RuntimeError("wrong median %s"%(str(p0),))
+
+    mean, std = stats(x=values, weights=weights)
+
+    vstats = VarStats(label=draw.labels[var], index=var+1,
+                      p95=p95, p68=p68,
+                      median=p0[0], mean=mean, std=std, best=best)
+
+    return vstats
+
+
+def format_num(x, place):
+    precision = 10**place
+    digits_after_decimal = abs(place) if place < 0 else 0
+    return "%.*f" % (digits_after_decimal, np.round(x/precision)*precision)
+
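+# A small worked example of format_num (illustrative; values computed by
+# hand, not from a run): *place* selects the rounding digit as a power of 10.
+#
+#     format_num(123.456, -2)  # round to the nearest 0.01 -> "123.46"
+#     format_num(123.456, 1)   # round to the nearest 10   -> "120"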
+
+def format_vars(all_vstats):
+    v = dict(parameter="Parameter",
+             mean="mean", median="median", best="best",
+             interval68="68% interval",
+             interval95="95% interval")
+    s = ["   %(parameter)20s %(mean)10s %(median)7s %(best)7s "
+         "[%(interval68)15s] [%(interval95)15s]" % v]
+    for v in all_vstats:
+        # Make sure numbers are formatted with the appropriate precision
+        place = int(np.log10(v.p95[1]-v.p95[0]))-2
+        summary = dict(mean=format_uncertainty(v.mean, v.std),
+                       median=format_num(v.median, place-1),
+                       best=format_num(v.best, place-1),
+                       lo68=format_num(v.p68[0], place),
+                       hi68=format_num(v.p68[1], place),
+                       loci=format_num(v.p95[0], place),
+                       hici=format_num(v.p95[1], place),
+                       parameter=v.label,
+                       index=v.index)
+        s.append("%(index)2d %(parameter)20s %(mean)10s %(median)7s %(best)7s "
+                 "[%(lo68)7s %(hi68)7s] [%(loci)7s %(hici)7s]" % summary)
+
+    return "\n".join(s)
+
+
+VAR_PATTERN = re.compile(r"""
+   ^\ *
+   (?P<parnum>[0-9]+)\ +
+   (?P<parname>.+?)\ +
+   (?P<mean>[0-9.-]+?)
+   \((?P<err>[0-9]+)\)
+   (e(?P<exp>[+-]?[0-9]+))?\ +
+   (?P<median>[0-9.eE+-]+?)\ +
+   (?P<best>[0-9.eE+-]+?)\ +
+   \[\ *(?P<lo68>[0-9.eE+-]+?)\ +
+   (?P<hi68>[0-9.eE+-]+?)\]\ +
+   \[\ *(?P<lo95>[0-9.eE+-]+?)\ +
+   (?P<hi95>[0-9.eE+-]+?)\]
+   \ *$
+   """, re.VERBOSE)
+
+
+def parse_var(line):
+    """
+    Parse a line returned by format_vars back into the statistics for the
+    variable on that line.
+    """
+    m = VAR_PATTERN.match(line)
+    if m:
+        exp = int(m.group('exp')) if m.group('exp') else 0
+        return VarStats(index=int(m.group('parnum')),
+                        name=m.group('parname'),
+                        mean=float(m.group('mean')) * 10**exp,
+                        median=float(m.group('median')),
+                        best=float(m.group('best')),
+                        p68=(float(m.group('lo68')), float(m.group('hi68'))),
+                        p95=(float(m.group('lo95')), float(m.group('hi95'))),
+                        )
+    else:
+        return None
+
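+# A hypothetical round trip through format_vars/parse_var (illustrative only;
+# the numbers below are made up, not taken from a fit):
+#
+#     line = " 2        sample_offset  0.0124(13)  0.0123  0.0125 [ 0.0111  0.0137] [ 0.0099  0.0149]"
+#     v = parse_var(line)
+#     # v.index == 2, v.mean == 0.0124, v.p68 == (0.0111, 0.0137)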
+
+def stats(x, weights=None):
+    """
+    Find mean and standard deviation of a set of weighted samples.
+
+    Note that the median (computed separately via :func:`credible_intervals`
+    as the 0% interval) is not strictly correct: an endpoint of the sample is
+    chosen when the median falls between two values in the sample, but this
+    is good enough when the sample size is large.
+    """
+    if weights is None:
+        x = np.sort(x)
+        mean, std = np.mean(x), np.std(x, ddof=1)
+    else:
+        # weighted mean is sum(w x)/sum(w)
+        mean = np.sum(x*weights)/np.sum(weights)
+        # TODO: this is biased by selection of mean; need an unbiased formula
+        var = np.sum(weights*(x-mean)**2)/np.sum(weights)
+        std = np.sqrt(var)
+
+    return mean, std
+
+
+def credible_intervals(x, ci, weights=None):
+    """
+    Find the credible interval covering the portion *ci* of the data.
+
+    *x* are samples from the posterior distribution.
+
+    *ci* is a set of intervals in [0,1].  For a $1-\sigma$ interval use
+    *ci=erf(1/sqrt(2))*, or 0.68. About 1e5 samples are needed for 2 digits
+    of  precision on a $1-\sigma$ credible interval.  For a 95% interval,
+    about 1e6 samples are needed.
+
+    *weights* is a vector of weights for each x, or None for unweighted.
+    One could weight points according to temperature in a parallel tempering
+    dataset.
+
+    Returns an array *[[x1_low, x1_high], [x2_low, x2_high], ...]* where
+    *[xi_low, xi_high]* are the starting and ending values for credible
+    interval *i*.
+
+    This function is faster if the inputs are already sorted.
+    """
+    from numpy import asarray, vstack, sort, cumsum, searchsorted, round, clip
+
+    ci = asarray(ci, 'd')
+    target = (1 + vstack((-ci, +ci))).T/2
+
+    if weights is None:
+        idx = clip(round(target*(x.size-1)), 0, x.size-1).astype('i')
+        return sort(x)[idx]
+    else:
+        idx = np.argsort(x)
+        x, weights = x[idx], weights[idx]
+        # convert weights to cdf
+        w = cumsum(weights/sum(weights))
+        return x[searchsorted(w, target)]
+
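+# A minimal sketch of credible_intervals on synthetic data (illustrative; for
+# a standard normal the 68% interval is roughly [-1, 1] and the 95% interval
+# roughly [-2, 2]):
+#
+#     x = np.random.randn(100000)
+#     p68, p95 = credible_intervals(x, ci=[ONE_SIGMA, 0.95])
+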
+def shortest_credible_interval(x, ci=0.95, weights=None):
+    """
+    Find the shortest credible interval covering the portion *ci* of the data.
+
+    Returns the minimum and maximum values of the interval.  If *ci* is a
+    vector, return a vector of intervals.
+
+    *x* are samples from the posterior distribution.  This function is
+    faster if the inputs are already sorted.  About 1e6 samples are needed
+    for 2 digits of precision on a 95% credible interval, or 1e5 for
+    2 digits on a 1-sigma credible interval.
+
+    *ci* is the interval size in (0,1], and defaults to 0.95.  For a
+    1-sigma interval use *ci=erf(1/sqrt(2))*.
+
+    *weights* is a vector of weights for each x, or None for unweighted.
+    For log likelihood data, setting weights to exp(max(logp)-logp) should
+    give reasonable results.
+    """
+    is_sorted = np.all(x[1:] >= x[:-1])
+    if not is_sorted:
+        idx = np.argsort(x)
+        x = x[idx]
+        if weights is not None:
+            weights = weights[idx]
+
+    #  w = exp(max(logp)-logp)
+    if weights is not None:
+        # convert weights to cdf
+        w = np.cumsum(weights/sum(weights))
+        # sample the cdf at every 0.001
+        idx = np.searchsorted(w, np.arange(0,1,0.001))
+        x = x[idx]
+
+    # Simple solution: ci*N is the number of points in the interval, so
+    # find the width of every interval of that size and return the smallest.
+    if np.isscalar(ci):
+        return _find_interval(x, ci)
+    else:
+        return [_find_interval(x, i) for i in ci]
+
+def _find_interval(x, ci):
+    """
+    Find credible interval ci in sorted, unweighted x
+    """
+    n = len(x)
+    size = int( ci*n + np.sqrt(1-ci)*np.log(n) )
+    if size >= n:
+        return x[0],x[-1]
+    else:
+        width = x[size:] - x[:-size]
+        idx = np.argmin(width)
+        return x[idx],x[idx+size]
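+
+# Sketch of how the shortest interval differs from the equal-tailed interval
+# (illustrative): for a symmetric unimodal sample the two coincide, but for a
+# skewed sample such as draws from an exponential distribution the shortest
+# 95% interval starts near 0 while the equal-tailed one cuts off the lowest
+# 2.5% of the samples.
+#
+#     x = np.sort(np.random.exponential(size=100000))
+#     lo, hi = _find_interval(x, 0.95)   # lo is close to 0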
diff --git a/bumps/dream/tile.py b/bumps/dream/tile.py
new file mode 100644
index 0000000..1902e6a
--- /dev/null
+++ b/bumps/dream/tile.py
@@ -0,0 +1,55 @@
+"""
+Split a rectangle into n panes.
+"""
+from __future__ import division
+
+__all__ = ["max_tile_size"]
+
+import math
+
+
+def max_tile_size(tile_count, rect_size):
+    """
+    Determine the maximum tile size possible.
+
+    Keyword arguments:
+    tile_count -- Number of tiles to fit
+    rect_size -- 2-tuple of rectangle size as (width, height)
+    """
+
+    # If the rectangle is taller than it is wide, reverse its dimensions
+    if rect_size[0] < rect_size[1]:
+        rect_size = rect_size[1], rect_size[0]
+
+    # Rectangle aspect ratio
+    rect_ar = rect_size[0] / rect_size[1]
+
+    # tiles_max_height is the square root of tile_count, rounded up
+    tiles_max_height = int(math.ceil(math.sqrt(tile_count)))
+
+    best_tile_size = 0
+
+    # i in the range [1, tiles_max_height], inclusive
+    for i in range(1, tiles_max_height + 1):
+
+        # tiles_used is the arrangement of tiles (width, height)
+        tiles_used = math.ceil(tile_count / i), i
+
+        # tile_ar is the aspect ratio of this arrangement
+        tile_ar = tiles_used[0] / tiles_used[1]
+
+        # Calculate the size of each tile
+        # Tile pattern is flatter than rectangle
+        if tile_ar > rect_ar:
+            tile_size = rect_size[0] / tiles_used[0]
+        # Tile pattern is skinnier than rectangle
+        else:
+            tile_size = rect_size[1] / tiles_used[1]
+
+        # Check if this is the best answer so far
+        if tile_size > best_tile_size:
+            best_tile_size = tile_size
+
+    return best_tile_size
+
+# print(max_tile_size(6, (100, 100)))
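+# For example, max_tile_size(6, (100, 100)) returns about 33.3: a 3 x 2
+# arrangement of 33.3-unit tiles fills the 100 x 100 rectangle best
+# (worked out by hand, not from a recorded run).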
diff --git a/bumps/dream/util.py b/bumps/dream/util.py
new file mode 100644
index 0000000..482f08a
--- /dev/null
+++ b/bumps/dream/util.py
@@ -0,0 +1,90 @@
+"""
+Miscellaneous utilities.
+"""
+
+__all__ = ["draw", "console"]
+
+import numpy as np
+import numpy.random as rng
+
+
+def draw(k, n):
+    """
+    Select k things from a pool of n without replacement.
+    """
+    # At k == n/4, an extra 0.15*k draws are needed to get k unique draws
+    if k > n/4:
+        result = rng.permutation(n)[:k]
+    else:
+        s = set()
+        result = np.empty(k, 'i')
+        for i in range(k):
+            p = rng.randint(n)
+            while p in s:
+                p = rng.randint(n)
+            s.add(p)
+            result[i] = p
+    return result
+
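+# Example (illustrative): pick 5 distinct indices from a pool of 100.
+#
+#     idx = draw(5, 100)
+#     assert len(set(idx)) == 5 and all(0 <= i < 100 for i in idx)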
+
+def _check_uniform_draw():
+    """
+    Draws from history should be uniform across both the drawn number and
+    the draw position; plot the counts to check visually.
+    """
+    import pylab
+
+    k, n = 50, 400
+    counts = np.zeros(n*k)
+    idx = np.arange(k)
+    for _ in range(100000):
+        t = draw(k, n)
+        counts[k*t+idx] += 1
+    pylab.subplot(211)
+    pylab.pcolormesh(np.reshape(counts, (n, k)))
+    pylab.colorbar()
+    pylab.title('drawn number vs. draw position')
+    pylab.subplot(212)
+    pylab.hist(counts)
+    pylab.title('number of draws per (number,position) bin')
+    pylab.show()
+
+
+def console():
+    """
+    Start the python console with the local variables available.
+
+    console() should be the last thing in the file, after sampling and
+    showing the default plots.
+    """
+    import os
+    import sys
+
+    # Hack for eclipse console: can't actually run ipython in the eclipse
+    # console and get it to plot, so instead guess whether we are in a
+    # console by checking if we are attached to a proper tty through stdin.
+    # For eclipse, just show the plots.
+    try:
+        tty = os.isatty(sys.stdin.fileno())
+    except Exception:
+        tty = False
+
+    if tty:
+        # Display outstanding plots and turn interactive on
+        from matplotlib import interactive
+        from matplotlib._pylab_helpers import Gcf
+        for fig in Gcf.get_all_fig_managers():
+            try:  # CRUFT
+                fig.show()
+            except AttributeError:
+                fig.frame.Show()
+        interactive(True)
+
+        # Start an ipython shell with the caller's local variables
+        import IPython
+        symbols = sys._getframe(1).f_locals
+        ip = IPython.Shell.IPShell(user_ns=symbols)
+        ip.mainloop()
+    else:
+        # Not a tty; try doing show() anyway
+        import pylab
+        pylab.show()
diff --git a/bumps/dream/views.py b/bumps/dream/views.py
new file mode 100644
index 0000000..74cc7da
--- /dev/null
+++ b/bumps/dream/views.py
@@ -0,0 +1,375 @@
+"""
+MCMC plotting methods.
+"""
+from __future__ import division, print_function
+
+__all__ = ['plot_all', 'plot_corr', 'plot_corrmatrix',
+           'plot_trace', 'plot_vars', 'plot_var',
+           'plot_R', 'plot_logp', 'format_vars']
+
+import math
+
+import numpy as np
+from numpy import arange, squeeze, linspace, meshgrid, vstack, inf
+from scipy.stats import kde
+
+from . import corrplot
+from .formatnum import format_value
+from .stats import var_stats, format_vars
+
+def plot_all(state, portion=1.0, figfile=None):
+    from pylab import figure, savefig, suptitle, rcParams
+    figext = '.'+rcParams.get('savefig.format', 'png')
+
+    draw = state.draw(portion=portion)
+    all_vstats = var_stats(draw)
+    figure()
+    plot_vars(draw, all_vstats)
+    if state.title:
+        suptitle(state.title)
+    print(format_vars(all_vstats))
+    if figfile is not None:
+        savefig(figfile+"-vars"+figext)
+    figure()
+    plot_trace(state, portion=portion)
+    if state.title:
+        suptitle(state.title)
+    if figfile is not None:
+        savefig(figfile+"-trace"+figext)
+    # Suppress R stat for now
+    #figure()
+    #plot_R(state, portion=portion)
+    #if state.title:
+    #    suptitle(state.title)
+    #if figfile is not None:
+    #    savefig(figfile+"-R"+format)
+    figure()
+    plot_logp(state, portion=portion)
+    if state.title:
+        suptitle(state.title)
+    if figfile is not None:
+        savefig(figfile+"-logp"+figext)
+    if draw.num_vars <= 25:
+        figure()
+        plot_corrmatrix(draw)
+        if state.title:
+            suptitle(state.title)
+        if figfile is not None:
+            savefig(figfile+"-corr"+figext)
+
+
+def plot_vars(draw, all_vstats, **kw):
+    from pylab import subplot, clf
+
+    clf()
+    nw, nh = tile_axes(len(all_vstats))
+    cbar = _make_fig_colorbar(draw.logp)
+    for k, vstats in enumerate(all_vstats):
+        subplot(nw, nh, k+1)
+        plot_var(draw, vstats, k, cbar, **kw)
+
+
+def tile_axes(n, size=None):
+    """
+    Determine the tile layout (nw, nh) for *n* axes, covering as much of
+    the figure as possible while keeping each plot's shape near the golden
+    ratio.
+    """
+    from pylab import gcf
+    if size is None:
+        size = gcf().get_size_inches()
+    figwidth, figheight = size
+    # Golden ratio phi is the preferred dimension
+    #    phi = sqrt(5)/2
+    #
+    # nw, nh is the number of tiles across and down respectively
+    # w, h are the sizes of the tiles
+    #
+    # w,h = figwidth/nw, figheight/nh
+    #
+    # To achieve the golden ratio, set w/h to phi:
+    #     w/h = phi  => figwidth/figheight*nh/nw = phi
+    #                => nh/nw = phi * figheight/figwidth
+    # Must have enough tiles:
+    #     nh*nw > n  => nw > n/nh
+    #                => nh**2 > n * phi * figheight/figwidth
+    #                => nh = floor(sqrt(n*phi*figheight/figwidth))
+    #                => nw = ceil(n/nh)
+    phi = math.sqrt(5)/2
+    nh = int(math.floor(math.sqrt(n*phi*figheight/figwidth)))
+    if nh < 1:
+        nh = 1
+    nw = int(math.ceil(n/nh))
+    return nw, nh
+
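+# Worked example of the layout calculation above (by hand, not from a run):
+# for n=10 axes on an 8 x 6 inch figure, phi*figheight/figwidth ~ 0.84, so
+# nh = floor(sqrt(10*0.84)) = 2 and nw = ceil(10/2) = 5, a 5 x 2 tiling.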
+
+def plot_var(draw, vstats, var, cbar, nbins=30):
+    values = draw.points[:, var].flatten()
+    _make_logp_histogram(values, draw.logp, nbins, vstats.p95,
+                         draw.weights, cbar)
+    _decorate_histogram(vstats)
+
+
+def _decorate_histogram(vstats):
+    import pylab
+    from matplotlib.transforms import blended_transform_factory as blend
+    # Shade things inside 1-sigma
+    pylab.axvspan(vstats.p68[0], vstats.p68[1],
+                  color='gold', alpha=0.5, zorder=-1)
+    # build transform with x=data, y=axes(0,1)
+    ax = pylab.gca()
+    transform = blend(ax.transData, ax.transAxes)
+
+    l95, h95 = vstats.p95
+    l68, h68 = vstats.p68
+
+    def marker(symbol, position):
+        if position < l95:
+            symbol, position, ha = '<'+symbol, l95, 'left'
+        elif position > h95:
+            symbol, position, ha = '>'+symbol, h95, 'right'
+        else:
+            symbol, position, ha = symbol, position, 'center'
+        pylab.text(position, 0.95, symbol, va='top', ha=ha,
+                   transform=transform, zorder=3, color='g')
+        #pylab.axvline(v)
+
+    marker('|', vstats.median)
+    marker('E', vstats.mean)
+    marker('*', vstats.best)
+
+    pylab.text(0.01, 0.95, vstats.label, zorder=2,
+               backgroundcolor=(1, 1, 0, 0.2),
+               verticalalignment='top',
+               horizontalalignment='left',
+               transform=pylab.gca().transAxes)
+    pylab.setp([pylab.gca().get_yticklabels()], visible=False)
+    ticks = (l95, l68, vstats.median, h68, h95)
+    labels = [format_value(v, h95-l95) for v in ticks]
+    if len(labels[2]) > 5:
+        # Drop 68% values if too many digits
+        ticks, labels = ticks[0::2], labels[0::2]
+    pylab.xticks(ticks, labels)
+
+
+def _make_fig_colorbar(logp):
+    import matplotlib as mpl
+    import pylab
+
+    # Option 1: min to min + 4
+    #vmin=-max(logp); vmax=vmin+4
+    # Option 1b: min to min log10(num samples)
+    #vmin=-max(logp); vmax=vmin+log10(len(logp))
+    # Option 2: full range of best 98%
+    snllf = pylab.sort(-logp)
+    vmin, vmax = snllf[0], snllf[int(0.98*(len(snllf)-1))]  # robust range
+    # Option 3: full range
+    #vmin,vmax = -max(logp),-min(logp)
+
+    fig = pylab.gcf()
+    ax = fig.add_axes([0.60, 0.95, 0.35, 0.05])
+    cmap = mpl.cm.copper
+
+    # Set the colormap and norm to correspond to the data for which
+    # the colorbar will be used.
+    norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
+
+    # ColorbarBase derives from ScalarMappable and puts a colorbar
+    # in a specified axes, so it has everything needed for a
+    # standalone colorbar.  There are many more kwargs, but the
+    # following gives a basic continuous colorbar with ticks
+    # and labels.
+    class MinDigitsFormatter(mpl.ticker.Formatter):
+        def __init__(self, low, high):
+            self.delta = high - low
+
+        def __call__(self, x, pos=None):
+            return format_value(x, self.delta)
+
+    ticks = (vmin, vmax)
+    formatter = MinDigitsFormatter(vmin, vmax)
+    cb = mpl.colorbar.ColorbarBase(ax, cmap=cmap, norm=norm, 
+                                   ticks=ticks, format=formatter,
+                                   orientation='horizontal')
+    #cb.set_ticks(ticks)
+    #cb.set_ticklabels(labels)
+    #cb.set_label('negative log likelihood')
+
+    return vmin, vmax, cmap
+
+
+def _make_logp_histogram(values, logp, nbins, ci, weights, cbar):
+    from numpy import (ones_like, searchsorted, linspace, cumsum, diff, 
+                       argsort, array, hstack, exp)
+    if weights is None:
+        weights = ones_like(logp)
+    # TODO: values are being sorted to collect stats and again to plot
+    idx = argsort(values)
+    values, weights, logp = values[idx], weights[idx], logp[idx]
+    #open('/tmp/out','a').write("ci=%s, range=%s\n"
+    #                           % (ci,(min(values),max(values))))
+    edges = linspace(ci[0], ci[1], nbins+1)
+    idx = searchsorted(values, edges)
+    weightsum = cumsum(weights)
+    heights = diff(weightsum[idx])/weightsum[-1]  # normalized weights
+
+    import pylab
+    vmin, vmax, cmap = cbar
+    cmap_steps = linspace(vmin, vmax, cmap.N+1)
+    bins = []  # marginalized maximum likelihood
+    for h, s, e, xlo, xhi \
+            in zip(heights, idx[:-1], idx[1:], edges[:-1], edges[1:]):
+        if s == e:
+            continue
+        pv = -logp[s:e]
+        pidx = argsort(pv)
+        pw = weights[s:e][pidx]
+        x = array([xlo, xhi], 'd')
+        y = hstack((0, cumsum(pw)))
+        z = pv[pidx][:, None]
+        # centerpoint, histogram height, maximum likelihood for each bin
+        bins.append(((xlo+xhi)/2, y[-1], exp(vmin-z[0])))
+        if len(z) > cmap.N:
+            # downsample histogram bar according to number of colors
+            pidx = searchsorted(z[1:-1].flatten(), cmap_steps)
+            if pidx[-1] < len(z)-1:
+                pidx = hstack((pidx, -1))
+            y, z = y[pidx], z[pidx]
+        pylab.pcolormesh(x, y, z, vmin=vmin, vmax=vmax, hold=True, cmap=cmap)
+    centers, height, maxlikelihood = array(bins).T
+    # Normalize maximum likelihood plot so it contains the same area as the
+    # histogram, unless it is really spikey, in which case make sure it has
+    # about the same height as the histogram.
+    maxlikelihood *= np.sum(height)/np.sum(maxlikelihood)
+    hist_peak = np.max(height)
+    ml_peak = np.max(maxlikelihood)
+    if ml_peak > hist_peak*1.3:
+        maxlikelihood *= hist_peak*1.3/ml_peak
+    pylab.plot(centers, maxlikelihood, '-g', hold=True)
+
+
+def _make_var_histogram(values, logp, nbins, ci, weights):
+    # Produce a histogram
+    hist, bins = np.histogram(values, bins=nbins, range=ci,
+                              #new=True,
+                              normed=True, weights=weights)
+
+    # Find the max likelihood for values in each bin
+    edges = np.searchsorted(values, bins)
+    histbest = [np.max(logp[edges[i]:edges[i+1]])
+                if edges[i] < edges[i+1] else -inf
+                for i in range(nbins)]
+
+    # scale to marginalized probability with peak the same height as hist
+    histbest = np.exp(np.asarray(histbest) - max(logp)) * np.max(hist)
+
+    import pylab
+    # Plot the histogram
+    pylab.bar(bins[:-1], hist, width=bins[1]-bins[0])
+
+    # Plot the kernel density estimate
+    #density = KDE1D(values)
+    #x = linspace(bins[0],bins[-1],100)
+    #pylab.plot(x, density(x), '-k', hold=True)
+
+    # Plot the marginal maximum likelihood
+    centers = (bins[:-1]+bins[1:])/2
+    pylab.plot(centers, histbest, '-g', hold=True)
+
+
+def plot_corrmatrix(draw):
+    c = corrplot.Corr2d(draw.points.T, bins=50, labels=draw.labels)
+    c.plot()
+    #print "Correlation matrix\n",c.R()
+
+
+class KDE1D(kde.gaussian_kde):
+    covariance_factor = lambda self: 2*self.silverman_factor()
+
+
+class KDE2D(kde.gaussian_kde):
+    covariance_factor = kde.gaussian_kde.silverman_factor
+
+    def __init__(self, dataset):
+        kde.gaussian_kde.__init__(self, dataset.T)
+
+    def evalxy(self, x, y):
+        grid_x, grid_y = meshgrid(x, y)
+        dxy = self.evaluate(vstack([grid_x.flatten(), grid_y.flatten()]))
+        return dxy.reshape(grid_x.shape)
+
+    __call__ = evalxy
+
+
+def plot_corr(draw, vars=(0, 1)):
+    from pylab import axes, setp, MaxNLocator
+
+    _, _ = vars  # Make sure vars is length 2
+    labels = [draw.labels[v] for v in vars]
+    values = [draw.points[:, v] for v in vars]
+
+    # Form kernel density estimates of the parameters
+    xmin, xmax = min(values[0]), max(values[0])
+    density_x = KDE1D(values[0])
+    x = linspace(xmin, xmax, 100)
+    px = density_x(x)
+
+    density_y = KDE1D(values[1])
+    ymin, ymax = min(values[1]), max(values[1])
+    y = linspace(ymin, ymax, 100)
+    py = density_y(y)
+
+    nbins = 50
+    ax_data = axes([0.1, 0.1, 0.63, 0.63])  # x,y,w,h
+
+    #density_xy = KDE2D(values[vars])
+    #dxy = density_xy(x,y)*points.shape[0]
+    #ax_data.pcolorfast(x,y,dxy,cmap=cm.gist_earth_r) #@UndefinedVariable
+
+    ax_data.plot(values[0], values[1], 'k.', markersize=1)
+    ax_data.set_xlabel(labels[0])
+    ax_data.set_ylabel(labels[1])
+    ax_hist_x = axes([0.1, 0.75, 0.63, 0.2], sharex=ax_data)
+    ax_hist_x.hist(values[0], nbins, orientation='vertical', normed=1)
+    ax_hist_x.plot(x, px, 'k-')
+    ax_hist_x.yaxis.set_major_locator(MaxNLocator(4, prune="both"))
+    setp(ax_hist_x.get_xticklabels(), visible=False,)
+    ax_hist_y = axes([0.75, 0.1, 0.2, 0.63], sharey=ax_data)
+    ax_hist_y.hist(values[1], nbins, orientation='horizontal', normed=1)
+    ax_hist_y.plot(py, y, 'k-')
+    ax_hist_y.xaxis.set_major_locator(MaxNLocator(4, prune="both"))
+    setp(ax_hist_y.get_yticklabels(), visible=False)
+
+
+def plot_trace(state, var=0, portion=None):
+    from pylab import plot, title, xlabel, ylabel
+
+    draw, points, _ = state.chains()
+    start = int((1-portion)*len(draw)) if portion else 0
+    plot(arange(start, len(points))*state.thinning,
+         squeeze(points[start:, state._good_chains, var]))
+    title('Parameter history for variable %d' % (var+1,))
+    xlabel('Generation number')
+    ylabel('Parameter value')
+
+
+def plot_R(state, portion=None):
+    from pylab import plot, title, legend, xlabel, ylabel
+
+    draw, R = state.R_stat()
+    start = int((1-portion)*len(draw)) if portion else 0
+    plot(arange(start, len(R)), R[start:])
+    title('Convergence history')
+    legend(['P%d' % i for i in range(1, R.shape[1]+1)])
+    xlabel('Generation number')
+    ylabel('R')
+
+
+def plot_logp(state, portion=None):
+    from pylab import plot, title, xlabel, ylabel
+
+    draw, logp = state.logp()
+    start = int((1-portion)*len(draw)) if portion else 0
+    plot(arange(start, len(logp)), logp[start:], ',', markersize=1)
+    title('Log Likelihood History')
+    xlabel('Generation number')
+    ylabel('Log likelihood at x[k]')
diff --git a/bumps/dream/walk.py b/bumps/dream/walk.py
new file mode 100644
index 0000000..833cbb6
--- /dev/null
+++ b/bumps/dream/walk.py
@@ -0,0 +1,118 @@
+# This program is in the public domain
+# Author: Paul Kienzle
+"""
+Random walk functions.
+
+:func:`walk` simulates a mean-reverting random walk.
+"""
+# This code was developed to test outlier detection
+from __future__ import division
+
+__all__ = ["walk"]
+
+from numpy import asarray, ones_like, NaN, isnan
+
+from . import util
+
+
+def walk(n=1000, mu=0, sigma=1, alpha=0.01, s0=NaN):
+    """
+    Mean reverting random walk.
+
+    Returns an array of n-1 steps in the following process::
+
+        s[i] = s[i-1] + alpha*(mu-s[i-1]) + e[i]
+
+    with e ~ N(0,sigma).
+
+    The parameters are::
+
+        *n* walk length
+        *s0* starting value, defaults to N(mu,sigma)
+        *mu* target mean, defaults to 0
+        *sigma* volatility
+        *alpha* in [0,1] reversion rate
+
+    Use alpha=0 for a pure Gaussian random walk, or alpha=1 for independent
+    samples about the mean.
+
+    If *mu* is a vector, multiple streams are run in parallel.  In this
+    case *s0*, *sigma* and *alpha* can either be scalars or vectors.
+
+    If *mu* is an array, the target value is non-stationary, and the
+    parameter *n* is ignored.
+
+    Note: the default starting value should be selected from a distribution
+    whose width depends on alpha.  N(mu,sigma) is too narrow.  This
+    effect is illustrated in :func:`demo`, where the following choices
+    of sigma and alpha give approximately the same histogram::
+
+        sigma = [0.138, 0.31, 0.45, 0.85, 1]
+        alpha = [0.01,  0.05, 0.1,  0.5,  1]
+    """
+    s0, mu, sigma, alpha = [asarray(v) for v in (s0, mu, sigma, alpha)]
+    nchains = mu.shape[0] if mu.ndim > 0 else 1
+
+    if mu.ndim < 2:
+        if isnan(s0):
+            s0 = mu + util.rng.randn(nchains)*sigma
+        s = [s0*ones_like(mu)]
+        for i in range(n-1):
+            s.append(s[-1] + alpha*(mu-s[-1]) + sigma*util.rng.randn(nchains))
+    elif mu.ndim == 2:
+        if isnan(s0):
+            s0 = mu[0] + util.rng.randn(nchains)*sigma
+        s = [s0*ones_like(mu[0])]
+        for i in range(mu.shape[1]):
+            s.append(s[-1] + alpha*(mu[i]-s[-1])
+                     + sigma*util.rng.randn(nchains))
+    else:
+        raise ValueError("mu must be scalar, vector or 2D array")
+    return asarray(s)
+
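+# Sketch of why the sigma/alpha pairs quoted in the docstring give similar
+# histograms (standard AR(1) result, worked out here rather than quoted from
+# the source): the stationary standard deviation of the walk is
+#
+#     sigma / sqrt(alpha*(2 - alpha))
+#
+# which is approximately 1 for each (sigma, alpha) pair listed, so the
+# chains in demo() have roughly the same spread.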
+
+def demo():
+    """
+    Example showing the relationship between alpha and sigma in the random
+    walk posterior distribution.
+
+    The lag 1 autocorrelation coefficient R^2 is approximately 1-alpha.
+    """
+    from numpy import mean, std, sum
+    import pylab
+    from matplotlib.ticker import MaxNLocator
+    pylab.seed(10)  # Pick a pretty starting point
+
+    # Generate chains
+    n = 5000
+    mu = [0, 5, 10, 15, 20]
+    sigma = [0.138, 0.31, 0.45, 0.85, 1]
+    alpha = [0.01, 0.05, 0.1, 0.5, 1]
+    chains = walk(n, mu=mu, sigma=sigma, alpha=alpha)
+
+    # Compute lag 1 correlation coefficient
+    m, s = mean(chains, axis=0), std(chains, ddof=1, axis=0)
+    r2 = sum((chains[1:]-m)*(chains[:-1]-m), axis=0) / ((n-2)*s**2)
+    r2[abs(r2) < 0.01] = 0
+
+    # Plot chains
+    ax_data = pylab.axes([0.05, 0.05, 0.65, 0.9])  # x,y,w,h
+    ax_data.plot(chains)
+    textkw = dict(xytext=(30, 0), textcoords='offset points',
+                  verticalalignment='center',
+                  backgroundcolor=(0.8, 0.8, 0.8, 0.8))
+    label = r'$\ \alpha\,%.2f\ \ \sigma\,%.3f\ \ ' \
+            r'R^2\,%.2f\ \ avg\,%.2f\ \ std\,%.2f\ $'
+    for m, s, a, r2, em, es in zip(mu, sigma, alpha, r2, m, s):
+        pylab.annotate(label % (a, s, r2, em-m, es), xy=(0, m), **textkw)
+
+    # Plot histogram
+    ax_hist = pylab.axes([0.75, 0.05, 0.2, 0.9], sharey=ax_data)
+    ax_hist.hist(chains.flatten(), 100, orientation='horizontal')
+    pylab.setp(ax_hist.get_yticklabels(), visible=False)
+    ax_hist.xaxis.set_major_locator(MaxNLocator(3))
+
+    pylab.show()
+
+if __name__ == "__main__":
+    demo()
diff --git a/bumps/errplot.py b/bumps/errplot.py
new file mode 100644
index 0000000..a58a3d9
--- /dev/null
+++ b/bumps/errplot.py
@@ -0,0 +1,133 @@
+"""
+Estimate model uncertainty from random sample.
+
+MCMC uncertainty analysis gives the uncertainty on the model parameters
+rather than the model itself.  For example, when fitting a line to a set
+of data, the uncertainty on the slope and the intercept does not directly
+give you the uncertainty in the expected value of *y* for a given value
+of *x*.
+
+The routines in bumps.errplot allow you to generate confidence intervals
+on  the model using a random sample of MCMC parameters.  After calculating
+the model *y* values for each sample, one can generate 68% and 95% contours
+for a set of sampling points *x*.  This can apply even to models which
+are not directly measured.  For example, in scattering inverse problems
+the scattered intensity is the value measured, but the fitting parameters
+describe the real space model that is being probed.  It is the uncertainty
+in the real space model that is of primary interest.
+
+Since bumps knows only the probability of seeing the measured value given
+the input parameters, it is up to the model itself to calculate and display
+the confidence intervals on the model and the expected values for the data
+points.  This is done using the :mod:`bumps.plugin` architecture, so
+application writers can provide the appropriate functions for their data
+types.  Eventually this capability will move to the model definition so
+that different types of models can be processed in the same fit.
+
+For a completed MCMC run, four steps are required:
+
+#. reload the fitting problem and the MCMC state
+#. select a set of sample points
+#. evaluate model confidence intervals from sample points
+#. show model confidence intervals
+
+:func:`reload_errors` performs steps 1, 2 and 3, returning *errs*.
+If the fitting problem and the MCMC state are already loaded, then use
+:func:`calc_errors_from_state` to perform steps 2 and 3, returning *errs*.
+If alternative sampling is desired, then use :func:`calc_errors` on a
+given set of points to perform step 3, returning *errs*.  Once *errs* has
+been calculated and returned by one of these methods, call
+:func:`show_errors` to perform step 4.
+"""
+__all__ = ["reload_errors", "calc_errors_from_state", "calc_errors",
+           "show_errors"]
+import os
+
+import numpy as np
+
+from .dream.state import load_state
+from . import plugin
+from .cli import load_model, load_best
+
+def reload_errors(model, store, nshown=50, random=True):
+    """
+    Reload the MCMC state and compute the model confidence intervals.
+
+    The loaded error data is a sample from the fit space according to the
+    fit parameter uncertainty.  This is a subset of the samples returned
+    by the DREAM MCMC sampling process.
+
+    *model* is the name of the model python file
+
+    *store* is the name of the store directory containing the dream results
+
+    *nshown* and *random* are as for :func:`calc_errors_from_state`.
+
+    Returns *errs* for :func:`show_errors`.
+    """
+    problem = load_model(model)
+    load_best(problem, os.path.join(store, model[:-3] + ".par"))
+    state = load_state(os.path.join(store, model[:-3]))
+    state.mark_outliers()
+    return calc_errors_from_state(problem, state,
+                                  nshown=nshown, random=random)
+
+
+def calc_errors_from_state(problem, state, nshown=50, random=True):
+    """
+    Compute confidence regions for a problem from the MCMC state.
+
+    Align the sample profiles and compute the residual difference from
+    the measured data for a set of points returned from DREAM.
+
+    *nshown* is the number of samples to include from the state.
+
+    *random* is True if the samples are randomly selected, or False if
+    the most recent samples should be used.  Use random if you have
+    poor mixing (i.e., the parameters tend to stay fixed from generation
+    to generation), but not random if your burn-in was too short, and
+    you want to select from the end.
+
+    Returns *errs* for :func:`show_errors`.
+    """
+    points, _logp = state.sample()
+    if points.shape[0] < nshown:
+        nshown = points.shape[0]
+    # randomize the draw; skip the last point since state.keep_best() put
+    # the best point at the end.
+    if random:
+        points = points[np.random.permutation(len(points) - 1)]
+    return calc_errors(problem, points[-nshown:-1])
+
+
+def calc_errors(problem, points):
+    """
+    Align the sample profiles and compute the residual difference from the
+    measured data for a set of points.
+
+    The return value is arbitrary.  It is passed to the :func:`show_errors`
+    plugin for the application.
+
+    Returns *errs* for :func:`show_errors`.
+    """
+    original = problem.getp()
+    try:
+        ret = plugin.calc_errors(problem, points)
+    except Exception:
+        import traceback, logging
+        info = ["error calculating distribution on model",
+                traceback.format_exc()]
+        logging.error("\n".join(info))
+        ret = None
+    finally:
+        problem.setp(original)
+    return ret
+
+
+def show_errors(errs):
+    """
+    Display the confidence regions returned by :func:`calc_errors`.
+
+    The content of *errs* depends on the active plugin.
+    """
+    return plugin.show_errors(errs)
+
+
diff --git a/bumps/fitproblem.py b/bumps/fitproblem.py
new file mode 100644
index 0000000..c49984d
--- /dev/null
+++ b/bumps/fitproblem.py
@@ -0,0 +1,641 @@
+"""
+Interface between the models and the fitters.
+
+:class:`Fitness` defines the interface that model evaluators can follow.
+These models can be bundled together into a :func:`FitProblem` and sent
+to :class:`bumps.fitters.FitDriver` for optimization and uncertainty
+analysis.
+"""
+from __future__ import division, with_statement
+
+__all__ = ['Fitness', 'FitProblem', 'load_problem',
+           'BaseFitProblem', 'MultiFitProblem']
+
+import sys
+
+import numpy as np
+from numpy import inf, isnan
+
+from . import parameter, bounds as mbounds
+from .formatnum import format_uncertainty
+
+# Abstract base class
+class Fitness(object):
+    """
+    Manage parameters, data, and theory function evaluation.
+
+    See :ref:`fitness` for a detailed explanation.
+    """
+    def parameters(self):
+        """
+        Return the parameters in the model.
+
+        Model parameters are a hierarchical structure of lists and
+        dictionaries.
+        """
+        raise NotImplementedError
+
+    def update(self):
+        """
+        Called when parameters have been updated.  Any cached values will need
+        to be cleared and the model reevaluated.
+        """
+        raise NotImplementedError
+
+    def numpoints(self):
+        """
+        Return the number of data points.
+        """
+        raise NotImplementedError
+
+    def nllf(self):
+        """
+        Return the negative log likelihood value of the current parameter set.
+        """
+        raise NotImplementedError
+
+    def resynth_data(self):
+        """
+        Generate fake data based on uncertainties in the real data.  For
+        Monte Carlo resynth-refit uncertainty analysis.  Bootstrapping?
+        """
+        raise NotImplementedError
+
+    def restore_data(self):
+        """
+        Restore the original data in the model (after resynth).
+        """
+        raise NotImplementedError
+
+    def residuals(self):
+        """
+        Return residuals for current theory minus data.
+
+        Used for Levenberg-Marquardt, and for plotting.
+        """
+        raise NotImplementedError
+
+    def save(self, basename):
+        """
+        Save the model to a file based on basename+extension.  The basename
+        may point to a directory on a remote machine; don't make any
+        assumptions about information stored on the server.  Return the set of
+        files saved so that the monitor software can make a pretty web page.
+        """
+        pass
+
+    def plot(self, view='linear'):
+        """
+        Plot the model to the current figure.  You only get one figure, but you
+        can make it as complex as you want.  This will be saved as a png on
+        the server, and composed onto a results web page.
+        """
+        pass
+
+
+def no_constraints():
+    """default constraints function for FitProblem"""
+    return 0
+
+
+# TODO: refactor FitProblem definition
+# deprecate the direct use of MultiFitProblem
+def FitProblem(*args, **kw):
+    """
+    Return a fit problem instance for the fitness function(s).
+
+    For an individual model:
+
+        *fitness* is a :class:`Fitness` instance.
+
+    For a set of models:
+
+        *models* is a sequence of :class:`Fitness` instances.
+
+        *weights* is an optional scale factor for each model
+
+        *freevars* is a :class:`parameter.FreeVariables` instance defining the
+        per-model parameter assignments.  See :ref:`freevariables` for details.
+
+
+    Additional parameters:
+
+        *name* name of the problem
+
+        *constraints* is a function which returns the negative log likelihood
+        of seeing the parameters independent from the fitness function.  Use
+        this for example to check for feasible regions of the search space, or
+        to add constraints that cannot be easily calculated per parameter.
+        Ideally, the constraints nllf will increase as you go farther from
+        the feasible region so that the fit will be directed toward feasible
+        values.
+
+        *soft_limit* is the constraints function cutoff, beyond which the
+        *penalty_nllf* will be used and *fitness* nllf will not be calculated.
+
+        *penalty_nllf* is the nllf to use for *fitness* when *constraints*
+        is greater than *soft_limit*.
+
+    The total nllf is the sum of the parameter nllf, the constraints nllf
+    and, depending on whether the constraints nllf is greater than
+    soft_limit, either the fitness nllf or the penalty nllf.
+    """
+    if len(args) > 0:
+        try:
+            models = list(args[0])
+        except TypeError:
+            models = args[0]
+        if isinstance(models, list):
+            return MultiFitProblem(models, *args[1:], **kw)
+        else:
+            return BaseFitProblem(*args, **kw)
+    else:
+        if 'fitness' in kw:
+            return BaseFitProblem(*args, **kw)
+        else:
+            return MultiFitProblem(*args, **kw)
+
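+# A minimal construction sketch (illustrative only; it assumes the Curve
+# wrapper from bumps.curve and the bumps.names convenience imports, and
+# x, y, dy stand for the measured data arrays):
+#
+#     from bumps.names import Curve, FitProblem
+#
+#     def line(x, m, b):
+#         return m*x + b
+#
+#     M = Curve(line, x, y, dy, m=2, b=2)
+#     M.m.range(0, 4)
+#     M.b.range(-5, 5)
+#     problem = FitProblem(M)
+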
+
+class BaseFitProblem(object):
+    """
+    See :func:`FitProblem`
+    """
+    def __init__(self, fitness, name=None, constraints=no_constraints,
+                 penalty_nllf=1e6, soft_limit=np.inf, partial=False):
+        self.constraints = constraints
+        self.fitness = fitness
+        self.partial = partial
+        if name is not None:
+            self.name = name
+        else:
+            try:
+                self.name = fitness.name
+            except AttributeError:
+                self.name = 'FitProblem'
+
+        self.soft_limit = soft_limit
+        self.penalty_nllf = penalty_nllf
+        self.model_reset()
+
+    # noinspection PyAttributeOutsideInit
+    def model_reset(self):
+        """
+        Prepare for the fit.
+
+        This sets the parameters and the bounds properties that the
+        solver is expecting from the fittable object.  We also compute
+        the degrees of freedom so that we can return a normalized fit
+        likelihood.
+
+        If the set of fit parameters changes, then model_reset must
+        be called.
+        """
+        # print self.model_parameters()
+        all_parameters = parameter.unique(self.model_parameters())
+        # print "all_parameters",all_parameters
+        self._parameters = parameter.varying(all_parameters)
+        # print "varying",self._parameters
+        self.bounded = [p for p in all_parameters
+                        if not isinstance(p.bounds, mbounds.Unbounded)]
+        self.dof = self.model_points()
+        if not self.partial:
+            self.dof -= len(self._parameters)
+        if self.dof <= 0:
+            raise ValueError("Need more data points than fitting parameters")
+        #self.constraints = pars.constraints()
+
+    def model_parameters(self):
+        """
+        Parameters associated with the model.
+        """
+        return self.fitness.parameters()
+
+    def model_points(self):
+        """
+        Number of data points associated with the model.
+        """
+        return self.fitness.numpoints()
+
+    def model_update(self):
+        """
+        Update the model according to the changed parameters.
+        """
+        if hasattr(self.fitness, 'update'):
+            self.fitness.update()
+
+    def model_nllf(self):
+        """
+        Negative log likelihood of seeing data given model.
+        """
+        return self.fitness.nllf()
+
+    def simulate_data(self, noise=None):
+        """Simulate data with added noise"""
+        self.fitness.simulate_data(noise=noise)
+
+    def resynth_data(self):
+        """Resynthesize data with noise from the uncertainty estimates."""
+        self.fitness.resynth_data()
+
+    def restore_data(self):
+        """Restore original data after resynthesis."""
+        self.fitness.restore_data()
+
+    def valid(self, pvec):
+        return all(v in p.bounds for p, v in zip(self._parameters, pvec))
+
+    def setp(self, pvec):
+        """
+        Set a new value for the parameters into the model, then call
+        model_update to signal that the model should be recalculated.
+
+        Use :meth:`valid` to check whether *pvec* lies within the fit
+        bounds before setting it.
+        """
+        # TODO: do we have to leave the model in an invalid state?
+        # WARNING: don't try to conditionally update the model
+        # depending on whether any model parameters have changed.
+        # For one thing, the model_update below probably calls
+        # the subclass MultiFitProblem.model_update, which signals
+        # the individual models.  Furthermore, some parameters may
+        # be related to others via expressions, and so a dependency
+        # tree needs to be generated.  Whether this is better than
+        # clicker() from SrFit I do not know.
+        for v, p in zip(pvec, self._parameters):
+            p.value = v
+        # TODO: setp_hook is a hack to support parameter expressions in sasview
+        # Don't depend on this existing long term.
+        setp_hook = getattr(self, 'setp_hook', no_constraints)
+        setp_hook()
+        self.model_update()
+
+    def getp(self):
+        """
+        Returns the current value of the parameter vector.
+        """
+        return np.array([p.value for p in self._parameters], 'd')
+
+    def bounds(self):
+        return np.array([p.bounds.limits for p in self._parameters], 'd').T
+
+    def randomize(self, n=None):
+        """
+        Generates a random model.
+
+        *randomize()* sets the model to a random value.
+
+        *randomize(n)* returns a population of *n* random models.
+
+        For indefinite bounds, the random population distribution is centered
+        on the initial value of the parameter, or on 1 if the initial value
+        is not finite.
+        """
+        # TODO: split into two: randomize and random_pop
+        if n is None:
+            self.setp(self.randomize(n=1)[0])
+            return   # Not returning anything since no n is requested
+
+        target = self.getp()
+        target[~np.isfinite(target)] = 1.
+        pop = [p.bounds.random(n, target=v)
+               for p, v in zip(self._parameters, target)]
+        return np.array(pop).T
+
+    def parameter_nllf(self):
+        """
+        Returns negative log likelihood of seeing parameters p.
+        """
+        s = sum(p.nllf() for p in self.bounded)
+        # print "; ".join("%s %g %g"%(p,p.value,p.nllf()) for p in
+        # self.bounded)
+        return s
+
+    def constraints_nllf(self):
+        """
+        Returns the cost of all constraints.
+        """
+        return self.constraints()
+
+    def parameter_residuals(self):
+        """
+        Returns the prior residuals for each bounded parameter.
+        """
+        return [p.residual() for p in self.bounded]
+
+    def residuals(self):
+        """
+        Return the model residuals.
+        """
+        return self.fitness.residuals()
+
+    def chisq(self):
+        """
+        Return sum squared residuals normalized by the degrees of freedom.
+
+        In the context of a composite fit, the reduced chisq on the individual
+        models only considers the points and the fitted parameters within
+        the individual model.
+
+        Note that this does not include cost factors due to constraints on
+        the parameters, such as sample_offset ~ N(0,0.01).
+        """
+        return np.sum(self.residuals() ** 2) / self.dof
+        # return 2*self.nllf()/self.dof
+
+    def nllf(self, pvec=None):
+        """
+        Compute the cost function for a new parameter set p.
+
+        Note that this is not simply the sum-squared residuals, but instead
+        is the negative log likelihood of seeing the data given the model plus
+        the negative log likelihood of seeing the model.  The individual
+        likelihoods are scaled by 1/max(P) so that normalization constants
+        can be ignored.
+
+        The model is not actually calculated if the parameter nllf plus the
+        constraint nllf are bigger than *soft_limit*, but instead it is
+        assigned a value of *penalty_nllf*.
+        """
+        if pvec is not None:
+            if self.valid(pvec):
+                self.setp(pvec)
+            else:
+                return inf
+
+        try:
+            if isnan(self.parameter_nllf()):
+                # TODO: make sure errors get back to the user
+                import logging
+                info = ["Parameter nllf is wrong"]
+                info += ["%s %g" %(p,p.nllf()) for p in self.bounded]
+                logging.error("\n  ".join(info))
+            pparameter = self.parameter_nllf()
+            pconstraint = self.constraints_nllf()
+            pmodel = (self.model_nllf()
+                      if pparameter + pconstraint <= self.soft_limit
+                      else self.penalty_nllf)
+            cost = pparameter + pconstraint + pmodel
+        except Exception:
+            # TODO: make sure errors get back to the user
+            import traceback, logging
+            info = (traceback.format_exc(),
+                    parameter.summarize(self._parameters))
+            logging.error("\n".join(info))
+            return inf
+        if isnan(cost):
+            # TODO: make sure errors get back to the user
+            # print "point evaluates to NaN"
+            # print parameter.summarize(self._parameters)
+            return inf
+        # print pvec, "cost",cost,"=",pparameter,"+",pconstraint,"+",pmodel
+        return cost
+
+    def __call__(self, pvec=None):
+        """
+        Problem cost function.
+
+        Returns the negative log likelihood scaled by DOF so that
+        the result looks like the familiar normalized chi-squared.  These
+        scale factors will not affect the value of the minimum, though some
+        care will be required when interpreting the uncertainty.
+        """
+        return 2 * self.nllf(pvec) / self.dof
+
+    def show(self):
+        print(parameter.format(self.model_parameters()))
+        print("[chisq=%s, nllf=%g]" % (self.chisq_str(), self.nllf()))
+        #print(self.summarize())
+
+    def summarize(self):
+        return parameter.summarize(self._parameters)
+
+    def labels(self):
+        return [p.name for p in self._parameters]
+
+    def save(self, basename):
+        if hasattr(self.fitness, 'save'):
+            self.fitness.save(basename)
+
+    def plot(self, p=None, fignum=None, figfile=None, view=None):
+        if not hasattr(self.fitness, 'plot'):
+            return
+
+        import pylab
+        if fignum is not None:
+            pylab.figure(fignum)
+        if p is not None:
+            self.setp(p)
+        self.fitness.plot(view=view)
+        pylab.text(0.01, 0.01, 'chisq=%s' % self.chisq_str(),
+                   transform=pylab.gca().transAxes)
+        if figfile is not None:
+            pylab.savefig(figfile + "-model.png", format='png')
+
+    def cov(self):
+        from . import lsqerror
+        H = lsqerror.hessian(self)
+        H, L = lsqerror.perturbed_hessian(H)
+        return lsqerror.chol_cov(L)
+
+    def stderr(self):
+        from . import lsqerror
+        c = self.cov()
+        return lsqerror.stderr(c), lsqerror.corr(c)
+
+    def __getstate__(self):
+        return (self.fitness, self.partial, self.name, self.penalty_nllf,
+                self.soft_limit, self.constraints)
+
+    def __setstate__(self, state):
+        self.fitness, self.partial, self.name, self.penalty_nllf, \
+            self.soft_limit, self.constraints = state
+        self.model_reset()
+
+    def chisq_str(self):
+        # TODO: remove unnecessary try-catch
+        try:
+            _, err = nllf_scale(self)
+            text = format_uncertainty(self.chisq(), err)
+            constraints = (self.parameter_nllf()
+                           + self.constraints_nllf())
+            if constraints > 0.:
+                text+= " constraints=%g"%constraints
+        except Exception:
+            # Otherwise indicate that chisq could not be calculated.
+            text = "--"
+
+        return text
+
+class MultiFitProblem(BaseFitProblem):
+    """
+    Weighted fits for multiple models.
+    """
+    def __init__(self, models, weights=None, name=None,
+                 constraints=no_constraints,
+                 soft_limit=np.inf, penalty_nllf=1e6,
+                 freevars=None):
+        self.partial = False
+        self.constraints = constraints
+        if freevars is None:
+            names = ["M%d" % i for i, _ in enumerate(models)]
+            freevars = parameter.FreeVariables(names=names)
+        self.freevars = freevars
+        self._models = [BaseFitProblem(m, partial=True) for m in models]
+        if weights is None:
+            weights = [1 for _ in models]
+        self.weights = weights
+        self.penalty_nllf = penalty_nllf
+        self.soft_limit = soft_limit
+        self.set_active_model(0)  # Set the active model to model 0
+        self.model_reset()
+        self.name = name
+
+    @property
+    def models(self):
+        """Iterate over models, with free parameters set from model values"""
+        for i, f in enumerate(self._models):
+            self.freevars.set_model(i)
+            yield f
+        # Restore the active model after cycling
+        self.freevars.set_model(self._active_model_index)
+
+    # noinspection PyAttributeOutsideInit
+    def set_active_model(self, i):
+        """Use free parameters from model *i*"""
+        self._active_model_index = i
+        self.active_model = self._models[i]
+        self.freevars.set_model(i)
+
+    def model_parameters(self):
+        """Return parameters from all models"""
+        pars = {'models': [f.model_parameters() for f in self.models]}
+        free = self.freevars.parameters()
+        if free:
+            pars['freevars'] = free
+        return pars
+
+    def model_points(self):
+        """Return number of points in all models"""
+        return sum(f.model_points() for f in self.models)
+
+    def model_update(self):
+        """Let all models know they need to be recalculated"""
+        # TODO: consider an "on changed" signal for model updates.
+        # The update function would be associated with model parameters
+        # rather than always recalculating everything.  This
+        # allows us to set up fits with 'fast' and 'slow' parameters,
+        # where the fit can quickly explore a subspace where the
+        # computation is cheap before jumping to a more expensive
+        # subspace.  SrFit does this.
+        for f in self.models:
+            f.model_update()
+
+    def model_nllf(self):
+        """Return cost function for all data sets"""
+        return sum(f.model_nllf() for f in self.models)
+
+    def constraints_nllf(self):
+        """Return the cost function for all constraints"""
+        return (sum(f.constraints_nllf() for f in self.models)
+                + BaseFitProblem.constraints_nllf(self))
+
+    def simulate_data(self, noise=None):
+        """Simulate data with added noise"""
+        for f in self.models:
+            f.simulate_data(noise=noise)
+
+    def resynth_data(self):
+        """Resynthesize data with noise from the uncertainty estimates."""
+        for f in self.models:
+            f.resynth_data()
+
+    def restore_data(self):
+        """Restore original data after resynthesis."""
+        for f in self.models:
+            f.restore_data()
+
+    def residuals(self):
+        resid = np.hstack([w * f.residuals()
+                              for w, f in zip(self.weights, self.models)])
+        return resid
+
+    def save(self, basename):
+        for i, f in enumerate(self.models):
+            f.save(basename + "-%d" % (i + 1))
+
+    def show(self):
+        for i, f in enumerate(self.models):
+            print("-- Model %d %s" % (i, f.name))
+            f.show()
+        print("[overall chisq=%s, nllf=%g]" % (self.chisq_str(), self.nllf()))
+
+    def plot(self, p=None, fignum=1, figfile=None, view=None):
+        import pylab
+        if p is not None:
+            self.setp(p)
+        for i, f in enumerate(self.models):
+            f.plot(fignum=i + fignum, view=view)
+            pylab.suptitle('Model %d - %s' % (i, f.name))
+            if figfile is not None:
+                pylab.savefig(figfile + "-model%d.png" % i, format='png')
+
+    # Note: restore default behaviour of getstate/setstate rather than
+    # inheriting from BaseFitProblem
+    def __getstate__(self):
+        return self.__dict__
+
+    def __setstate__(self, state):
+        self.__dict__ = state
+
+
+# TODO: consider adding nllf_scale to FitProblem.
+ONE_SIGMA=0.68268949213708585
+def nllf_scale(problem):
+    r"""
+    Return the scale factor for reporting the problem nllf as an approximate
+    normalized chisq, along with an associated "uncertainty".  The uncertainty
+    is the amount that chisq must change in order for the fit to be
+    significantly better.
+
+    From Numerical Recipes 15.6: *Confidence Limits on Estimated Model
+    Parameters*, the $1-\sigma$ contour in parameter space corresponds
+    to $\Delta\chi^2 = \text{invCDF}(1-\sigma,k)$ where
+    $1-\sigma \approx 0.6827$ and $k$ is the number of fitting parameters.
+    Since we are reporting the normalized $\chi^2$, this needs to be scaled
+    by the problem degrees of freedom, $n-k$, where $n$ is the number of
+    measurements.  To first approximation, the uncertainty in $\chi^2_N$
+    is $k/(n-k)$
+    """
+    dof = getattr(problem, 'dof', np.NaN)
+    if dof <= 0 or np.isnan(dof) or np.isinf(dof):
+        return 1., 0.
+    else:
+        #return 2./dof, 1./dof
+        from scipy.stats import chi2
+        npars = max(len(problem.getp()), 1)
+        return 2./dof, chi2.ppf(ONE_SIGMA, npars)/dof
+
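+# A worked example of the scale factors above (approximate, computed by
+# hand): with n=104 data points and k=4 fitted parameters, dof=100, so the
+# reported chisq scale is 2/dof = 0.02 and the uncertainty is
+# chi2.ppf(ONE_SIGMA, 4)/dof, roughly 4.7/100 = 0.047.
+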
+def load_problem(filename, options=None):
+    """
+    Load a problem definition from a python script file.
+
+    sys.argv is set to ``[file] + options`` within the context of the script.
+
+    The user must define ``problem=FitProblem(...)`` within the script.
+
+    Raises ValueError if the script does not define problem.
+    """
+    ctx = dict(__file__=filename, __name__="bumps_model")
+    old_argv = sys.argv
+    sys.argv = [filename] + options if options else [filename]
+    source = open(filename).read()
+    code = compile(source, filename, 'exec')
+    exec(code, ctx)
+    sys.argv = old_argv
+    try:
+        problem = ctx["problem"]
+    except KeyError:
+        raise ValueError(filename + " requires 'problem = FitProblem(...)'")
+
+    return problem
diff --git a/bumps/fitservice.py b/bumps/fitservice.py
new file mode 100644
index 0000000..a526081
--- /dev/null
+++ b/bumps/fitservice.py
@@ -0,0 +1,107 @@
+"""
+Fit job definition for the distributed job queue.
+"""
+from __future__ import print_function
+
+import os
+import sys
+import json
+import pickle
+
+from . import cli
+from . import __version__
+
+# Site configuration determines what kind of mapper to use
+# This should be true in cli.py as well
+from .mapper import MPMapper as Mapper
+from . import monitor
+from .fitters import FitDriver
+
+
+def fitservice(request):
+    import matplotlib
+    matplotlib.use('Agg')
+    # pyplot must be imported after the backend is selected; it is needed
+    # for the matplotlib.pyplot.show() call at the end of the fit.
+    import matplotlib.pyplot
+
+    path = os.getcwd()
+
+    service_version = __version__
+    request_version = str(request['version'])
+    if service_version != request_version:
+        raise ValueError('fitter version %s does not match request %s'
+                         % (service_version, request_version))
+
+    data = request['data']
+    model = str(data['package'])
+
+    service_model_version = __version__
+    request_model_version = str(data['version'])
+    if service_model_version != request_model_version:
+        raise ValueError('%s version %s does not match request %s'
+                         % (model, service_model_version, request_model_version))
+    options = pickle.loads(str(data['options']))
+    problem = pickle.loads(str(data['problem']))
+    problem.store = path
+    problem.output_path = os.path.join(path, 'model')
+
+    fitdriver = FitDriver(options.fit, problem=problem, **options)
+
+    fitdriver.mapper = Mapper.start_mapper(problem, options.args)
+    problem.show()
+    print("#", " ".join(sys.argv))
+    best, fbest = fitdriver.fit()
+    cli.save_best(fitdriver, problem, best)
+    matplotlib.pyplot.show()
+    return list(best), fbest
+
+
+class ServiceMonitor(monitor.TimedUpdate):
+
+    """
+    Display fit progress on the console
+    """
+
+    def __init__(self, problem, path, progress=60, improvement=60):
+        monitor.TimedUpdate.__init__(self, progress=progress,
+                                     improvement=improvement)
+        self.path = path
+        self.problem = problem
+        self.images = []
+
+    def show_progress(self, history):
+        p = self.problem.getp()
+        try:
+            self.problem.setp(history.point[0])
+            dof = self.problem.dof
+            summary = self.problem.summarize()
+        finally:
+            self.problem.setp(p)
+
+        status = {
+            "step":  history.step[0],
+            "cost":  history.value[0] / dof,
+            "pars":  history.point[0],
+        }
+        json_status = json.dumps(status)
+        open(os.path.join(self.path, 'status.json'), "wt").write(json_status)
+        status['table'] = summary
+        status['images'] = "\n".join('<img file="%s" alt="%s" />' % (f, f)
+                                     for f in self.images)
+        html_status = """\
+<html><body>
+Generation %(step)d, chisq %(cost)g
+<pre>
+%(table)s
+</pre>
+%(images)s
+</body></html>
+""" % status
+        open(os.path.join(self.path, 'status.html'), "wt").write(html_status)
+
+    def show_improvement(self, history):
+        import pylab
+
+        # print "step",history.step[0],"chisq",history.value[0]
+        self.problem.setp(history.point[0])
+        pylab.hold(False)
+        self.problem.plot(figfile=os.path.join(self.path, 'K'))
+        pylab.gcf().canvas.draw()
diff --git a/bumps/fitters.py b/bumps/fitters.py
new file mode 100644
index 0000000..f46fa4c
--- /dev/null
+++ b/bumps/fitters.py
@@ -0,0 +1,979 @@
+"""
+Interfaces to various optimizers.
+"""
+from __future__ import print_function, division
+
+import sys
+import time
+from copy import copy
+
+import numpy as np
+
+from . import monitor
+from . import initpop
+from . import lsqerror
+
+from .history import History
+from .formatnum import format_uncertainty
+from .fitproblem import nllf_scale
+
+from .dream import MCMCModel
+
+
+class ConsoleMonitor(monitor.TimedUpdate):
+    """
+    Display fit progress on the console
+    """
+    def __init__(self, problem, progress=1, improvement=30):
+        monitor.TimedUpdate.__init__(self, progress=progress,
+                                     improvement=improvement)
+        self.problem = problem
+
+    def show_progress(self, history):
+        scale, err = nllf_scale(self.problem)
+        chisq = format_uncertainty(scale*history.value[0], err)
+        print("step", history.step[0], "cost", chisq)
+        sys.stdout.flush()
+
+    def show_improvement(self, history):
+        # print "step",history.step[0],"chisq",history.value[0]
+        p = self.problem.getp()
+        try:
+            self.problem.setp(history.point[0])
+            print(self.problem.summarize())
+        finally:
+            self.problem.setp(p)
+        sys.stdout.flush()
+
+
+class StepMonitor(monitor.Monitor):
+    """
+    Collect information at every step of the fit and save it to a file.
+
+    *fid* is the file to save the information to
+    *fields* is the list of "step|time|value|point" fields to save
+
+    The point field should be last in the list.
+    """
+    FIELDS = ['step', 'time', 'value', 'point']
+
+    def __init__(self, problem, fid, fields=FIELDS):
+        if any(f not in self.FIELDS for f in fields):
+            raise ValueError("invalid monitor field")
+        self.problem = problem
+        self.fid = fid
+        self.fields = fields
+        self._pattern = "%%(%s)s\n" % (")s %(".join(fields))
+        fid.write("# " + ' '.join(fields) + '\n')
+
+    def config_history(self, history):
+        history.requires(time=1, value=1, point=1, step=1)
+
+    def __call__(self, history):
+        point = " ".join("%.15g" % v for v in history.point[0])
+        time = "%g" % history.time[0]
+        step = "%d" % history.step[0]
+        scale, _ = nllf_scale(self.problem)
+        value = "%.15g" % (scale * history.value[0])
+        out = self._pattern % dict(point=point, time=time,
+                                   value=value, step=step)
+        self.fid.write(out)
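+
+# Illustrative usage (a sketch, not part of the module documentation): log
+# each step of a fit to a text file by attaching a StepMonitor to the driver:
+#
+#     fid = open("fit.mon", "w")
+#     driver = FitDriver(fitclass=SimplexFit, problem=problem,
+#                        monitors=[StepMonitor(problem, fid)])
+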
+
+class MonitorRunner(object):
+    """
+    Adaptor which allows solvers to accept progress monitors.
+    """
+    def __init__(self, monitors, problem):
+        if monitors is None:
+            monitors = [ConsoleMonitor(problem)]
+        self.monitors = monitors
+        self.history = History(time=1, step=1, point=1, value=1,
+                               population_points=1, population_values=1)
+        for M in self.monitors:
+            M.config_history(self.history)
+        self._start = time.time()
+
+    def __call__(self, step, point, value,
+                 population_points=None, population_values=None):
+        self.history.update(time=time.time() - self._start,
+                            step=step, point=point, value=value,
+                            population_points=population_points,
+                            population_values=population_values)
+        for M in self.monitors:
+            M(self.history)
+
+
+class FitBase(object):
+    """
+    FitBase defines the interface from bumps models to the various fitting
+    engines available within bumps.
+
+    Each engine is defined in its own class with a specific set of attributes
+    and methods.
+
+    The *name* attribute is the name of the optimizer.  This is just a simple
+    string.
+
+    The *settings* attribute is a list of pairs (name, default), where the
+    names are defined as fields in FitOptions.  A best attempt should be
+    made to map the fit options for the optimizer to the standard fit options,
+    since each of these becomes a new command line option when running
+    bumps.  If that is not possible, then a new option should be added
+    to FitOptions.  A plugin architecture might be appropriate here, if
+    there are reasons why specific problem domains might need custom fitters,
+    but this is not yet supported.
+
+    Each engine takes a fit problem in its constructor.
+
+    The :meth:`solve` method runs the fit.  It accepts a
+    monitor to track updates, a mapper to distribute work and
+    key-value pairs defining the settings.
+
+    There are a number of optional methods for the fitting engines.  Basically,
+    all the methods in :class:`FitDriver` first check if they are specialized
+    in the fit engine before performing a default action.
+
+    The *load*/*save* methods load and save the fitter state in a given
+    directory with a specific base file name.  The fitter can choose a file
+    extension to add to the base name.  Some care is needed to be sure that
+    the extension doesn't collide with other extensions such as .mon for
+    the fit monitor.
+
+    The *plot* method shows any plots to help understand the performance of
+    the fitter, such as a convergence plot showing the range of values
+    in the population over time, as well as plots of the parameter uncertainty
+    if available.  The plot should work with the figure canvas it is given.
+
+    The *stderr*/*cov* methods should provide summary statistics for the
+    parameter uncertainties.  Some fitters, such as MCMC, will compute these
+    directly from the population.  Others, such as BFGS, will produce an
+    estimate of the uncertainty as they go along.  If the fitter does not
+    provide these estimates, then they will be computed from numerical
+    derivatives at the minimum in the FitDriver method.
+    """
+    def __init__(self, problem):
+        """Fit the models and show the results"""
+        self.problem = problem
+
+    def solve(self, monitors=None, mapper=None, **options):
+        raise NotImplementedError
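+
+
+# A minimal sketch of a new fit engine following the FitBase interface
+# described above (illustrative only; ``my_minimizer`` is a hypothetical
+# optimizer, not part of bumps):
+#
+#     class MyFit(FitBase):
+#         name = "My Optimizer"
+#         id = "myfit"
+#         settings = [('steps', 1000)]
+#
+#         def solve(self, monitors=None, abort_test=None, mapper=None, **options):
+#             options = _fill_defaults(options, self.settings)
+#             self._update = MonitorRunner(problem=self.problem, monitors=monitors)
+#             x, fx = my_minimizer(self.problem.nllf, self.problem.getp(),
+#                                  bounds=self.problem.bounds(),
+#                                  steps=options['steps'])
+#             return x, fx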
+
+
+class MultiStart(FitBase):
+    """
+    Multi-start Monte Carlo fitter.
+
+    This fitter wraps a local optimizer, restarting it a number of times
+    to give it a chance to find a different local minimum.  If the keep_best
+    option is True, then restart near the best fit, otherwise restart at
+    random.
+    """
+    name = "Multistart Monte Carlo"
+    settings = [('starts', 100)]
+
+    def __init__(self, fitter):
+        FitBase.__init__(self, fitter.problem)
+        self.fitter = fitter
+
+    def solve(self, monitors=None, mapper=None, **options):
+        # TODO: need better way of tracking progress
+        import logging
+        starts = options.pop('starts', 1)
+        reset = not options.pop('keep_best', True)
+        f_best = np.inf
+        x_best = self.problem.getp()
+        for _ in range(max(starts, 1)):
+            logging.info("multistart round %d"%_)
+            x, fx = self.fitter.solve(monitors=monitors, mapper=mapper,
+                                      **options)
+            if fx < f_best:
+                x_best, f_best = x, fx
+                logging.info("multistart f(x),x: %s %s"%(str(fx),str(x_best)))
+            if reset:
+                self.problem.randomize()
+            else:
+                # Jitter
+                self.problem.setp(x_best)
+                pop = initpop.eps_init(1, self.problem.getp(),
+                                       self.problem.bounds(),
+                                       use_point=False, eps=1e-3)
+                self.problem.setp(pop[0])
+        return x_best, f_best
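+
+# Illustrative usage (a sketch): wrap a local optimizer so that it restarts
+# from perturbed points, as FitDriver.fit does when its 'starts' option is
+# greater than one:
+#
+#     fitter = MultiStart(SimplexFit(problem))
+#     x, fx = fitter.solve(starts=10, keep_best=True)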
+
+
+class DEFit(FitBase):
+    """
+    Classic Storn and Price differential evolution optimizer.
+    """
+    name = "Differential Evolution"
+    id = "de"
+    settings = [('steps', 1000), ('pop', 10), ('CR', 0.9), ('F', 2.0),
+                ('ftol', 1e-8), ('xtol', 1e-6), #('stop', ''),
+                ]
+
+    def solve(self, monitors=None, abort_test=None, mapper=None, **options):
+        if abort_test is None:
+            abort_test = lambda: False
+        options = _fill_defaults(options, self.settings)
+        from .mystic.optimizer import de
+        from .mystic.solver import Minimizer
+        from .mystic import stop
+        if monitors is None:
+            monitors = [ConsoleMonitor(self.problem)]
+        if mapper is not None:
+            _mapper = lambda p, v: mapper(v)
+        else:
+            _mapper = lambda p, v: list(map(self.problem.nllf, v))
+        resume = hasattr(self, 'state')
+        steps = options['steps'] + (self.state['step'][-1] if resume else 0)
+        strategy = de.DifferentialEvolution(npop=options['pop'],
+                                            CR=options['CR'],
+                                            F=options['F'],
+                                            crossover=de.c_bin,
+                                            mutate=de.rand1u)
+        success = parse_tolerance(options)
+        failure = stop.Steps(steps)
+        self.history = History()
+        # Step adds to current step number if resume
+        minimize = Minimizer(strategy=strategy, problem=self.problem,
+                             history=self.history, monitors=monitors,
+                             success=success, failure=failure)
+        if resume:
+            self.history.restore(self.state)
+        x = minimize(mapper=_mapper, abort_test=abort_test, resume=resume)
+        #print(minimize.termination_condition())
+        #with open("/tmp/evals","a") as fid:
+        #   print >>fid,minimize.history.value[0],minimize.history.step[0],\
+        #       minimize.history.step[0]*options['pop']*len(self.problem.getp())
+        return x, self.history.value[0]
+
+    def load(self, input_path):
+        self.state = load_history(input_path)
+
+    def save(self, output_path):
+        save_history(output_path, self.history.snapshot())
+
+
+def parse_tolerance(options):
+    from .mystic import stop
+    if options.get('stop', ''):
+        return stop.parse_condition(options['stop'])
+
+    xtol, ftol = options['xtol'], options['ftol']
+    if xtol == 0:
+        if ftol == 0:
+            return None
+        if ftol < 0:
+            return stop.Rf(-ftol, scaled=True)
+        return stop.Rf(ftol, scaled=False)
+    else:
+        if xtol < 0:
+            return stop.Rx(-xtol, scaled=True)
+        return stop.Rx(xtol, scaled=False)
+
+
+def _history_file(path):
+    return path + "-history.json"
+
+
+def load_history(path):
+    """
+    Load fitter details from a history file.
+    """
+    import json
+    with open(_history_file(path), "r") as fid:
+        return json.load(fid)
+
+
+def save_history(path, state):
+    """
+    Save fitter details to a history file as JSON.
+
+    The content of the details are fitter specific.
+    """
+    import json
+    with open(_history_file(path), "w") as fid:
+        json.dump(state, fid)
+
+
+class BFGSFit(FitBase):
+    """
+    BFGS quasi-newton optimizer.
+    """
+    name = "Quasi-Newton BFGS"
+    id = "newton"
+    settings = [('steps', 3000), ('starts', 1),
+                ('ftol', 1e-6), ('xtol', 1e-12)]
+
+    def solve(self, monitors=None, abort_test=None, mapper=None, **options):
+        if abort_test is None:
+            abort_test = lambda: False
+        options = _fill_defaults(options, self.settings)
+        from .quasinewton import quasinewton
+        self._update = MonitorRunner(problem=self.problem,
+                                     monitors=monitors)
+        result = quasinewton(fn=self.problem.nllf,
+                             x0=self.problem.getp(),
+                             monitor=self._monitor,
+                             abort_test=abort_test,
+                             itnlimit=options['steps'],
+                             gradtol=options['ftol'],
+                             steptol=1e-12,
+                             macheps=1e-8,
+                             eta=1e-8,
+                             )
+        self.result = result
+        #code = result['status']
+        #from .quasinewton import STATUS
+        #print("%d: %s, x=%s, fx=%s"
+        #      % (code, STATUS[code], result['x'], result['fx']))
+        return result['x'], result['fx']
+
+    # BFGS estimates the hessian and its cholesky decomposition, but initial
+    # tests give uncertainties quite different from the directly computed
+    # jacobian in Levenberg-Marquardt or the hessian estimated at the
+    # minimum by numdifftools.
+    def Hstderr(self):
+        return lsqerror.chol_stderr(self.result['L'])
+
+    def Hcov(self):
+        return lsqerror.chol_cov(self.result['L'])
+
+    def _monitor(self, step, x, fx):
+        self._update(step=step, point=x, value=fx,
+                     population_points=[x],
+                     population_values=[fx])
+        return True
+
+
+class PSFit(FitBase):
+    """
+    Particle swarm optimizer.
+    """
+    name = "Particle Swarm"
+    id = "ps"
+    settings = [('steps', 3000), ('pop', 1)]
+
+    def solve(self, monitors=None, mapper=None, **options):
+        options = _fill_defaults(options, self.settings)
+        if mapper is None:
+            mapper = lambda x: list(map(self.problem.nllf, x))
+        from .random_lines import particle_swarm
+        self._update = MonitorRunner(problem=self.problem,
+                                     monitors=monitors)
+        low, high = self.problem.bounds()
+        cfo = dict(parallel_cost=mapper,
+                   n=len(low),
+                   x0=self.problem.getp(),
+                   x1=low,
+                   x2=high,
+                   f_opt=0,
+                   monitor=self._monitor)
+        npop = int(cfo['n'] * options['pop'])
+
+        result = particle_swarm(cfo, npop, maxiter=options['steps'])
+        satisfied_sc, n_feval, f_best, x_best = result
+
+        return x_best, f_best
+
+    def _monitor(self, step, x, fx, k):
+        self._update(step=step, point=x[:, k], value=fx[k],
+                     population_points=x.T, population_values=fx)
+        return True
+
+
+class RLFit(FitBase):
+    """
+    Random lines optimizer.
+    """
+    name = "Random Lines"
+    id = "rl"
+    settings = [('steps', 3000), ('starts', 20), ('pop', 0.5), ('CR', 0.9)]
+
+    def solve(self, monitors=None, abort_test=None, mapper=None, **options):
+        if abort_test is None:
+            abort_test = lambda: False
+        options = _fill_defaults(options, self.settings)
+        if mapper is None:
+            mapper = lambda x: list(map(self.problem.nllf, x))
+        from .random_lines import random_lines
+        self._update = MonitorRunner(problem=self.problem,
+                                     monitors=monitors)
+        low, high = self.problem.bounds()
+        cfo = dict(parallel_cost=mapper,
+                   n=len(low),
+                   x0=self.problem.getp(),
+                   x1=low,
+                   x2=high,
+                   f_opt=0,
+                   monitor=self._monitor)
+        npop = max(int(cfo['n'] * options['pop']), 3)
+
+        result = random_lines(cfo, npop, abort_test=abort_test,
+                              maxiter=options['steps'], CR=options['CR'])
+        satisfied_sc, n_feval, f_best, x_best = result
+
+        return x_best, f_best
+
+    def _monitor(self, step, x, fx, k):
+        # print "rl best",k, x.shape,fx.shape
+        self._update(step=step, point=x[:, k], value=fx[k],
+                     population_points=x.T, population_values=fx)
+        return True
+
+
+class PTFit(FitBase):
+    """
+    Parallel tempering optimizer.
+    """
+    name = "Parallel Tempering"
+    id = "pt"
+    settings = [('steps', 400), ('nT', 24), ('CR', 0.9),
+                ('burn', 100), ('Tmin', 0.1), ('Tmax', 10)]
+
+    def solve(self, monitors=None, mapper=None, **options):
+        options = _fill_defaults(options, self.settings)
+        # TODO: no mapper??
+        from .partemp import parallel_tempering
+        self._update = MonitorRunner(problem=self.problem,
+                                     monitors=monitors)
+        t = np.logspace(np.log10(options['Tmin']),
+                           np.log10(options['Tmax']),
+                           options['nT'])
+        history = parallel_tempering(nllf=self.problem.nllf,
+                                     p=self.problem.getp(),
+                                     bounds=self.problem.bounds(),
+                                     # logfile="partemp.dat",
+                                     T=t,
+                                     CR=options['CR'],
+                                     steps=options['steps'],
+                                     burn=options['burn'],
+                                     monitor=self._monitor)
+        return history.best_point, history.best
+
+    def _monitor(self, step, x, fx, P, E):
+        self._update(step=step, point=x, value=fx,
+                     population_points=P, population_values=E)
+        return True
+
+
+class SimplexFit(FitBase):
+    """
+    Nelder-Mead simplex optimizer.
+    """
+    name = "Nelder-Mead Simplex"
+    id = "amoeba"
+    settings = [('steps', 1000), ('starts', 1), ('radius', 0.15),
+                ('xtol', 1e-6), ('ftol', 1e-8)]
+
+    def solve(self, monitors=None, abort_test=None, mapper=None, **options):
+        from .simplex import simplex
+        if abort_test is None:
+            abort_test = lambda: False
+        options = _fill_defaults(options, self.settings)
+        # TODO: no mapper??
+        self._update = MonitorRunner(problem=self.problem,
+                                     monitors=monitors)
+        # print "bounds",self.problem.bounds()
+        result = simplex(f=self.problem.nllf, x0=self.problem.getp(),
+                         bounds=self.problem.bounds(),
+                         abort_test=abort_test,
+                         update_handler=self._monitor,
+                         maxiter=options['steps'],
+                         radius=options['radius'],
+                         xtol=options['xtol'],
+                         ftol=options['ftol'])
+        # Let simplex propose the starting point for the next amoeba
+        # fit in a multistart amoeba context.  If the best is always
+        # used, the fit can get stuck in a local minimum.
+        self.problem.setp(result.next_start)
+        #print("amoeba %s %s"%(result.x,result.fx))
+        return result.x, result.fx
+
+    def _monitor(self, k, n, x, fx):
+        self._update(step=k, point=x[0], value=fx[0],
+                     population_points=x, population_values=fx)
+        return True
+
+
+class LevenbergMarquardtFit(FitBase):
+    """
+    Levenberg-Marquardt optimizer.
+    """
+    name = "Levenberg-Marquardt"
+    id = "lm"
+    settings = [('steps', 200), ('ftol', 1.5e-8), ('xtol', 1.5e-8)]
+    # LM also has
+    #    gtol: orthogonality between jacobian columns
+    #    epsfcn: numerical derivative step size
+    #    factor: initial radius
+    #    diag: variable scale factors to bring them near 1
+
+    def solve(self, monitors=None, abort_test=None, mapper=None, **options):
+        from scipy import optimize
+        if abort_test is None:
+            abort_test = lambda: False
+        options = _fill_defaults(options, self.settings)
+        self._low, self._high = self.problem.bounds()
+        self._update = MonitorRunner(problem=self.problem,
+                                     monitors=monitors)
+        x0 = self.problem.getp()
+        maxfev = options['steps']*(len(x0)+1)
+        result = optimize.leastsq(self._bounded_residuals,
+                                  x0,
+                                  ftol=options['ftol'],
+                                  xtol=options['xtol'],
+                                  maxfev=maxfev,
+                                  epsfcn=1e-8,
+                                  full_output=True)
+        x, cov_x, info, mesg, success = result
+        if not 1 <= success <= 4:
+            # don't treat "reached maxfev" as a true failure
+            if "reached maxfev" in mesg:
+                # unless the x values are bad
+                if not np.all(np.isfinite(x)):
+                    x = None
+                    mesg = "Levenberg-Marquardt fit failed with bad values"
+            else:
+                x = None
+        self._cov = cov_x if x is not None else None
+        # compute one last time with x forced inside the boundary, and using
+        # problem.nllf as returned by other optimizers.  We will ignore the
+        # covariance output and calculate it again ourselves.  Not ideal if
+        # f is expensive, but it will be consistent with other optimizers.
+        if x is not None:
+            self.problem.setp(x + self._stray_delta(x))
+            fx = self.problem.nllf()
+        else:
+            fx = None
+        return x, fx
+
+    def _bounded_residuals(self, p):
+        # Force the fit point into the valid region
+        stray = self._stray_delta(p)
+        stray_cost = np.sum(stray**2)
+        if stray_cost > 0: stray_cost += 1e6
+        self.problem.setp(p + stray)
+        # treat prior probabilities on the parameters as additional
+        # measurements
+        residuals = np.hstack(
+            (self.problem.residuals().flat, self.problem.parameter_residuals()))
+        # Tally costs for straying outside the boundaries plus other costs
+        extra_cost = stray_cost + self.problem.constraints_nllf()
+        # Spread the cost over the residuals.  Since we are smoothly increasing
+        # residuals as we leave the boundary, this should push us back into the
+        # boundary (within tolerance) during the lm fit.
+        residuals += np.sign(residuals) * (extra_cost / len(residuals))
+        return residuals
+
+    def _stray_delta(self, p):
+        """calculate how far point is outside the boundary"""
+        return (np.where(p < self._low, self._low - p, 0)
+                + np.where(p > self._high, self._high - p, 0))
+
+    def cov(self):
+        return self._cov
+
+
+class SnobFit(FitBase):
+    name = "SNOBFIT"
+    id = "snobfit"
+    settings = [('steps', 200)]
+
+    def solve(self, monitors=None, mapper=None, **options):
+        options = _fill_defaults(options, self.settings)
+        # TODO: no mapper??
+        from snobfit.snobfit import snobfit
+        self._update = MonitorRunner(problem=self.problem,
+                                     monitors=monitors)
+        x, fx, _ = snobfit(self.problem, self.problem.getp(),
+                           self.problem.bounds(),
+                           fglob=0, callback=self._monitor)
+        return x, fx
+
+    def _monitor(self, k, x, fx, improved):
+        # TODO: snobfit does have a population...
+        self._update(step=k, point=x, value=fx,
+                     population_points=[x], population_values=[fx])
+
+
+class DreamModel(MCMCModel):
+
+    """
+    DREAM wrapper for fit problems.
+    """
+
+    def __init__(self, problem=None, mapper=None):
+        """
+        Create a sampling from the multidimensional likelihood function
+        represented by the problem set using dream.
+        """
+        # print "dream"
+        self.problem = problem
+        self.bounds = self.problem.bounds()
+        self.labels = self.problem.labels()
+
+        self.mapper = mapper if mapper else lambda p: list(map(self.nllf, p))
+
+    def log_density(self, x):
+        return -self.nllf(x)
+
+    def nllf(self, x):
+        """Negative log likelihood of seeing models given *x*"""
+        # Note: usually we will be going through the provided mapper, and
+        # this function will never be called.
+        # print "eval",x; sys.stdout.flush()
+        return self.problem.nllf(x)
+
+    def map(self, pop):
+        # print "calling mapper",self.mapper
+        return -np.array(self.mapper(pop))
+
+
+class DreamFit(FitBase):
+    name = "DREAM"
+    id = "dream"
+    settings = [('samples', int(1e4)), ('burn', 100), ('pop', 10),
+                ('init', 'eps'), ('thin', 1),
+                ('steps', 0),  # deprecated: use --samples instead
+               ]
+
+    def __init__(self, problem):
+        FitBase.__init__(self, problem)
+        self.dream_model = DreamModel(problem)
+        self.state = None
+
+    def solve(self, monitors=None, abort_test=None, mapper=None, **options):
+        from .dream import Dream
+        if abort_test is None:
+            abort_test = lambda: False
+        options = _fill_defaults(options, self.settings)
+
+        if mapper:
+            self.dream_model.mapper = mapper
+        self._update = MonitorRunner(problem=self.dream_model.problem,
+                                     monitors=monitors)
+
+        population = initpop.generate(self.dream_model.problem, **options)
+        pop_size = population.shape[0]
+        draws, steps = int(options['samples']), options['steps']
+        if steps == 0:
+            steps = (draws + pop_size-1) // pop_size
+        # TODO: need a better way to announce number of steps
+        # maybe somehow print iteration # of # iters in the monitor?
+        print("# steps: %d, # draws: %d"%(steps, pop_size*steps))
+        population = population[None, :, :]
+        sampler = Dream(model=self.dream_model, population=population,
+                        draws=pop_size * steps,
+                        burn=pop_size * options['burn'],
+                        thinning=options['thin'],
+                        monitor=self._monitor,
+                        DE_noise=1e-6)
+
+        self.state = sampler.sample(state=self.state, abort_test=abort_test)
+        self.state.mark_outliers()
+        self.state.keep_best()
+        self.state.title = self.dream_model.problem.name
+
+        # TODO: Temporary hack to apply a post-mcmc action to the state vector
+        # The problem is that if we manipulate the state vector before saving
+        # it then we will not be able to use the --resume feature.  We can
+        # get around this by just not writing state for the derived variables,
+        # at which point we can remove this notice.
+        # TODO: Add derived/visible variable support to other optimizers
+        fn, labels = getattr(self.problem, 'derive_vars', (None, None))
+        if fn is not None:
+            self.state.derive_vars(fn, labels=labels)
+        visible_vars = getattr(self.problem, 'visible_vars', None)
+        if visible_vars is not None:
+            self.state.set_visible_vars(visible_vars)
+        integer_vars = getattr(self.problem, 'integer_vars', None)
+        if integer_vars is not None:
+            self.state.integer_vars(integer_vars)
+
+        x, fx = self.state.best()
+
+        # Check that the last point is the best point
+        #points, logp = self.state.sample()
+        #assert logp[-1] == fx
+        #print(points[-1], x)
+        #assert all(points[-1, i] == xi for i, xi in enumerate(x))
+        return x, -fx
+
+    def entropy(self, **kw):
+        return self.state.entropy(**kw)
+
+    def _monitor(self, state, pop, logp):
+        # Get an early copy of the state
+        self._update.history.uncertainty_state = state
+        step = state.generation
+        x, fx = state.best()
+        self._update(step=step, point=x, value=-fx,
+                     population_points=pop, population_values=-logp)
+        return True
+
+    def stderr(self):
+        """
+        Approximate standard error as 1/2 the 68% interval fo the sample,
+        which is a more robust measure than the mean of the sample for
+        non-normal distributions.
+        """
+        from .dream.stats import var_stats
+
+        vstats = var_stats(self.state.draw())
+        return np.array([(v.p68[1] - v.p68[0]) / 2 for v in vstats], 'd')
+
+    #def cov(self):
+    #    # Covariance estimate from final 1000 points
+    #    return np.cov(self.state.draw().points[-1000:])
+
+    def load(self, input_path):
+        from .dream.state import load_state
+        print("loading saved state (this might take awhile) ...")
+        self.state = load_state(input_path, report=100)
+
+    def save(self, output_path):
+        self.state.save(output_path)
+
+    def plot(self, output_path):
+        self.state.show(figfile=output_path)
+        self.error_plot(figfile=output_path)
+
+    def show(self):
+        pass
+
+    def error_plot(self, figfile):
+        # Produce error plot
+        import pylab
+        from . import errplot
+        # TODO: shouldn't mix calc and display!
+        res = errplot.calc_errors_from_state(self.dream_model.problem,
+                                             self.state)
+        if res is not None:
+            pylab.figure()
+            errplot.show_errors(res)
+            pylab.savefig(figfile + "-errors.png", format='png')
+
+
+class Resampler(FitBase):
+    # TODO: why isn't cli.resynth using this?
+
+    def __init__(self, fitter):
+        self.fitter = fitter
+        raise NotImplementedError
+
+    def solve(self, **options):
+        starts = options.pop('starts', 1)
+        restart = options.pop('restart', False)
+        x, fx = self.fitter.solve(**options)
+        points = _resampler(self.fitter, x, samples=starts,
+                            restart=restart, **options)
+        self.points = points  # save points for later plotting
+        return x, fx
+
+
+def _resampler(fitter, xinit, samples=100, restart=False, **options):
+    """
+    Refit the result multiple times with resynthesized data, building
+    up an array in Result.samples which contains the best fit to the
+    resynthesized data.  *samples* is the number of samples to generate.
+    *fitter* is the (local) optimizer to use.  **options are the parameters
+    for the optimizer.
+    """
+    x = xinit
+    points = []
+    try:  # TODO: some solvers already catch KeyboardInterrupt
+        for _ in range(samples):
+            # print "== resynth %d of %d" % (i, samples)
+            fitter.problem.resynth_data()
+            if restart:
+                fitter.problem.randomize()
+            else:
+                fitter.problem.setp(x)
+            x, fx = fitter.solve(**options)
+            points.append(np.hstack((fx, x)))
+            # print self.problem.summarize()
+            # print "[chisq=%g]" % (nllf*2/self.problem.dof)
+    except KeyboardInterrupt:
+        # On keyboard interrupt we can declare that we are finished sampling
+        # without it being an error condition, so let this exception pass.
+        pass
+    finally:
+        # Restore the state of the problem
+        fitter.problem.restore_data()
+        fitter.problem.setp(xinit)
+        fitter.problem.model_update()
+    return points
+
+
+class FitDriver(object):
+
+    def __init__(self, fitclass=None, problem=None, monitors=None,
+                 abort_test=None, mapper=None, **options):
+        self.fitclass = fitclass
+        self.problem = problem
+        self.options = options
+        self.monitors = monitors
+        self.abort_test = abort_test
+        self.mapper = mapper if mapper else lambda p: list(map(problem.nllf, p))
+
+    def fit(self, resume=None):
+
+        if hasattr(self, '_cov'): del self._cov
+        if hasattr(self, '_stderr'): del self._stderr
+        fitter = self.fitclass(self.problem)
+        if resume:
+            fitter.load(resume)
+        starts = self.options.get('starts', 1)
+        if starts > 1:
+            fitter = MultiStart(fitter)
+        t0 = time.clock()
+        x, fx = fitter.solve(monitors=self.monitors,
+                             abort_test=self.abort_test,
+                             mapper=self.mapper,
+                             **self.options)
+        self.fitter = fitter
+        self.time = time.clock() - t0
+        self.result = x, fx
+        if x is not None:
+            self.problem.setp(x)
+        return x, fx
+
+    def entropy(self):
+        if hasattr(self.fitter, 'entropy'):
+            return self.fitter.entropy()
+        else:
+            from .dream import entropy
+            return entropy.cov_entropy(self.cov()), 0
+
+    def cov(self):
+        """
+        Return an estimate of the covariance of the fit.
+
+        Depending on the fitter and the problem, this may be computed from
+        existing evaluations within the fitter, or from numerical
+        differentiation around the minimum.  The numerical differentiation
+        will use the Hessian estimated from nllf.   If the problem uses
+        $\chi^2/2$ as its nllf, then you may want to instead compute
+        the covariance from the Jacobian::
+
+            J = lsqerror.jacobian(fitdriver.problem, fitdriver.result[0])
+            cov = lsqerror.cov(J)
+
+        This should be faster and more accurate than the Hessian of nllf
+        when you can use it.
+        """
+        if not hasattr(self, '_cov'):
+            self._cov = None
+            if hasattr(self.fitter, 'cov'):
+                self._cov = self.fitter.cov()
+        if self._cov is None:
+            if hasattr(self.problem, 'residuals'):
+                J = lsqerror.jacobian(self.problem, self.result[0])
+                self._cov = lsqerror.cov(J)
+            else:
+                H = lsqerror.hessian(self.problem, self.result[0])
+                H, L = lsqerror.perturbed_hessian(H)
+                self._cov = lsqerror.chol_cov(L)
+        return self._cov
+
+    def stderr(self):
+        """
+        Return an estimate of the standard error of the fit.
+
+        Depending on the fitter and the problem, this may be computed from
+        existing evaluations within the fitter, or from numerical
+        differentiation around the minimum.
+        """
+        if not hasattr(self, '_stderr'):
+            self._stderr = None
+            if hasattr(self.fitter, 'stderr'):
+                self._stderr = self.fitter.stderr()
+        if self._stderr is None:
+            # If no stderr from the fitter then compute it from the covariance
+            self._stderr = lsqerror.stderr(self.cov())
+        return self._stderr
+
+    def show(self):
+        if hasattr(self.fitter, 'show'):
+            self.fitter.show()
+        if hasattr(self.problem, 'show'):
+            self.problem.show()
+
+    def show_err(self):
+        """
+        Display the error approximation from the numerical derivative.
+
+        Warning: cost grows as the cube of the number of parameters.
+        """
+        # TODO: need cheaper uncertainty estimate
+        # Note: error estimated from hessian diagonal is insufficient.
+        err = lsqerror.stderr(self.cov())
+        norm = np.sqrt(self.problem.chisq())
+        print("=== Uncertainty est. from curvature: par    dx           dx/sqrt(chisq) ===")
+        for k, v, dv in zip(self.problem.labels(), self.problem.getp(), err):
+            print("%40s %-15s %-15s" %(k,
+                                       format_uncertainty(v, dv),
+                                       format_uncertainty(v, dv/norm),
+                                       ))
+        print("="*75)
+
+    def save(self, output_path):
+        # print "calling driver save"
+        if hasattr(self.fitter, 'save'):
+            self.fitter.save(output_path)
+        if hasattr(self.problem, 'save'):
+            self.problem.save(output_path)
+
+    def load(self, input_path):
+        # print "calling driver save"
+        if hasattr(self.fitter, 'load'):
+            self.fitter.load(input_path)
+        if hasattr(self.problem, 'load'):
+            self.problem.load(input_path)
+
+    def plot(self, output_path, view=None):
+        # print "calling fitter.plot"
+        if hasattr(self.problem, 'plot'):
+            self.problem.plot(figfile=output_path, view=view)
+        if hasattr(self.fitter, 'plot'):
+            self.fitter.plot(output_path=output_path)
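+
+
+# Illustrative driver usage (a sketch, assuming ``problem`` is a FitProblem
+# defined elsewhere; option values are arbitrary):
+#
+#     driver = FitDriver(fitclass=DEFit, problem=problem, steps=500, pop=10)
+#     x, fx = driver.fit()
+#     driver.show()
+#     print("stderr", driver.stderr())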
+
+
+def _fill_defaults(options, settings):
+    """
+    Returns options dict with missing values filled from settings.
+    """
+    result = dict(settings)  # settings is a list of (key,value) pairs
+    result.update(options)
+    return result
+
+# Fit classes available to the driver.  Each class defines its own settings
+# as a list of (parameter, factory value) pairs.
+FITTERS = [
+    SimplexFit,
+    DEFit,
+    DreamFit,
+    BFGSFit,
+    LevenbergMarquardtFit,
+    PSFit,
+    PTFit,
+    RLFit,
+    SnobFit,
+    ]
+
+FIT_AVAILABLE_IDS = [f.id for f in FITTERS]
+
+
+FIT_ACTIVE_IDS = [
+    SimplexFit.id,
+    DEFit.id,
+    DreamFit.id,
+    BFGSFit.id,
+    LevenbergMarquardtFit.id,
+    ]
+
+FIT_DEFAULT_ID = SimplexFit.id
+
+assert FIT_DEFAULT_ID in FIT_ACTIVE_IDS
+assert all(f in FIT_AVAILABLE_IDS for f in FIT_ACTIVE_IDS)
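+
+# Illustrative lookup of a fit class from its command-line id (a sketch):
+#
+#     fitclass = dict((f.id, f) for f in FITTERS)[FIT_DEFAULT_ID]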
diff --git a/bumps/formatnum.py b/bumps/formatnum.py
new file mode 100644
index 0000000..c10de97
--- /dev/null
+++ b/bumps/formatnum.py
@@ -0,0 +1,450 @@
+# This program is public domain
+# Author: Paul Kienzle
+"""
+Format values and uncertainties nicely for printing.
+
+The formatted value uses only the number of digits warranted by
+the uncertainty in the measurement.
+
+:func:`format_value` shows the value without the uncertainty.
+
+:func:`format_uncertainty_pm` shows the expanded format v +/- err.
+
+:func:`format_uncertainty_compact` shows the compact format v(##),
+where the number in parentheses is the uncertainty in the last two digits of v.
+
+:func:`format_uncertainty` uses the compact format by default, but this
+can be changed to use the expanded +/- format by setting
+format_uncertainty.compact to False.  This is a global setting which should
+be considered a user preference.  Any library code that depends on a specific
+format style should use the corresponding formatting function.
+
+If the uncertainty is 0 or not otherwise provided, the simple
+%g floating point format option is used.
+
+Infinite and indefinite numbers are represented as inf and NaN.
+
+Example::
+
+    >>> v,dv = 757.2356,0.01032
+    >>> print(format_uncertainty_pm(v,dv))
+    757.236 +/- 0.010
+    >>> print(format_uncertainty_compact(v,dv))
+    757.236(10)
+    >>> print(format_uncertainty(v,dv))
+    757.236(10)
+    >>> format_uncertainty.compact = False
+    >>> print(format_uncertainty(v,dv))
+    757.236 +/- 0.010
+"""
+from __future__ import division
+import math
+
+from numpy import isinf, isnan, inf, NaN
+
+__all__ = ['format_value', 'format_uncertainty',
+           'format_uncertainty_compact', 'format_uncertainty_pm',
+           ]
+
+
+# Coordinating scales across a set of numbers is not supported.  For easy
+# comparison a set of numbers should be shown in the same scale.  One could
+# force this from the outside by adding scale parameter (either 10**n, n, or
+# a string representing the desired SI prefix) and having a separate routine
+# which computes the scale given a set of values.
+
+# Coordinating scales with units offers its own problems.  Again, the user
+# may want to force particular units.  This can be done by outside of the
+# formatting routines by scaling the numbers to the appropriate units then
+# forcing them to print with scale 10**0.  If this is a common operation,
+# however, it may want to happen inside.
+
+# The value e<n> is currently formatted into the number.  Alternatively this
+# scale factor could be returned so that the user can choose the appropriate
+# SI prefix when printing the units.  This gets tricky when talking about
+# composite units such as 2.3e-3 m**2 -> 2300 mm**2, and with volumes
+# such as 1 g/cm**3 -> 1 kg/L.
+
+
+def format_value(value, uncertainty):
+    """
+    Given *value* v and *uncertainty* dv, return a string v which is
+    the value formatted with the appropriate number of digits.
+    """
+    return _format_uncertainty(value, uncertainty, compact=None)
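+
+# Example (illustrative): format_value(757.2356, 0.01032) returns '757.236',
+# i.e. the value alone, rounded to the precision implied by the uncertainty.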
+
+
+def format_uncertainty_pm(value, uncertainty):
+    """
+    Given *value* v and *uncertainty* dv, return a string v +/- dv.
+    """
+    return _format_uncertainty(value, uncertainty, compact=False)
+
+
+def format_uncertainty_compact(value, uncertainty):
+    """
+    Given *value* v and *uncertainty* dv, return the compact
+    representation v(##), where ## are the first two digits of
+    the uncertainty.
+    """
+    return _format_uncertainty(value, uncertainty, compact=True)
+
+
+def format_uncertainty(value, uncertainty):
+    """
+    Value and uncertainty formatter.
+
+    Either the expanded v +/- dv form or the compact v(##) form will be
+    used depending on whether *format_uncertainty.compact* is True or False.
+    The default is True.
+    """
+    return _format_uncertainty(value, uncertainty, format_uncertainty.compact)
+format_uncertainty.compact = True
+
+def _format_uncertainty(value, uncertainty, compact):
+    """
+    Implementation of both the compact and the +/- formats.
+    """
+    # Handle indefinite value
+    if isinf(value):
+        return "inf" if value > 0 else "-inf"
+    if isnan(value):
+        return "NaN"
+
+    # Handle indefinite uncertainty
+    if uncertainty is None or uncertainty <= 0 or isnan(uncertainty):
+        return "%g" % value
+    if isinf(uncertainty):
+        if compact is None:
+            return "%.2g" % value
+        elif compact:
+            return "%.2g(inf)" % value
+        else:
+            return "%.2g +/- inf" % value
+
+    # Handle zero and negative values
+    sign = "-" if value < 0 else ""
+    value = abs(value)
+
+    # Determine scale of value and error
+    err_place = int(math.floor(math.log10(uncertainty)))
+    if value == 0:
+        val_place = err_place - 1
+    else:
+        val_place = int(math.floor(math.log10(value)))
+
+    if err_place > val_place:
+        # Degenerate case: error bigger than value
+        # The mantissa is 0.#(##)e#, 0.0#(##)e# or 0.00#(##)e#
+        val_place = err_place + 2
+    elif err_place == val_place:
+        # Degenerate case: error and value the same order of magnitude
+        # The value is ##(##)e#, #.#(##)e# or 0.##(##)e#
+        val_place = err_place + 1
+    elif err_place <= 1 and val_place >= -3:
+        # Normal case: nice numbers and errors
+        # The value is ###.###(##)
+        val_place = 0
+    else:
+        # Extreme cases: zeros before value or after error
+        # The value is ###.###(##)e#, ##.####(##)e# or #.#####(##)e#
+        pass
+
+    # Force engineering notation, with exponent a multiple of 3
+    val_place = int(math.floor(val_place / 3.)) * 3
+
+    # Format the result
+    digits_after_decimal = abs(val_place - err_place + 1)
+    # Only use one digit of uncertainty if no precision included in result
+    #if compact is None: digits_after_decimal -= 1
+    val_str = "%.*f" % (digits_after_decimal, value / 10. ** val_place)
+    exp_str = "e%d" % val_place if val_place != 0 else ""
+    if compact is None:
+        result = "".join((sign, val_str, exp_str))
+    elif compact:
+        err_str = "(%2d)" % int(uncertainty / 10. ** (err_place - 1) + 0.5)
+        result = "".join((sign, val_str, err_str, exp_str))
+    else:
+        err_str = "%.*f" % (digits_after_decimal,
+                            uncertainty / 10. ** val_place)
+        result = "".join((sign, val_str, exp_str + " +/- ", err_str, exp_str))
+    # print sign,value, uncertainty, "=>", result
+    return result
+
+
+def test_compact():
+    # Oops... renamed function after writing tests
+    value_str = format_uncertainty_compact
+
+    # val_place > err_place
+    assert value_str(1235670, 766000) == "1.24(77)e6"
+    assert value_str(123567., 76600) == "124(77)e3"
+    assert value_str(12356.7, 7660) == "12.4(77)e3"
+    assert value_str(1235.67, 766) == "1.24(77)e3"
+    assert value_str(123.567, 76.6) == "124(77)"
+    assert value_str(12.3567, 7.66) == "12.4(77)"
+    assert value_str(1.23567, .766) == "1.24(77)"
+    assert value_str(.123567, .0766) == "0.124(77)"
+    assert value_str(.0123567, .00766) == "0.0124(77)"
+    assert value_str(.00123567, .000766) == "0.00124(77)"
+    assert value_str(.000123567, .0000766) == "124(77)e-6"
+    assert value_str(.0000123567, .00000766) == "12.4(77)e-6"
+    assert value_str(.00000123567, .000000766) == "1.24(77)e-6"
+    assert value_str(.000000123567, .0000000766) == "124(77)e-9"
+    assert value_str(.00000123567, .0000000766) == "1.236(77)e-6"
+    assert value_str(.0000123567, .0000000766) == "12.357(77)e-6"
+    assert value_str(.000123567, .0000000766) == "123.567(77)e-6"
+    assert value_str(.00123567, .000000766) == "0.00123567(77)"
+    assert value_str(.0123567, .00000766) == "0.0123567(77)"
+    assert value_str(.123567, .0000766) == "0.123567(77)"
+    assert value_str(1.23567, .000766) == "1.23567(77)"
+    assert value_str(12.3567, .00766) == "12.3567(77)"
+    assert value_str(123.567, .0764) == "123.567(76)"
+    assert value_str(1235.67, .764) == "1235.67(76)"
+    assert value_str(12356.7, 7.64) == "12356.7(76)"
+    assert value_str(123567, 76.4) == "123567(76)"
+    assert value_str(1235670, 764) == "1.23567(76)e6"
+    assert value_str(12356700, 764) == "12.35670(76)e6"
+    assert value_str(123567000, 764) == "123.56700(76)e6"
+    assert value_str(123567000, 7640) == "123.5670(76)e6"
+    assert value_str(1235670000, 76400) == "1.235670(76)e9"
+
+    # val_place == err_place
+    assert value_str(123567, 764000) == "0.12(76)e6"
+    assert value_str(12356.7, 76400) == "12(76)e3"
+    assert value_str(1235.67, 7640) == "1.2(76)e3"
+    assert value_str(123.567, 764) == "0.12(76)e3"
+    assert value_str(12.3567, 76.4) == "12(76)"
+    assert value_str(1.23567, 7.64) == "1.2(76)"
+    assert value_str(.123567, .764) == "0.12(76)"
+    assert value_str(.0123567, .0764) == "12(76)e-3"
+    assert value_str(.00123567, .00764) == "1.2(76)e-3"
+    assert value_str(.000123567, .000764) == "0.12(76)e-3"
+
+    # val_place == err_place-1
+    assert value_str(123567, 7640000) == "0.1(76)e6"
+    assert value_str(12356.7, 764000) == "0.01(76)e6"
+    assert value_str(1235.67, 76400) == "0.001(76)e6"
+    assert value_str(123.567, 7640) == "0.1(76)e3"
+    assert value_str(12.3567, 764) == "0.01(76)e3"
+    assert value_str(1.23567, 76.4) == "0.001(76)e3"
+    assert value_str(.123567, 7.64) == "0.1(76)"
+    assert value_str(.0123567, .764) == "0.01(76)"
+    assert value_str(.00123567, .0764) == "0.001(76)"
+    assert value_str(.000123567, .00764) == "0.1(76)e-3"
+
+    # val_place == err_place-2
+    assert value_str(12356700, 7640000000) == "0.0(76)e9"
+    assert value_str(1235670, 764000000) == "0.00(76)e9"
+    assert value_str(123567, 76400000) == "0.000(76)e9"
+    assert value_str(12356, 7640000) == "0.0(76)e6"
+    assert value_str(1235, 764000) == "0.00(76)e6"
+    assert value_str(123, 76400) == "0.000(76)e6"
+    assert value_str(12, 7640) == "0.0(76)e3"
+    assert value_str(1, 764) == "0.00(76)e3"
+    assert value_str(0.1, 76.4) == "0.000(76)e3"
+    assert value_str(0.01, 7.64) == "0.0(76)"
+    assert value_str(0.001, 0.764) == "0.00(76)"
+    assert value_str(0.0001, 0.0764) == "0.000(76)"
+    assert value_str(0.00001, 0.00764) == "0.0(76)e-3"
+
+    # val_place == err_place-3
+    assert value_str(12356700, 76400000000) == "0.000(76)e12"
+    assert value_str(1235670, 7640000000) == "0.0(76)e9"
+    assert value_str(123567, 764000000) == "0.00(76)e9"
+    assert value_str(12356, 76400000) == "0.000(76)e9"
+    assert value_str(1235, 7640000) == "0.0(76)e6"
+    assert value_str(123, 764000) == "0.00(76)e6"
+    assert value_str(12, 76400) == "0.000(76)e6"
+    assert value_str(1, 7640) == "0.0(76)e3"
+    assert value_str(0.1, 764) == "0.00(76)e3"
+    assert value_str(0.01, 76.4) == "0.000(76)e3"
+    assert value_str(0.001, 7.64) == "0.0(76)"
+    assert value_str(0.0001, 0.764) == "0.00(76)"
+    assert value_str(0.00001, 0.0764) == "0.000(76)"
+    assert value_str(0.000001, 0.00764) == "0.0(76)e-3"
+
+    # Zero values
+    assert value_str(0, 7640000) == "0.0(76)e6"
+    assert value_str(0, 764000) == "0.00(76)e6"
+    assert value_str(0,  76400) == "0.000(76)e6"
+    assert value_str(0,   7640) == "0.0(76)e3"
+    assert value_str(0,    764) == "0.00(76)e3"
+    assert value_str(0,     76.4) == "0.000(76)e3"
+    assert value_str(0,      7.64) == "0.0(76)"
+    assert value_str(0,      0.764) == "0.00(76)"
+    assert value_str(0,      0.0764) == "0.000(76)"
+    assert value_str(0,      0.00764) == "0.0(76)e-3"
+    assert value_str(0,      0.000764) == "0.00(76)e-3"
+    assert value_str(0,      0.0000764) == "0.000(76)e-3"
+
+    # negative values
+    assert value_str(-1235670, 765000) == "-1.24(77)e6"
+    assert value_str(-1.23567, .766) == "-1.24(77)"
+    assert value_str(-.00000123567, .0000000766) == "-1.236(77)e-6"
+    assert value_str(-12356.7, 7.64) == "-12356.7(76)"
+    assert value_str(-123.567, 764) == "-0.12(76)e3"
+    assert value_str(-1235.67, 76400) == "-0.001(76)e6"
+    assert value_str(-.000123567, .00764) == "-0.1(76)e-3"
+    assert value_str(-12356, 7640000) == "-0.0(76)e6"
+    assert value_str(-12, 76400) == "-0.000(76)e6"
+    assert value_str(-0.0001, 0.764) == "-0.00(76)"
+
+    # non-finite values
+    assert value_str(-inf, None) == "-inf"
+    assert value_str(inf, None) == "inf"
+    assert value_str(NaN, None) == "NaN"
+
+    # bad or missing uncertainty
+    assert value_str(-1.23567, NaN) == "-1.23567"
+    assert value_str(-1.23567, -inf) == "-1.23567"
+    assert value_str(-1.23567, -0.1) == "-1.23567"
+    assert value_str(-1.23567, 0) == "-1.23567"
+    assert value_str(-1.23567, None) == "-1.23567"
+    assert value_str(-1.23567, inf) == "-1.2(inf)"
+
+
+def test_pm():
+    # Oops... renamed function after writing tests
+    value_str = format_uncertainty_pm
+
+    # val_place > err_place
+    assert value_str(1235670, 766000) == "1.24e6 +/- 0.77e6"
+    assert value_str(123567., 76600) == "124e3 +/- 77e3"
+    assert value_str(12356.7,  7660) == "12.4e3 +/- 7.7e3"
+    assert value_str(1235.67,   766) == "1.24e3 +/- 0.77e3"
+    assert value_str(123.567,    76.6) == "124 +/- 77"
+    assert value_str(12.3567,     7.66) == "12.4 +/- 7.7"
+    assert value_str(1.23567,      .766) == "1.24 +/- 0.77"
+    assert value_str(.123567,      .0766) == "0.124 +/- 0.077"
+    assert value_str(.0123567,     .00766) == "0.0124 +/- 0.0077"
+    assert value_str(.00123567,    .000766) == "0.00124 +/- 0.00077"
+    assert value_str(.000123567,   .0000766) == "124e-6 +/- 77e-6"
+    assert value_str(.0000123567,  .00000766) == "12.4e-6 +/- 7.7e-6"
+    assert value_str(.00000123567, .000000766) == "1.24e-6 +/- 0.77e-6"
+    assert value_str(.000000123567, .0000000766) == "124e-9 +/- 77e-9"
+    assert value_str(.00000123567, .0000000766) == "1.236e-6 +/- 0.077e-6"
+    assert value_str(.0000123567,  .0000000766) == "12.357e-6 +/- 0.077e-6"
+    assert value_str(.000123567,   .0000000766) == "123.567e-6 +/- 0.077e-6"
+    assert value_str(.00123567,    .000000766) == "0.00123567 +/- 0.00000077"
+    assert value_str(.0123567,     .00000766) == "0.0123567 +/- 0.0000077"
+    assert value_str(.123567,      .0000766) == "0.123567 +/- 0.000077"
+    assert value_str(1.23567,      .000766) == "1.23567 +/- 0.00077"
+    assert value_str(12.3567,      .00766) == "12.3567 +/- 0.0077"
+    assert value_str(123.567,      .0764) == "123.567 +/- 0.076"
+    assert value_str(1235.67,      .764) == "1235.67 +/- 0.76"
+    assert value_str(12356.7,     7.64) == "12356.7 +/- 7.6"
+    assert value_str(123567,     76.4) == "123567 +/- 76"
+    assert value_str(1235670,   764) == "1.23567e6 +/- 0.00076e6"
+    assert value_str(12356700,  764) == "12.35670e6 +/- 0.00076e6"
+    assert value_str(123567000, 764) == "123.56700e6 +/- 0.00076e6"
+    assert value_str(123567000, 7640) == "123.5670e6 +/- 0.0076e6"
+    assert value_str(1235670000, 76400) == "1.235670e9 +/- 0.000076e9"
+
+    # val_place == err_place
+    assert value_str(123567, 764000) == "0.12e6 +/- 0.76e6"
+    assert value_str(12356.7, 76400) == "12e3 +/- 76e3"
+    assert value_str(1235.67, 7640) == "1.2e3 +/- 7.6e3"
+    assert value_str(123.567, 764) == "0.12e3 +/- 0.76e3"
+    assert value_str(12.3567, 76.4) == "12 +/- 76"
+    assert value_str(1.23567, 7.64) == "1.2 +/- 7.6"
+    assert value_str(.123567, .764) == "0.12 +/- 0.76"
+    assert value_str(.0123567, .0764) == "12e-3 +/- 76e-3"
+    assert value_str(.00123567, .00764) == "1.2e-3 +/- 7.6e-3"
+    assert value_str(.000123567, .000764) == "0.12e-3 +/- 0.76e-3"
+
+    # val_place == err_place-1
+    assert value_str(123567, 7640000) == "0.1e6 +/- 7.6e6"
+    assert value_str(12356.7, 764000) == "0.01e6 +/- 0.76e6"
+    assert value_str(1235.67, 76400) == "0.001e6 +/- 0.076e6"
+    assert value_str(123.567, 7640) == "0.1e3 +/- 7.6e3"
+    assert value_str(12.3567, 764) == "0.01e3 +/- 0.76e3"
+    assert value_str(1.23567, 76.4) == "0.001e3 +/- 0.076e3"
+    assert value_str(.123567, 7.64) == "0.1 +/- 7.6"
+    assert value_str(.0123567, .764) == "0.01 +/- 0.76"
+    assert value_str(.00123567, .0764) == "0.001 +/- 0.076"
+    assert value_str(.000123567, .00764) == "0.1e-3 +/- 7.6e-3"
+
+    # val_place == err_place-2
+    assert value_str(12356700, 7640000000) == "0.0e9 +/- 7.6e9"
+    assert value_str(1235670, 764000000) == "0.00e9 +/- 0.76e9"
+    assert value_str(123567, 76400000) == "0.000e9 +/- 0.076e9"
+    assert value_str(12356, 7640000) == "0.0e6 +/- 7.6e6"
+    assert value_str(1235, 764000) == "0.00e6 +/- 0.76e6"
+    assert value_str(123, 76400) == "0.000e6 +/- 0.076e6"
+    assert value_str(12, 7640) == "0.0e3 +/- 7.6e3"
+    assert value_str(1, 764) == "0.00e3 +/- 0.76e3"
+    assert value_str(0.1, 76.4) == "0.000e3 +/- 0.076e3"
+    assert value_str(0.01, 7.64) == "0.0 +/- 7.6"
+    assert value_str(0.001, 0.764) == "0.00 +/- 0.76"
+    assert value_str(0.0001, 0.0764) == "0.000 +/- 0.076"
+    assert value_str(0.00001, 0.00764) == "0.0e-3 +/- 7.6e-3"
+
+    # val_place == err_place-3
+    assert value_str(12356700, 76400000000) == "0.000e12 +/- 0.076e12"
+    assert value_str(1235670, 7640000000) == "0.0e9 +/- 7.6e9"
+    assert value_str(123567, 764000000) == "0.00e9 +/- 0.76e9"
+    assert value_str(12356, 76400000) == "0.000e9 +/- 0.076e9"
+    assert value_str(1235, 7640000) == "0.0e6 +/- 7.6e6"
+    assert value_str(123, 764000) == "0.00e6 +/- 0.76e6"
+    assert value_str(12, 76400) == "0.000e6 +/- 0.076e6"
+    assert value_str(1, 7640) == "0.0e3 +/- 7.6e3"
+    assert value_str(0.1, 764) == "0.00e3 +/- 0.76e3"
+    assert value_str(0.01, 76.4) == "0.000e3 +/- 0.076e3"
+    assert value_str(0.001, 7.64) == "0.0 +/- 7.6"
+    assert value_str(0.0001, 0.764) == "0.00 +/- 0.76"
+    assert value_str(0.00001, 0.0764) == "0.000 +/- 0.076"
+    assert value_str(0.000001, 0.00764) == "0.0e-3 +/- 7.6e-3"
+
+    # Zero values
+    assert value_str(0, 7640000) == "0.0e6 +/- 7.6e6"
+    assert value_str(0, 764000) == "0.00e6 +/- 0.76e6"
+    assert value_str(0,  76400) == "0.000e6 +/- 0.076e6"
+    assert value_str(0,   7640) == "0.0e3 +/- 7.6e3"
+    assert value_str(0,    764) == "0.00e3 +/- 0.76e3"
+    assert value_str(0,     76.4) == "0.000e3 +/- 0.076e3"
+    assert value_str(0,      7.64) == "0.0 +/- 7.6"
+    assert value_str(0,      0.764) == "0.00 +/- 0.76"
+    assert value_str(0,      0.0764) == "0.000 +/- 0.076"
+    assert value_str(0,      0.00764) == "0.0e-3 +/- 7.6e-3"
+    assert value_str(0,      0.000764) == "0.00e-3 +/- 0.76e-3"
+    assert value_str(0,      0.0000764) == "0.000e-3 +/- 0.076e-3"
+
+    # negative values
+    assert value_str(-1235670, 766000) == "-1.24e6 +/- 0.77e6"
+    assert value_str(-1.23567, .766) == "-1.24 +/- 0.77"
+    assert value_str(-.00000123567, .0000000766) == "-1.236e-6 +/- 0.077e-6"
+    assert value_str(-12356.7, 7.64) == "-12356.7 +/- 7.6"
+    assert value_str(-123.567, 764) == "-0.12e3 +/- 0.76e3"
+    assert value_str(-1235.67, 76400) == "-0.001e6 +/- 0.076e6"
+    assert value_str(-.000123567, .00764) == "-0.1e-3 +/- 7.6e-3"
+    assert value_str(-12356, 7640000) == "-0.0e6 +/- 7.6e6"
+    assert value_str(-12, 76400) == "-0.000e6 +/- 0.076e6"
+    assert value_str(-0.0001, 0.764) == "-0.00 +/- 0.76"
+
+    # non-finite values
+    assert value_str(-inf, None) == "-inf"
+    assert value_str(inf, None) == "inf"
+    assert value_str(NaN, None) == "NaN"
+
+    # bad or missing uncertainty
+    assert value_str(-1.23567, NaN) == "-1.23567"
+    assert value_str(-1.23567, -inf) == "-1.23567"
+    assert value_str(-1.23567, -0.1) == "-1.23567"
+    assert value_str(-1.23567, 0) == "-1.23567"
+    assert value_str(-1.23567, None) == "-1.23567"
+    assert value_str(-1.23567, inf) == "-1.2 +/- inf"
+
+
+def test():
+    # Check compact and plus/minus formats
+    test_compact()
+    test_pm()
+    # Check that the default is the compact format
+    assert format_uncertainty(-1.23567, 0.766) == "-1.24(77)"
+
+    import doctest
+    doctest.testmod()
+
+if __name__ == "__main__":
+    test()
diff --git a/bumps/gui/__init__.py b/bumps/gui/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/bumps/gui/about.py b/bumps/gui/about.py
new file mode 100644
index 0000000..450a1d4
--- /dev/null
+++ b/bumps/gui/about.py
@@ -0,0 +1,195 @@
+# Copyright (C) 2006-2011, University of Maryland
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+# Author: James Krycka
+
+"""
+This module contains a custom About Dialog class and associated text strings
+used for informational display purposes.  Note that the product version is
+maintained in the version.py file and therefore is imported here.
+"""
+
+import os
+import wx
+
+try:
+    from agw.hyperlink import HyperLinkCtrl
+except ImportError: # if it's not there, try the older location.
+    from wx.lib.agw.hyperlink import HyperLinkCtrl
+
+from wx.lib.wordwrap import wordwrap
+
+from .. import __version__ as APP_VERSION
+from .utilities import resource
+
+# Resource files.
+PROG_ICON = "bumps.ico"
+
+# Text strings used in About Dialog boxes and for other project identification
+# purposes.
+#
+# Note that paragraphs intended to be processed by wordwrap are formatted as
+# one string without newline characters.
+APP_NAME = "Bumps"
+
+APP_TITLE = "Bumps: curve fitter with uncertainty estimation"
+
+APP_COPYRIGHT = "(C) 2011 University of Maryland"
+
+APP_DESCRIPTION = """\
+The Bumps uncertainty modeler provides an interactive user interface for \
+modelling an experiment and fitting it to data.\
+"""
+
+APP_LICENSE = """\
+Permission is hereby granted, free of charge, to any person obtaining a copy \
+of this software and associated documentation files (the "Software"), to deal \
+in the Software without restriction, including without limitation the rights \
+to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell \
+copies of the Software, and to permit persons to whom the Software is \
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in \
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR \
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE \
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER \
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN \
+THE SOFTWARE.
+"""
+
+APP_CREDITS = """\
+This program was developed jointly by the University of Maryland (UMD) and \
+the National Institute of Standards and Technology (NIST) as part of the \
+Distributed Data Analysis of Neutron Scattering Experiments (DANSE) project \
+funded by the US National Science Foundation under grant DMR-0520547. \
+Principal contributors:
+
+Paul Kienzle, NIST
+    - Application concept and design
+    - Application API and Command Line Interface development
+    - Optimizers and uncertainty analysis
+    - Documentation
+
+Ismet Sahin, UMD
+    - Optimizers
+
+James Krycka, UMD
+    - Graphical User Interface design and development
+    - Windows installer
+
+Nikunj Patel, UMD
+    - Graphical User Interface design and development
+
+Christopher Metting, UMD
+    - Artwork
+"""
+
+APP_PROJECT_URL = "http://github.com/bumps"
+APP_PROJECT_TAG = "Bumps home page"
+
+APP_TUTORIAL_URL = "http://pypi.org/docs/bumps"
+APP_TUTORIAL_TAG = "Bumps documentation"
+APP_TUTORIAL = """\
+For the Bumps User's Guide and Reference Manual, please visit:\
+"""
+
+#==============================================================================
+
+class AboutDialog(wx.Dialog):
+    """
+    This class creates a pop-up About Dialog box with several display options.
+    """
+
+    def __init__(self,
+                 parent=None,
+                 id = wx.ID_ANY,
+                 title="About",
+                 pos=wx.DefaultPosition,
+                 size=wx.DefaultSize,
+                 style=wx.DEFAULT_DIALOG_STYLE,
+                 show_name=True,
+                 show_notice=True,
+                 show_link=True,
+                 show_link_docs=False,
+                 info="..."
+                ):
+        wx.Dialog.__init__(self, parent, id, title, pos, size, style)
+
+        # Display the application's icon in the title bar.
+        icon = wx.Icon(resource(PROG_ICON), wx.BITMAP_TYPE_ICO)
+        self.SetIcon(icon)
+
+        # Set the font for this window and all child windows (widgets) from the
+        # parent window, or from the system defaults if no parent is given.
+        # A dialog box does not inherit font info from its parent, so we will
+        # explicitly get it from the parent and apply it to the dialog box.
+        if parent is not None:
+            font = parent.GetFont()
+            self.SetFont(font)
+
+        # Display program name and version.
+        if show_name:
+            prog = wx.StaticText(self, wx.ID_ANY,
+                                 label=(APP_NAME + " " + APP_VERSION))
+            font = prog.GetFont()
+            font.SetPointSize(font.GetPointSize() + 1)
+            font.SetWeight(wx.BOLD)
+            prog.SetFont(font)
+
+        # Display copyright notice.
+        if show_notice:
+            copyright = wx.StaticText(self, wx.ID_ANY, label=APP_COPYRIGHT)
+
+        # Display hyperlink to the home page and/or doc page.
+        if show_link:
+            hyper1 = HyperLinkCtrl(self, wx.ID_ANY, label=APP_PROJECT_TAG,
+                                                    URL=APP_PROJECT_URL)
+        if show_link_docs:
+            hyper2 = HyperLinkCtrl(self, wx.ID_ANY, label=APP_TUTORIAL_TAG,
+                                                    URL=APP_TUTORIAL_URL)
+
+        # Display the body of text for this about dialog box.
+        info = wx.StaticText(self, wx.ID_ANY,
+                             label=wordwrap(info, 530, wx.ClientDC(self)))
+        # Create the OK button control.
+        ok_button = wx.Button(self, wx.ID_OK, "OK")
+        ok_button.SetDefault()
+
+        # Use a vertical box sizer to manage the widget layout.
+        sizer = wx.BoxSizer(wx.VERTICAL)
+        if show_name:
+            sizer.Add(prog, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, border=10)
+        if show_notice:
+            sizer.Add(copyright, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, border=10)
+        sizer.Add(info, 0, wx.ALL, border=10)
+        if show_link:
+            sizer.Add(hyper1, 0, wx.ALL, border=10)
+        if show_link_docs:
+            sizer.Add(hyper2, 0, wx.ALL, border=10)
+        sizer.Add(ok_button, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, border=10)
+
+        # Finalize the sizer and establish the dimensions of the dialog box.
+        self.SetSizer(sizer)
+        sizer.Fit(self)
diff --git a/bumps/gui/app_frame.py b/bumps/gui/app_frame.py
new file mode 100644
index 0000000..77bee7d
--- /dev/null
+++ b/bumps/gui/app_frame.py
@@ -0,0 +1,239 @@
+# Copyright (C) 2006-2011, University of Maryland
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+#
+# Author: James Krycka
+
+"""
+This module implements the AppFrame class which creates the main frame of the
+GUI for the Bumps application including a basic menu, tool bar, and status bar.
+"""
+
+#==============================================================================
+
+import sys
+
+import wx
+
+from .about import (AboutDialog, APP_TITLE, APP_DESCRIPTION, APP_LICENSE,
+                    APP_CREDITS, APP_TUTORIAL)
+from .app_panel import AppPanel
+from .console import NumpyConsole
+from .utilities import resource, choose_fontsize, display_fontsize
+
+# Resource files.
+PROG_ICON = "bumps.ico"
+
+#==============================================================================
+class ModelConsole(NumpyConsole):
+    def OnChanged(self, added=[], changed=[], removed=[]):
+        pass
+    def OnClose(self, event):
+        self.Show(False)
+
+class AppFrame(wx.Frame):
+    """
+    This class creates the top-level frame for the application and populates it
+    with application specific panels and widgets.
+    """
+
+    def __init__(self, parent=None, id=wx.ID_ANY, title=APP_TITLE,
+                 pos=wx.DefaultPosition, size=wx.DefaultSize, name="AppFrame"):
+        wx.Frame.__init__(self, parent, id, title, pos, size, name=name)
+
+        # Display the application's icon in the title bar.
+        icon = wx.Icon(resource(PROG_ICON), wx.BITMAP_TYPE_ICO)
+        self.SetIcon(icon)
+
+        # Set the default font family and font size for the application.
+        self.set_default_font()
+
+        # Initialize the menu bar with common items.
+        self.add_menubar()
+
+        # Initialize the tool bar.
+        self.add_toolbar()
+
+        # Initialize the status bar.
+        self.add_statusbar()
+
+        # Build the application panels for the GUI on the frame.
+        self.panel = AppPanel(self)
+        self.panel.console = ModelConsole(self)
+        self.panel.console['app'] = self
+
+        # Note: Do not call self.Fit() as this will reduce the frame to its
+        # bare minimum size; we want it to keep its default size.
+
+    def set_default_font(self):
+        """
+        Sets the default font family and font size for the frame which will be
+        inherited by all child windows subsequently created.
+        """
+
+        # Save the system default font information before we make any changes.
+        default_fontname = self.GetFont().GetFaceName()
+        default_fontsize = self.GetFont().GetPointSize()
+
+        # If requested, override the font name to use.  Note that:
+        # - the MS Windows default font appears to be the same as Tahoma
+        # - Arial tends to be narrower and taller than Tahoma.
+        # - Verdana tends to be wider and shorter than Tahoma.
+        fontname = default_fontname
+        if len(sys.argv) > 1:
+            if '--tahoma' in sys.argv[1:]: fontname = "Tahoma"
+            if '--arial' in sys.argv[1:]: fontname = "Arial"
+            if '--verdana' in sys.argv[1:]: fontname = "Verdana"
+
+        fontsize = choose_fontsize(fontname=fontname)
+
+        # If requested, override the font point size to use.
+        if len(sys.argv) > 1:
+            if '--12pt' in sys.argv[1:]: fontsize = 12
+            if '--11pt' in sys.argv[1:]: fontsize = 11
+            if '--10pt' in sys.argv[1:]: fontsize = 10
+            if '--9pt' in sys.argv[1:]: fontsize = 9
+            if '--8pt' in sys.argv[1:]: fontsize = 8
+            if '--7pt' in sys.argv[1:]: fontsize = 7
+            if '--6pt' in sys.argv[1:]: fontsize = 6
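+        # For example, launching the GUI with "--verdana --10pt" on the
+        # command line selects 10 pt Verdana instead of the defaults above.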
+
+        # Set the default font for this and all child windows.  The font of the
+        # frame's title bar is not affected (which is a good thing).  However,
+        # setting the default font does not affect the font used in the frame's
+        # menu bar or menu items (which is not such a good thing because the
+        # menu text size may be different from the size used by the application's
+        # other widgets).  The menu font cannot be changed by wxPython.
+        self.SetFont(wx.Font(fontsize, wx.SWISS, wx.NORMAL, wx.NORMAL, False,
+                             fontname))
+
+        # If requested, display font and miscellaneous platform information.
+        if len(sys.argv) > 1 and '--platform' in sys.argv[1:]:
+            print("*** Platform =", wx.PlatformInfo)
+            print("*** Default font is %s  Chosen font is %s"\
+                  %(default_fontname, self.GetFont().GetFaceName()))
+            print("*** Default point size = %d  Chosen point size = %d"\
+                  %(default_fontsize, self.GetFont().GetPointSize()))
+            display_fontsize(fontname=fontname)
+
+
+    def add_menubar(self):
+        """Creates a default menu bar, menus, and menu options."""
+
+        # Create the menu bar.
+        mb = wx.MenuBar()
+        wx.MenuBar.SetAutoWindowMenu(False)
+
+        # Add a 'File' menu to the menu bar and define its options.
+        file_menu = wx.Menu()
+
+        _item = file_menu.Append(wx.ID_ANY, "&Exit", "Terminate application")
+        self.Bind(wx.EVT_MENU, self.OnExit, _item)
+
+        mb.Append(file_menu, "&File")
+
+        # Add a 'Help' menu to the menu bar and define its options.
+        help_menu = wx.Menu()
+
+        _item = help_menu.Append(wx.ID_ANY, "&About",
+                                            "Get description of application")
+        self.Bind(wx.EVT_MENU, self.OnAbout, _item)
+        _item = help_menu.Append(wx.ID_ANY, "&Documentation",
+                                            "Get User's Guide and Reference Manual")
+        self.Bind(wx.EVT_MENU, self.OnTutorial, _item)
+        _item = help_menu.Append(wx.ID_ANY, "License",
+                                            "Read license and copyright notice")
+        self.Bind(wx.EVT_MENU, self.OnLicense, _item)
+        _item = help_menu.Append(wx.ID_ANY, "Credits",
+                                            "Get list of authors and sponsors")
+        self.Bind(wx.EVT_MENU, self.OnCredits, _item)
+
+        help_menu.AppendSeparator()
+        _item = help_menu.Append(wx.ID_ANY, "&Console",
+                                            "Interactive Python shell")
+        self.Bind(wx.EVT_MENU, self.OnConsole, _item)
+
+        mb.Append(help_menu, "&Help")
+
+        # Attach the menu bar to the frame.
+        self.SetMenuBar(mb)
+
+
+    def add_toolbar(self):
+        """Creates a default tool bar."""
+
+        #tb = self.CreateToolBar()
+        tb = wx.ToolBar(parent=self, style=wx.TB_HORIZONTAL|wx.NO_BORDER)
+        tb.Realize()
+        self.SetToolBar(tb)
+
+
+    def add_statusbar(self):
+        """Creates a default status bar."""
+
+        sb = self.statusbar = self.CreateStatusBar()
+        sb.SetFieldsCount(1)
+
+
+    def OnAbout(self, evt):
+        """Shows the About dialog box."""
+
+        dlg = AboutDialog(parent=self, title="About", info=APP_DESCRIPTION,
+                          show_name=True, show_notice=True, show_link=True,
+                          show_link_docs=True)
+        dlg.ShowModal()
+        dlg.Destroy()
+
+
+    def OnCredits(self, evt):
+        """Shows the Credits dialog box."""
+
+        dlg = AboutDialog(parent=self, title="Credits", info=APP_CREDITS,
+                          show_name=True, show_notice=True, show_link=False,
+                          show_link_docs=False)
+        dlg.ShowModal()
+        dlg.Destroy()
+
+
+    def OnExit(self, event):
+        """Terminates the program."""
+        self.Close()
+
+
+    def OnLicense(self, evt):
+        """Shows the License dialog box."""
+
+        dlg = AboutDialog(parent=self, title="License", info=APP_LICENSE,
+                          show_name=True, show_notice=True, show_link=False,
+                          show_link_docs=False)
+        dlg.ShowModal()
+        dlg.Destroy()
+
+
+    def OnTutorial(self, event):
+        """Shows the Tutorial dialog box."""
+
+        dlg = AboutDialog(parent=self, title="Tutorial", info=APP_TUTORIAL,
+                          show_name=False, show_notice=False, show_link=False,
+                          show_link_docs=True)
+        dlg.ShowModal()
+        dlg.Destroy()
+
+    def OnConsole(self, event):
+        """Raise python console."""
+        self.panel.console.Show(True)
diff --git a/bumps/gui/app_panel.py b/bumps/gui/app_panel.py
new file mode 100644
index 0000000..542a402
--- /dev/null
+++ b/bumps/gui/app_panel.py
@@ -0,0 +1,602 @@
+# Copyright (C) 2006-2011, University of Maryland
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+#
+# Author: James Krycka, Nikunj Patel
+
+"""
+This module implements the AppPanel class which creates the main panel on top
+of the frame of the GUI for the Bumps application.
+"""
+
+# ==============================================================================
+
+from __future__ import division
+import os
+import threading
+import cPickle as pickle
+
+import wx
+import wx.aui
+
+from .. import plugin
+from ..cli import load_model
+from ..util import redirect_console
+from ..dream import stats as dream_stats
+
+from .plot_view import PlotView
+from .summary_view import SummaryView
+from .parameter_view import ParameterView
+from .log_view import LogView
+from .convergence_view import ConvergenceView
+from .uncertainty_view import CorrelationView, UncertaintyView, TraceView, ModelErrorView
+from .fit_dialog import show_fit_config
+from .fit_thread import (FitThread, EVT_FIT_PROGRESS, EVT_FIT_COMPLETE)
+from .util import nice
+from . import signal
+from .utilities import get_bitmap
+
+# File selection strings.
+MODEL_EXT = ".pickle"
+MODEL_FILES = "Model files (*%s)|*%s"%(MODEL_EXT,MODEL_EXT)
+PYTHON_FILES = "Script files (*.py)|*.py"
+DATA_FILES = "Data files (*.dat)|*.dat"
+TEXT_FILES = "Text files (*.txt)|*.txt"
+ALL_FILES = "All files (*.*)|*"
+
+# Custom colors.
+WINDOW_BKGD_COLOUR = "#ECE9D8"
+
+#==============================================================================
+
+class AppPanel(wx.Panel):
+    """
+    This class builds the GUI for the application on a panel and attaches it
+    to the frame.
+    """
+
+    def __init__(self, *args, **kw):
+        # Create a panel on the frame.  This will be the only child panel of
+        # the frame and it inherits its size from the frame which is useful
+        # during resize operations (as it provides a minimal size to sizers).
+
+        wx.Panel.__init__(self, *args, **kw)
+
+        self.SetBackgroundColour("WHITE")
+
+        # Modify the tool bar.
+        frame = self.GetTopLevelParent()
+        self.init_toolbar(frame)
+        self.init_menubar(frame)
+
+        # Reconfigure the status bar.
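+        # (Negative field widths are proportional shares, per wx.StatusBar.)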
+        self.init_statusbar(frame, [-34, -50, -16, -16])
+
+        # Create the model views
+        self.init_views()
+
+        # Add data menu
+        mb = frame.GetMenuBar()
+        data_view = self.view['data']
+        if hasattr(data_view, 'menu'):
+            mb.Append(data_view.menu(), data_view.title)
+
+        # Create a PubSub receiver.
+        signal.connect(self.OnLogMessage, "log")
+        signal.connect(self.OnModelNew, "model.new")
+        signal.connect(self.OnModelChange, "model.update_structure")
+        signal.connect(self.OnModelSetpar, "model.update_parameters")
+
+        EVT_FIT_PROGRESS(self, self.OnFitProgress)
+        EVT_FIT_COMPLETE(self, self.OnFitComplete)
+        self.fit_thread = None
+        self.fit_config = None
+
+    def init_menubar(self, frame):
+        """
+        Adds items to the menu bar, menus, and menu options.
+        The menu bar should already have a simple File menu and a Help menu.
+        """
+        mb = frame.GetMenuBar()
+
+        file_menu_id = mb.FindMenu("File")
+        file_menu = mb.GetMenu(file_menu_id)
+        #help_menu = mb.GetMenu(mb.FindMenu("Help"))
+
+        # Add items to the "File" menu (prepending them in reverse order).
+        # Grey out items that are not currently implemented.
+        file_menu.PrependSeparator()
+
+        _item = file_menu.Prepend(wx.ID_ANY,
+                                  "E&xport Results ...",
+                                  "Save theory, data and parameters")
+        frame.Bind(wx.EVT_MENU, self.OnFileExportResults, _item)
+
+        _item = file_menu.Prepend(wx.ID_ANY,
+                                  "&Reload",
+                                  "Reload the existing model")
+        frame.Bind(wx.EVT_MENU, self.OnFileReload, _item)
+
+        _item = file_menu.Prepend(wx.ID_SAVEAS,
+                                  "Save &As",
+                                  "Save model as another name")
+        frame.Bind(wx.EVT_MENU, self.OnFileSaveAs, _item)
+        #file_menu.Enable(id=wx.ID_SAVEAS, enable=False)
+        _item = file_menu.Prepend(wx.ID_SAVE,
+                                  "&Save",
+                                  "Save model")
+        frame.Bind(wx.EVT_MENU, self.OnFileSave, _item)
+        #file_menu.Enable(id=wx.ID_SAVE, enable=False)
+        _item = file_menu.Prepend(wx.ID_OPEN,
+                                  "&Open",
+                                  "Open existing model")
+        frame.Bind(wx.EVT_MENU, self.OnFileOpen, _item)
+        #file_menu.Enable(id=wx.ID_OPEN, enable=False)
+        _item = file_menu.Prepend(wx.ID_NEW,
+                                  "&New",
+                                  "Create new model")
+        frame.Bind(wx.EVT_MENU, self.OnFileNew, _item)
+        #file_menu.Enable(id=wx.ID_NEW, enable=False)
+
+        # Add 'Fitting' menu to the menu bar and define its options.
+        # Items are initially greyed out, but will be enabled after a script
+        # is loaded.
+        fit_menu = self.fit_menu = wx.Menu()
+
+        _item = fit_menu.Append(wx.ID_ANY,
+                                "Start",
+                                "Start fitting operation")
+        frame.Bind(wx.EVT_MENU, self.OnFitStart, _item)
+        fit_menu.Enable(id=_item.GetId(), enable=False)
+        self.fit_menu_start = _item
+
+        _item = fit_menu.Append(wx.ID_ANY,
+                                "Stop",
+                                "Stop fitting operation")
+        frame.Bind(wx.EVT_MENU, self.OnFitStop, _item)
+        fit_menu.Enable(id=_item.GetId(), enable=False)
+        self.fit_menu_stop = _item
+
+        _item = fit_menu.Append(wx.ID_ANY,
+                                "&Options ...",
+                                "Edit fitting options")
+        frame.Bind(wx.EVT_MENU, self.OnFitOptions, _item)
+        self.fit_menu_options = _item
+
+        #_item = fit_menu.Append(wx.ID_ANY,
+        #                        "&Save ...",
+        #                        "Save fit results")
+        #frame.Bind(wx.EVT_MENU, self.OnFitSave, _item)
+        #fit_menu.Enable(id=_item.GetId(), enable=False)
+        #self.fit_menu_save = _item
+
+        mb.Append(fit_menu, "&Fitting")
+
+
+    def init_toolbar(self, frame):
+        """Populates the tool bar."""
+        tb = self.tb = frame.GetToolBar()
+
+        script_bmp = get_bitmap("import_script.png", wx.BITMAP_TYPE_PNG)
+        reload_bmp = get_bitmap("reload.png", wx.BITMAP_TYPE_PNG)
+        start_bmp = get_bitmap("start_fit.png", wx.BITMAP_TYPE_PNG)
+        stop_bmp = get_bitmap("stop_fit.png", wx.BITMAP_TYPE_PNG)
+
+        _tool = tb.AddSimpleTool(wx.ID_ANY, script_bmp,
+                                 "Open model",
+                                 "Load model from script")
+        frame.Bind(wx.EVT_TOOL, self.OnFileOpen, _tool)
+        _tool = tb.AddSimpleTool(wx.ID_ANY, reload_bmp,
+                                 "Reload model",
+                                 "Reload model from script")
+        frame.Bind(wx.EVT_TOOL, self.OnFileReload, _tool)
+        # TODO: add reload
+
+        tb.AddSeparator()
+
+        _tool = tb.AddSimpleTool(wx.ID_ANY, start_bmp,
+                                 "Start Fit",
+                                 "Start fitting operation")
+        frame.Bind(wx.EVT_TOOL, self.OnFitStart, _tool)
+        tb.EnableTool(_tool.GetId(), False)
+        self.tb_start = _tool
+
+        _tool = tb.AddSimpleTool(wx.ID_ANY, stop_bmp,
+                                 "Stop Fit",
+                                 "Stop fitting operation")
+        frame.Bind(wx.EVT_TOOL, self.OnFitStop, _tool)
+        tb.EnableTool(_tool.GetId(), False)
+        self.tb_stop = _tool
+
+        tb.Realize()
+        frame.SetToolBar(tb)
+
+    def init_statusbar(self, frame, subbars):
+        """Divides the status bar into multiple segments."""
+
+        self.sb = frame.GetStatusBar()
+        self.sb.SetFieldsCount(len(subbars))
+        self.sb.SetStatusWidths(subbars)
+
+    def init_views(self):
+        # initial view
+        self.aui = wx.aui.AuiNotebook(self)
+        self.aui.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.OnViewTabClose)
+        self.view_constructor = {
+            'data': plugin.data_view(),
+            'model': plugin.model_view(),
+            'parameter': ParameterView,
+            'summary': SummaryView,
+            'log': LogView,
+            'convergence': ConvergenceView,
+            'uncertainty': UncertaintyView,
+            'correlation': CorrelationView,
+            'trace': TraceView,
+            'error': ModelErrorView,
+            }
+        self.view_list = ['data','model','parameter',
+                          'summary','log','convergence',
+                          'uncertainty','correlation','trace','error']
+        self.view = {}
+        for v in self.view_list:
+            if self.view_constructor[v]:
+                self.view[v] = self.view_constructor[v](self.aui,
+                                                        size=(600,600))
+                self.aui.AddPage(self.view[v],self.view_constructor[v].title)
+        sizer = wx.BoxSizer(wx.VERTICAL)
+        sizer.Add(self.aui, 1, wx.EXPAND)
+        self.SetSizer(sizer)
+
+    def show_view(self, tag):
+        if self.view[tag].Parent == self.aui:
+            self.aui.SetSelection(self.aui.GetPageIndex(self.view[tag]))
+        else:
+            self.view[tag].Raise()
+            self.view[tag].SetFocus()
+
+    def OnViewTabClose(self, evt):
+        win = self.aui.GetPage(evt.GetSelection())
+        #print "Closing tab",win.GetId()
+        for k, w in self.view.items():
+            if w == win:
+                tag = k
+                break
+        else:
+            raise RuntimeError("Lost track of view")
+        #print "creating external frame"
+        state = self.view[tag].get_state()
+        constructor = self.view_constructor[tag]
+        frame = wx.Frame(self, title=constructor.title,
+                         size=constructor.default_size)
+        panel = constructor(frame)
+        self.view[tag] = panel
+        sizer = wx.BoxSizer(wx.VERTICAL)
+        sizer.Add(panel, 1, wx.EXPAND)
+        frame.SetSizer(sizer)
+        frame.Bind(wx.EVT_CLOSE, self.OnViewFrameClose)
+        frame.Show()
+        panel.set_state(state)
+        evt.Skip()
+
+
+    def OnViewFrameClose(self, evt):
+        win = evt.GetEventObject()
+        #print "Closing frame",win.GetId()
+        for k, w in self.view.items():
+            if w.GetParent() == win:
+                tag = k
+                break
+        else:
+            raise RuntimeError("Lost track of view!")
+        state = self.view[tag].get_state()
+        constructor = self.view_constructor[tag]
+        panel = constructor(self.aui)
+        self.view[tag] = panel
+        self.aui.AddPage(panel, constructor.title)
+        panel.set_state(state)
+        evt.Skip()
+
+    # model viewer interface
+    def OnLogMessage(self, message):
+        for v in self.view.values():
+            if hasattr(v, 'log_message'):
+                v.log_message(message)
+
+    def OnModelNew(self, model):
+        self.set_model(model)
+
+    def OnModelChange(self, model):
+        for v in self.view.values():
+            if hasattr(v, 'update_model'):
+                v.update_model(model)
+
+    def OnModelSetpar(self, model):
+        for v in self.view.values():
+            if hasattr(v, 'update_parameters'):
+                v.update_parameters(model)
+
+    def OnFileNew(self, event):
+        self.new_model()
+
+    def OnFileOpen(self, event):
+        # Load the script which will contain model definition and data.
+        dlg = wx.FileDialog(self,
+                            message="Select File",
+                            #defaultDir=os.getcwd(),
+                            #defaultFile="",
+                            wildcard=(ALL_FILES),
+                            style=wx.OPEN|wx.CHANGE_DIR)
+
+        # Wait for user to close the dialog.
+        status = dlg.ShowModal()
+        path = dlg.GetPath()
+        dlg.Destroy()
+
+        # Process file if user clicked okay.
+        if status == wx.ID_OK:
+            self.load_model(path)
+
+    def OnFileReload(self, event):
+        path = getattr(self, '_reload_path', self.model.path)
+        self.load_model(path)
+
+    def OnFileSave(self, event):
+        if self.model is not None:
+            # Force the result to be a pickle
+            self.model.path = os.path.splitext(self.model.path)[0]+MODEL_EXT
+            self.save_model(self.model.path)
+        else:
+            self.OnFileSaveAs(event)
+
+    def OnFileSaveAs(self, event):
+        dlg = wx.FileDialog(self,
+                            message="Select File",
+                            defaultDir=os.getcwd(),
+                            defaultFile="",
+                            wildcard=(MODEL_FILES+"|"+ALL_FILES),
+                            style=wx.SAVE|wx.CHANGE_DIR|wx.OVERWRITE_PROMPT)
+        # Wait for user to close the dialog.
+        status = dlg.ShowModal()
+        path = dlg.GetPath()
+        dlg.Destroy()
+
+        # Process file if user clicked okay.
+        if status == wx.ID_OK:
+            self.model.path = os.path.splitext(path)[0]+MODEL_EXT
+            self.save_model(self.model.path)
+
+
+    def OnFileExportResults(self, event):
+        dlg = wx.DirDialog(self,
+                           message="Export results",
+                           defaultPath=os.getcwd(),
+                           style=wx.DD_DEFAULT_STYLE)
+        # Wait for user to close the dialog.
+        status = dlg.ShowModal()
+        path = dlg.GetPath()
+        dlg.Destroy()
+
+        # Process file if user clicked okay.
+        if status == wx.ID_OK:
+            self.save_results(path)
+
+    def OnFitOptions(self, event):
+        # If there is an error here, it is because fit_config was not set
+        # when the panel was created.  Since this will never happen, we
+        # won't put in a runtime check.  Option processing happens in
+        # gui_app.MainApp.after_show as of this writing.
+        show_fit_config(self, self.fit_config)
+
+    def OnFitStart(self, event):
+        self.uncertainty_state = False
+        if self.fit_thread:
+            self.sb.SetStatusText("Error: Fit already running")
+            return
+        # TODO: better access to model parameters
+        if len(self.model.getp()) == 0:
+            raise ValueError("Problem has no fittable parameters")
+
+        # Start a new thread worker and give fit problem to the worker.
+        fitclass = self.fit_config.selected_fitter
+        options = self.fit_config.selected_values
+        self.fitLock = threading.Lock()
+        self.fitAbort = 0
+
+        def abort_test():
+            return self.fitAbort
+        self.fit_thread = FitThread(win=self, fitLock=self.fitLock,
+                                    abort_test=abort_test,
+                                    problem=self.model,
+                                    fitclass=fitclass,
+                                    options=options)
+        self.sb.SetStatusText("Fit status: Running", 3)
+
+    def OnFitStop(self, event):
+        with self.fitLock:
+            self.fitAbort = 1
+
+    def OnFitComplete(self, event):
+        self.fit_thread = None
+        chisq = nice(2*event.value/event.problem.dof)
+        event.problem.setp(event.point)
+        signal.update_parameters(model=event.problem)
+        signal.log_message(message="done with chisq %g"%chisq)
+        signal.log_message(message=event.info)
+        self.sb.SetStatusText("Fit status: Complete", 3)
+        beep()
+
+    def OnFitSave(self, event):
+        raise NotImplementedError()
+        dlg = wx.FileDialog(self,
+                            message="Fit results",
+                            defaultDir=os.getcwd(),
+                            defaultFile="",
+                            wildcard=(MODEL_FILES+"|"+ALL_FILES),
+                            style=wx.SAVE|wx.CHANGE_DIR|wx.OVERWRITE_PROMPT)
+        # Wait for user to close the dialog.
+        status = dlg.ShowModal()
+        path = dlg.GetPath()
+        dlg.Destroy()
+
+        # Process file if user clicked okay.
+        if status == wx.ID_OK:
+            self.save_fit(path)
+
+
+    def OnFitProgress(self, event):
+        if event.message == 'progress':
+            chisq = nice(2*event.value/event.problem.dof)
+            message = "step %5d chisq %g"%(event.step, chisq)
+            signal.log_message(message=message)
+        elif event.message == 'improvement':
+            event.problem.setp(event.point)
+            event.problem.model_update()
+            signal.update_parameters(model=event.problem)
+        elif event.message == 'convergence_update':
+            self.view['convergence'].OnFitProgress(event)
+        elif event.message in ('uncertainty_update', 'uncertainty_final'):
+            self.uncertainty_state = event.uncertainty_state
+            self.console['state'] = self.uncertainty_state
+            self.view['uncertainty'].OnFitProgress(event)
+            self.view['correlation'].OnFitProgress(event)
+            self.view['trace'].OnFitProgress(event)
+            if event.message == 'uncertainty_final':
+                self.view['error'].OnFitProgress(event)
+            # variable stats are needed in order to plot UncertaintyView, and
+            # so are computed therein.  Format them nicely and show them on
+            # the console as well.
+            signal.log_message(dream_stats.format_vars(self.view['uncertainty'].plot_state[1]))
+        else:
+            raise ValueError("Unknown fit progress message "+event.message)
+
+    def new_model(self):
+        from ..plugin import new_model as gen
+        self.set_model(gen())
+
+    def load_model(self, path):
+        self._reload_path = path
+        model = load_model(path)
+        signal.model_new(model=model)
+
+    def save_model(self, path):
+        try:
+           with open(path,'wb') as fid:
+               pickle.dump(self.model, fid)
+        except Exception:
+            import traceback
+            signal.log_message(message=traceback.format_exc())
+
+
+    def save_results(self, path):
+        output_path = os.path.join(path, self.model.name)
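+        # This writes <name>.pickle, <name>.out and <name>.par, saves the
+        # model plots, and, if uncertainty analysis ran, the <name>.err output
+        # and the saved uncertainty state.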
+
+        # Storage directory
+        if not os.path.exists(path):
+            os.mkdir(path)
+
+        # Ask model to save its information
+        self.model.save(output_path)
+
+        # Save a pickle of the model that can be reloaded
+        self.save_model(output_path+MODEL_EXT)
+
+        # Save the current state of the parameters
+        with redirect_console(output_path+".out"):
+            self.model.show()
+        pardata = "".join("%s %.15g\n"%(name, value) for name, value in
+                          zip(self.model.labels(), self.model.getp()))
+        with open(output_path+".par", 'wt') as fid:
+            fid.write(pardata)
+
+        # Produce model plots
+        self.model.plot(figfile=output_path)
+
+        # Produce uncertainty plots
+        if hasattr(self, 'uncertainty_state') and self.uncertainty_state:
+            with redirect_console(output_path+".err"):
+                self.uncertainty_state.show(figfile=output_path)
+            self.uncertainty_state.save(output_path)
+
+
+    def _add_measurement_type(self, type):
+        """
+        Add the panels needed to view a measurement of the given type.
+
+        *type* is fitness.__class__, where fitness is the measurement cost function.
+        """
+        name = type.__name__
+        if type not in self.data_tabs:
+            tab = self.data_notebook.add_tab(type, name+" Data")
+            constructor = getattr(type, 'data_panel', PlotView)
+            constructor(tab)
+        if type not in self.model_notebook and hasattr(type, 'model_panel'):
+            tab = self.model_notebook.add_tab(type, name+" Model")
+            type.model_panel(tab)
+
+    def _view_problem(self, problem):
+        """
+        Set the model and data views to those necessary to display the problem.
+        """
+        # What types of measurements do we have?
+        models = problem.models if hasattr(problem,'models') else [problem]
+        types = set(p.fitness.__class__ for p in models)
+        for p in types: self._add_measurement_type(p)
+
+        # Show only the relevant views
+        for p,tab in self.data_notebook.tabs():
+            tab.Show(p in types)
+        for p,tab in self.model_notebook.tabs():
+            tab.Show(p in types)
+
+    def set_model(self, model):
+        # Inform the various tabs that the model they are viewing has changed.
+        self.model = model
+
+        # Point all of our views at the new model
+        for v in self.view.values():
+            if hasattr(v,'set_model'):
+                v.set_model(model)
+        self.console['model'] = model
+
+        # Enable appropriate menu items.
+        self.fit_menu.Enable(id=self.fit_menu_start.GetId(), enable=True)
+        self.fit_menu.Enable(id=self.fit_menu_stop.GetId(), enable=True)
+        self.fit_menu.Enable(id=self.fit_menu_options.GetId(), enable=True)
+
+        # Enable appropriate toolbar items.
+        self.tb.EnableTool(id=self.tb_start.GetId(), enable=True)
+        self.tb.EnableTool(id=self.tb_stop.GetId(), enable=True)
+        if hasattr(model, 'path'):
+            signal.log_message(message="loaded "+model.path)
+            self.GetTopLevelParent().SetTitle("Bumps: %s"%model.name)
+        else:
+            signal.log_message(message="new model")
+            self.GetTopLevelParent().SetTitle("Bumps")
+
+SOUND = None
+def beep():
+    """
+    Play fit completion sound.
+    """
+    wx.Bell()
+    ## FIXME why doesn't sound work?
+    #global SOUND
+    #if SOUND is None:
+    #    SOUND = wx.Sound(resource('done.wav'))
+    #if SOUND.IsOk():
+    #    SOUND.Play(wx.SOUND_ASYNC)
diff --git a/bumps/gui/console.py b/bumps/gui/console.py
new file mode 100644
index 0000000..149fc6f
--- /dev/null
+++ b/bumps/gui/console.py
@@ -0,0 +1,208 @@
+"""
+Interactive console widget support.
+
+Defines NumpyConsole class.
+
+TODO: Fix cut/paste for multiline commands
+TODO: Trigger change notification when numpy array has changed
+"""
+from __future__ import print_function
+
+import wx, wx.py
+
+def shapestr(v):
+    """Return shape string for numeric variables suitable for printing"""
+    try:
+        shape = v.shape
+    except AttributeError:
+        return "scalar"
+    else:
+        return "array "+"x".join([str(i) for i in shape])
+
+class NumpyConsole(wx.py.shell.ShellFrame):
+    """
+    NumpyConsole defines an interactive console which is aware of all the
+    numerical variables in the local name space.  When variables are added
+    or removed, it signals that the set of variables has changed.
+
+    This is intended to be used as an embedded console in an interactive
+    application for which the user can define and manipulate numerical
+    types and automatically show a list of variables, e.g., available for
+    plotting.
+
+    If you subclass and replace self.init_code, be sure to do so _before_ calling
+    the superclass __init__.
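+
+    Minimal usage sketch (assumes numpy is imported as np):
+
+        console = NumpyConsole()
+        console['x'] = np.linspace(0, 1, 100)   # expose a variable to the shell
+        console.Show(True)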
+    """
+
+    # code to define the initial namespace
+    init_code = """
+from pylab import *
+"""
+    introText = """
+Welcome to the numpy console!
+
+Example:
+
+x = linspace(0,1,300)
+y = sin(2*pi*x*3)
+vars()
+plot(x,y)
+"""
+
+    def __init__(self, *args, **kwargs):
+        # Start interpreter and monitor statement execution
+        msg = kwargs.pop('introText', self.introText)
+        wx.py.shell.ShellFrame.__init__(self, *args, **kwargs)
+
+        # Print welcome message.
+        # TODO: replace this when (if?) ShellFrame allows introText=msg
+        print(msg, file=self.shell)
+        self.shell.prompt()
+
+        # Initialize the interpreter namespace with useful commands
+        self.shell.interp.runcode(compile(self.init_code,"__main__","exec"))
+
+        # steal draw_if_interactive
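+        # Pylab normally redraws the active figure after each plot command;
+        # instead we collect the touched figures in self._dirty and redraw
+        # them once the command completes (see _onPush).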
+        import pylab
+        from matplotlib._pylab_helpers import Gcf
+        from matplotlib import pyplot
+        self._dirty = set()
+        def draw_if_interactive():
+            #print "calling draw_if_interactive with",Gcf.get_active()
+            self._dirty.add(Gcf.get_active())
+        pyplot.draw_if_interactive = draw_if_interactive
+
+        # add vars command to the interpreter
+        self.shell.interp.locals['vars'] = self._print_vars
+
+        # ignore the variables defined by numpy
+        self.ignore = set(self.shell.interp.locals.keys())
+        self._existing = {} # No new variables recorded yet
+
+        # remember which variables are current so we can detect changes
+        wx.py.dispatcher.connect(receiver=self._onPush,
+                                 signal='Interpreter.push')
+
+    def filter(self,key,value):
+        """
+        Return True if the variable *key* should be listed in the available
+        variables.
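+
+        Subclasses may override this; for instance, a sketch that also hides
+        callables:
+
+            def filter(self, key, value):
+                return key not in self.ignore and not callable(value)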
+        """
+        return key not in self.ignore
+
+    # Dictionary interface
+    def items(self):
+        """
+        Yield the (key, value) pairs for all locals that are not ignored.
+        """
+        locals = self.shell.interp.locals
+        for (k,v) in locals.items():
+            if self.filter(k,v): yield k,v
+
+    def update(self, *args, **kw):
+        """
+        Update a set of variables from a dictionary.
+        """
+        self.shell.interp.locals.update(*args, **kw)
+        self._existing.update(*args, **kw)
+
+    def __setitem__(self,var,val):
+        """
+        Define or replace a variable in the interpreter.
+        """
+        self.shell.interp.locals[var] = val
+        self._existing[var] = val
+
+    def __getitem__(self,var):
+        """
+        Retrieve a variable from the interpreter.
+        """
+        return self.shell.interp.locals[var]
+
+    def __delitem__(self,var):
+        """
+        Delete a variable from the interpreter.
+        """
+        del self.shell.interp.locals[var]
+        try:
+            del self._existing[var]
+        except KeyError:
+            pass
+
+    # Stream interface
+    def write(self, msg):
+        """
+        Support 'print(blah, file=console)' for putting output on the console.
+
+        TODO: Maybe redirect stdout to console if console is open?
+        """
+        self.shell.write(msg)
+
+    # ==== Internal messages ====
+    def OnChanged(self,added=[],changed=[],removed=[]):
+        """
+        Override this method to perform your changed operation.
+
+        Note that we cannot detect changes within a variable without considerable
+        effort: we would need to keep a deep copy of the original value, and
+        use a deep comparison to see if it has changed.
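+
+        A subclass might, for example, refresh the application's variable
+        list (sketch; update_plot_choices is a hypothetical hook):
+
+            def OnChanged(self, added=[], changed=[], removed=[]):
+                self.update_plot_choices()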
+        """
+        for var in added:
+            print("added",var, file=self.shell)
+        for var in changed:
+            print("changed",var, file=self.shell)
+        for var in removed:
+            print("deleted",var, file=self.shell)
+        print("override the OnChanged message to update your application state", file=self.shell)
+
+    def _print_vars(self):
+        """
+        Print the available numeric variables and their shapes.
+
+        This is a command available to the user as vars().
+        """
+        locals = self.shell.interp.locals
+        for (k,v) in self.items():
+            print(k, shapestr(v), file=self.shell)
+
+
+    def _onPush(self,**kw):
+        """On command execution, detect if variable list has changed."""
+        # Note: checking for modify is too hard ... build it into the types?
+        #print >>self.shell, "checking for add/delete..."
+        # Update graphs if changed
+        if self._dirty:
+            import pylab
+            from matplotlib._pylab_helpers import Gcf
+            #print "figs",Gcf.figs
+            #print "dirty",self._dirty
+            for fig in self._dirty:
+                #print fig, Gcf.figs.values(),fig in Gcf.figs.values()
+                if fig and fig in Gcf.figs.values():
+                    #print "drawing"
+                    fig.canvas.draw()
+            pylab.show()
+            self._dirty.clear()
+
+        items = dict(list(self.items()))
+        oldkeys = set(self._existing.keys())
+        newkeys = set(items.keys())
+        added   = newkeys - oldkeys
+        removed = oldkeys - newkeys
+        changed = set(k for k in (oldkeys&newkeys)
+                      if items[k] is not self._existing[k])
+        if added or changed or removed:
+            self.OnChanged(added=added,changed=changed,removed=removed)
+        self._existing = items
+
+def demo():
+    """Example use of the console."""
+    import numpy as np
+    app = wx.App(redirect=False)
+    ignored = { 'f': lambda x: 3+x }
+    console = NumpyConsole(locals=ignored)
+    console.update({ 'x': np.array([[42,15],[-10,12]]), 'z': 42. })
+    console.Show(True)
+    app.MainLoop()
+
+if __name__ == "__main__":
+    demo()
diff --git a/bumps/gui/convergence_view.py b/bumps/gui/convergence_view.py
new file mode 100644
index 0000000..f49c404
--- /dev/null
+++ b/bumps/gui/convergence_view.py
@@ -0,0 +1,67 @@
+from __future__ import with_statement
+
+import numpy as np
+
+from .. import monitor
+from ..plotutil import coordinated_colors
+from .plot_view import PlotView
+
+
+class ConvergenceMonitor(monitor.Monitor):
+    """
+    Gather statistics about the best value and the spread of the population
+    values.  This will be the input for the convergence plot.
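+
+    Each recorded entry is a tuple (best, min, 20%, median, 80%, max) of the
+    population values, or just (best,) when population values are unavailable.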
+    """
+    def __init__(self):
+        self.pop = []
+    def config_history(self, history):
+        history.requires(population_values=1, value=1)
+    def __call__(self, history):
+        best = history.value[0]
+        try:
+            pop = history.population_values[0]
+            n = len(pop)
+            p = np.sort(pop)
+            QI, Qmid = int(0.2*n), int(0.5*n)
+            self.pop.append((best, p[0],p[QI],p[Qmid],p[-1-QI],p[-1]))
+        except AttributeError:
+            self.pop.append((best, ))
+    def progress(self):
+        if not self.pop:
+            return dict(pop=np.empty((0,1),'d'))
+        else:
+            return dict(pop=np.array(self.pop))
+
+
+class ConvergenceView(PlotView):
+    title = "Convergence"
+    def plot(self):
+        if not self.plot_state: return
+        pop,best = self.plot_state
+        with self.pylab_interface as pylab:
+            pylab.clf()
+            ni,npop = pop.shape
+            iternum = np.arange(1,ni+1)
+            tail = int(0.25*ni)
+            c = coordinated_colors(base=(0.4,0.8,0.2))
+            if npop==5:
+                pylab.fill_between(iternum[tail:], pop[tail:,1], pop[tail:,3],
+                                   color=c['light'], label='_nolegend_')
+                pylab.plot(iternum[tail:],pop[tail:,2],
+                           label="80% range", color=c['base'], hold=True)
+                pylab.plot(iternum[tail:],pop[tail:,0],
+                           label="_nolegend_", color=c['base'], hold=True)
+            pylab.plot(iternum[tail:], best[tail:], label="best",
+                       color=c['dark'], hold=True)
+            pylab.xlabel('iteration number')
+            pylab.ylabel('chisq')
+            pylab.legend()
+            #pylab.gca().set_yscale('log')
+            pylab.draw()
+    def update(self, best, pop):
+        self.plot_state = pop, best
+        self.plot()
+    def OnFitProgress(self, event):
+        if event.problem != self.model: return
+        pop = 2*event.pop/event.problem.dof
+        self.update(pop[:,0], pop[:,1:])
diff --git a/bumps/gui/data_view.py b/bumps/gui/data_view.py
new file mode 100755
index 0000000..69817ab
--- /dev/null
+++ b/bumps/gui/data_view.py
@@ -0,0 +1,232 @@
+from __future__ import with_statement
+
+import wx
+# Can't seem to detect when notebook should be drawn on Mac
+IS_MAC = (wx.Platform == '__WXMAC__')
+
+from numpy import inf
+
+from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
+from matplotlib.backends.backend_wxagg import NavigationToolbar2Wx as Toolbar
+
+# The Figure object is used to create backend-independent plot representations.
+from matplotlib.figure import Figure
+
+from ..fitproblem import MultiFitProblem
+
+from .util import EmbeddedPylab
+
+# ------------------------------------------------------------------------
+class DataView(wx.Panel):
+    title = 'Data'
+    default_size = (600,400)
+    def __init__(self, *args, **kw):
+        wx.Panel.__init__(self, *args, **kw)
+
+        # Instantiate a figure object that will contain our plots.
+        figure = Figure(figsize=(1,1), dpi=72)
+
+        # Initialize the figure canvas, mapping the figure object to the plot
+        # engine backend.
+        canvas = FigureCanvas(self, wx.ID_ANY, figure)
+
+        # Wx-Pylab magic ...
+        # Make our canvas an active figure manager for pylab so that when
+        # pylab plotting statements are executed they will operate on our
+        # canvas and not create a new frame and canvas for display purposes.
+        # This technique allows this application to execute code that uses
+        # pylab statements to generate plots and embed these plots in our
+        # application window(s).  Use _activate_figure() to set.
+        self.pylab_interface = EmbeddedPylab(canvas)
+
+        # Instantiate the matplotlib navigation toolbar and explicitly show it.
+        mpl_toolbar = Toolbar(canvas)
+        mpl_toolbar.Realize()
+
+        # Create a vertical box sizer to manage the widgets in the main panel.
+        sizer = wx.BoxSizer(wx.VERTICAL)
+        sizer.Add(canvas, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, border=0)
+        sizer.Add(mpl_toolbar, 0, wx.EXPAND|wx.ALL, border=0)
+
+        # Associate the sizer with its container.
+        self.SetSizer(sizer)
+        sizer.Fit(self)
+
+        self._need_redraw = False
+        self.Bind(wx.EVT_SHOW, self.OnShow)
+        self._calculating = False
+        self.toolbar = mpl_toolbar
+        self.view = 'linear'
+
+    def menu(self):
+        # Add 'View' menu to the menu bar and define its options.
+        # Present y-axis plotting scales as radio buttons.
+        # Grey out items that are not currently implemented.
+        frame = wx.GetTopLevelParent(self)
+        menu = wx.Menu()
+        _item = menu.AppendRadioItem(wx.ID_ANY,
+                                          "Li&near",
+                                          "Plot y-axis in linear scale")
+        _item.Check(True)
+        frame.Bind(wx.EVT_MENU, self.OnLinear, _item)
+        _item = menu.AppendRadioItem(wx.ID_ANY,
+                                          "&Log",
+                                          "Plot y-axis in log scale")
+        frame.Bind(wx.EVT_MENU, self.OnLog, _item)
+
+        menu.AppendSeparator()
+
+        _item = menu.Append(wx.ID_ANY,
+                                 "&Residuals",
+                                 "Show residuals on plot panel")
+        frame.Bind(wx.EVT_MENU, self.OnResiduals, _item)
+        menu.Enable(id=_item.GetId(), enable=True)
+
+        return menu
+
+    # ==== Views ====
+    # TODO: can probably parameterize the view selection.
+    def OnLog(self, event):
+        self.view = "log"
+        self.redraw()
+
+    def OnLinear(self, event):
+        self.view = "linear"
+        self.redraw()
+
+    def OnResiduals(self, event):
+        self.view = "residual"
+        self.redraw()
+
+    # ==== Model view interface ===
+    def OnShow(self, event):
+        #print "theory show"
+        if not event.Show: return
+        #print "showing theory"
+        if self._need_redraw:
+            #print "-redraw"
+            self.redraw()
+    def get_state(self):
+        return self.problem
+    def set_state(self, state):
+        self.set_model(state)
+
+    def set_model(self, model):
+        self.problem = model
+        self.redraw(reset=True)
+
+    def update_model(self, model):
+        if self.problem == model:
+            self.redraw()
+
+    def update_parameters(self, model):
+        if self.problem == model:
+            self.redraw()
+    # =============================
+
+    def redraw(self, reset=False):
+        # Hold off drawing until the tab is visible
+        if not IS_MAC and not self.IsShown():
+            self._need_redraw = True
+            return
+        #print "drawing theory"
+
+        if self._calculating:
+            # This means redraw was re-entered through a wx.Yield while the
+            # previous redraw is still executing.  Flag the running
+            # calculation to cancel and force it to start over.
+            self._cancel_calculate = True
+            #print "canceling calculation"
+            return
+
+        self._need_redraw = False
+        self._calculating = True
+
+        # Calculate theory
+        #print "calling again"
+        while True:
+            #print "restarting"
+            # We are restarting the calculation, so clear the cancel flag
+            self._cancel_calculate = False
+
+            # clear graph and exit if problem is not defined
+            if self.problem is None:
+                with self.pylab_interface as pylab:
+                    pylab.clf() # clear the canvas
+                    break
+
+            # Perform the calculation
+            if isinstance(self.problem,MultiFitProblem):
+                #print "n=",len(self.problem.models)
+                for p in self.problem.models:
+                    self._precalc(p)
+                    #print "cancel",self._cancel_calculate,"reset",p.updating
+                    if self._cancel_calculate: break
+                if self._cancel_calculate: continue
+            else:
+                self._precalc(self.problem)
+                if self._cancel_calculate: continue
+
+            # Redraw the canvas with newly calculated theory
+            # TODO: drawing is 10x too slow!
+            with self.pylab_interface as pylab:
+                ax = pylab.gca()
+                #print "reset",reset, ax.get_autoscalex_on(), ax.get_xlim()
+                reset = reset or ax.get_autoscalex_on()
+                xrange = ax.get_xlim()
+                #print "composing"
+                pylab.clf() # clear the canvas
+                #shift=20 if self.view == 'log' else 0
+                shift=0
+                if isinstance(self.problem, MultiFitProblem):
+                    for i,p in enumerate(self.problem.models):
+                        #if hasattr(p.fitness,'plot'):
+                        p.fitness.plot(view=self.view)
+                        pylab.hold(True)
+                        if self._cancel_calculate: break
+                    pylab.text(0.01, 0.01,
+                               'chisq=%s' % self.problem.chisq_str(),
+                               transform=pylab.gca().transAxes)
+                    if self._cancel_calculate: continue
+                else:
+                    #if hasattr(self.problem.fitness,'plot'):
+                    self.problem.plot(view=self.view)
+                    if self._cancel_calculate: continue
+
+                #print "drawing"
+                if not reset:
+                    self.toolbar.push_current()
+                    set_xrange(pylab.gca(), xrange)
+                    self.toolbar.push_current()
+                pylab.draw()
+                #print "done drawing"
+                break
+
+        self._calculating = False
+
+    def _precalc(self, problem):
+        """
+        Calculate each model separately, hopefully without blocking the GUI
+        for too long.  Individual problems may want finer control, e.g.,
+        yielding between computing the theory and applying the resolution.
+        """
+        _ = problem.nllf()
+        wx.Yield()
+
+def set_xrange(ax, xrange):
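+    """
+    Restrict the x axis of *ax* to *xrange* and rescale the y axis to the data
+    that remains visible, with a small margin (additive on a linear scale,
+    multiplicative on a log scale).
+    """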
+    miny,maxy = inf,-inf
+    for L in ax.get_lines():
+        x,y = L.get_data()
+        idx = (x>xrange[0]) & (x<xrange[1])
+        if idx.any():
+            miny = min(miny,min(y[idx]))
+            maxy = max(maxy,max(y[idx]))
+    if miny < maxy:
+        if ax.get_yscale() == 'linear':
+            padding = 0.05*(maxy-miny)
+            miny,maxy = miny-padding, maxy+padding
+        else:
+            miny,maxy = miny*0.95, maxy*1.05
+    ax.set_xlim(xrange)
+    ax.set_ylim(miny,maxy)
diff --git a/bumps/gui/fit_dialog.py b/bumps/gui/fit_dialog.py
new file mode 100644
index 0000000..99dc151
--- /dev/null
+++ b/bumps/gui/fit_dialog.py
@@ -0,0 +1,312 @@
+#!/usr/bin/python
+# Copyright (C) 2006-2011, University of Maryland
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+#
+# Author: Nikunj Patel, James Krycka, Paul Kienzle
+
+"""
+This module implements the FitConfig class, which presents a pop-up dialog box
+for the user to control fitting options.
+"""
+
+# TODO: reset button sets values to factory settings for current optimizer
+
+#==============================================================================
+import wx
+
+import wx.lib.newevent
+
+from .. import options
+from .input_list import InputListPanel
+
+(FitterChangedEvent, EVT_FITTER_CHANGED) = wx.lib.newevent.NewCommandEvent()
+
+class FitConfig(wx.Frame):
+    """
+    FitConfig lets the user set fitting options from a pop-up dialog box.
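+
+    The *config* argument is expected to behave like options.FIT_CONFIG: it
+    supplies the names, active_ids, selected_id, values and settings that the
+    constructor below uses to build the dialog.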
+    """
+    def __init__(self,
+                 parent = None,
+                 id     = wx.ID_ANY,
+                 title  = "Fit Options",
+                 pos    = wx.DefaultPosition,
+                 size   = wx.DefaultSize, # dialog box size will be calculated
+                 style  = wx.DEFAULT_DIALOG_STYLE,
+                 name   = "",
+                 config = None,
+                 help = None,
+                 fontsize = None,
+                ):
+        wx.Frame.__init__(self, parent, id, title, pos, size, style, name)
+
+        self.config = config
+        self.help = help
+
+        pairs = [(config.names[id],id) for id in config.active_ids]
+        self.active_ids = [id for _,id in sorted(pairs)]
+
+        # Set the font for this window and all child windows (widgets) from the
+        # parent window, or from the system defaults if no parent is given.
+        # A dialog box does not inherit font info from its parent, so we will
+        # explicitly get it from the parent and apply it to the dialog box.
+        if parent is not None:
+            font = parent.GetFont()
+            self.SetFont(font)
+
+        # If the caller specifies a font size, override the default value.
+        if fontsize is not None:
+            font = self.GetFont()
+            font.SetPointSize(fontsize)
+            self.SetFont(font)
+
+        # Section 1
+        self.panel1 = wx.Panel(self, -1)
+        static_box1 = wx.StaticBox(self.panel1, -1, "Fit Algorithms")
+
+        rows = (len(self.active_ids)+1)//2
+
+        flexsizer = wx.FlexGridSizer(rows, 2, hgap=20, vgap=10)
+
+        self.fitter_button = {}
+        for fitter in self.active_ids:
+            button = wx.RadioButton(self.panel1, -1,
+                    label=config.names[fitter], name=fitter)
+            self.fitter_button[fitter] = button
+            self.Bind(wx.EVT_RADIOBUTTON, self.OnRadio, id=button.GetId())
+            flexsizer.Add(button, 0, 0)
+
+        fit_hsizer = wx.StaticBoxSizer(static_box1, orient=wx.VERTICAL)
+        fit_hsizer.Add(flexsizer, 0, wx.ALL, 5)
+
+        self.panel1.SetSizer(fit_hsizer)
+
+        self.vbox = wx.BoxSizer(wx.VERTICAL)
+        self.vbox.Add(self.panel1, 0, wx.ALL, 10)
+
+        # Section 2
+        # Create list of all panels for later use in hiding and showing panels.
+        self.fitter_panel = {}
+        for fitter in self.active_ids:
+            items = [(options.FIT_FIELDS[field][0],
+                      field,
+                      config.values[fitter][field],
+                      options.FIT_FIELDS[field][1])
+                     for field, default in config.settings[fitter]]
+            #print fitter, items
+            panel = ParameterPanel(self, items, config.names[fitter])
+            self.fitter_panel[fitter] = panel
+            self.vbox.Add(panel, 1, wx.EXPAND|wx.ALL, 10)
+            panel.Hide()
+
+        # Make the current panel active
+        self.fitter_button[config.selected_id].SetValue(True)
+        self.fitter_panel[config.selected_id].Show()
+
+        # Section 3
+        # Create the button controls (Reset, Apply) and bind their events.
+        #apply_btn = wx.Button(self, wx.ID_APPLY, "Apply")
+        #apply_btn.SetToolTipString("Accept new options for the optimizer")
+        #apply_btn.SetDefault()
+        #reset_btn = wx.Button(self, wx.ID_ANY, "Reset")
+        #reset_btn.SetToolTipString("Restore default options for the optimizer")
+        accept_btn = wx.Button(self, wx.ID_OK)
+        accept_btn.SetToolTipString("Accept new options for the optimizer")
+        accept_btn.SetDefault()
+        cancel_btn = wx.Button(self, wx.ID_CANCEL)
+        cancel_btn.SetToolTipString("Restore default options for the optimizer")
+        if help is not None:
+            help_btn = wx.Button(self, wx.ID_HELP, 'Help')
+            #help_btn = wx.Button(self, wx.ID_ANY, 'Help')
+            help_btn.SetToolTipString("Help on the options for the optimizer")
+
+
+        self.Bind(wx.EVT_BUTTON, self.OnAccept, accept_btn)
+        self.Bind(wx.EVT_BUTTON, self.OnCancel, cancel_btn)
+        if help is not None:
+            self.Bind(wx.EVT_BUTTON, self.OnHelp, help_btn)
+
+        self.Bind(wx.EVT_CLOSE, self.OnClose)
+
+        # Create the button sizer that will put the buttons in a row, right
+        # justified, and with a fixed amount of space between them.  This
+        # emulates the Windows convention for placing a set of buttons at the
+        # bottom right of the window.
+        btn_sizer = wx.BoxSizer(wx.HORIZONTAL)
+        btn_sizer.Add((10,20), 1)  # stretchable whitespace
+        btn_sizer.Add(accept_btn, 0)
+        btn_sizer.Add((10,20), 0)  # non-stretchable whitespace
+        btn_sizer.Add(cancel_btn, 0)
+        btn_sizer.Add((10,20), 0)  # non-stretchable whitespace
+        if help is not None:
+            btn_sizer.Add((10,20), 0)  # non-stretchable whitespace
+            btn_sizer.Add(help_btn, 0)
+
+        # Add the button sizer to the main sizer.
+        self.vbox.Add(btn_sizer, 0, wx.EXPAND|wx.ALL, 10)
+
+        # Finalize the sizer and establish the dimensions of the dialog box.
+        # The minimum width is explicitly set because the sizer is not able to
+        # take into consideration the width of the enclosing frame's title.
+        self.SetSizer(self.vbox)
+        #self.vbox.SetMinSize((size[0], -1))
+        self.vbox.Fit(self)
+
+        self.Centre()
+
+    def OnRadio(self, event):
+
+        button = event.GetEventObject()
+        for panel in self.fitter_panel.values():
+            panel.Hide()
+        self.fitter_panel[button.Name].Show()
+        self.vbox.Layout()
+
+    def OnCancel(self, event):
+        """
+        Restore options for the selected fitter to the default values.
+        """
+        fitter = self._get_fitter()
+        panel = self.fitter_panel[fitter]
+        panel.Parameters = dict(self.config.settings[fitter])
+        self.Hide()
+
+    def OnAccept(self, event):
+        """
+        Save the current fitter and options to the fit config.
+        """
+        fitter = self._get_fitter()
+        options = self.fitter_panel[fitter].Parameters
+        self.config.selected_id = fitter
+        self.config.values[fitter] = options
+        self.Hide()
+
+        # Signal a change in fitter
+        event = FitterChangedEvent(self.Id, config=self.config)
+        wx.PostEvent(self, event)
+
+    def OnHelp(self, event):
+        """
+        Provide help on the selected fitter.
+        """
+        if self.help is not None:
+            self.help(self._get_fitter())
+
+    def OnClose(self, event):
+        """
+        Don't close the window, just hide it.
+        """
+        if event.CanVeto():
+            self.Hide()
+            event.Veto()
+        else:
+            event.Skip()
+
+    def _get_fitter(self):
+        """
+        Returns the currently selected algorithm, or None if no algorithm is
+        selected.
+        """
+        for button in self.fitter_button.values():
+            if button.Value:
+                return button.Name
+        else:
+            return None
+
+    def _get_options(self):
+        fitter = self._get_fitter()
+        options = self.fitter_panel[fitter].Parameters
+
+        return fitter, options
+
+
+class ParameterPanel(wx.Panel):
+
+    def __init__(self, parent, parameters, fitter_name):
+        wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY)
+
+        self.fields = []
+        itemlist = []
+        sbox = wx.StaticBox(self, wx.ID_ANY, fitter_name+" Fitting Parameters")
+
+        for parameter in parameters:
+            label, field, curr_value, datatype = parameter
+            if not label.endswith(':'):
+                label += ':'
+            if hasattr(datatype, 'choices'):
+                extra = [str(v) for v in datatype.choices]
+                mode = 'CRE'
+            else:
+                mode = 'RE'
+                extra = None
+            sub_list = [label, curr_value, datatype, mode, extra]
+            itemlist.append(sub_list)
+            self.fields.append(field)
+
+        # Set the same minimum height for each panel.  The y-size value should
+        # be sufficient to display at least 6 input fields without the need for
+        # a scroll bar.  Adjust the size.y value if the maximum number of
+        # input parameters across fitters changes.
+        self.fit_params = InputListPanel(parent=self, itemlist=itemlist,
+                                         align=True, size=(-1,220))
+
+        sbox_sizer = wx.StaticBoxSizer(sbox, wx.VERTICAL)
+        sbox_sizer.Add(self.fit_params, 1, wx.EXPAND|wx.ALL, 5)
+        self.SetSizer(sbox_sizer)
+        sbox_sizer.Fit(self)
+
+    @property
+    def Parameters(self):
+        values = self.fit_params.GetResults()
+        return dict(zip(self.fields, values))
+
+    @Parameters.setter
+    def Parameters(self, parameters):
+        values = [parameters[k] for k in self.fields]
+        self.fit_params.update_items_in_panel(values)
+
+_fit_config_frame = None
+def show_fit_config(parent, help=None):
+    global _fit_config_frame
+    if _fit_config_frame is None:
+        _fit_config_frame = FitConfig(parent=parent,
+                                      config=options.FIT_CONFIG, help=help)
+    _fit_config_frame.Show()
+    _fit_config_frame.Raise()
+    return _fit_config_frame
+
+if __name__=="__main__":
+    opts = options.getopts()
+    def _help(algo):
+        print("asking for help with "+algo)
+
+    app = wx.App()
+    top = wx.Frame(None)
+    text = wx.TextCtrl(top, wx.ID_ANY, "some text")
+    button = wx.Button(top, wx.ID_ANY, "Options...")
+    button.Bind(wx.EVT_BUTTON,
+                lambda ev: show_fit_config(top, help=_help))
+
+    sizer = wx.BoxSizer(wx.VERTICAL)
+    sizer.Add(text)
+    sizer.Add(button)
+    sizer.Fit(top)
+    top.SetSizer(sizer)
+    top.Show()
+    app.MainLoop()
diff --git a/bumps/gui/fit_thread.py b/bumps/gui/fit_thread.py
new file mode 100644
index 0000000..f4a4ded
--- /dev/null
+++ b/bumps/gui/fit_thread.py
@@ -0,0 +1,191 @@
+
+from copy import deepcopy
+from threading import Thread
+import os
+
+import wx.lib.newevent
+
+from .. import monitor
+from ..fitters import FitDriver
+from ..mapper import  MPMapper, SerialMapper
+from ..util import redirect_console
+
+from .convergence_view import ConvergenceMonitor
+#==============================================================================
+
+PROGRESS_DELAY = 5
+IMPROVEMENT_DELAY = 5
+
+(FitProgressEvent, EVT_FIT_PROGRESS) = wx.lib.newevent.NewEvent()
+(FitCompleteEvent, EVT_FIT_COMPLETE) = wx.lib.newevent.NewEvent()
+
+# NOTE: GUI monitors are running in a separate thread.  They should not
+# touch the problem internals.
+class GUIProgressMonitor(monitor.TimedUpdate):
+    def __init__(self, win, problem, progress=None, improvement=None):
+        monitor.TimedUpdate.__init__(self, progress=progress or PROGRESS_DELAY,
+                                     improvement=improvement or IMPROVEMENT_DELAY)
+        self.win = win
+        self.problem = problem
+
+    def show_progress(self, history):
+        evt = FitProgressEvent(problem=self.problem,
+                               message="progress",
+                               step=history.step[0],
+                               value=history.value[0],
+                               point=history.point[0]+0) # avoid race
+        wx.PostEvent(self.win, evt)
+
+    def show_improvement(self, history):
+        evt = FitProgressEvent(problem=self.problem,
+                               message="improvement",
+                               step=history.step[0],
+                               value=history.value[0],
+                               point=history.point[0]+0) # avoid race
+        wx.PostEvent(self.win, evt)
+
+
+class GUIMonitor(monitor.Monitor):
+    """
+    Generic GUI monitor.
+
+    Sends a fit progress event message with **monitor.progress() every n seconds.
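+
+    The wrapped *monitor* is expected to provide config_history(history),
+    __call__(history) and progress(); ConvergenceMonitor, used by FitThread
+    below, is one such monitor.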
+    """
+    def __init__(self, win, problem, message, monitor, rate=None):
+        self.time = 0
+        self.rate = rate or 10
+        self.win = win
+        self.problem = problem
+        self.message = message
+        self.monitor = monitor
+    def config_history(self, history):
+        self.monitor.config_history(history)
+        history.requires(time=1)
+    def __call__(self, history):
+        self.monitor(history)
+        if history.time[0] >= self.time+self.rate:
+            evt = FitProgressEvent(problem=self.problem,
+                                   message=self.message,
+                                   **self.monitor.progress())
+            wx.PostEvent(self.win, evt)
+            self.time = history.time[0]
+    def final(self):
+        """
+        Close out the monitor
+        """
+        evt = FitProgressEvent(problem=self.problem,
+                               message=self.message,
+                               **self.monitor.progress())
+        wx.PostEvent(self.win, evt)
+
+# Horrible hack: we put the DREAM state in the fitter object the first time
+# back from the DREAM monitor; if our fitter object contains dream_state,
+# then we will send the dream_update notifications periodically.
+class DreamMonitor(monitor.Monitor):
+    def __init__(self, win, problem, message, fitter, rate=None):
+        self.time = 0
+        self.rate = rate or 60
+        self.win = win
+        self.problem = problem
+        self.fitter = fitter
+        self.message = message
+        self.uncertainty_state = None
+    def config_history(self, history):
+        history.requires(time=1)
+    def __call__(self, history):
+        try:
+            self.uncertainty_state = history.uncertainty_state
+            if history.time[0] >= self.time+self.rate:
+                # Gack! holding on to state for final
+                evt = FitProgressEvent(problem=self.problem,
+                                       message="uncertainty_update",
+                                       uncertainty_state = deepcopy(self.uncertainty_state))
+                wx.PostEvent(self.win, evt)
+                self.time = history.time[0]
+        except AttributeError:
+            self.uncertainty_state = None
+            pass
+
+    def final(self):
+        """
+        Close out the monitor
+        """
+        if self.uncertainty_state:
+            evt = FitProgressEvent(problem=self.problem,
+                                   message="uncertainty_final",
+                                   uncertainty_state = deepcopy(self.uncertainty_state))
+            wx.PostEvent(self.win, evt)
+
+#==============================================================================
+
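+# A hedged usage sketch for FitThread (names such as 'panel' and 'lock' are
+# illustrative, not part of this module).  The thread starts itself from
+# __init__; the caller just constructs it and listens for the events above:
+#
+#   FitThread(win=panel, fitLock=lock, abort_test=lambda: False,
+#             problem=problem, fitclass=fitclass, options={})
+#   panel.Bind(EVT_FIT_PROGRESS, panel.OnFitProgress)
+#   panel.Bind(EVT_FIT_COMPLETE, panel.OnFitComplete)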
+class FitThread(Thread):
+    """Run the fit in a separate thread from the GUI thread."""
+    def __init__(self, win, fitLock=None, abort_test=None, problem=None,
+                 fitclass=None, options=None, mapper=None):
+        # base class initialization
+        #Process.__init__(self)
+
+        Thread.__init__(self)
+        self.win = win
+        self.fitLock = fitLock
+        self.abort_test = abort_test
+        self.problem = problem
+        self.fitclass = fitclass
+        self.options = options
+        self.mapper = mapper
+        self.start() # Start it working.
+
+    def run(self):
+        # TODO: we have no interlocks on changes in problem state.  What
+        # happens when the user changes the problem while a fit is being run?
+        # May want to keep a history of changes to the problem definition,
+        # along with a function to reverse them so we can handle undo.
+
+        # NOTE: Problem must be the original problem (not a copy) when used
+        # inside the GUI monitor otherwise AppPanel will not be able to
+        # recognize that it is the same problem when updating views.
+        monitors = [GUIProgressMonitor(self.win, self.problem),
+                    GUIMonitor(self.win, self.problem,
+                               message="convergence_update",
+                               monitor=ConvergenceMonitor(),
+                               rate=5),
+                    DreamMonitor(self.win, self.problem,
+                                 fitter = self.fitclass,
+                                 message="uncertainty_update",
+                                 rate=30),
+                    ]
+        try:
+            # Only use parallel on windows if the problem can be pickled
+            if os.name == "nt":
+                import cPickle as pickle
+                pickle.dumps(self.problem)
+            mapper = MPMapper
+        except Exception:
+            mapper = SerialMapper
+
+        # Be safe and keep a private copy of the problem while fitting
+        #print "fitclass",self.fitclass
+        problem = deepcopy(self.problem)
+        #print "fitclass id",id(self.fitclass),self.fitclass,threading.current_thread()
+        def abort_wrapper():
+            with self.fitLock:
+                return self.abort_test()
+        driver = FitDriver(self.fitclass, problem=problem,
+                           monitors=monitors, abort_test = abort_wrapper,
+                           mapper = mapper.start_mapper(problem, []),
+                           **self.options)
+
+        x,fx = driver.fit()
+        # Give final state message from monitors
+        for M in monitors:
+            if hasattr(M, 'final'): M.final()
+
+        with redirect_console() as fid:
+            driver.show()
+            captured_output = fid.getvalue()
+
+        evt = FitCompleteEvent(problem=self.problem,
+                               point=x,
+                               value=fx,
+                               info=captured_output)
+        wx.PostEvent(self.win, evt)
diff --git a/bumps/gui/fit_view.py b/bumps/gui/fit_view.py
new file mode 100755
index 0000000..6242722
--- /dev/null
+++ b/bumps/gui/fit_view.py
@@ -0,0 +1,56 @@
+import sys
+
+import wx
+
+class FitView(wx.Panel):
+    def __init__(self, parent):
+
+        wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY)
+        self.parent = parent
+
+        sizer1 = wx.BoxSizer(wx.HORIZONTAL)
+        label1 = wx.StaticText(self, 1, label="Store Folder:")
+
+        self.store_file = wx.TextCtrl(self, 2, value="", style=wx.TE_RIGHT)
+        sizer1.Add(label1, 0, border=5,
+                        flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL|wx.ALL)
+        sizer1.Add(self.store_file, 1, wx.EXPAND|wx.RIGHT, border=10)
+
+        # Create the Fit button.
+        self.btn_fit = wx.Button(self, wx.ID_ANY, "Fit")
+        self.btn_fit.SetToolTip(wx.ToolTip("click to start fit"))
+        self.Bind(wx.EVT_BUTTON, self.OnFit, self.btn_fit)
+
+        # Create a horizontal box sizer for the buttons.
+        sizer2 = wx.BoxSizer(wx.HORIZONTAL)
+        sizer2.Add(self.btn_fit, 0, wx.ALL, 5)
+
+        sizer = wx.BoxSizer(wx.VERTICAL)
+        sizer.Add((10,10), 0)  # whitespace
+        sizer.Add(sizer1, 0, wx.ALL, 5)
+        sizer.Add(sizer2, 0, wx.ALL, 5)
+
+        self.SetSizer(sizer)
+        self.SetAutoLayout(True)
+
+    def OnFit(self, event):
+        btnLabel = self.btn_fit.GetLabel()
+        if btnLabel == "Fit":
+            self.btn_fit.SetLabel("Stop")
+            self.store = self.store_file.GetValue()
+            ################LOGIC######################
+            # send fit event message to panel with
+            # all required data to fit
+            # the panel will listen to event and start
+            # the fit.
+            ###########################################
+            send("fit", store=self.store)
+
+        else:
+            print('stop logic goes here')
+            self.btn_fit.SetLabel("Fit")
+            pass
+
+
+    def OnFitComplete(self, event):
+        self.btn_fit.SetLabel("Fit")
diff --git a/bumps/gui/gui_app.py b/bumps/gui/gui_app.py
new file mode 100755
index 0000000..76c722d
--- /dev/null
+++ b/bumps/gui/gui_app.py
@@ -0,0 +1,307 @@
+# Copyright (C) 2006-2011, University of Maryland
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+#
+# Author: James Krycka
+
+"""
+This module creates the GUI for the Bumps application.
+It builds the initial wxPython frame, presents a splash screen to the user,
+and then constructs the rest of the GUI.
+
+From the command line, the application is run from a startup script that calls
+the main function of this module.  From the root directory of the package, you
+can run this application in GUI mode as follows:
+
+$ python bin/bumps_gui [<optional parameters>]
+
+The following is a list of command line parameters for development and
+debugging purposes.  None are documented and they may change at any time.
+
+Options for showing diagnostic info:
+    --platform      Display platform specific info, especially about fonts
+    --syspath       Display the contents of sys.path
+    --time          Display diagnostic timing information
+
+Options for overriding the default font and point size attributes where
+parameters within each set are mutually exclusive (last one takes precedence):
+    --arial, --tahoma, --verdana
+    --6pt, --7pt, --8pt, --9pt, --10pt, --11pt, --12pt
+
+Options for controlling the development and testing environment:
+    --inspect       Run the wxPython Widget Inspection Tool in a debug window
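+
+For example (illustrative only), platform diagnostics with a larger Verdana
+font can be requested with:
+
+$ python bin/bumps_gui --verdana --10pt --platform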
+"""
+
+#==============================================================================
+
+import sys
+import traceback
+from StringIO import StringIO
+
+import wx
+
+from bumps import plugin
+from bumps import cli
+from bumps import options as bumps_options
+
+from .about import APP_TITLE
+from .utilities import resource_dir, resource, log_time
+
+# Defer import of AppFrame until after the splash screen has been displayed.
+# When running for the first time (where imported modules are not in cache),
+# importing AppFrame can take several seconds because it results in importing
+# matplotlib, numpy, and most application modules.
+### from .app_frame import AppFrame
+
+# Desired initial application frame size (if physical screen size permits).
+FRAME_WIDTH = 1200
+FRAME_HEIGHT = 900
+
+# Desired splash screen size and other information.
+# Note that it is best to start with an image having the desired dimensions or
+# larger.  If the image is smaller, the image conversion time may be noticeable.
+SPLASH_FILE = "bumps_splash.jpg"
+SPLASH_TIMEOUT = 30  # in milliseconds
+SPLASH_WIDTH = 720
+SPLASH_HEIGHT = 540
+
+# Diagnostic timing information.
+LOGTIM = True if (len(sys.argv) > 1 and '--time' in sys.argv[1:]) else False
+
+#==============================================================================
+
+class MainApp(wx.App):
+    """
+    This class builds the wxPython GUI for the Bumps Modeler application.
+
+    First a splash screen is displayed, then the application frame is created
+    but not shown until the splash screen exits.  The splash screen remains
+    active while the application frame is busy initializing itself (which may
+    be time consuming if many imports are performed and the data is not in the
+    system cache, e.g., on running the application for the first time).  Only
+    when initialization of the application is complete and control drops into
+    the wx event loop, can the splash screen terminate (via timeout or a mouse
+    click on the splash screen) which causes the frame to be made visible.
+    """
+    def __init__(self, *args, **kw):
+        wx.App.__init__(self, *args, **kw)
+
+    def OnInit(self):
+        # Determine the position and size of the splash screen based on the
+        # desired size and screen real estate that we have to work with.
+        pos, size = self.window_placement(SPLASH_WIDTH, SPLASH_HEIGHT)
+        #print "splash pos and size =", pos, size
+
+        # Display the splash screen.  It will remain visible until the caller
+        # executes app.MainLoop() AND either the splash screen timeout expires
+        # or the user left clicks over the splash screen.
+        #if LOGTIM: log_time("Starting to display the splash screen")
+        #pic = resource(SPLASH_FILE)
+        #self.display_splash_screen(img_name=pic, pos=pos, size=size)
+
+        # Determine the position and size of the application frame based on the
+        # desired size and screen real estate that we have to work with.
+        pos, size = self.window_placement(FRAME_WIDTH, FRAME_HEIGHT)
+        #print "frame pos and size =", pos, size
+
+        # Create the application frame, but it will not be shown until the
+        # splash screen terminates.  Note that import of AppFrame is done here
+        # while the user is viewing the splash screen.
+        if LOGTIM: log_time("Starting to build the GUI application")
+
+        # Can't delay matplotlib configuration any longer
+        cli.config_matplotlib('WXAgg')
+
+        from .app_frame import AppFrame
+        self.frame = AppFrame(parent=None, title=APP_TITLE,
+                              pos=pos, size=size)
+
+        # Declare the application frame to be the top window.
+        self.SetTopWindow(self.frame)
+
+        # To have the frame visible behind the splash screen, comment out the following
+        #wx.CallAfter(self.after_show)
+        self.after_show()
+
+        # To test that the splash screen will not go away until the frame
+        # initialization is complete, simulate an increase in startup time
+        # by taking a nap.
+        #time.sleep(6)
+        return True
+
+
+    def window_placement(self, desired_width, desired_height):
+        """
+        Determines the position and size of a window such that it fits on the
+        user's screen without obstructing (or being obstructed by) the task bar.
+        The returned size is bounded by the desired width and height passed in,
+        but it may be smaller if the screen is too small.  Usually the returned
+        position (upper left coordinates) will result in centering the window
+        on the screen excluding the task bar area.  However, for very large
+        monitors it will be placed on the left side of the screen.
+        """
+
+        # WORKAROUND: When running Linux and using an Xming (X11) server on a
+        # PC with a dual monitor configuration, the reported display count may
+        # be 1 (instead of 2) with a display size of both monitors combined.
+        # (For example, on a target PC with an extended desktop consisting of
+        # two 1280x1024 monitors, the reported monitor size was 2560x1045.)
+        # To avoid displaying the window across both monitors, we check for
+        # screen 'too big'.  If so, we assume a smaller width which means the
+        # application will be placed towards the left hand side of the screen.
+
+        x, y, w, h = wx.Display().GetClientArea() # size excludes task bar
+        #print "*** x, y, w, h", x, y, w, h
+        xpos, ypos = x, y
+        h -= 20  # to make room for Mac window decorations
+        if len(sys.argv) > 1 and '--platform' in sys.argv[1:]:
+            j, k = wx.DisplaySize()  # size includes task bar area
+            print("*** Reported screen size including taskbar is %d x %d"%(j, k))
+            print("*** Reported screen size excluding taskbar is %d x %d"%(w, h))
+
+        if w > 1920: w = 1280  # display on left side, not centered on screen
+        if w > desired_width:  xpos = x + (w - desired_width)/2
+        if h > desired_height: ypos = y + (h - desired_height)/2
+
+        # Return the suggested position and size for the application frame.
+        return (xpos, ypos), (min(w, desired_width), min(h, desired_height))
+
+    def display_splash_screen(self, img_name=None, pos=None, size=(320, 240)):
+        """Displays a splash screen at the specified position and size."""
+        # Prepare the picture.
+        w, h = size
+        image = wx.Image(img_name, wx.BITMAP_TYPE_JPEG)
+        image.Rescale(w, h, wx.IMAGE_QUALITY_HIGH)
+        bm = image.ConvertToBitmap()
+
+        # Create and show the splash screen.  It will disappear only when the
+        # program has entered the event loop AND either the timeout has expired
+        # or the user has left clicked on the screen.  Thus any processing
+        # performed by the calling routine (including doing imports) will
+        # prevent the splash screen from disappearing.
+        splash = wx.SplashScreen(bitmap=bm,
+                                 splashStyle=(wx.SPLASH_TIMEOUT|
+                                              wx.SPLASH_CENTRE_ON_SCREEN),
+                                 style=(wx.SIMPLE_BORDER|
+                                        wx.FRAME_NO_TASKBAR|
+                                        wx.STAY_ON_TOP),
+                                 milliseconds=SPLASH_TIMEOUT,
+                                 parent=None, id=wx.ID_ANY)
+        splash.Bind(wx.EVT_CLOSE, self.OnCloseSplashScreen)
+
+        # Reposition if the center-of-screen placement is overridden by the caller.
+        if pos is not None:
+            splash.SetPosition(pos)
+        splash.Show()
+
+    def OnCloseSplashScreen(self, event):
+        """
+        Make the application frame visible when the splash screen is closed.
+        """
+
+        # To show the frame earlier, uncomment Show() code in OnInit.
+        if LOGTIM: log_time("Terminating the splash screen and showing the GUI")
+        #self.after_show()
+        #wx.CallAfter(self.after_show)
+        event.Skip()
+
+    def after_show(self):
+        from . import signal
+        sys.excepthook = excepthook
+
+        # Process options
+        bumps_options.BumpsOpts.FLAGS |= set(('inspect','syspath'))
+        opts = bumps_options.getopts()
+
+        # For wx debugging, load the wxPython Widget Inspection Tool if requested.
+        # It will cause a separate interactive debugger window to be displayed.
+        if opts.inspect: inspect()
+
+        if opts.syspath:
+            print("*** Resource directory:  "+resource_dir())
+            print("*** Python path is:")
+            for i, p in enumerate(sys.path):
+                print("%5d  %s" %(i, p))
+
+        # Put up the initial model
+        model, output = initial_model(opts)
+        if not model: model = plugin.new_model()
+        signal.log_message(message=output)
+        self.frame.panel.set_model(model=model)
+        self.frame.panel.fit_config = opts.fit_config
+
+        self.frame.panel.Layout()
+        self.frame.panel.aui.Split(0, wx.TOP)
+        self.frame.Show()
+
+
+#==============================================================================
+
+def initial_model(opts):
+    # Capture stdout from problem definition
+    saved_stdout = sys.stdout
+    sys.stdout = StringIO()
+    try:
+        problem = cli.initial_model(opts)
+        error = ''
+    except Exception:
+        problem = None
+        limit = len(traceback.extract_stack())-4
+        #sys.stderr.write("limit=%d\n"%limit)
+        #sys.stderr.write(repr(traceback.extract_stack()))
+        error = traceback.format_exc(limit)
+    finally:
+        output = sys.stdout.getvalue()
+        sys.stdout = saved_stdout
+    return problem, output.strip()+error
+
+
+def inspect():
+    import wx.lib.inspection
+    wx.lib.inspection.InspectionTool().Show()
+
+
+def excepthook(type, value, tb):
+    from . import signal
+    error = traceback.format_exception(type, value, tb)
+    indented = "   "+"\n   ".join(error)
+    signal.log_message(message="Error:\n"+indented)
+    wx.GetApp().frame.panel.show_view('log')
+
+
+def _protected_main():
+    if LOGTIM: log_time("Starting Bumps")
+
+    # Instantiate the application class and give control to wxPython.
+    app = MainApp(redirect=0)
+
+    # Enter event loop which allows the user to interact with the application.
+    if LOGTIM: log_time("Entering the event loop")
+    app.MainLoop()
+
+def main():
+    try:
+        _protected_main()
+    except:  # make sure traceback is printed
+        traceback.print_exc()
+        sys.exit()
+
+# Allow "python -m bumps.gui.gui_app options..."
+if __name__ == "__main__":
+    main()
diff --git a/bumps/gui/input_list.py b/bumps/gui/input_list.py
new file mode 100644
index 0000000..b70beb4
--- /dev/null
+++ b/bumps/gui/input_list.py
@@ -0,0 +1,1093 @@
+#!/usr/bin/python
+# Copyright (C) 2006-2011, University of Maryland
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+# Author: James Krycka
+
+"""
+This module implements InputListPanel, InputListDialog, and InputListValidator
+classes to provide general purpose mechanisms for obtaining and validating user
+input from a structured list of input fields.
+"""
+from __future__ import print_function
+
+import wx
+from wx.lib.scrolledpanel import ScrolledPanel
+
+WINDOW_BKGD_COLOUR = "#ECE9D8"
+PALE_YELLOW = "#FFFFB0"
+
+DATA_ENTRY_ERRMSG = """\
+Please correct any highlighted field in error.
+Yellow means an input value is required.
+Pink indicates a syntax error."""
+
+
+class ItemListValidator(wx.PyValidator):
+    """
+    This class implements a custom input field validator.  Each instance of
+    this class services one data entry field (typically implemented as
+    wx.TextCtrl or a wx.ComboBox widget).  Parameters are:
+
+    - datatype of the field (used when validating user input) as follows:
+      o int       => signed or unsigned integer value
+      o float     => floating point value
+      o str       => string of characters
+      o 'str_alpha' => string of alphabetic characters {A-Z, a-z}
+      o 'str_alnum' => string of alphanumeric characters {A-Z, a-z, 0-9}
+      o 'str_id'    => string identifier consisting of {A-Z, a-z, 0-9, _, -}
+      o '' or any unknown datatype is treated the same as 'str'
+
+    - flag to indicate whether user input is required (True) or optional (False)
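+
+    Typical use (an illustrative sketch, mirroring InputListPanel below)::
+
+        wx.TextCtrl(parent, wx.ID_ANY, value="0.0",
+                    validator=ItemListValidator(float, required=True))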
+    """
+
+    def __init__(self, datatype='str', required=False):
+        wx.PyValidator.__init__(self)
+        self.datatype = datatype
+        self.required = required
+
+
+    def Clone(self):
+        # Every validator must implement the Clone() method that returns a
+        # instance of the class as follows:
+        return ItemListValidator(self.datatype, self.required)
+
+
+    def Validate(self, win):
+        """
+        Verify user input according to the expected datatype.  Leading and
+        trailing whitespace is always stripped before evaluation.  Floating and
+        integer values are returned as normalized float or int objects; thus
+        conversion can generate an error.  On error, the field is highlighted
+        and the cursor is placed there.  Note that all string datatypes are
+        returned stripped of leading and trailing whitespace.
+        """
+
+        text_ctrl = self.GetWindow()
+        text = text_ctrl.GetValue().strip()
+
+        try:
+            if callable(self.datatype):
+                self.value = self.value_alt = self.datatype(text)
+            elif self.datatype == int:
+                if len(text) == 0:
+                    self.value = 0
+                    self.value_alt = None
+                else:
+                    float_value = float(text)
+                    if float_value != int(float_value):
+                        raise ValueError("input must be an integer")
+                    self.value = self.value_alt = int(float_value)
+            elif self.datatype == float:
+                if len(text) == 0:
+                    self.value = 0.0
+                    self.value_alt = None
+                else:
+                    self.value = self.value_alt = float(text)
+            elif self.datatype == 'str_alpha':
+                if len(text) == 0:
+                    self.value = ''
+                    self.value_alt = None
+                    if self.required:
+                        raise RuntimeError("input required")
+                else:
+                    if text.isalpha():
+                        self.value = self.value_alt = str(text)
+                    else:
+                        raise ValueError("input must be alphabetic")
+            elif self.datatype == 'str_alnum':
+                if len(text) == 0:
+                    self.value = ''
+                    self.value_alt = None
+                else:
+                    if text.isalnum():
+                        self.value = self.value_alt = str(text)
+                    else:
+                        raise ValueError("input must be alphanumeric")
+            elif self.datatype == 'str_id':
+                if len(text) == 0:
+                    self.value = ''
+                    self.value_alt = None
+                else:
+                    temp = text.replace('_', 'a').replace('-','a')
+                    if temp.isalnum():
+                        self.value = self.value_alt = str(text)
+                    else:
+                        raise ValueError("input must be alphanumeric, _, or -")
+            else:  # For self.datatype of "str", "", or any unrecognized type.
+                if len(text) == 0:
+                    self.value = ''
+                    self.value_alt = None
+                else:
+                    self.value = self.value_alt = str(text)
+
+            if len(text) == 0 and self.required:
+                raise RuntimeError("input required")
+
+        except RuntimeError:
+            from traceback import print_exc; print_exc()
+            text_ctrl.SetBackgroundColour(PALE_YELLOW)
+            text_ctrl.SetFocus()
+            text_ctrl.Refresh()
+            return False
+
+        except Exception:
+            from traceback import print_exc; print_exc()
+            text_ctrl.SetBackgroundColour("PINK")
+            text_ctrl.SetFocus()
+            text_ctrl.Refresh()
+            return False
+
+        else:
+            text_ctrl.SetBackgroundColour(
+                wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW))
+            text_ctrl.Refresh()
+            self.TransferFromWindow()
+            return True
+
+
+    def TransferToWindow(self):
+        # The parent of this class is responsible for setting the default value
+        # for the field (e.g., by calling wx.TextCtrl() or wx.ComboBox() or
+        # instance.SetValue(), etc.).
+        return True  # Default is False for failure
+
+
+    def TransferFromWindow(self):
+        # Data has already been transferred from the window and validated
+        # in Validate(), so there is nothing useful to do here.
+        return True  # Default is False for failure
+
+
+    def GetValidatedInput(self):
+        # Return the validated value or zero or blank for a null input.
+        return self.value
+
+
+    def GetValidatedInputAlt(self):
+        # Return the validated value or None for a null input.
+        return self.value_alt
+
+#==============================================================================
+
+class InputListPanel(ScrolledPanel):
+    """
+    This class implements a general purpose mechanism for obtaining and
+    validating user input from several fields in a window with scroll bars.
+    (See InputListDialog that uses a dialog box instead of a scrolled window.)
+
+    It creates a scrolled window in which to display one or more input fields
+    each preceded by a label.  The input fields can be a combination of simple
+    data entry boxes or drop down combo boxes.  Automatic validation of user
+    input is performed.  The caller can use the GetResults() method to obtain
+    the final results from all fields in the form of a list of values.
+
+    The scrolled window object is created as a child of the parent panel passed
+    in.  Normally the caller of this class puts this returned object in a sizer
+    attached to the parent panel to allow it to expand or contract based on the
+    size constraints imposed by its parent.
+
+    The layout is:
+
+    +-------------------------------------+-+
+    |                                     |v|
+    |  Label-1:   [<drop down list>  |V]  |e|
+    |                                     |r|   Note that drop down lists and
+    |  Label-2:   [<data entry field-2>]  |t|   simple data entry fields can
+    |  ...                                |||   be specified in any order.
+    |  Label-n:   [<data entry field-n>]  |||
+    |                                     |v|
+    +-------------------------------------+-+   Note that scroll bars are
+    |      horizontal scroll bar -->      | |   visible only when needed.
+    +-------------------------------------+-+
+
+    The itemlist parameter controls the display.  It is a list of input field
+    description lists where each description list contains 5 or 6 elements and
+    the 6th element is optional.  The items in the description list are:
+
+    [0] Label string prefix for the input field
+    [1] Default value
+    [2] Datatype for validation (see ItemListValidator docstring for details)
+    [3] Flags parameter in the form of a string of characters as follows:
+        R - input is required; otherwise input is optional and can be blank
+        E - field is editable by the user; otherwise it is non-editable and box
+            is grayed-out; a non-editable field has its default value returned
+        C - field is a combobox; otherwise it is a simple data entry box
+        L - field is preceded by a divider line; 'L' takes precedence over 'H'
+        H - field is preceded by a header given in the 6th element of the list;
+            the following header sub-options are valid only if 'H' is specified:
+            0 - header text size is same as label text size (default)
+            1 - header text size is label text size + 1 point (large)
+            2 - header text size is label text size + 2 points (x-large)
+            3 - header text size is label text size + 3 points (2x-large)
+            B - header text is bolded
+            U - header text is underlined
+        Options can be combined in the flags string, such as 'REHB2', which means
+        the field is required, editable, and preceded by a bold, extra-large header
+    [4] List of values for a combo box or None for a simple data entry field
+    [5] Header string to be displayed above the label string of the input field;
+        if 'H' is not specified, this list element can be omitted or can be None
+
+    The align parameter determines whether input fields are aligned across
+    sections.  If True, the widest text label
+    determines the space allocated for all labels; if False, the text label
+    width is determined separately for each section.
+
+    The fontsize parameter allows the caller to specify a font point size to
+    override the default point size.
+
+    See the AppTestFrame class for a comprehensive example.
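+
+    A minimal sketch of an itemlist (values are illustrative only)::
+
+        itemlist = [
+            ["Sample name:", "run1", 'str_id', 'RE', None],
+            ["Temperature (K):", 298.0, float, 'RE', None],
+            ["Algorithm:", "amoeba", str, 'CRE', ["amoeba", "dream"]],
+        ]
+        panel = InputListPanel(parent=self, itemlist=itemlist, align=True)
+        ...
+        name, temperature, algorithm = panel.GetResults()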
+    """
+
+    def __init__(self,
+                 parent,
+                 id       = wx.ID_ANY,
+                 pos      = wx.DefaultPosition,
+                 size     = wx.DefaultSize,
+                 style    = wx.TAB_TRAVERSAL,
+                 name     = "",
+                 itemlist = [],
+                 align    = False,
+                 fontsize = None
+                ):
+        ScrolledPanel.__init__(self, parent, id, pos, size, style, name)
+
+        #self.SetBackgroundColour(WINDOW_BKGD_COLOUR)
+        self.align = align
+        self.itemlist = itemlist
+        self.item_cnt = len(self.itemlist)
+        if self.item_cnt == 0:
+            return
+
+        # Set the default font for this and all child windows (widgets) if the
+        # caller specifies a size; otherwise let it default from the parent.
+        if fontsize is not None:
+            font = self.GetFont()
+            font.SetPointSize(fontsize)
+            self.SetFont(font)
+        #print "Input List Panel font ptsize =", self.GetFont().GetPointSize()
+
+        # Specify the widget layout using sizers.
+        main_sizer = wx.BoxSizer(wx.VERTICAL)
+
+        # Create the text controls for labels and associated input fields
+        # and any optional headers.
+        self.add_items_to_panel()
+
+        # Divide the input items into sections prefaced by header text (except
+        # that the first section is not required to have a header).  A section
+        # list is created that contains the index of the item that starts a new
+        # section plus a final entry that is one beyond the last item.
+        sect = [0]  # declare item 0 to be start of a new section
+        for i in range(self.item_cnt):
+            if i > 0 and self.headers[i] is not None:
+                sect.append(i)
+        sect.append(self.item_cnt)
+
+        # Place the items for each section in its own flex grid sizer.
+        for i in range(len(sect)-1):
+            j = sect[i]; k = sect[i+1] - 1
+            fg_sizer = self.add_items_to_sizer(j, k)
+
+            # Add the flex grid sizer to the main sizer.
+            if self.headers[j] is not None:  # self.headers[0] could be None
+                main_sizer.Add(self.headers[j], 0, border=10,
+                               flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT)
+            main_sizer.Add(fg_sizer, 0, border=10,
+                           flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT)
+
+        # Finalize the sizer and establish the dimensions of the input box.
+        self.SetSizer(main_sizer)
+        main_sizer.Fit(self)
+
+        # Enable scrolling and initialize the validators (required when
+        # validators are not used in the context of a dialog box).
+        self.SetupScrolling(scroll_x=True, scroll_y=True)
+        self.InitDialog()
+
+
+    def add_items_to_panel(self):
+        """
+        For each input item, create a header (optional), label, and input box
+        widget to instantiate it.  Put the handles for these widgets in the
+        headers, labels, and inputs lists where the length of each list is the
+        same as the number of input boxes.
+        """
+
+        self.headers = []; self.labels = []; self.inputs = []
+        self.widest = 0
+
+        for x in range(self.item_cnt):
+            params = len(self.itemlist[x])
+            if params == 6:
+                text, default, datatype, flags, plist, header = self.itemlist[x]
+            elif params == 5:
+                text, default, datatype, flags, plist = self.itemlist[x]
+                header = None
+            if default is None: default = ""  # display None as a null string
+
+            # Process the flags parameter.
+            required = False
+            if 'R' in flags: required = True
+            editable = False
+            if 'E' in flags: editable = True
+            combo = False
+            if 'C' in flags: combo = True
+            line = False
+            if 'L' in flags: line = True
+            hdr = False
+            if 'H' in flags and header is not None: hdr = True
+            if hdr:
+                delta_pts = 0
+                if '1' in flags: delta_pts = 1  # large
+                if '2' in flags: delta_pts = 2  # X-large
+                if '3' in flags: delta_pts = 3  # 2X-large
+                weight = wx.NORMAL
+                if 'B' in flags: weight = wx.BOLD
+                underlined = False
+                if 'U' in flags: underlined = True
+
+            # Optionally, create a header widget to display above the input box.
+            # A dividing line is treated as a special case header.
+            if line:
+                lin = wx.StaticLine(self, wx.ID_ANY, style=wx.LI_HORIZONTAL)
+                self.headers.append(lin)
+            elif hdr:
+                hdr = wx.StaticText(self, wx.ID_ANY, label=header,
+                                    style=wx.ALIGN_CENTER)
+                font = hdr.GetFont()
+                ptsize = font.GetPointSize() + delta_pts
+                font.SetPointSize(ptsize)
+                font.SetWeight(weight)
+                font.SetUnderlined(underlined)
+                hdr.SetFont(font)
+                hdr.SetForegroundColour("BLUE")
+                self.headers.append(hdr)
+            else:
+                self.headers.append(None)
+
+            # Create the text label widget.
+            self.labels.append(wx.StaticText(self, wx.ID_ANY, label=text,
+                               style=wx.ALIGN_LEFT))
+            w, h = self.labels[x].GetSize()
+            if w > self.widest: self.widest = w
+
+            # Create the input box widget (combo box or simple data entry box)
+            if combo:              # it is a drop down combo box list
+                self.inputs.append(wx.ComboBox(self, wx.ID_ANY,
+                                   value=str(default),
+                                   validator=ItemListValidator(datatype, required),
+                                   choices=plist,
+                                   style=wx.CB_DROPDOWN|wx.CB_READONLY))
+                self.Bind(wx.EVT_COMBOBOX, self.OnComboBoxSelect, self.inputs[x])
+            else:                  # it is a simple data entry field
+                self.inputs.append(wx.TextCtrl(self, wx.ID_ANY,
+                                   value=str(default),
+                                   validator=ItemListValidator(datatype, required)))
+                self.Bind(wx.EVT_TEXT, self.OnText, self.inputs[x])
+
+            # If the field is not editable, disable it so the user cannot change it.
+            if not editable:
+                self.inputs[x].Enable(False)
+
+            # Validate the default value and highlight the field if the value is
+            # in error or if input is required and the value is a null string.
+            self.inputs[x].GetValidator().Validate(self.inputs[x])
+
+        # Determine if all input boxes should be aligned across sections.
+        if self.align:
+            for x in range(self.item_cnt):
+                self.labels[x].SetMinSize((self.widest, -1))
+
+
+    def add_items_to_sizer(self, start, end):
+        sizer = wx.FlexGridSizer(cols=2, hgap=5, vgap=10)
+        for x in range(start, end+1):
+            sizer.Add(self.labels[x], 0, wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL)
+            sizer.Add(self.inputs[x], 0, wx.EXPAND)
+        sizer.AddGrowableCol(1)
+        return sizer
+
+
+    def update_items_in_panel(self, new_values):
+        for x in range(len(self.inputs)):
+            if new_values[x] is not None:
+                self.inputs[x].SetValue(str(new_values[x]))
+
+
+    def GetResults(self):
+        """
+        Returns a list of values, one for each input field.  The value for
+        a field is either its initial (default) value or the last value
+        entered by the user that has been successfully validated.  An input
+        that fails validation is not returned by the validator from the
+        window.  For a non-editable field, its initial value is returned.
+
+        Blank input is converted to 0 for int, 0.0 for float, or a 0-length
+        string for a string datatype.
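+
+        A hypothetical usage sketch ('panel' stands for an InputListPanel
+        instance created by the caller):
+
+        values = panel.GetResults()           # blank fields return 0, 0.0, or ""
+        alt = panel.GetResultsAltFormat()     # same fields, but blanks return None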
+        """
+
+        ret = []
+        for x in range(self.item_cnt):
+            ret.append(self.inputs[x].GetValidator().GetValidatedInput())
+        return ret
+
+
+    def GetResultsAltFormat(self):
+        """
+        Returns a list of values, one for each input field.  The value for
+        a field is either its initial (default) value or the last value
+        entered by the user that has been successfully validated.  An input
+        that fails validation is not returned by the validator from the
+        window.  For a non-editable field, its initial value is returned.
+
+        Blank input is returned as a value of None.
+        """
+
+        ret = []
+        for x in range(self.item_cnt):
+            ret.append(self.inputs[x].GetValidator().GetValidatedInputAlt())
+        return ret
+
+
+    def GetResultsRawInput(self):
+        """
+        Returns a list of strings corresponding to each input field.  These
+        are the current values from the text control widgets whether or not
+        they have passed validation.  All values are returned as raw strings
+        (i.e., they are not converted to floats or ints and leading and
+        trailing whitespace is not stripped).
+        """
+
+        ret = []
+        for x in range(self.item_cnt):
+            ret.append(str(self.inputs[x].GetValue()))
+        return ret
+
+
+    def OnText(self, event):
+        """
+        This method is called each time a key stroke is entered in any text
+        control box.  It should be subclassed if special processing is needed.
+        The sample code below shows how to obtain the index of the box and its
+        value.  Note that the box's index runs from 0 to n-1, where n is the
+        total number of input and combo boxes, not just the number of input boxes.
+
+        # Get index of the input box that triggered the event.
+        text_ctrl = event.GetEventObject()
+        for box_idx, box_instance in enumerate(self.inputs):
+            if text_ctrl is box_instance:
+                break
+        # Get the edited string.
+        text = text_ctrl.GetValue()
+        print("Field:", box_idx, text)
+        """
+
+        # Run the validator bound to the text control box that has been edited.
+        # If the validation fails, the validator will highlight the input field
+        # to alert the user of the error.
+        text_ctrl = event.GetEventObject()
+        text_ctrl.GetValidator().Validate(text_ctrl)
+        event.Skip()
+
+
+    def OnComboBoxSelect(self, event):
+        """
+        This method is called each time a selection is made in any combo box.
+        It should be subclassed if the caller wants to perform some action in
+        response to a selection event.  The sample code below shows how to
+        obtain the index of the box, the index of the item selected, and the
+        value.  Note that the box's index runs from 0 to n-1, where n is the
+        total number of combo and input boxes, not just the number of combo boxes.
+
+        # Get index of selected item in combo box dropdown list.
+        item_idx = event.GetSelection()
+        # Get index of combo box that triggered the event.
+        current_box = event.GetEventObject()
+        for box_idx, box_instance in enumerate(self.inputs):
+            if current_box is box_instance:
+                break
+        print("Combo:", box_idx, item_idx, self.itemlist[box_idx][4][item_idx])
+        """
+
+        # Run the validator bound to the combo box that has a selection event.
+        # This should not fail unless the combo options were set up incorrectly.
+        # If the validation fails, the validator will highlight the input field
+        # to alert the user of the error.
+        combo_box = event.GetEventObject()
+        combo_box.GetValidator().Validate(combo_box)
+        event.Skip()
+
+#==============================================================================
+
+class InputListDialog(wx.Dialog):
+    """
+    This class implements a general purpose mechanism for obtaining and
+    validating user input from several fields in a pop-up dialog box.
+    (See InputListPanel that uses a scrolled window instead of a dialog box.)
+
+    It creates a pop-up dialog box in which to display one or more input fields
+    each preceded by a label.  The input fields can be a combination of simple
+    data entry boxes or drop down combo boxes.  Automatic validation of user
+    input is performed.  OK and Cancel buttons are provided at the bottom of
+    the dialog box for the user to signal completion of data entry whereupon
+    the caller can use the GetResults() method to obtain the final results from
+    all fields in the form of a list of values.  As with any dialog box, when
+    the user presses OK or Cancel the dialog disappears from the screen, but
+    the caller of this class is responsible for destroying the dialog box.
+
+    The dialog box is automatically sized to fit the fields and buttons with
+    reasonable spacing between the widgets.  The layout is:
+
+    +-------------------------------------+
+    |  Title                          [X] |
+    +-------------------------------------+
+    |                                     |
+    |  Label-1:   [<drop down list>  |V]  |
+    |                                     |     Note that drop down lists and
+    |  Label-2:   [<data entry field-2>]  |     simple data entry fields can
+    |  ...                                |     be specified in any order.
+    |  Label-n:   [<data entry field-n>]  |
+    |                                     |
+    |       [  OK  ]      [Cancel]        |
+    |                                     |
+    +-------------------------------------+
+
+    The itemlist parameter controls the display.  It is a list of input field
+    description lists where each description list contains 5 or 6 elements and
+    the 6th element is optional.  The items in the description list are:
+
+    [0] Label string prefix for the input field
+    [1] Default value
+    [2] Datatype for validation (see ItemListValidator docstring for details)
+    [3] Flags parameter in the form of a string of characters as follows:
+        R - input is required; otherwise input is optional and can be blank
+        E - field is editable by the user; otherwise it is non-editable and box
+            is grayed-out; a non-editable field has its default value returned
+        C - field is a combobox; otherwise it is a simple data entry box
+        L - field is preceded by a divider line; 'L' takes precedence over 'H'
+        H - field is preceded by a header given in the 6th element of the list;
+            the following header sub-options are valid only if 'H' is specified:
+            0 - header text size is same as label text size (default)
+            1 - header text size is label text size + 1 point (large)
+            2 - header text size is label text size + 2 points (x-large)
+            3 - header text size is label text size + 3 points (2x-large)
+            B - header text is bolded
+            U - header text is underlined
+        Options can be combined in the flags string, such as 'REHB2', which means
+        the field is required, editable, and preceded by a bold, extra-large header.
+    [4] List of values for a combo box or None for a simple data entry field
+    [5] Header string to be displayed above the label string of the input field;
+        if 'H' is not specified, this list element can be omitted or can be None
+
+    The align parameter determines whether input fields are aligned across
+    sections when the input fields are grouped into sections.  If True, the
+    widest text label determines the space allocated for all labels; if False,
+    the text label width is determined separately for each section.
+
+    The fontsize parameter allows the caller to specify a font point size to
+    override the default point size.
+
+    See the AppTestFrame class for a comprehensive example.
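+
+    A minimal illustrative itemlist with one simple entry field and one combo
+    box (labels, defaults, and choices below are examples only):
+
+        itemlist = [
+            ["Number of steps:", 100, int, 'RE', None],
+            ["Color:", "Red", str, 'CE', ("Red", "Green", "Blue")],
+        ]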
+    """
+
+    def __init__(self,
+                 parent   = None,
+                 id       = wx.ID_ANY,
+                 title    = "Enter Data",
+                 pos      = wx.DefaultPosition,
+                 size     = (300, -1),  # x is min_width; y will be calculated
+                 style    = wx.DEFAULT_DIALOG_STYLE,
+                 name     = "",
+                 itemlist = [],
+                 align    = False,
+                 fontsize = None
+                ):
+        wx.Dialog.__init__(self, parent, id, title, pos, size, style, name)
+
+        self.align = align
+        self.itemlist = itemlist
+        self.item_cnt = len(self.itemlist)
+        if self.item_cnt == 0:
+            return
+
+        # Set the font for this window and all child windows (widgets) from the
+        # parent window, or from the system defaults if no parent is given.
+        # A dialog box does not inherit font info from its parent, so we will
+        # explicitly get it from the parent and apply it to the dialog box.
+        if parent is not None:
+            font = parent.GetFont()
+            self.SetFont(font)
+
+        # If the caller specifies a font size, override the default value.
+        if fontsize is not None:
+            font = self.GetFont()
+            font.SetPointSize(fontsize)
+            self.SetFont(font)
+        #print "Input Dialog box font ptsize =", self.GetFont().GetPointSize()
+
+        # Create the button controls (OK and Cancel) and bind their events.
+        ok_button = wx.Button(self, wx.ID_OK, "OK")
+        ok_button.SetDefault()
+        cancel_button = wx.Button(self, wx.ID_CANCEL, "Cancel")
+
+        self.Bind(wx.EVT_BUTTON, self.OnOk, ok_button)
+
+        # Specify the widget layout using sizers.
+        main_sizer = wx.BoxSizer(wx.VERTICAL)
+
+        # Create the text controls for labels and associated input fields
+        # and any optional headers.
+        self.add_items_to_dialog_box()
+
+        # Divide the input items into sections prefaced by header text (except
+        # that the first section is not required to have a header).  A section
+        # list is created that contains the index of the item that starts a new
+        # section plus a final entry that is one beyond the last item.
+        sect = [0]  # declare item 0 to be start of a new section
+        for i in range(self.item_cnt):
+            if i > 0 and self.headers[i] is not None:
+                sect.append(i)
+        sect.append(self.item_cnt)
+        #print "Section index list:", sect
+
+        # Place the items for each section in its own flex grid sizer.
+        for i in range(len(sect)-1):
+            j = sect[i]; k = sect[i+1] - 1
+            #print "Items per section:", j, "to", k
+            fg_sizer = self.add_items_to_sizer(j, k)
+
+            # Add the flex grid sizer to the main sizer.
+            if self.headers[j] is not None:  # self.headers[0] could be None
+                main_sizer.Add(self.headers[j], 0, border=10,
+                               flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT)
+            main_sizer.Add(fg_sizer, 0, border=10,
+                           flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT)
+
+        # Create the button sizer that will put the buttons in a row, right
+        # justified, and with a fixed amount of space between them.  This
+        # emulates the Windows convention for placing a set of buttons at the
+        # bottom right of the window.
+        button_sizer = wx.BoxSizer(wx.HORIZONTAL)
+        button_sizer.Add((10,20), 1)  # stretchable whitespace
+        button_sizer.Add(ok_button, 0)
+        button_sizer.Add((10,20), 0)  # non-stretchable whitespace
+        button_sizer.Add(cancel_button, 0)
+
+        # Add a separator line before the buttons.
+        separator = wx.StaticLine(self, wx.ID_ANY, style=wx.LI_HORIZONTAL)
+        main_sizer.Add(separator, 0, border=10,
+                       flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT)
+
+        # Add the button sizer to the main sizer.
+        main_sizer.Add(button_sizer, 0, border=10,
+                       flag=wx.EXPAND|wx.TOP|wx.BOTTOM|wx.RIGHT)
+
+        # Finalize the sizer and establish the dimensions of the dialog box.
+        # The minimum width is explicitly set because the sizer is not able to
+        # take into consideration the width of the enclosing frame's title.
+        self.SetSizer(main_sizer)
+        main_sizer.SetMinSize((size[0], -1))
+        main_sizer.Fit(self)
+
+
+    def add_items_to_dialog_box(self):
+        """
+        For each input item, create a header (optional), label, and input box
+        widget to instantiate it.  Put the handles for these widgets in the
+        headers, labels, and inputs lists where the length of each list is the
+        same as the number of input boxes.
+        """
+
+        self.headers = []; self.labels = []; self.inputs = []
+        self.widest = 0
+        first_error_idx = None
+
+        for x in range(self.item_cnt):
+            params = len(self.itemlist[x])
+            if params == 6:
+                text, default, datatype, flags, plist, header = self.itemlist[x]
+            elif params == 5:
+                text, default, datatype, flags, plist = self.itemlist[x]
+                header = None
+            if default is None: default = ""  # display None as a null string
+
+            # Process the flags parameter.
+            required = False
+            if 'R' in flags: required = True
+            editable = False
+            if 'E' in flags: editable = True
+            combo = False
+            if 'C' in flags: combo = True
+            line = False
+            if 'L' in flags: line = True
+            hdr = False
+            if 'H' in flags and header is not None: hdr = True
+            if hdr:
+                delta_pts = 0
+                if '1' in flags: delta_pts = 1  # large
+                if '2' in flags: delta_pts = 2  # X-large
+                if '3' in flags: delta_pts = 3  # 2X-large
+                weight = wx.NORMAL
+                if 'B' in flags: weight = wx.BOLD
+                underlined = False
+                if 'U' in flags: underlined = True
+
+            # Optionally, create a header widget to display above the input box.
+            # A dividing line is treated as a special case header.
+            if line:
+                lin = wx.StaticLine(self, wx.ID_ANY, style=wx.LI_HORIZONTAL)
+                self.headers.append(lin)
+            elif hdr:
+                hdr = wx.StaticText(self, wx.ID_ANY, label=header,
+                                    style=wx.ALIGN_CENTER)
+                font = hdr.GetFont()
+                ptsize = font.GetPointSize() + delta_pts
+                font.SetPointSize(ptsize)
+                font.SetWeight(weight)
+                font.SetUnderlined(underlined)
+                hdr.SetFont(font)
+                hdr.SetForegroundColour("BLUE")
+                self.headers.append(hdr)
+            else:
+                self.headers.append(None)
+
+            # Create the text label widget.
+            self.labels.append(wx.StaticText(self, wx.ID_ANY, label=text,
+                               style=wx.ALIGN_LEFT))
+            w, h = self.labels[x].GetSize()
+            if w > self.widest: self.widest = w
+
+            # Create the input box widget (combo box or simple data entry box)
+            if combo:              # it is a drop down combo box list
+                self.inputs.append(wx.ComboBox(self, wx.ID_ANY,
+                                   value=str(default),
+                                   validator=ItemListValidator(datatype, required),
+                                   choices=plist,
+                                   style=wx.CB_DROPDOWN|wx.CB_READONLY))
+                self.Bind(wx.EVT_COMBOBOX, self.OnComboBoxSelect, self.inputs[x])
+            else:                  # it is a simple data entry field
+                self.inputs.append(wx.TextCtrl(self, wx.ID_ANY,
+                                   value=str(default),
+                                   validator=ItemListValidator(datatype, required)))
+                self.Bind(wx.EVT_TEXT, self.OnText, self.inputs[x])
+
+            # If the field is not editable, disable it so the user cannot change it.
+            if not editable:
+                self.inputs[x].Enable(False)
+
+            # Validate the default value and highlight the field if the value is
+            # in error or if input is required and the value is a null string.
+            # Also, save index of the first field to fail validation, if any.
+            ret = self.inputs[x].GetValidator().Validate(self.inputs[x])
+            if not ret and first_error_idx is None: first_error_idx = x
+
+        # If any fields failed validation, set focus to the first failed one.
+        if first_error_idx is not None: self.inputs[first_error_idx].SetFocus()
+
+        # Determine if all input boxes should be aligned across sections.
+        if self.align:
+            for x in range(self.item_cnt):
+                self.labels[x].SetMinSize((self.widest, -1))
+
+
+    def add_items_to_sizer(self, start, end):
+        sizer = wx.FlexGridSizer(cols=2, hgap=5, vgap=10)
+        for x in range(start, end+1):
+            sizer.Add(self.labels[x], 0, wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL)
+            sizer.Add(self.inputs[x], 0, wx.EXPAND)
+        sizer.AddGrowableCol(1)
+        return sizer
+
+
+    def update_items_in_dialog_box(self, new_values):
+        for x in range(len(self.inputs)):
+            if new_values[x] is not None:
+                self.inputs[x].SetValue(str(new_values[x]))
+
+
+    def GetResults(self):
+        """
+        Returns a list of values, one for each input field.  The value for
+        a field is either its initial (default) value or the last value
+        entered by the user that has been successfully validated.  An input
+        that fails validation is not returned by the validator from the
+        window.  For a non-editable field, its initial value is returned.
+
+        Blank input is converted to 0 for int, 0.0 for float, or a 0-length
+        string for a string datatype.
+        """
+
+        ret = []
+        for x in range(self.item_cnt):
+            ret.append(self.inputs[x].GetValidator().GetValidatedInput())
+        return ret
+
+
+    def GetResultsAltFormat(self):
+        """
+        Returns a list of values, one for each input field.  The value for
+        a field is either its initial (default) value or the last value
+        entered by the user that has been successfully validated.  An input
+        that fails validation is not returned by the validator from the
+        window.  For a non-editable field, its initial value is returned.
+
+        Blank input is returned as a value of None.
+        """
+
+        ret = []
+        for x in range(self.item_cnt):
+            ret.append(self.inputs[x].GetValidator().GetValidatedInputAlt())
+        return ret
+
+
+    def GetResultsRawInput(self):
+        """
+        Returns a list of strings corresponding to each input field.  These
+        are the current values from the text control widgets whether or not
+        they have passed validation.  All values are returned as raw strings
+        (i.e., they are not converted to floats or ints and leading and
+        trailing whitespace is not stripped).
+        """
+
+        ret = []
+        for x in range(self.item_cnt):
+            ret.append(str(self.inputs[x].GetValue()))
+        return ret
+
+
+    def OnOk(self, event):
+        """
+        This method gets called when the user presses the OK button.
+        It is intended to be subclassed if special processing is needed.
+        """
+
+        # Explicitly validate all input values before proceeding.  Although
+        # char-by-char validation would have warned the user about any invalid
+        # entries, the user could have pressed the OK button without making
+        # the corrections, so we'll do a full validation pass now.  The only
+        # purpose is to display an explicit error if any input fails validation.
+        if not self.Validate():
+            wx.MessageBox(caption="Data Entry Error",
+                          message=DATA_ENTRY_ERRMSG,
+                          style=wx.ICON_ERROR|wx.OK)
+            return  # keep the dialog box open
+
+        # When the wx.ID_OK event is skipped (to allow handlers up the chain to
+        # run), the Validate methods for all text control boxes will be called.
+        # If all report success, the TransferFromWindow methods will be called
+        # and the dialog box will close.  However, if any Validate method fails
+        # this process will stop and the dialog box will remain open allowing
+        # the user to either correct the problem(s) or cancel the dialog.
+        event.Skip()
+
+
+    def OnText(self, event):
+        """
+        This method is called each time a key stroke is entered in any text
+        control box.  It should be subclassed if special processing is needed.
+        The sample code below shows how to obtain the index of the box and its
+        value.  Note that the box's index runs from 0 to n-1, where n is the
+        total number of input and combo boxes, not just the number of input boxes.
+
+        # Get index of the input box that triggered the event.
+        text_ctrl = event.GetEventObject()
+        for box_idx, box_instance in enumerate(self.inputs):
+            if text_ctrl is box_instance:
+                break
+        # Get the edited string.
+        text = text_ctrl.GetValue()
+        print("Field:", box_idx, text)
+        """
+
+        # Run the validator bound to the text control box that has been edited.
+        # If the validation fails, the validator will highlight the input field
+        # to alert the user of the error.
+        text_ctrl = event.GetEventObject()
+        text_ctrl.GetValidator().Validate(text_ctrl)
+        event.Skip()
+
+
+    def OnComboBoxSelect(self, event):
+        """
+        This method is called each time a selection is made in any combo box.
+        It should be subclassed if the caller wants to perform some action in
+        response to a selection event.  The sample code below shows how to
+        obtain the index of the box, the index of the item selected, and the
+        value.  Note that the box's index runs from 0 to n-1, where n is the
+        total number of combo and input boxes, not just the number of combo boxes.
+
+        # Get index of selected item in combo box dropdown list.
+        item_idx = event.GetSelection()
+        # Get index of combo box that triggered the event.
+        current_box = event.GetEventObject()
+        for box_idx, box_instance in enumerate(self.inputs):
+            if current_box is box_instance:
+                break
+        print("Combo:", box_idx, item_idx, self.itemlist[box_idx][4][item_idx])
+        """
+
+        # Run the validator bound to the combo box that has a selection event.
+        # This should not fail unless the combo options were set up incorrectly.
+        # If the validation fails, the validator will highlight the input field
+        # to alert the user of the error.
+        combo_box = event.GetEventObject()
+        combo_box.GetValidator().Validate(combo_box)
+        event.Skip()
+
+#==============================================================================
+
+class AppTestFrame(wx.Frame):
+    """
+    Interactively test both the InputListPanel and the InputListDialog classes.
+    Both will display the same input fields.  Enter invalid data to verify
+    char-by-char error processing.  Press the Submit and OK buttons with an
+    uncorrected highlighted field in error to generate a pop-up error box.
+    Resize the main window to see scroll bars disappear and reappear.
+    """
+
+    # Establish the default font and point size for the test.
+    FONTNAME = "Arial"
+    if wx.Platform == "__WXMSW__":
+        FONTSIZE = 9
+    elif wx.Platform == "__WXMAC__":
+        FONTSIZE = 12
+    elif wx.Platform == "__WXGTK__":
+        FONTSIZE = 11
+    else:
+        FONTSIZE = 10
+
+    def __init__(self):
+        wx.Frame.__init__(self, parent=None, id=wx.ID_ANY,
+                          title="InputListPanel Test", size=(300, 600))
+        panel = wx.Panel(self, wx.ID_ANY, style=wx.RAISED_BORDER)
+        panel.SetBackgroundColour("PALE GREEN")
+
+        pt_size = panel.GetFont().GetPointSize()
+
+        # Define fields for both InputListPanel and InputListDialog to display.
+        self.fields = [
+            ["Integer (int, optional):", 12345, int, 'EH3', None,
+                "Test Header (2X-large)"],
+            # Test specification of integer default value as a string
+            ["Integer (int, optional):", "-60", int, 'E', None],
+            # Default value is null, so the required field should be highlighted
+            ["Integer (int, required):", "", int, 'RE', None],
+            ["Floating Point (float, optional):", 2.34567e-5, float, 'EHB1', None,
+                "Test Header (large, bold)"],
+            ["Floating Point (float, optional):", "", float, 'E', None],
+            ["Floating Point (float, required):", 1.0, float, 'RE', None],
+            # Test unknown datatype which should be treated as 'str'
+            ["String (str, optional):", "DANSE", "foo", 'EHU', None,
+                "Test Header (%dpt font, underlined)"%pt_size],
+            ["String (str, required):", "delete me", str, 'RE', None],
+            ["Non-editable field:", "Cannot be changed!", str, '', None],
+            ["ComboBox String:", "Two", str, 'CREL', ("One", "Two", "Three")],
+            # ComboBox items must be specified as strings
+            ["ComboBox Integer:", "", int, 'CE', ("100", "200", "300")],
+            ["String (alphabetic):", "Aa", "str_alpha", 'E', None],
+            ["String (alphanumeric):", "Aa1", "str_alnum", 'E', None],
+            ["String (A-Z, a-z, 0-9, _, -):", "A-1_a", "str_id", 'E', None],
+                      ]
+
+        # Create the scrolled window with input boxes.  Due to the intentionally
+        # small size of the parent panel, both scroll bars should be displayed.
+        self.scrolled = InputListPanel(parent=panel, itemlist=self.fields,
+                                       align=True)
+
+        # Create a button to request the popup dialog box.
+        show_button = wx.Button(panel, wx.ID_ANY, "Show Pop-up Dialog Box")
+        self.Bind(wx.EVT_BUTTON, self.OnShow, show_button)
+
+        # Create a button to signal end of user edits and one to exit program.
+        submit_button = wx.Button(panel, wx.ID_ANY, "Submit")
+        self.Bind(wx.EVT_BUTTON, self.OnSubmit, submit_button)
+        exit_button = wx.Button(panel, wx.ID_ANY, "Exit")
+        self.Bind(wx.EVT_BUTTON, self.OnExit, exit_button)
+
+        # Create a horizontal sizer for the buttons.
+        button_sizer = wx.BoxSizer(wx.HORIZONTAL)
+        button_sizer.Add((10,20), 1)  # stretchable whitespace
+        button_sizer.Add(submit_button, 0)
+        button_sizer.Add((10,20), 0)  # non-stretchable whitespace
+        button_sizer.Add(exit_button, 0)
+
+        # Create a vertical box sizer for the panel and layout widgets in it.
+        box_sizer = wx.BoxSizer(wx.VERTICAL)
+        box_sizer.Add(show_button, 0, wx.ALIGN_CENTER|wx.ALL, border=10)
+        box_sizer.Add(self.scrolled, 1, wx.EXPAND|wx.ALL, border=10)
+        box_sizer.Add(button_sizer, 0, wx.EXPAND|wx.BOTTOM|wx.ALL, border=10)
+
+        # Associate the sizer with its container.
+        panel.SetSizer(box_sizer)
+        box_sizer.Fit(panel)
+
+
+    def OnShow(self, event):
+        # Display the same fields shown in the frame in a pop-up dialog box.
+        pt_size = self.FONTSIZE
+        self.fields[6][5] = "Test Header (%dpt font, underlined)"%pt_size
+        dlg = InputListDialog(parent=self,
+                              title="InputListDialog Test",
+                              itemlist=self.fields,
+                              align=True,
+                              fontsize=self.FONTSIZE)
+        if dlg.ShowModal() == wx.ID_OK:
+            print("****** Dialog Box results from validated input fields:")
+            print("  ", dlg.GetResults())
+            print("****** Dialog Box results from validated input fields" +\
+                  " (None if no input):")
+            print("  ", dlg.GetResultsAltFormat())
+            print("****** Dialog Box results from raw input fields:")
+            print("  ", dlg.GetResultsRawInput())
+        dlg.Destroy()
+
+
+    def OnSubmit(self, event):
+        # Explicitly validate all input parameters before proceeding.  Even
+        # though char-by-char validation would have warned the user about any
+        # invalid entries, the user could have pressed the Submit button without
+        # making the corrections, so a full validation pass is necessary.
+        if not self.scrolled.Validate():
+            wx.MessageBox(caption="Data Entry Error",
+                message="Please correct the highlighted fields in error.",
+                style=wx.ICON_ERROR|wx.OK)
+            return  # don't process the input until the errors are fixed
+        print("****** Scrolled Panel results from validated input fields:")
+        print("  ", self.scrolled.GetResults())
+        print("****** Scrolled Panel results from validated input fields" +\
+              " (None if no input):")
+        print("  ", self.scrolled.GetResultsAltFormat())
+        print("****** Scrolled Panel results from raw input fields:")
+        print("  ", self.scrolled.GetResultsRawInput())
+
+
+    def OnExit(self, event):
+        # Terminate the program.
+        self.Close()
+
+#==============================================================================
+
+if __name__ == '__main__':
+    # Interactively test both the InputListPanel and the InputListDialog classes.
+    app = wx.PySimpleApp()
+    frame = AppTestFrame()
+    frame.Show(True)
+    app.MainLoop()
diff --git a/bumps/gui/log_view.py b/bumps/gui/log_view.py
new file mode 100755
index 0000000..377fc4a
--- /dev/null
+++ b/bumps/gui/log_view.py
@@ -0,0 +1,51 @@
+import wx
+
+IS_MAC = (wx.Platform == '__WXMAC__')
+
+class LogView(wx.Panel):
+    title = 'Log'
+    default_size = (600,200)
+    def __init__(self, *args, **kw):
+        wx.Panel.__init__(self, *args, **kw)
+
+        self.log_info = []
+        vsizer = wx.BoxSizer(wx.VERTICAL)
+
+        self.progress = wx.TextCtrl(self,-1,style=wx.TE_MULTILINE|wx.HSCROLL)
+        self._redraw()
+
+        vsizer.Add(self.progress, 1, wx.EXPAND)
+
+        self.SetSizer(vsizer)
+        vsizer.Fit(self)
+        self.SetAutoLayout(True)
+        #self.SetupScrolling()
+
+        self.Bind(wx.EVT_SHOW, self.OnShow)
+
+    def OnShow(self, event):
+        if not event.Show: return
+        #print "showing log"
+        if self._need_redraw:
+            #print "-redraw"
+            self._redraw()
+
+    def get_state(self):
+        return self.log_info
+    def set_state(self, state):
+        self.log_info = state
+        self._redraw()
+
+    def log_message(self, message):
+        if len(self.log_info) > 1000:
+            del self.log_info[:-1000]
+        self.log_info.append(message)
+        self._redraw()
+
+    def _redraw(self):
+        if not IS_MAC and not self.IsShown():
+            self._need_redraw = True
+        else:
+            self._need_redraw = False
+            self.progress.Clear()
+            self.progress.AppendText("\n".join(self.log_info))
diff --git a/bumps/gui/parameter_view.py b/bumps/gui/parameter_view.py
new file mode 100755
index 0000000..8e089f5
--- /dev/null
+++ b/bumps/gui/parameter_view.py
@@ -0,0 +1,283 @@
+# Copyright (C) 2006-2011, University of Maryland
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+#
+# Author: Nikunj Patel
+
+"""
+This module implements the Parameter View panel.
+"""
+
+#==============================================================================
+
+import wx
+
+import wx.gizmos as gizmos
+
+from ..parameter import BaseParameter
+from .util import nice
+from . import signal
+
+
+IS_MAC = (wx.Platform == '__WXMAC__')
+
+class ParameterView(wx.Panel):
+    title = 'Parameters'
+    default_size = (640,500)
+    def __init__(self, *args, **kw):
+        wx.Panel.__init__(self, *args, **kw)
+
+        #sizers
+        vbox = wx.BoxSizer(wx.VERTICAL)
+        text_hbox = wx.BoxSizer(wx.HORIZONTAL)
+
+        self.tree = gizmos.TreeListCtrl(self, -1, style =
+                                        wx.TR_DEFAULT_STYLE
+                                        | wx.TR_HAS_BUTTONS
+                                        | wx.TR_TWIST_BUTTONS
+                                        | wx.TR_ROW_LINES
+                                        #| wx.TR_COLUMN_LINES
+                                        | wx.TR_NO_LINES
+                                        | wx.TR_FULL_ROW_HIGHLIGHT
+                                       )
+
+        # Create columns.
+        self.tree.AddColumn("Model")
+        self.tree.AddColumn("Parameter")
+        self.tree.AddColumn("Value")
+        self.tree.AddColumn("Minimum")
+        self.tree.AddColumn("Maximum")
+        self.tree.AddColumn("Fit?")
+
+        # Align the textctrl box with treelistctrl.
+        self.tree.SetMainColumn(0) # the one with the tree in it...
+        self.tree.SetColumnWidth(0, 180)
+        self.tree.SetColumnWidth(1, 150)
+        self.tree.SetColumnWidth(2, 73)
+        self.tree.SetColumnWidth(3, 73)
+        self.tree.SetColumnWidth(4, 73)
+        self.tree.SetColumnWidth(5, 40)
+
+        # Determine which columns are editable.
+        self.tree.SetColumnEditable(0, False)
+        self.tree.SetColumnEditable(1, False)
+        self.tree.SetColumnEditable(2, True)
+        self.tree.SetColumnEditable(3, True)
+        self.tree.SetColumnEditable(4, True)
+        self.tree.SetColumnEditable(5, False)
+
+        self.tree.GetMainWindow().Bind(wx.EVT_RIGHT_UP, self.OnRightUp)
+        self.tree.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.OnEndEdit)
+        '''
+        self.tree.Bind(wx.EVT_TREE_ITEM_GETTOOLTIP,self.OnTreeTooltip)
+        wx.EVT_MOTION(self.tree, self.OnMouseMotion)
+        '''
+
+        vbox.Add(self.tree, 1, wx.EXPAND)
+        self.SetSizer(vbox)
+        self.SetAutoLayout(True)
+
+        self._need_update_parameters = self._need_update_model = False
+        self.Bind(wx.EVT_SHOW, self.OnShow)
+
+    # ============= Signal bindings =========================
+
+    '''
+    def OnTreeTooltip(self, event):
+         itemtext = self.tree.GetItemText(event.GetItem())
+         event.SetToolTip("This is a ToolTip for %s!" % itemtext)
+         event.Skip()
+
+    def OnMouseMotion(self, event):
+        pos = event.GetPosition()
+        item, flags, col = self.tree.HitTest(pos)
+
+        if wx.TREE_HITTEST_ONITEMLABEL:
+            self.tree.SetToolTipString("tool tip")
+        else:
+            self.tree.SetToolTipString("")
+
+        event.Skip()
+    '''
+
+    def OnShow(self, event):
+        if not event.Show: return
+        #print "showing parameter"
+        if self._need_update_model:
+            #print "-model update"
+            self.update_model(self.model)
+        elif self._need_update_parameters:
+            #print "-parameter update"
+            self.update_parameters(self.model)
+        event.Skip()
+
+    # ============ Operations on the model  ===============
+    def get_state(self):
+        return self.model
+    def set_state(self, state):
+        self.set_model(state)
+
+    def set_model(self, model):
+        self.model = model
+        self.update_model(model)
+
+    def update_model(self, model):
+        if self.model != model: return
+
+        if not IS_MAC and not self.IsShown():
+            self._need_update_model = True
+        else:
+            self._need_update_model = self._need_update_parameters = False
+            self._update_model()
+
+    def update_parameters(self, model):
+        if self.model != model: return
+        if not IS_MAC and not self.IsShown():
+            self._need_update_parameters = True
+        else:
+            self._need_update_parameters = False
+            self._update_tree_nodes()
+
+    def _update_model(self):
+        # Delete the previous tree (if any).
+        self.tree.DeleteAllItems()
+        if self.model is None: return
+        parameters = self.model.model_parameters()
+        # Add a root node.
+        self.root = self.tree.AddRoot("Model")
+        # Add nodes from our data set.
+        self._add_tree_nodes(self.root, parameters)
+        self._update_tree_nodes()
+        self.tree.ExpandAll(self.root)
+
+    def _add_tree_nodes(self, branch, nodes):
+        if isinstance(nodes,dict) and nodes != {}:
+            for k in sorted(nodes.keys()):
+                child = self.tree.AppendItem(branch, k)
+                self._add_tree_nodes(child,nodes[k])
+        elif ( ( isinstance(nodes, tuple) and nodes != () ) or
+              ( isinstance(nodes, list) and nodes != [] ) ):
+            for i,v in enumerate(nodes):
+                child = self.tree.AppendItem(branch, '[%d]'%i)
+                self._add_tree_nodes(child,v)
+
+        elif isinstance(nodes, BaseParameter):
+            self.tree.SetItemPyData(branch, nodes)
+
+    def _update_tree_nodes(self):
+        node = self.tree.GetRootItem()
+        while node.IsOk():
+            self._set_leaf(node)
+            node = self.tree.GetNext(node)
+
+    def _set_leaf(self, branch):
+        par = self.tree.GetItemPyData(branch)
+        if par is None: return
+
+        if par.fittable:
+            if par.fixed:
+                fitting_parameter = 'No'
+                low, high = '', ''
+            else:
+                fitting_parameter = 'Yes'
+                low, high = (str(v) for v in par.bounds.limits)
+        else:
+            fitting_parameter = ''
+            low, high = '', ''
+
+        self.tree.SetItemText(branch, str(par.name), 1)
+        self.tree.SetItemText(branch, str(nice(par.value)), 2)
+        self.tree.SetItemText(branch, low, 3)
+        self.tree.SetItemText(branch, high, 4)
+        self.tree.SetItemText(branch, fitting_parameter, 5)
+
+    def OnRightUp(self, evt):
+        pos = evt.GetPosition()
+        branch, flags, column = self.tree.HitTest(pos)
+        if column == 5:
+            par = self.tree.GetItemPyData(branch)
+            if par is None: return
+
+            if par.fittable:
+                fitting_parameter = self.tree.GetItemText(branch, column)
+                if fitting_parameter == 'No':
+                    par.fixed = False
+                    fitting_parameter = 'Yes'
+                    low, high = (str(v) for v in par.bounds.limits)
+                elif fitting_parameter == 'Yes':
+                    par.fixed = True
+                    fitting_parameter = 'No'
+                    low, high = '', ''
+
+                self.tree.SetItemText(branch, low, 3)
+                self.tree.SetItemText(branch, high, 4)
+                self.tree.SetItemText(branch, fitting_parameter, 5)
+                signal.update_model(model=self.model, dirty=False)
+
+    def OnEndEdit(self, evt):
+        item = self.tree.GetSelection()
+        self.node_object = self.tree.GetItemPyData(evt.GetItem())
+        # TODO: Not an efficient way of updating values of Parameters
+        # but it is hard to find out which column changed during edit
+        # operation. This may be fixed in the future.
+        wx.CallAfter(self.get_new_name, item, 1)
+        wx.CallAfter(self.get_new_value, item, 2)
+        wx.CallAfter(self.get_new_min, item, 3)
+        wx.CallAfter(self.get_new_max, item, 4)
+
+    def get_new_value(self, item, column):
+        new_value = self.tree.GetItemText(item, column)
+
+        # Send update message to other tabs/panels only if parameter value
+        # is updated.
+        if new_value != str(self.node_object.value):
+            self.node_object.clip_set(float(new_value))
+            signal.update_parameters(model=self.model)
+
+    def get_new_name(self, item, column):
+        new_name = self.tree.GetItemText(item, column)
+
+        # Send update message to other tabs/panels only if parameter name
+        # is updated.
+        if new_name != str(self.node_object.name):
+            self.node_object.name = new_name
+            signal.update_model(model=self.model, dirty=False)
+
+    def get_new_min(self, item, column):
+        low = self.tree.GetItemText(item, column)
+        if low == '': return
+        low = float(low)
+        high = self.node_object.bounds.limits[1]
+
+        # Send update message to other tabs/panels only if parameter min range
+        # value is updated.
+        if low != self.node_object.bounds.limits[0]:
+            self.node_object.range(low, high)
+            signal.update_model(model=self.model, dirty=False)
+
+    def get_new_max(self, item, column):
+        high = self.tree.GetItemText(item, column)
+        if high == '': return
+        low = self.node_object.bounds.limits[0]
+        high = float(high)
+        # Send update message to other tabs/panels only if parameter max range
+        # value is updated.
+        if high != self.node_object.bounds.limits[1]:
+            self.node_object.range(low, high)
+            signal.update_model(model=self.model, dirty=False)
diff --git a/bumps/gui/plot_view.py b/bumps/gui/plot_view.py
new file mode 100644
index 0000000..c991c17
--- /dev/null
+++ b/bumps/gui/plot_view.py
@@ -0,0 +1,220 @@
+from __future__ import with_statement
+
+import wx
+IS_MAC = (wx.Platform == '__WXMAC__')
+
+from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
+from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as Toolbar
+
+# The Figure object is used to create backend-independent plot representations.
+from matplotlib.figure import Figure
+
+
+from .util import EmbeddedPylab
+
+
+class PlotView(wx.Panel):
+    title = 'Plot'
+    default_size = (600, 400)
+
+    def __init__(self, *args, **kw):
+        wx.Panel.__init__(self, *args, **kw)
+
+        # The panel title can be overridden with a 'title' keyword argument.
+        if 'title' in kw:
+            self.title = kw['title']
+
+        # Instantiate a figure object that will contain our plots.
+        figure = Figure(figsize=(1,1), dpi=72)
+
+        # Initialize the figure canvas, mapping the figure object to the plot
+        # engine backend.
+        canvas = FigureCanvas(self, wx.ID_ANY, figure)
+
+        # Wx-Pylab magic ...
+        # Make our canvas an active figure manager for pylab so that when
+        # pylab plotting statements are executed they will operate on our
+        # canvas and not create a new frame and canvas for display purposes.
+        # This technique allows this application to execute code that uses
+        # pylab statements to generate plots and embed these plots in our
+        # application window(s).  Use _activate_figure() to set.
+        self.pylab_interface = EmbeddedPylab(canvas)
+
+        # Instantiate the matplotlib navigation toolbar and explicitly show it.
+        mpl_toolbar = Toolbar(canvas)
+        mpl_toolbar.Realize()
+
+        # Create a vertical box sizer to manage the widgets in the main panel.
+        sizer = wx.BoxSizer(wx.VERTICAL)
+        sizer.Add(canvas, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, border=0)
+        sizer.Add(mpl_toolbar, 0, wx.EXPAND|wx.ALL, border=0)
+
+        # Associate the sizer with its container.
+        self.SetSizer(sizer)
+        sizer.Fit(self)
+
+        self._calculating = False
+        self._need_plot = self._need_newmodel = False
+        self.Bind(wx.EVT_SHOW, self.OnShow)
+        self.plot_state = None
+        self.model = None
+
+
+        '''
+        # Add context menu and keyboard support to canvas
+        canvas.Bind(wx.EVT_RIGHT_DOWN, self.OnContextMenu)
+        #canvas.Bind(wx.EVT_LEFT_DOWN, lambda evt: canvas.SetFocus())
+
+
+        # Status bar
+        frame = self.GetTopLevelParent()
+        self.statusbar = frame.GetStatusBar()
+        if self.statusbar is None:
+            self.statusbar = frame.CreateStatusBar()
+        status_update = lambda msg: self.statusbar.SetStatusText(msg)
+
+        canvas.mpl_connect('motion_notify_event', self.OnMotion),
+        '''
+
+    '''
+    def OnContextMenu(self,event):
+        """
+        Forward the context menu invocation to profile, if profile exists.
+        """
+        transform = self.axes.transData
+        sx,sy = event.GetX(), event.GetY()
+        data_x,data_y = pixel_to_data(transform, sx, self.fig.bbox.height-sy)
+
+        popup = wx.Menu()
+        item = popup.Append(wx.ID_ANY,'&Grid on/off', 'Toggle grid lines')
+        wx.EVT_MENU(self, item.GetId(),
+                    lambda _: (self.axes.grid(),self.fig.canvas.draw_idle()))
+        self.PopupMenu(popup, (sx,sy))
+        return False
+
+    def update_cursor(self, x, y):
+        def nice(value, range):
+            place = int(math.log10(abs(range[1]-range[0]))-3)
+            #print value,range,place
+            if place<0: return "%.*f"%(-place,value)
+            else: return "%d"%int(value)
+        self.status_update("x:%s  y:%s"
+                           %( nice(x, self.axes.get_xlim()),
+                              nice(y, self.axes.get_ylim())))
+
+    def OnMotion(self, event):
+        """Respond to motion events by changing the active layer."""
+
+        # Force data coordinates for the mouse position
+        transform = self.axes.transData
+        x,y = pixel_to_data(transform, event.x, event.y)
+        self.update_cursor(x,y)
+    '''
+
+    def OnShow(self, event):
+        #print "theory show"
+        if not event.Show:
+            return
+        #print "showing theory"
+        if self._need_newmodel:
+            self._redraw(newmodel=True)
+        elif self._need_plot:
+            self._redraw(newmodel=False)
+
+
+    def set_model(self, model):
+        self.model = model
+        if not IS_MAC and not self.IsShown():
+            self._need_newmodel = True
+        else:
+            self._redraw(newmodel=True)
+
+    def update_model(self, model):
+        #print "profile update model"
+        if self.model != model:  # ignore updates to different models
+            return
+
+        if not IS_MAC and not self.IsShown():
+            self._need_newmodel = True
+        else:
+            self._redraw(newmodel=True)
+
+    def update_parameters(self, model):
+        #print "profile update parameters"
+        if self.model != model: return
+
+        if not IS_MAC and not self.IsShown():
+            self._need_plot = True
+        else:
+            self._redraw(newmodel=self._need_newmodel)
+
+    def _redraw(self, newmodel=False):
+        self._need_newmodel = newmodel
+        if self._calculating:
+            # That means that I've entered the thread through a
+            # wx.Yield for the currently executing redraw.  I need
+            # to cancel the running thread and force it to start
+            # the calculation over.
+            self.cancel_calculation = True
+            #print "canceling calculation"
+            return
+
+        with self.pylab_interface as pylab:
+            self._calculating = True
+
+            #print "calling again"
+            while True:
+                #print "restarting"
+                # We are restarting the calculation, so clear the cancel flag
+                self.cancel_calculation = False
+
+                if self._need_newmodel:
+                    self.newmodel()
+                    if self.cancel_calculation:
+                        continue
+                    self._need_newmodel = False
+                self.plot()
+                if self.cancel_calculation:
+                    continue
+                pylab.draw()
+                break
+        self._need_plot = False
+        self._calculating = False
+
+    def get_state(self):
+        #print "returning state",self.model,self.plot_state
+        return self.model, self.plot_state
+
+    def set_state(self, state):
+        self.model, self.plot_state = state
+        #print "setting state",self.model,self.plot_state
+        self.plot()
+
+    def menu(self):
+        """
+        Return a model specific menu
+        """
+        return None
+
+    def newmodel(self, model=None):
+        """
+        New or updated model structure.  Do whatever precalculation you
+        need.  plot will be called separately when you are done.
+
+        For long calculations, periodically perform wx.YieldIfNeeded()
+        and then if self.cancel_calculation is True, return from the calculation.
+        """
+        pass
+
+    def plot(self):
+        """
+        Plot to the current figure.  If model has a plot method,
+        just use that.
+
+        For long calculations, periodically perform wx.YieldIfNeeded()
+        and then if self.cancel_calculation is True, return from the plot.
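+
+        A sketch of that pattern for a subclass override (expensive_chunks is
+        a hypothetical helper standing in for the slow part of the drawing):
+
+        def plot(self):
+            for chunk in self.model.expensive_chunks():
+                chunk.draw()
+                wx.YieldIfNeeded()
+                if self.cancel_calculation:
+                    return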
+        """
+        if hasattr(self.model, 'plot'):
+            self.model.plot()
+        else:
+            raise NotImplementedError("PlotPanel needs a plot method")
diff --git a/bumps/gui/resfiles.py b/bumps/gui/resfiles.py
new file mode 100644
index 0000000..b8df115
--- /dev/null
+++ b/bumps/gui/resfiles.py
@@ -0,0 +1,157 @@
+"""
+Package data files
+==================
+
+Some python packages, particularly gui packages, but also other packages
+with static data need to be able to ship the data with the package.  This
+is particularly a problem for py2exe since python does not provide any
+facilities for extracting the data files from the bundled exe at runtime.
+Instead, the setup.py process needs to ask for the package data files so
+it can bundle them separately from the exe.  When the application is running
+it will need to find the resource files so that it can load them, regardless
+of whether it is running directly from the source tree, from an installed
+package, or from an exe or app.
+
+The Resources class handles both the setup and the runtime requirements
+for package resources.  You will need to put the resources in a path
+within your source tree and initialize a Resources object with the necessary
+information.  For example, assuming the resources are in a subdirectory
+named "resources", and this module is stored as resfiles.py in the parent
+package, you would set resources/__init__.py as follows::
+
+    from ..resfiles import Resources
+    resources = Resources(package=__name__,
+                          patterns=('*.png', '*.jpg', '*.ico', '*.wav'),
+                          datadir='bumps-data',
+                          check_file='reload.png',
+                          env='BUMPS_DATA')
+
+Now a resource file such as 'reload.png' can be accessed from a parent module
+using::
+
+    from .resources import resources
+    resources.get_path('reload.png')
+
+"""
+import sys
+import os
+import glob
+
+
+class Resources(object):
+    """
+    Identify project resource files.
+
+    *package* : string
+        Name of the subpackage containing the resources.  From the __init__.py
+        file of the resource directory, this is just __name__.
+    *patterns* : list or tuple
+        Set of glob patterns used to identify resource files in the resource
+        package.
+    *datadir* : string
+        Name of the installed resource directory.  This is used in setup to
+        prepare the resource directory and in the application to locate the
+        resources that have been installed.
+    *check_file* : string
+        Name of a resource file that should be in the resource directory.  This
+        is used to check that the resource directory exists in the installed
+        application.
+    *env* : string (optional)
+        Environment variable which contains the complete path to the resource
+        directory.  The environment variable overrides other methods of
+        accessing the resources except running directly from the source tree.
+    """
+    def __init__(self, package, patterns, datadir, check_file, env=None):
+        self.package = package
+        self.patterns = patterns
+        self.datadir = datadir
+        self.check_file = check_file
+        self.env = env
+        self._cached_path = None
+
+    def package_data(self):
+        """
+        Return the data files associated with the package.
+
+        The format is a dictionary of {'fully.qualified.package': [files...]}
+        used directly in the setup script as::
+
+            setup(...,
+                  package_data=package_data(),
+                  ...)
+        """
+        return { self.package: list(self.patterns) }
+
+    def data_files(self):
+        """
+        Return the data files associated with the package.
+
+        The format is a list of (directory, [files...]) pairs which can be
+        used directly in the py2exe setup script as::
+
+            setup(...,
+                  data_files=data_files(),
+                  ...)
+
+        Unlike package_data(), which only works from the source tree, data_files
+        uses the installed data path to locate the resources.
+        """
+        data_files = [(self.datadir, self._finddata(*self.patterns))]
+        return data_files
+
+    def _finddata(self, *patterns):
+        path = self.resource_dir()
+        files = []
+        for p in patterns:
+            files += glob.glob(os.path.join(path,p))
+        return files
+
+    def resource_dir(self):
+        """
+        Return the path to the application data.
+
+        This is either in an environment variable, in the source tree next to
+        this file, or beside the executable.  The environment key can be set
+        using
+        """
+        # If we already found it, then we are done
+        if self._cached_path is not None:
+            return self._cached_path
+
+        # Check for data in the package itself (which will be the case when
+        # we are running from the source tree).
+        path = os.path.abspath(os.path.dirname(sys.modules[self.package].__file__))
+        if self._cache_resource_path(path):
+            return self._cached_path
+
+        # Check for data path in the environment.  If the environment variable
+        # is specified, then the resources have to be there, or the program fails.
+        if self.env and self.env in os.environ:
+            if not self._cache_resource_path(os.environ[self.env]):
+                raise RuntimeError('Environment %s not a directory'%self.env)
+            return self._cached_path
+
+        # Check for data next to exe/zip file.
+        exepath = os.path.dirname(sys.executable)
+        path = os.path.join(exepath,self.datadir)
+        if self._cache_resource_path(path):
+            return self._cached_path
+
+        # py2app puts the data in Contents/Resources, but the executable
+        # is in Contents/MacOS.
+        path = os.path.join(exepath,'..','Resources',self.datadir)
+        if self._cache_resource_path(path):
+            return self._cached_path
+
+        raise RuntimeError('Could not find the GUI data files')
+
+    def _cache_resource_path(self, path):
+        if os.path.exists(os.path.join(path,self.check_file)):
+            self._cached_path = path
+            return True
+        else:
+            return False
+
+    def get_path(self, filename):
+        return os.path.join(self.resource_dir(),filename)
+
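+# Illustrative sketch: given the "resources" subpackage shown in the module
+# docstring above, a setup.py might combine both helpers, e.g.
+#
+#     from bumps.gui.resources import resources
+#     setup(...,
+#           package_data=resources.package_data(),
+#           # and for a py2exe/py2app build:
+#           data_files=resources.data_files(),
+#           ...)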
diff --git a/bumps/gui/resources/__init__.py b/bumps/gui/resources/__init__.py
new file mode 100644
index 0000000..4335b2c
--- /dev/null
+++ b/bumps/gui/resources/__init__.py
@@ -0,0 +1,9 @@
+
+# Identify the resource files for the bumps GUI
+from ..resfiles import Resources
+resources = Resources(package=__name__,
+                      patterns=('*.png', '*.jpg', '*.ico', '*.wav'),
+                      datadir='bumps-data',
+                      check_file='reload.png',
+                      env='BUMPS_DATA')
+del Resources
diff --git a/bumps/gui/resources/bumps.ico b/bumps/gui/resources/bumps.ico
new file mode 100644
index 0000000..37e4ecc
Binary files /dev/null and b/bumps/gui/resources/bumps.ico differ
diff --git a/bumps/gui/resources/bumps_splash.jpg b/bumps/gui/resources/bumps_splash.jpg
new file mode 100644
index 0000000..9ac8332
Binary files /dev/null and b/bumps/gui/resources/bumps_splash.jpg differ
diff --git a/bumps/gui/resources/done.wav b/bumps/gui/resources/done.wav
new file mode 100644
index 0000000..1546a5d
Binary files /dev/null and b/bumps/gui/resources/done.wav differ
diff --git a/bumps/gui/resources/import_script.png b/bumps/gui/resources/import_script.png
new file mode 100755
index 0000000..8755c17
Binary files /dev/null and b/bumps/gui/resources/import_script.png differ
diff --git a/bumps/gui/resources/reload.png b/bumps/gui/resources/reload.png
new file mode 100755
index 0000000..3bd9723
Binary files /dev/null and b/bumps/gui/resources/reload.png differ
diff --git a/bumps/gui/resources/start_fit.png b/bumps/gui/resources/start_fit.png
new file mode 100755
index 0000000..d54358e
Binary files /dev/null and b/bumps/gui/resources/start_fit.png differ
diff --git a/bumps/gui/resources/stop_fit.png b/bumps/gui/resources/stop_fit.png
new file mode 100755
index 0000000..60d06f6
Binary files /dev/null and b/bumps/gui/resources/stop_fit.png differ
diff --git a/bumps/gui/signal.py b/bumps/gui/signal.py
new file mode 100644
index 0000000..d93c482
--- /dev/null
+++ b/bumps/gui/signal.py
@@ -0,0 +1,61 @@
+"""
+Signals changes to the model that need to be reflected in the views.
+
+In practice, the main window is the only listener, and it forwards the
+messages to the appropriate views.
+"""
+import wx
+import wx.py.dispatcher
+from wx.py.dispatcher import send
+
+# export the connect function; do it this way so that linters don't complain
+# about unused imports
+connect = wx.py.dispatcher.connect
+
+
+def model_new(model):
+    """
+    Inform all views that a new model is available.
+    """
+    wx.CallAfter(send, 'model.new', model=model)
+
+
+def update_model(model, dirty=True):
+    """
+    Inform all views that the model structure has changed.  This calls
+    model.model_reset() to reset the fit parameters and constraints.
+    """
+    model.model_reset()
+    if dirty:
+        model.model_update()
+    wx.CallAfter(send, 'model.update_structure', model=model)
+
+
+_DELAYED_SIGNAL = {}
+def update_parameters(model, delay=100):
+    """
+    Inform all views that the model has changed.  Note that if the model
+    is changing rapidly, then the signal will be delayed for a time.  This
+    calls model.model_update() to let the model know that it needs to be
+    recalculated.
+    """
+    # signaller is responsible for marking the model as needing recalculation
+    model.model_update()
+    # TODO: potential race condition
+    # The future call may be occurring at the time that Restart is triggered.
+    # Not sure we can do anything about it from outside wx...
+    signal = _DELAYED_SIGNAL.get(model, None)
+    if signal is not None:
+        # signal is already active, so delay it some more
+        signal.Restart(delay)
+    else:
+        # activate a new signal, and call when back at GUI loop
+        def _send_signal():
+            #print "sending update parameters",model
+            del _DELAYED_SIGNAL[model]
+            wx.CallAfter(send, 'model.update_parameters', model=model)
+        _DELAYED_SIGNAL[model] = wx.FutureCall(delay, _send_signal)
+
+
+def log_message(message):
+    wx.CallAfter(send, 'log', message=message)
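+
+# Illustrative sketch: a view subscribes to the coalesced parameter updates
+# through the exported connect function (on_update is a hypothetical handler):
+#
+#     from bumps.gui import signal
+#     def on_update(model=None):
+#         ...  # refresh widgets from model
+#     signal.connect(on_update, 'model.update_parameters')
+#
+# Rapid calls to update_parameters(model) are merged into a single
+# 'model.update_parameters' message delivered after *delay* ms.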
diff --git a/bumps/gui/summary_view.py b/bumps/gui/summary_view.py
new file mode 100755
index 0000000..5db7830
--- /dev/null
+++ b/bumps/gui/summary_view.py
@@ -0,0 +1,212 @@
+# Copyright (C) 2006-2011, University of Maryland
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+#
+# Author: Nikunj Patel
+
+"""
+This module implements the Summary View panel.
+"""
+
+#==============================================================================
+
+from __future__ import division
+import wx
+
+import wx.lib.scrolledpanel as scrolled
+
+from .util import nice
+from . import signal
+
+IS_MAC = (wx.Platform == '__WXMAC__')
+
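+# Slider geometry: each slider is NUMPIX pixels wide with NUMPIX*5 discrete
+# positions; bounds.get01 maps a parameter value into [0,1], which is scaled
+# to NUMTICKS in ParameterSummary.update_slider.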
+NUMPIX = 400
+NUMTICKS = NUMPIX*5-1
+
+class SummaryView(scrolled.ScrolledPanel):
+    """
+    Model view showing summary of fit (only fittable parameters).
+    """
+    title = 'Summary'
+    default_size = (600,500)
+    def __init__(self, *args, **kw):
+        scrolled.ScrolledPanel.__init__(self, *args, **kw)
+
+        self.display_list = []
+
+        self.sizer = wx.GridBagSizer(hgap=0, vgap=3)
+        self.SetSizer(self.sizer)
+        self.sizer.Fit(self)
+
+        self.SetAutoLayout(True)
+        self.SetupScrolling()
+
+        self._need_update_parameters = self._need_update_model = False
+        self.Bind(wx.EVT_SHOW, self.OnShow)
+
+    def OnShow(self, event):
+        #print "show event"
+        if not event.Show: return
+        #print "showing summary"
+        if self._need_update_model:
+            #print "-update_model"
+            self.update_model(self.model)
+        elif self._need_update_parameters:
+            #print "-update_parameters"
+            self.update_parameters(self.model)
+        event.Skip()
+
+    # ============ Operations on the model  ===============
+
+    def get_state(self):
+        return self.model
+    def set_state(self, state):
+        self.set_model(state)
+
+    def set_model(self, model):
+        self.model = model
+        self.update_model(model)
+
+    def update_model(self, model):
+        if self.model != model: return
+
+        if not IS_MAC and not self.IsShown():
+            #print "summary not shown config"
+            self._need_update_model = True
+        else:
+            #print "summary shown config"
+            self._update_model()
+            self._need_update_parameters = self._need_update_model = False
+
+    def update_parameters(self, model):
+        if self.model != model: return
+        if not IS_MAC and not self.IsShown():
+            #print "summary not shown update"
+            self._need_update_parameters = True
+        else:
+            #print "summary shown upate"
+            self._need_update_parameters = False
+            self._update_parameters()
+
+    def _update_model(self):
+        #print "drawing"
+        self.sizer.Clear(deleteWindows=True)
+        self.display_list = []
+
+        self.layer_label = wx.StaticText(self, wx.ID_ANY, 'Fit Parameter',
+                                         size=(160,-1))
+        self.slider_label = wx.StaticText(self, wx.ID_ANY, '',
+                                         size=(100,-1))
+        self.value_label = wx.StaticText(self, wx.ID_ANY, 'Value',
+                                         size=(100,-1))
+        self.low_label = wx.StaticText(self, wx.ID_ANY, 'Minimum',
+                                         size=(100,-1))
+        self.high_label = wx.StaticText(self, wx.ID_ANY, 'Maximum',
+                                         size=(100,-1))
+
+        hbox = wx.BoxSizer(wx.HORIZONTAL)
+        hbox.Add(self.layer_label, 0, wx.LEFT, 1)
+        hbox.Add(self.slider_label, 0, wx.LEFT, 1)
+        hbox.Add(self.value_label, 0, wx.LEFT, 21)
+        hbox.Add(self.low_label, 0, wx.LEFT, 1)
+        hbox.Add(self.high_label, 0, wx.LEFT, 1)
+
+        # Note that row at pos=(0,0) is not used to add a blank row.
+        self.sizer.Add(hbox, pos=(1,0))
+
+        line = wx.StaticLine(self, wx.ID_ANY)
+        self.sizer.Add(line, pos=(2,0), flag=wx.EXPAND|wx.RIGHT, border=5)
+
+        # TODO: better interface to fittable parameters
+        if self.model is not None:
+            pars = self.model._parameters
+            #pars = sorted(pars, cmp=lambda x,y: cmp(x.name, y.name))
+            for p in pars:
+                self.display_list.append(ParameterSummary(self, p, self.model))
+
+        for index, item in enumerate(self.display_list):
+            self.sizer.Add(item, pos=(index+3,0))
+
+        self.SetupScrolling()
+        self.Layout()
+
+    def _update_parameters(self):
+        #print "updating"
+        for p in self.display_list:
+            p.update_slider()
+
+
+class ParameterSummary(wx.Panel):
+    """Build one parameter line for display."""
+    def __init__(self, parent, parameter, model):
+        wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY)
+
+        self.parameter = parameter
+        self.model = model
+
+        self.low, self.high = (v for v in self.parameter.bounds.limits)
+
+        text_hbox = wx.BoxSizer(wx.HORIZONTAL)
+
+        self.layer_name = wx.StaticText(self, wx.ID_ANY,
+                                        str(self.parameter.name),
+                                        size=(160,-1), style=wx.TE_LEFT)
+        self.slider = wx.Slider(self, wx.ID_ANY,
+                                value=0, minValue=0, maxValue=NUMPIX*5-1,
+                                size=(NUMPIX, 16), style=wx.SL_HORIZONTAL)
+        self.value = wx.StaticText(self, wx.ID_ANY, str(self.parameter.value),
+                                   size=(100,-1), style=wx.TE_LEFT)
+        self.min_range = wx.StaticText(self, wx.ID_ANY, str(self.low),
+                                       size=(100,-1), style=wx.TE_LEFT)
+        self.max_range = wx.StaticText(self, wx.ID_ANY, str(self.high),
+                                       size=(100,-1), style=wx.TE_LEFT)
+
+        # Add text strings and slider to sizer.
+        text_hbox.Add(self.layer_name, 0, wx.LEFT, 1)
+        text_hbox.Add(self.slider, 0, wx.LEFT, 1)
+        text_hbox.Add(self.value, 0, wx.LEFT, 21)
+        text_hbox.Add(self.min_range, 0, wx.LEFT, 1)
+        text_hbox.Add(self.max_range, 0, wx.LEFT, 1)
+
+        self.SetSizer(text_hbox)
+
+        self.slider.Bind(wx.EVT_SCROLL, self.OnScroll)
+        self.update_slider()
+
+    def update_slider(self):
+        slider_pos = int(self.parameter.bounds.get01(self.parameter.value)*NUMTICKS)
+        # Add line below if get01 doesn't protect against values out of range.
+        #slider_pos = min(max(slider_pos,0),100)
+        self.slider.SetValue(slider_pos)
+        self.value.SetLabel(str(nice(self.parameter.value)))
+
+        # Update new min and max range of values if changed.
+        newlow, newhigh = (v for v in self.parameter.bounds.limits)
+        if newlow != self.low:
+            self.min_range.SetLabel(str(newlow))
+
+        #if newhigh != self.high:
+        self.max_range.SetLabel(str(newhigh))
+
+    def OnScroll(self, event):
+        value = self.slider.GetValue()
+        new_value  = self.parameter.bounds.put01(value/NUMTICKS)
+        self.parameter.value = new_value
+        self.value.SetLabel(str(nice(new_value)))
+        signal.update_parameters(model=self.model, delay=1)
diff --git a/bumps/gui/uncertainty_view.py b/bumps/gui/uncertainty_view.py
new file mode 100644
index 0000000..0047a23
--- /dev/null
+++ b/bumps/gui/uncertainty_view.py
@@ -0,0 +1,101 @@
+from __future__ import with_statement
+
+from ..dream import views as dream_views
+from ..dream import stats as dream_stats
+from .. import errplot
+from .plot_view import PlotView
+
+
+class UncertaintyView(PlotView):
+    title = "Uncertainty"
+
+    def plot(self):
+        if not self.plot_state:
+            return
+        history, stats = self.plot_state
+        with self.pylab_interface as pylab:
+            pylab.clf()
+            dream_views.plot_vars(history.draw(), stats)
+            pylab.draw()
+
+    def update(self, state):
+        self.plot_state = state
+        self.plot()
+
+    def OnFitProgress(self, event):
+        if event.problem != self.model:
+            return
+        history = event.uncertainty_state
+        stats = dream_stats.var_stats(history.draw())
+        self.update((history,stats))
+
+
+class CorrelationView(PlotView):
+    title = "Correlations"
+
+    def plot(self):
+        if not self.plot_state:
+            return
+        # suppress correlation plot if too many variables
+        if self.plot_state.Nvar > 15:
+            return
+        history = self.plot_state
+        with self.pylab_interface as pylab:
+            pylab.clf()
+            dream_views.plot_corrmatrix(history.draw())
+            pylab.draw()
+
+    def update(self, state):
+        self.plot_state = state
+        self.plot()
+
+    def OnFitProgress(self, event):
+        if event.problem != self.model:
+            return
+        self.update(event.uncertainty_state)
+
+
+class TraceView(PlotView):
+    title = "Parameter Trace"
+
+    def plot(self):
+        if not self.plot_state:
+            return
+        history = self.plot_state
+        with self.pylab_interface as pylab:
+            pylab.clf()
+            dream_views.plot_trace(history)
+            pylab.draw()
+
+    def update(self, state):
+        self.plot_state = state
+        self.plot()
+
+    def OnFitProgress(self, event):
+        if event.problem != self.model:
+            return
+        self.plot_state = event.uncertainty_state
+        self.plot()
+
+
+class ModelErrorView(PlotView):
+    title = "Model Uncertainty"
+
+    def plot(self):
+        if not self.plot_state:
+            return
+        with self.pylab_interface as pylab:
+            pylab.clf()
+            # Won't get here if plot_state is None
+            errplot.show_errors(self.plot_state)
+            pylab.draw()
+
+    def OnFitProgress(self, event):
+        if event.problem != self.model:
+            return
+        self.update(event.problem, event.uncertainty_state)
+
+    def update(self, problem, state):
+        # Should happen in a separate process
+        self.plot_state = errplot.calc_errors_from_state(problem, state)
+        self.plot()
diff --git a/bumps/gui/util.py b/bumps/gui/util.py
new file mode 100755
index 0000000..481d73a
--- /dev/null
+++ b/bumps/gui/util.py
@@ -0,0 +1,105 @@
+"""
+Wx-Pylab magic for displaying plots within an application's window.
+"""
+from math import log10, floor
+import string
+
+import wx
+
+class EmbeddedPylab(object):
+    """
+    Define a 'with' context manager that lets you use pylab commands to
+    plot on an embedded canvas.  This is useful for wrapping existing
+    scripts in a GUI, and benefits from being more familiar than the
+    underlying object oriented interface.
+
+    As a convenience, the pylab module is returned on entry.
+
+    Example
+    -------
+
+    The following example shows how to use the WxAgg backend in a wx panel::
+
+        from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
+        from matplotlib.backends.backend_wxagg import NavigationToolbar2Wx as Toolbar
+        from matplotlib.figure import Figure
+
+        class PlotPanel(wx.Panel):
+            def __init__(self, *args, **kw):
+                wx.Panel.__init__(self, *args, **kw)
+
+                figure = Figure(figsize=(1,1), dpi=72)
+                canvas = FigureCanvas(self, wx.ID_ANY, figure)
+                self.pylab_interface = EmbeddedPylab(canvas)
+
+                # Instantiate the matplotlib navigation toolbar and explicitly show it.
+                mpl_toolbar = Toolbar(canvas)
+                mpl_toolbar.Realize()
+
+                # Create a vertical box sizer to manage the widgets in the main panel.
+                sizer = wx.BoxSizer(wx.VERTICAL)
+                sizer.Add(canvas, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, border=0)
+                sizer.Add(mpl_toolbar, 0, wx.EXPAND|wx.ALL, border=0)
+
+                # Associate the sizer with its container.
+                self.SetSizer(sizer)
+                sizer.Fit(self)
+
+            def plot(self, *args, **kw):
+                with self.pylab_interface as pylab:
+                    pylab.clf()
+                    pylab.plot(*args, **kw)
+
+    Similar patterns should work for the other backends.  Check the source code
+    in matplotlib.backend_bases.* for examples showing how to use matplotlib
+    with other GUI toolkits.
+    """
+    def __init__(self, canvas):
+        # delay loading pylab until matplotlib.use() is called
+        from matplotlib.backend_bases import FigureManagerBase
+        self.fm = FigureManagerBase(canvas, -1)
+    def __enter__(self):
+        # delay loading pylab until matplotlib.use() is called
+        import pylab
+        from matplotlib._pylab_helpers import Gcf
+        Gcf.set_active(self.fm)
+        return pylab
+    def __exit__(self, *args, **kw):
+        # delay loading pylab until matplotlib.use() is called
+        from matplotlib._pylab_helpers import Gcf
+        Gcf._activeQue = [f for f in Gcf._activeQue if f is not self.fm]
+        try:
+            del Gcf.figs[-1]
+        except KeyError:
+            pass
+
+class Validator(wx.PyValidator):
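+    """
+    Key-event filter for wx text controls.  With flag "no-alpha", alphabetic
+    keys are swallowed; with flag "no-digit", digit keys are swallowed; all
+    other keystrokes are passed through via evt.Skip().
+
+    Typical (illustrative) use: wx.TextCtrl(parent, validator=Validator("no-digit"))
+    """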
+    def __init__(self, flag):
+        wx.PyValidator.__init__(self)
+        self.flag = flag
+        self.Bind(wx.EVT_CHAR, self.OnChar)
+    def Clone(self):
+        return Validator(self.flag)
+    def Validate(self, win):
+        return True
+    def TransferToWindow(self):
+        return True
+    def TransferFromWindow(self):
+        return True
+    def OnChar(self, evt):
+        key = chr(evt.GetKeyCode())
+        if self.flag == "no-alpha" and key in string.letters:
+            return
+        if self.flag == "no-digit" and key in string.digits:
+            return
+        evt.Skip()
+
+def nice(v, digits=4):
+    """Fix v to a value with a given number of digits of precision"""
+    if v == 0.:
+        return v
+    else:
+        sign = v/abs(v)
+        place = floor(log10(abs(v)))
+        scale = 10**(place-(digits-1))
+        return sign*floor(abs(v)/scale+0.5)*scale
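+
+# Example (illustrative): nice() rounds to 4 significant digits by default,
+#     nice(3.141592)   -> 3.142
+#     nice(0.00123456) -> 0.001235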
diff --git a/bumps/gui/utilities.py b/bumps/gui/utilities.py
new file mode 100644
index 0000000..0802f53
--- /dev/null
+++ b/bumps/gui/utilities.py
@@ -0,0 +1,468 @@
+# Copyright (C) 2006-2011, University of Maryland
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+#
+# Author: James Krycka
+
+"""
+This module contains utility functions and classes for the application.
+"""
+
+#==============================================================================
+from __future__ import print_function
+
+import os
+import sys
+import time
+import glob
+
+import wx
+from wx.lib import delayedresult
+
+# Text string used to compare the string width in pixels for different fonts.
+# This benchmark string has 273 characters, containing 92 distinct characters
+# consisting of the lowercase alpha chars in the ratio used in an English
+# Scrabble(TM) set, two sets of uppercase alpha chars, two sets of digits,
+# special chars with multiples of commonly used ones, and many spaces to
+# approximate spacing between words in sentences and labels.
+BENCHMARK_TEXT =\
+"aaaaaaaaa bb cc dddd eeeeeeeeeeee ff ggg hh iiiiiiiii j k llll mm "\
+"nnnnnn oooooooo pp q rrrrrr ssss tttttt uuuu vv ww x yy z "\
+"ABCD EFGH IJKL MNOP QRST UVW XYZ ABCD EFGH IJKL MNOP QRST UVW XYZ "\
+"01234 56789 01234 56789 "\
+"...... :::: ()()() \"\",,'' ++-- **//== {}[]<> ;|~\\_ ?!@#$%^&"
+
+# The width and height in pixels of the test string using MS Windows default
+# font "MS Shell Dlg 2" and a dpi of 96.
+# Note: the MS Windows XP default font has the same width and height as Tahoma.
+BENCHMARK_WIDTH = 1600
+BENCHMARK_HEIGHT = 14
+
+#==============================================================================
+
+def choose_fontsize(fontname=None):
+    """
+    Determines the largest font size (in points) to use for a given font such
+    that the rendered width of the benchmark string is less than or equal to
+    101% of the rendered width of the string on a Windows XP computer using the
+    Windows default font at 96 dpi.
+
+    The width in pixels of a rendered string is affected by the choice of font,
+    the point size of the font, and the resolution of the installed font as
+    measured in dots-per-inch (aka points-per-inch).
+    """
+
+    frame = wx.Frame(parent=None, id=wx.ID_ANY, title="")
+    if fontname is None:
+        fontname = frame.GetFont().GetFaceName()
+    max_width = BENCHMARK_WIDTH + BENCHMARK_WIDTH/100
+
+    for fontsize in range(12, 5, -1):
+        frame.SetFont(wx.Font(fontsize, wx.SWISS, wx.NORMAL, wx.NORMAL, False,
+                              fontname))
+        benchmark = wx.StaticText(frame, wx.ID_ANY, label="")
+        w, h = benchmark.GetTextExtent(BENCHMARK_TEXT)
+        benchmark.Destroy()
+        if w <= max_width: break
+
+    frame.Destroy()
+    return fontsize
+
+
+def display_fontsize(fontname=None, benchmark_text=BENCHMARK_TEXT,
+                                    benchmark_width=BENCHMARK_WIDTH,
+                                    benchmark_height=BENCHMARK_HEIGHT):
+    """
+    Displays the width in pixels of a benchmark text string for a given font
+    at various point sizes when rendered on the application's output device
+    (which implicitly takes into account the resolution in dpi of the font
+    faces at the various point sizes).
+    """
+
+    # Create a temporary frame that we will soon destroy.
+    frame = wx.Frame(parent=None, id=wx.ID_ANY, title="")
+
+    # Set the fontname if one is given, otherwise use the system default font.
+    # Get the font name even if we just set it in case the specified font is
+    # not installed and the system chooses another one.
+    if fontname is not None:
+        frame.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL, False,
+                              fontname))
+    fontname = frame.GetFont().GetFaceName()
+
+    x, y = wx.ClientDC(frame).GetPPI()
+    print("*** Benchmark text width and height in pixels = %4d %2d"\
+          %(benchmark_width, benchmark_height))
+    print("*** Compare against %s font with dpi resolution of %d:"\
+          %(fontname, x))
+
+    for fontsize in range(12, 5, -1):
+        frame.SetFont(wx.Font(fontsize, wx.SWISS, wx.NORMAL, wx.NORMAL, False,
+                              fontname))
+        benchmark = wx.StaticText(frame, wx.ID_ANY, label="")
+        w, h = benchmark.GetTextExtent(benchmark_text)
+        benchmark.Destroy()
+        print("      For point size %2d, benchmark text w, h = %4d  %2d"\
+              %(fontsize, w, h))
+
+    frame.Destroy()
+
+def _finddata():
+    patterns = ['*.png','*.ico','*.jpg']
+    path = resource_dir()
+    files = []
+    for p in patterns:
+        files += glob.glob(os.path.join(path,p))
+    return files
+
+def data_files():
+    """
+    Return the data files associated with the package.
+
+    The format is a list of (directory, [files...]) pairs which can be
+    used directly in the py2exe setup script as::
+
+        setup(...,
+              data_files=data_files(),
+              ...)
+    """
+    data_files = [('bumps-data', _finddata())]
+    return data_files
+
+def package_data():
+    """
+    Return the data files associated with the package.
+
+    The format is a dictionary of {'fully.qualified.module': [files...]}
+    used directly in the setup script as::
+
+        setup(...,
+              package_data=package_data(),
+              ...)
+    """
+    return { 'bumps.gui': _finddata() }
+
+self_cached_path = None
+def resource_dir():
+    """
+    Return the path to the application data.
+
+    This is either in the environment variable BUMPS_DATA, in the
+    source tree in gui/resources, or beside the executable in
+    bumps-data.
+    """
+    # If we already found it, then we are done
+    global self_cached_path
+    if self_cached_path is not None: return self_cached_path
+
+    # Check for data path in the environment
+    key = 'BUMPS_DATA'
+    if key in os.environ:
+        path = os.environ[key]
+        if not os.path.isdir(path):
+            raise RuntimeError('Path in environment %s not a directory'%key)
+        self_cached_path = path
+        return self_cached_path
+
+    # Check for data path in the package
+    path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources'))
+    #print >>sys.stderr, "checking for resource in",path
+    if os.path.isdir(path):
+        self_cached_path = path
+        return self_cached_path
+
+    # Check in package root, which is where pyinstaller puts it
+    root = os.path.dirname(os.path.dirname(os.path.dirname(path)))
+    path = os.path.join(root, 'bumps-data')
+    if os.path.isdir(path):
+        self_cached_path = path
+        return self_cached_path
+
+    # Check for data path next to exe/zip file.
+    exepath = os.path.dirname(sys.executable)
+    path = os.path.join(exepath,'bumps-data')
+    #print >>sys.stderr, "checking for resource in",path
+    if os.path.isdir(path):
+        self_cached_path = path
+        return self_cached_path
+
+    # py2app puts the data in Contents/Resources, but the executable
+    # is in Contents/MacOS.
+    path = os.path.join(exepath,'..','Resources','bumps-data')
+    #print >>sys.stderr, "checking for resource in",path
+    if os.path.isdir(path):
+        self_cached_path = path
+        return self_cached_path
+
+    raise RuntimeError('Could not find the Bumps data files')
+
+def resource(filename):
+    return os.path.join(resource_dir(),filename)
+
+def get_bitmap(filename, type=wx.BITMAP_TYPE_PNG, scale_factor=16):
+    """
+    Returns the scaled bitmap from an image file (bmp, jpg, png) stored in
+    the data directory of the package.
+    """
+
+    path = resource(filename)
+
+    return wx.BitmapFromImage(wx.Image(name=path, type=type)
+                                      .Scale(scale_factor, scale_factor))
+
+
+def popup_error_message(caption, message):
+    """Displays an error message in a pop-up dialog box with an OK button."""
+
+    msg = wx.MessageDialog(None, message, caption, style=wx.ICON_ERROR|wx.OK)
+    msg.ShowModal()
+    msg.Destroy()
+
+
+def popup_information_message(caption, message):
+    """Displays an informational message in a pop-up with an OK button."""
+
+    msg = wx.MessageDialog(None, message, caption,
+                           style=wx.ICON_INFORMATION|wx.OK)
+    msg.ShowModal()
+    msg.Destroy()
+
+
+def popup_question(caption, message):
+    """Displays a question in a pop-up dialog box with YES and NO buttons."""
+
+    msg = wx.MessageDialog(None, message, caption,
+                           style=wx.ICON_QUESTION|wx.YES_NO)
+    msg.ShowModal()
+    msg.Destroy()
+
+
+def popup_warning_message(caption, message):
+    """Displays a warning message in a pop-up dialog box with an OK button."""
+
+    msg = wx.MessageDialog(None, message, caption, style=wx.ICON_WARNING|wx.OK)
+    msg.ShowModal()
+    msg.Destroy()
+
+#==============================================================================
+
+class StatusBarInfo():
+    """This class writes, saves, and restores multi-field status bar text."""
+
+    def __init__(self):
+        frame = wx.FindWindowByName("AppFrame", parent=None)
+        self.sb = frame.GetStatusBar()
+        self.cnt = self.sb.GetFieldsCount()
+        self.field = [""]*self.cnt
+
+
+    def write(self, index=0, text=""):
+        # Write text to the specified slot and save text locally.
+        # Beware that if you use field 0, wxPython will likely overwrite it.
+        if index > self.cnt - 1:
+            return
+        self.sb.SetStatusText(text, index)
+        self.field[index] = text
+
+
+    def restore(self):
+        # Restore saved text from fields 1 to n.
+        # Note that wxPython updates field 0 with hints and other messages.
+        for index in range(1, self.cnt):
+            self.sb.SetStatusText(self.field[index], index)
+
+#==============================================================================
+
+class ExecuteInThread():
+    """
+    This class executes the specified function in a separate thread and calls a
+    designated callback function when the execution completes.  Control is
+    immediately given back to the caller of ExecuteInThread, which continues
+    to execute in parallel in the main thread.
+
+    Note that wx.lib.delayedresult provides a simple interface to threading
+    that does not include a mechanism to stop the thread.
+    """
+
+    def __init__(self, callback, function, *args, **kwargs):
+        if callback is None: callback = self._callback
+        #print "*** ExecuteInThread init:", callback, function, args, kwargs
+        delayedresult.startWorker(consumer=callback, workerFn=function,
+                                  wargs=args, wkwargs=kwargs)
+
+    def _callback(self, delayedResult):
+        '''
+        jobID = delayedResult.getJobID()
+        assert jobID == self.jobID
+        try:
+            result = delayedResult.get()
+        except Exception as e:
+            popup_error_message(self, "job %s raised exception: %s" % (jobID, e))
+            return
+        '''
+        return
+
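+# Illustrative use (hypothetical names): run a long computation without
+# freezing the GUI, then consume the result in the callback.
+#
+#     def on_done(delayed_result):
+#         print("result:", delayed_result.get())
+#     ExecuteInThread(on_done, expensive_function, arg1, arg2)
+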
+#==============================================================================
+
+class WorkInProgress(wx.Panel):
+    """
+    This class implements a rotating 'work in progress' gauge.
+    """
+
+    def __init__(self, parent):
+        wx.Panel.__init__(self, parent, wx.ID_ANY)
+
+        self.gauge = wx.Gauge(self, wx.ID_ANY, range=50, size=(250, 25))
+
+        self.timer = wx.Timer(self)
+        self.Bind(wx.EVT_TIMER, self.TimerHandler)
+        #self.count = 0
+
+    def Start(self):
+        self.timer.Start(100)
+
+    def Stop(self):
+        self.timer.Stop()
+
+    def TimerHandler(self, event):
+        #self.count += 1
+        #print "*** count = ", self.count
+        self.gauge.Pulse()
+
+#==============================================================================
+
+log_time_handle = None  # global variable for holding TimeStamp instance handle
+
+def log_time(text=None, reset=False):
+    """
+    This is a convenience function for using the TimeStamp class from any
+    module in the application for logging elapsed and delta time information.
+    Each line shows the delta and elapsed times, optionally suffixed by a comment.
+    log_time maintains a single instance of TimeStamp during program execution.
+    Example output from calls to log_time('...'):
+
+    ==>    0.000s    0.000s  Starting <application name>
+    ==>    0.016s    0.016s  Starting to display the splash screen
+    ==>    0.015s    0.031s  Starting to build the GUI application
+    ==>    0.094s    0.125s  Entering the event loop
+    ==>    2.906s    3.031s  Terminating the splash screen and showing the GUI
+    """
+
+    global log_time_handle
+    if log_time_handle is None:
+        log_time_handle = TimeStamp()
+    if reset:
+        log_time_handle.reset()
+    log_time_handle.log_interval(text=text)
+
+
+class TimeStamp():
+    """
+    This class provides timestamp, delta time, and elapsed time services for
+    displaying wall clock time usage by the application.
+    """
+
+    def __init__(self):
+        self.reset()
+
+
+    def reset(self):
+        # Starts new timing interval.
+        self.t0 = self.t1 = time.time()
+
+
+    def gettime3(self):
+        # Gets current time in timestamp, delta time, and elapsed time format.
+        now = time.time()
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(now))
+        elapsed = now - self.t0
+        delta = now - self.t1
+        self.t1 = now
+        return timestamp, delta, elapsed
+
+
+    def gettime2(self):
+        # Gets current time in delta time and elapsed time format.
+        now = time.time()
+        elapsed = now - self.t0
+        delta = now - self.t1
+        self.t1 = now
+        return delta, elapsed
+
+
+    def log_time_info(self, text=""):
+        # Prints timestamp, delta time, elapsed time, and optional comment.
+        t, d, e = self.gettime3()
+        print("==> %s%9.3fs%9.3fs  %s" %(t, d, e, text))
+
+
+    def log_timestamp(self, text=""):
+        # Prints timestamp and optional comment.
+        t, d, e = self.gettime3()
+        print("==> %s  %s" %(t, text))
+
+
+    def log_interval(self, text=""):
+        # Prints elapsed time, delta time, and optional comment.
+        d, e = self.gettime2()
+        print("==>%9.3fs%9.3fs  %s" %(d, e, text))
+
+#==============================================================================
+
+if __name__ == '__main__':
+    # Test the display_fontsize and choose_fontsize functions.
+    app = wx.PySimpleApp()
+    print("For Arial font:")
+    display_fontsize(fontname="Arial")
+    print("    Calculated font size =", choose_fontsize(fontname="Arial"))
+    app.Destroy()
+
+    print("")
+    print("*** Data directory is:          ", resource_dir())
+
+    # Test the TimeStamp class and the convenience function.
+    print("")
+    log_time("Using log_time() function")
+    print("Sleeping for 0.54 seconds ...")
+    time.sleep(0.54)
+    log_time("Using log_time() function")
+    print("Sleeping for 0.83 seconds ...")
+    time.sleep(0.83)
+    log_time("Using log_time() function")
+    print("Creating an instance of TimeStamp (as the second timing class)")
+    ts = TimeStamp()
+    print("Sleeping for 0.66 seconds ...")
+    time.sleep(0.66)
+    ts.log_time_info(text="Using log_time_info() method")
+    ts.log_timestamp(text="Using log_timestamp() method")
+    ts.log_interval(text="Using log_interval() method")
+    print("Sleeping for 0.35 seconds ...")
+    time.sleep(0.35)
+    ts.log_interval(text="Using log_interval() method")
+    print("Sleeping for 0.42 seconds ...")
+    time.sleep(0.42)
+    ts.log_interval(text="Using log_interval() method")
+    print("Resetting the clock ...")
+    ts.reset()
+    ts.log_interval(text="Using log_interval() method")
+    print("Sleeping for 0.33 seconds ...")
+    time.sleep(0.33)
+    ts.log_interval(text="Using log_interval() method")
+    print("Switch back to the first timing class")
+    log_time("Using log_time() function")
diff --git a/bumps/history.py b/bumps/history.py
new file mode 100644
index 0000000..9101132
--- /dev/null
+++ b/bumps/history.py
@@ -0,0 +1,283 @@
+# This program is in the public domain
+# Author: Paul Kienzle
+"""
+Log of progress through a computation.
+
+Each cycle through a computation, a process can update its history,
+adding information about the number of function evaluations, the
+total time taken, the set of points evaluated and their values, the
+current best value and so on.  The process can use this history
+when computing the next set of points to evaluate and when checking
+if the termination conditions are met.  Any values that may be
+useful outside the computation, e.g., for logging or for updating
+the user, should be recorded.  In the ideal case, the history
+is all that is needed to restart the process in case of a system
+crash.
+
+History consists of a set of traces.  The content of the traces
+themselves is provided by the computation, but various stakeholders
+can use them.  For example, the user may wish to log the set of points
+that have been evaluated and their values using the system logger
+and an optimizer may require a certain amount of history to calculate
+the next set of values.
+
+New traces are provided using :meth:`History.provides`.  For example,
+the following adds traces for 'value' and 'point' to the history, and
+requires the best value on the two previous cycles in order to do its work:
+
+    >>> from bumps.history import History
+    >>> h = History()
+    >>> h.provides(value=2, point=0)
+
+Initially the history is empty:
+
+    >>> print(len(h.value))
+    0
+
+After three updates we see that only two values are kept:
+
+    >>> h.update(value=2,point=[1,1,1])
+    >>> h.update(value=1,point=[1,0.5,1])
+    >>> h.update(value=0.5,point=[1,0.5,0.9])
+    >>> print(h.value)
+    Trace value: 0.5, 1
+    >>> print(len(h.value))
+    2
+
+Note that point is not monitored since it is not required:
+
+    >>> print(h.point[0])
+    Traceback (most recent call last):
+        ...
+    IndexError: point has not accumulated enough history
+
+Traces may be used as accumulators.  In that case, the next
+value is added to the tail value before appending to the trace.
+For example:
+
+    >>> h = History()
+    >>> h.provides(step=1)
+    >>> h.accumulate(step=1)
+    >>> h.accumulate(step=1)
+    >>> print(h.step[0])
+    2
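+
+The full history can be captured and restored (a small illustrative example
+using the snapshot and restore methods defined below):
+
+    >>> state = h.snapshot()
+    >>> h2 = History(step=1)
+    >>> h2.restore(state)
+    >>> print(h2.step[0])
+    2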
+"""
+
+# Design questions:
+# 1. Can optimizer function evaluators add traces?  Can they use traces?
+# 2. Do we want to support a skip option on traces, so that only every nth
+#    item is preserved?  This is probably too hard.
+
+
+class History(object):
+
+    """
+    Collection of traces.
+
+    Provided traces can be specified as keyword arguments, name=length.
+    """
+
+    def __init__(self, **kw):
+        self.provides(**kw)
+
+    def provides(self, **kw):
+        """
+        Specify additional provided fields.
+
+        Raises AttributeError if trace is already provided or if the trace
+        name matches the name of one of the history methods.
+        """
+        for k, v in kw.items():
+            # Make sure the additional trait is not already provided.
+            # This test should also catch methods such as provides/requires
+            # and static properties such as bounds that are set from outside.
+            if hasattr(self, k):
+                raise AttributeError("history already provides " + k)
+            else:
+                mon = self._new_trace(keep=v, name=k)
+                setattr(self, k, mon)
+
+    def requires(self, **kw):
+        """
+        Specify required fields, and their history length.
+        """
+        for k, v in kw.items():
+            try:
+                mon = getattr(self, k)
+                mon.requires(v)
+            except AttributeError:
+                raise AttributeError("history does not provide " + k
+                                     + "\nuse one of " + self._trace_names())
+
+    def accumulate(self, **kw):
+        """
+        Extend the given traces with the provided values.  The traced
+        value will be the old value plus the new value.
+        """
+        for k, v in kw.items():
+            try:
+                getattr(self, k).accumulate(v)
+            except AttributeError:
+                raise AttributeError(k + " is not being traced")
+
+    def update(self, **kw):
+        """
+        Extend the given traces with the provided values.  The traced
+        values are independent.  Use accumulate if you want to add the
+        new value to the previous value in the trace.
+        """
+        for k, v in kw.items():
+            try:
+                getattr(self, k).put(v)
+            except AttributeError:
+                raise AttributeError(k + " is not being traced")
+
+    def clear(self):
+        """
+        Clear history, removing all traces
+        """
+        self.__dict__.clear()
+
+    def _new_trace(self, keep=None, name=None):
+        """
+        Create a new trace.  We use a factory method here so that
+        the History subclass can control the kind of trace created.
+        The returned trace must be a subclass of history.Trace.
+        """
+        return Trace(keep=keep, name=name)
+
+    def _traces(self):
+        return [trace
+                for trace in self.__dict__.values()
+                if isinstance(trace, Trace)]
+
+    def _trace_names(self):
+        traces = [trace.name for trace in self._traces()]
+        return ", ".join(l for l in sorted(traces))
+
+    def __str__(self):
+        traces = sorted(self._traces(), key=lambda trace: trace.name)
+        return "\n".join(str(l) for l in traces)
+
+    def snapshot(self):
+        """
+        Return a dictionary of traces { 'name':  [v[n], v[n-1], ..., v[0]] }
+        """
+        return dict((trace.name, trace.snapshot()) for trace in self._traces())
+
+    def restore(self, state):
+        """
+        Restore history to the state returned by a call to snapshot
+        """
+        for k, v in state.items():
+            try:
+                getattr(self, k).restore(v)
+            except KeyError:
+                pass
+
+
+class Trace(object):
+
+    """
+    Value trace.
+
+    This is a stack-like object with items inserted at the beginning, and
+    removed from the end once the maximum length *keep* is reached.
+
+    len(trace) returns the number of items in the trace
+    trace[i] returns the ith previous element in the history
+    trace.requires(n) says how much history to keep
+    trace.put(value) stores value
+    trace.accumulate(value) adds value to the previous value before storing
+    state = trace.snapshot() returns the values as a stack, most recent last
+    trace.restore(state) restores a snapshot
+
+    Note that snapshot/restore uses lists to represent numpy arrays, which
+    may cause problems if the trace is capturing lists.
+    """
+    # Implementation note:
+    # Traces are stored in reverse order because append is faster than insert.
+    # This detail is hidden from the caller since __getitem__ returns the
+    # appropriate value.
+    # TODO: convert to circular buffer unless keeping the full trace
+    # TODO: use numpy arrays for history
+
+    def __init__(self, keep=1, name="trace"):
+        self.keep = keep
+        self._storage = []
+        self.name = name
+
+    def requires(self, n):
+        """
+        Set the trace length to be at least n.
+        """
+        # Note: never shorten the trace since another algorithm, condition,
+        # or monitor may require the longer trace.
+        if n > self.keep:
+            self.keep = n
+
+    def accumulate(self, value):
+        if self.keep < 1:
+            return
+        try:
+            value = self._storage[-1] + value
+        except IndexError:
+            pass  # no previous value, so accumulate from 0 => value
+        self.put(value)
+
+    def put(self, value):
+        """
+        Add an item to the trace, shifting off from the beginning
+        when the trace is full.
+        """
+        if self.keep < 1:
+            return
+        if len(self._storage) == self.keep:
+            self._storage = self._storage[1:]
+        self._storage.append(value)
+
+    def __len__(self):
+        return len(self._storage)
+
+    def __getitem__(self, key):
+        if key < 0:
+            raise IndexError(self.name
+                             + " can only be accessed from the beginning")
+        try:
+            return self._storage[-key - 1]
+        except IndexError:
+            raise IndexError(self.name + " has not accumulated enough history")
+
+    def __setitem__(self, key, value):
+        raise TypeError("cannot write directly to a trace; use put instead")
+
+    def __str__(self):
+        return ("Trace " + self.name + ": "
+                + ", ".join([str(k) for k in reversed(self._storage)]))
+
+    def snapshot(self):
+        """
+        Capture state of the trace.
+
+        Numpy arrays are converted to lists so that the trace can be easily
+        converted to json.
+        """
+        import numpy as np
+        if isinstance(self._storage[0], np.ndarray):
+            return [v.tolist() for v in self._storage]
+        else:
+            return self._storage[:]
+
+    def restore(self, state):
+        """
+        Restore a trace from a captured snapshot.
+
+        Lists are converted to numpy arrays.
+        """
+        import numpy as np
+        if isinstance(state[0], list):
+            state = [np.asarray(v) for v in state]
+        if len(state) > self.keep:
+            self._storage = state[-self.keep:]
+        else:
+            self._storage = state[:]
diff --git a/bumps/initpop.py b/bumps/initpop.py
new file mode 100644
index 0000000..60087dd
--- /dev/null
+++ b/bumps/initpop.py
@@ -0,0 +1,310 @@
+"""
+Population initialization strategies.
+
+To start the analysis an initial population is required.  This will be
+an array of size N x M, where N is the number of individuals in the
+population and M is the number of dimensions in the fitting problem.
+
+Normally the initialization will use a call to :func:`generate` with
+key-value pairs from the command line options.  This will include the
+'init' option, with the name of the strategy used to initialize the
+population.
+
+Additional strategies, such as a uniform box in [0,1] or a standard normal
+distribution (rand(m,n) and randn(m,n) respectively), may also be useful.
+"""
+
+# Note: borrowed from DREAM and extended.
+
+from __future__ import division, print_function
+
+__all__ = ['generate', 'cov_init', 'eps_init', 'lhs_init', 'random_init']
+
+import math
+import numpy as np
+from numpy import diag, empty, isinf, isfinite, clip, inf
+
+try:
+    from typing import Optional
+except ImportError:
+    pass
+
+
+def generate(problem, init='eps', pop=10, use_point=True, **options):
+    # type: (Any, str, int, bool, ...) -> np.ndarray
+    """
+    Population initializer.
+
+    *problem* is a fit problem with *getp* and *bounds* methods.
+
+    *init* is 'eps', 'cov', 'lhs' or 'random', indicating which
+    initializer should be used.
+
+    *pop* is the population scale factor, generating *pop* individuals
+    for each parameter in the fit.
+
+    *use_point* is True if the initial value should be a member of the
+    population.
+
+    Additional options are ignored so that generate can be called using
+    all command line options.
+    """
+    initial = problem.getp()
+    undefined = np.isnan(initial)
+    initial[~isfinite(initial)] = 1.
+    pop_size = int(math.ceil(pop * len(initial)))
+    bounds = problem.bounds()
+    if init == 'random':
+        population = random_init(
+            pop_size, initial, bounds, use_point=use_point, problem=problem)
+    elif init == 'cov':
+        cov = problem.cov()
+        population = cov_init(
+            pop_size, initial, bounds, use_point=use_point, cov=cov)
+    elif init == 'lhs':
+        population = lhs_init(
+            pop_size, initial, bounds, use_point=use_point)
+    elif init == 'eps':
+        population = eps_init(
+            pop_size, initial, bounds, use_point=use_point, eps=1e-6)
+    else:
+        raise ValueError(
+            "Unknown population initializer '%s'" % init)
+
+    # Use LHS to initialize any undefined parameters
+    if undefined.any():
+        population[:, undefined] = lhs_init(
+            pop_size, initial[undefined], bounds[:, undefined],
+            use_point=False)
+
+    return population
+
+
+def lhs_init(n, initial, bounds, use_point=False):
+    # type: (int, np.ndarray, np.ndarray, bool) -> np.ndarray
+    """
+    Latin hypercube sampling.
+
+    Returns an array whose columns and rows each have *n* samples from
+    equally spaced bins between *bounds=(xmin, xmax)* for the column.
+    Unlike random, this method guarantees a certain amount of coverage
+    of the parameter space.  Consider, though that the diagonal matrix
+    satisfies the LHS condition, and you can see that the guarantees are
+    not very strong.  A better methods, similar to sudoku puzzles, would
+    guarantee coverage in each block of the matrix, but this is not
+    yet implmeneted.
+
+    If *use_point* is True, then the current value of the parameters
+    is returned as the first point in the population, preserving the the
+    LHS property.
+    """
+    xmin, xmax = bounds
+
+    # Define the size of xmin
+    nvar = len(xmin)
+
+    # Initialize array ran with random numbers
+    ran = np.random.rand(n, nvar)
+
+    # Initialize array s with zeros
+    s = empty((n, nvar))
+
+    # Now fill s
+    for j in range(nvar):
+        # Indefinite and semidefinite ranges need to be constrained.  Use
+        # the initial value of the parameter as a hint.
+        low, high = xmin[j], xmax[j]
+        if np.isinf(low) and np.isinf(high):
+            if initial[j] < 0.:
+                low, high = 2.0*initial[j], 0.0
+            elif initial[j] > 0.:
+                low, high = 0.0, 2.0*initial[j]
+            else:
+                low, high = -1.0, 1.0
+        elif np.isinf(low):
+            if initial[j] != high:
+                low, high = high - 2.0*abs(high-initial[j]), high
+            else:
+                low, high = high - 2.0, high
+        elif np.isinf(high):
+            if initial[j] != low:
+                low, high = low, low + 2.0*abs(initial[j] - low)
+            else:
+                low, high = low, low + 2.0
+        else:
+            pass   # low, high = low, high
+
+        if use_point:
+            # Put current value at position 0 in population
+            s[0, j] = clip(initial[j], low, high)
+            # Find which bin the current value belongs in
+            xidx = int(n * (s[0, j] - low) / (high - low))
+            # Generate random permutation of remaining bins
+            perm = np.random.permutation(n - 1)
+            perm[perm >= xidx] += 1  # exclude current value bin
+            idx = slice(1, None)
+        else:
+            # Random permutation of bins
+            perm = np.random.permutation(n)
+            idx = slice(0, None)
+
+
+        # Assign random value within each bin
+        p = (perm + ran[idx, j]) / n
+        s[idx, j] = low + p * (high - low)
+
+    return s
+
+
+def cov_init(n, initial, bounds, use_point=False, cov=None, dx=None):
+    # type: (int, np.ndarray, np.ndarray, bool, Optional[np.ndarray], Optional[np.ndarray]) -> np.ndarray
+    """
+    Initialize *n* sets of random variables from a gaussian model.
+
+    The center is at *initial* with an uncertainty ellipse specified by the
+    1-sigma independent uncertainty values *dx* or the full covariance
+    matrix uncertainty *cov*.
+
+    For example, create an initial population of 20 individuals about a
+    local minimum x with covariance matrix C::
+
+        pop = cov_init(n=20, initial=x, bounds=bounds, cov=C)
+
+    If *use_point* is True, then the current value of the parameters
+    is returned as the first point in the population.
+    """
+    # return mean + dot(RNG.randn(n,len(mean)), chol(cov))
+    if cov is None:
+        if dx is None:
+            dx = _get_scale_factor(0.2, bounds, initial)
+            #print("= dx",dx)
+        cov = diag(dx**2)
+    xmin, xmax = bounds
+    initial = clip(initial, xmin, xmax)
+    population = np.random.multivariate_normal(mean=initial, cov=cov, size=n)
+    population = reflect(population, xmin, xmax)
+    if use_point:
+        population[0] = initial
+    return population
+
+
+def random_init(n, initial, bounds, use_point=False, problem=None):
+    """
+    Generate a random population from the problem parameters.
+
+    Values are selected at random from the bounds of the problem according
+    to the underlying probability density of each parameter.  Uniform
+    semi-definite and indefinite bounds use the standard normal distribution
+    for the underlying probability, with a scale factor determined by the
+    initial value of the parameter.
+
+    If *use_point* is True, then the current value of the parameters
+    is returned as the first point in the population.
+    """
+    population = problem.randomize(n)
+    if use_point:
+        population[0] = clip(initial, *bounds)
+    return population
+
+
+def eps_init(n, initial, bounds, use_point=False, eps=1e-6):
+    # type: (int, np.ndarray, np.ndarray, bool, float) -> np.ndarray
+    """
+    Generate a random population using an epsilon ball around the current
+    value.
+
+    Since the initial population is contained in a small volume, this
+    method is useful for exploring a local minimum around a point.  Over
+    time the ball will expand to fill the minimum, and perhaps tunnel
+    through barriers to nearby minima given enough burn-in time.
+
+    eps is in proportion to the bounds on the parameter, or the current
+    value of the parameter if the parameter is unbounded.  This gives the
+    initialization a bit of scale independence.
+
+    If *use_point* is True, then the current value of the parameters
+    is returned as the first point in the population.
+    """
+    # Set the scale from the bounds, or from the initial value if the value
+    # is unbounded.
+    xmin, xmax = bounds
+    scale = _get_scale_factor(eps, bounds, initial)
+    #print("= scale", scale)
+    initial = clip(initial, xmin, xmax)
+    population = initial + scale * (2 * np.random.rand(n, len(xmin)) - 1)
+    population = reflect(population, xmin, xmax)
+    if use_point:
+        population[0] = initial
+    return population
+
+def reflect(v, low, high):
+    """
+    Reflect v off the boundary, then clip to be sure it is within bounds
+    """
+    index = v < low
+    v[index] = (2*low - v)[index]
+    index = v > high
+    v[index] = (2*high - v)[index]
+    return clip(v, low, high)
+
+def _get_scale_factor(scale, bounds, initial):
+    # type: (float, np.ndarray, np.ndarray) -> np.ndarray
+    xmin, xmax = bounds
+    dx = (xmax - xmin) * scale  # type: np.ndarray
+    dx[isinf(dx)] = abs(initial[isinf(dx)]) * scale
+    dx[~isfinite(dx)] = scale
+    dx[dx==0] = scale
+    #print("min,max,dx",xmin,xmax,dx)
+    return dx
+
+def demo_init(seed=1):
+    # type: (Optional[int]) -> None
+    from . import util
+    from .bounds import init_bounds
+    class Problem(object):
+        def __init__(self, initial, bounds):
+            self.initial = initial
+            self._bounds = bounds
+        def getp(self):
+            return self.initial
+        def bounds(self):
+            return self._bounds
+        def cov(self):
+            return None
+        def randomize(self, n=1):
+            target = self.initial.copy()
+            target[~isfinite(target)] = 1.
+            result = [init_bounds(pair).random(n,v)
+                      for v, pair in zip(target, self._bounds.T)]
+            return np.array(result).T
+
+    bounds = np.array([(2., inf),
+                       (-inf, -2.),
+                       (-inf, inf),
+                       (5.0, 6.0),
+                       (-2.0, 3.0)]).T
+    # generate takes care of bad values
+    #low = np.array([-inf]*5)
+    #high = np.array([inf]*5)
+    #bad = np.array([np.nan]*5)
+    zero = np.array([0.]*5)
+    below = np.array([-2., -4., -2., -3., -4.])
+    above = np.array([3., 4., 2., 8., 5.])
+    small = np.array([2.000001, -2.000001, 0.000001, 5.000001, -0.000001])
+    large = np.array([2000001., -2000001., 2000001., 5.5, -2.000001])
+    middle = np.array([100., -100., 100., 5.5, 0.5])
+    starting_points = 'zero below above small large middle'.split()
+    np.set_printoptions(linewidth=100000)
+    with util.push_seed(seed):
+        for init_type in ('cov', 'random', 'eps', 'lhs'):
+            print("bounds:")
+            print(bounds)
+            for name in starting_points:
+                initial = locals()[name]
+                M = Problem(initial, bounds)
+                pop = generate(problem=M, init=init_type, pop=1)
+                print("%s init from %s"%(init_type, name), str(initial))
+                print(pop)
+
+if __name__ == "__main__":
+    demo_init(seed=None)
diff --git a/bumps/lsqerror.py b/bumps/lsqerror.py
new file mode 100644
index 0000000..863cb9e
--- /dev/null
+++ b/bumps/lsqerror.py
@@ -0,0 +1,326 @@
+r"""
+Least squares error analysis.
+
+Given a data set with gaussian uncertainty on the points, and a model which
+is differentiable at the minimum, the parameter uncertainty can be estimated
+from the covariance matrix at the minimum.  The model and data are wrapped in
+a problem object, which must define the following methods:
+
+    ============ ============================================
+    getp()       get the current value of the model
+    setp(p)      set a new value in the model
+    nllf(p)      negative log likelihood function
+    residuals(p) residuals around its current value
+    bounds()     get the bounds on the parameter p [optional]
+    ============ ============================================
+
+:func:`jacobian` computes the Jacobian matrix $J$ using numerical
+differentiation on residuals.  Derivatives are computed using a forward
+difference formula, with one extra function evaluation per dimension.
+If analytic derivatives with respect to the fitting parameters are
+available, then these should be used to compute the Jacobian instead.
+
+:func:`hessian` computes the Hessian matrix $H$ using numerical
+differentiation on nllf.  This also uses a forward difference formula,
+with one extra evaluation for each (i,j) combination beyond the
+gradient evaluations.
+
+:func:`cov` takes the Jacobian and computes the covariance matrix $C$.
+
+:func:`corr` uses the off-diagonal elements of $C$ to compute correlation
+coefficients $R^2_{ij}$ between the parameters.
+
+:func:`stderr` computes the uncertainty $\sigma_i$ from covariance matrix $C$,
+assuming that the $C_\text{diag}$ contains $\sigma_i^2$, which should be
+the case for functions which are approximately linear near the minimum.
+
+:func:`max_correlation` takes $R^2$ and returns the maximum correlation.
+
+The user should be shown the uncertainty $\sigma_i$ for each parameter,
+and if there are strong parameter correlations (e.g., $R^2_\text{max} > 0.2$),
+the correlation matrix as well.
+
+The bounds method for the problem is optional, and is used only to determine
+the step size needed for the numerical derivative.  If bounds are not present
+and finite, the current value for the parameter is used as a basis to
+estimate step size.
+
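+A minimal sketch of the intended workflow (here *problem* stands for any
+object providing the methods listed above)::
+
+    J = jacobian(problem)       # Jacobian of the residuals at the minimum
+    C = cov(J)                  # covariance matrix inv(J'J)
+    dp = stderr(C)              # 1-sigma uncertainty for each parameter
+    R = corr(C)                 # correlation matrix
+    print("max correlation", max_correlation(R))
+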
+"""
+from __future__ import print_function
+
+import numpy as np
+#from . import numdifftools as nd
+#import numdifftools as nd
+
+# TODO: restructure lsqerror to use mapper for evaluating multiple f
+# doesn't work for jacobian since mapper returns nllf; would need to
+# expand mapper to implement a variety of different functions.
+def jacobian(problem, p=None, step=None):
+    """
+    Returns the Jacobian of the residuals with respect to the fit
+    parameters at point p.
+
+    Numeric derivatives are calculated with a forward difference, using a
+    small relative step scaled by the magnitude of each parameter value;
+    the optional *step* argument adjusts this scale.
+
+    The current point is preserved.
+    """
+    p_init = problem.getp()
+    if p is None:
+        p = p_init
+    p = np.asarray(p)
+    bounds = getattr(problem, 'bounds', lambda: None)()
+    def f(p):
+        problem.setp(p)
+        return problem.residuals()
+    J = _jacobian_forward(f, p, bounds, eps=step)
+    #J = nd.Jacobian(problem.residuals)(p)
+    problem.setp(p_init)
+    return J
+
+def _jacobian_forward(f, p, bounds, eps=None):
+    n = len(p)
+    # TODO: default to double precision epsilon
+    step = 1e-4 if eps is None else np.sqrt(eps)
+    fx = f(p)
+
+    #print("p",p,"step",step)
+    h = abs(p)*step
+    h[h==0] = step
+    if bounds is not None:
+        h[h+p>bounds[1]] *= -1.0  # step backward if forward step is out of bounds
+    ee = np.diag(h)
+
+    J = []
+    for i in range(n):
+        J.append((f(p + ee[i, :]) - fx)/h[i])
+    return np.vstack(J).T
+
+def _jacobian_central(f, p, bounds, eps=None):
+    n = len(p)
+    # TODO: default to double precision epsilon
+    step = 1e-4 if eps is None else np.sqrt(eps)
+
+    #print("p",p,"step",step)
+    h = abs(p)*step
+    h[h==0] = step
+    #if bounds is not None:
+    #    h[h+p>bounds[1]] *= -1.0  # step backward if forward step is out of bounds
+    ee = np.diag(h)
+
+    J = []
+    for i in range(n):
+        J.append((f(p + ee[i, :]) - f(p - ee[i,:]))/(2.0*h[i]))
+    return np.vstack(J).T
+
+
+def hessian(problem, p=None, step=None):
+    """
+    Returns the Hessian (matrix of second derivatives of nllf) with
+    respect to the fit parameters at point p.
+
+    The current point is preserved.
+    """
+    p_init = problem.getp()
+    if p is None:
+        p = p_init
+    p = np.asarray(p)
+    bounds = getattr(problem, 'bounds', lambda: None)()
+    H = _hessian_forward(problem.nllf, p, bounds=bounds, eps=step)
+    #H = nd.Hessian(problem.nllf)(p)
+    #print("Hessian",H)
+    problem.setp(p_init)
+    return H
+
+def _hessian_forward(f, p, bounds, eps=None):
+    # type: (Callable[[np.ndarray], float], np.ndarray, Optional[np.ndarray]) -> np.ndarray
+    """
+    Forward difference Hessian.
+    """
+    n = len(p)
+    # TODO: default to double precision epsilon
+    step = 1e-4 if eps is None else np.sqrt(eps)
+    fx = f(p)
+
+    #print("p",p,"step",step)
+    h = abs(p)*step
+    h[h==0] = step
+    if bounds is not None:
+        h[h+p>bounds[1]] *= -1.0  # step backward if forward step is out of bounds
+    ee = np.diag(h)
+
+    g = np.empty(n, 'd')
+    for i in range(n):
+        g[i] = f(p + ee[i, :])
+    #print("fx",fx)
+    #print("h",h, h[0])
+    #print("g",g)
+    H = np.empty((n, n), 'd')
+    for i in range(n):
+        for j in range(i, n):
+            fx_ij = f(p + ee[i,:] + ee[j,:])
+            #print("fx_%d%d=%g"%(i,j,fx_ij))
+            H[i,j] = (fx_ij - g[i] - g[j] + fx)/(h[i]*h[j])
+            H[j,i] = H[i,j]
+    return H
+
+def _hessian_central(f, p, bounds, eps=None):
+    # type: (Callable[[np.ndarray], float], np.ndarray, Optional[np.ndarray]) -> np.ndarray
+    """
+    Central difference Hessian.
+    """
+    n = len(p)
+    # TODO: default to double precision epsilon
+    step = 1e-4 if eps is None else np.sqrt(eps)
+    #step = np.sqrt(step)
+    fx = f(p)
+
+    h = abs(p)*step
+    h[h==0] = step
+    # TODO: handle bounds on central difference formula
+    #if bounds is not None:
+    #    h[h+p>bounds[1]] *= -1.0  # step backward if forward step is out of bounds
+    ee = np.diag(h)
+
+    gp = np.empty(n, 'd')
+    gm = np.empty(n, 'd')
+    for i in range(n):
+        gp[i] = f(p + ee[i, :])
+        gm[i] = f(p - ee[i, :])
+    H = np.empty((n, n), 'd')
+    for i in range(n):
+        for j in range(i, n):
+            fp_ij = f(p + ee[i,:] + ee[j,:])
+            fm_ij = f(p - ee[i,:] - ee[j,:])
+            #print("fx_%d%d=%g"%(i,j,fx_ij))
+            H[i,j] = (fp_ij - gp[i] - gp[j] + fm_ij - gm[i] - gm[j] + 2.0*fx)/(2.0*h[i]*h[j])
+            H[j,i] = H[i,j]
+    return H
+
+
+def perturbed_hessian(H, scale=None):
+    """
+    Adjust Hessian matrix to be positive definite.
+
+    Returns the adjusted Hessian and its Cholesky decomposition.
+    """
+    from .quasinewton import modelhess
+    n = H.shape[0]
+    if scale is None:
+        scale = np.ones(n)
+    macheps = np.finfo('d').eps
+    return modelhess(n, scale, macheps, H)
+
+
+def chol_stderr(L):
+    """
+    Return parameter uncertainty from the Cholesky decomposition of the
+    Hessian matrix, as returned, e.g., from the quasi-Newton optimizer BFGS
+    or as calculated from :func:`perturbed_hessian` on the output of
+    :func:`hessian` applied to the cost function problem.nllf.
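+
+    A minimal sketch of that workflow, assuming *problem* is any object
+    with the interface described in the module docstring::
+
+        H = hessian(problem)            # numeric Hessian of problem.nllf
+        Hp, L = perturbed_hessian(H)    # force positive definiteness
+        dp = chol_stderr(L)             # 1-sigma parameter uncertainties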
+    """
+    return np.sqrt(1. / np.diag(L))
+
+
+def chol_cov(L):
+    """
+    Given the cholesky decomposition of the Hessian matrix H, compute
+    the covariance matrix $C = H^{-1}$
+    """
+    Linv = np.linalg.inv(L)
+    return np.dot(Linv.T.conj(), Linv)
+
+
+def cov(J, tol=1e-8):
+    """
+    Given Jacobian J, return the covariance matrix inv(J'J).
+
+    We provide some protection against singular matrices by setting
+    singular values smaller than tolerance *tol* to the tolerance
+    value.
+    """
+
+    # Find cov of f at p
+    #     cov(f,p) = inv(J'J)
+    # Use SVD
+    #     J = U S V'
+    #     J'J = (U S V')' (U S V')
+    #         = V S' U' U S V'
+    #         = V S S V'
+    #     inv(J'J) = inv(V S S V')
+    #              = inv(V') inv(S S) inv(V)
+    #              = V inv (S S) V'
+    u, s, vh = np.linalg.svd(J, 0)
+    s[s <= tol] = tol
+    JTJinv = np.dot(vh.T.conj() / s ** 2, vh)
+    return JTJinv
+
+
+def corr(C):
+    """
+    Convert covariance matrix $C$ to correlation matrix $R^2$.
+
+    Uses $R = D^{-1} C D^{-1}$ where $D$ is the square root of the diagonal
+    of the covariance matrix, or the standard error of each variable.
+    """
+    Dinv = np.diag(1. / stderr(C))
+    return np.dot(Dinv, np.dot(C, Dinv))
+
+
+def max_correlation(Rsq):
+    """
+    Return the maximum correlation coefficient for any pair of variables
+    in correlation matrix Rsq.
+    """
+    return np.max(np.tril(Rsq, k=-1))
+
+
+def stderr(C):
+    r"""
+    Return parameter uncertainty from the covariance matrix C.
+
+    This is just the square root of the diagonal, without any correction
+    for covariance.
+
+    If measurement uncertainty is unknown, scale the returned uncertainties
+    by $\sqrt{\chi^2_N}$, where $\chi^2_N$ is the sum squared residuals
+    divided by the degrees of freedom.  This will match the uncertainty on
+    the parameters to the observed scatter assuming the model is correct and
+    the fit is optimal.  This will also be appropriate for weighted fits
+    when the true measurement uncertainty dy_i is known up to a scaling
+    constant for all y_i.
+
+    Standard error on scipy.optimize.curve_fit always includes the chisq
+    correction, whereas scipy.optimize.leastsq never does.
+    """
+    return np.sqrt(np.diag(C))
+
+
+def demo_hessian():
+    rosen = lambda x: (1.-x[0])**2 + 105*(x[1]-x[0]**2)**2
+    p = np.array([1.,1.])
+    H = _hessian_forward(rosen, p, bounds=None, eps=1e-16)
+    print("forward difference H", H)
+    H = _hessian_central(rosen, p, bounds=None, eps=1e-16)
+    print("central difference H", H)
+
+    #from . import numdifftools as nd
+    #import numdifftools as nd
+    #Hfun = nd.Hessian(rosen)
+    #print("numdifftools H", Hfun(p))
+
+def demo_jacobian():
+    y = np.array([1., 2., 3.])
+    f = lambda x: x[0]*y + x[1]
+    p = np.array([2., 3.])
+    J = _jacobian_forward(f, p, bounds=None, eps=1e-16)
+    print("forward difference J", J)
+    J = _jacobian_central(f, p, bounds=None, eps=1e-16)
+    print("central difference J", J)
+
+    #from . import numdifftools as nd
+    #import numdifftools as nd
+    #Jfun = nd.Jacobian(f)
+    #print("numdifftools J", Jfun(p))
+
+if __name__ == "__main__":
+    demo_hessian()
+    demo_jacobian()
diff --git a/bumps/mapper.py b/bumps/mapper.py
new file mode 100644
index 0000000..01c405e
--- /dev/null
+++ b/bumps/mapper.py
@@ -0,0 +1,239 @@
+"""
+Parallel and serial mapper implementations.
+"""
+import sys
+import os
+
+# {{{ http://code.activestate.com/recipes/496767/ (r1)
+# Converted to use ctypes by Paul Kienzle
+
+
+PROCESS_ALL_ACCESS = 0x1F0FFF
+
+
+def setpriority(pid=None, priority=1):
+    """
+    Set The Priority of a Windows Process.  Priority is a value between 0-5
+    where 2 is normal priority and 5 is maximum.  Default sets the priority
+    of the current python process but can take any valid process ID.
+    """
+
+    #import win32api,win32process,win32con
+    from ctypes import windll
+
+    priorityclasses = [0x40,   # IDLE_PRIORITY_CLASS,
+                       0x4000,  # BELOW_NORMAL_PRIORITY_CLASS,
+                       0x20,   # NORMAL_PRIORITY_CLASS,
+                       0x8000,  # ABOVE_NORMAL_PRIORITY_CLASS,
+                       0x80,   # HIGH_PRIORITY_CLASS,
+                       0x100,  # REALTIME_PRIORITY_CLASS
+                       ]
+    if pid is None:
+        pid = windll.kernel32.GetCurrentProcessId()
+    handle = windll.kernel32.OpenProcess(PROCESS_ALL_ACCESS, True, pid)
+    windll.kernel32.SetPriorityClass(handle, priorityclasses[priority])
+# end of http://code.activestate.com/recipes/496767/ }}}
+
+
+def nice():
+    if os.name == 'nt':
+        setpriority(priority=1)
+    else:
+        os.nice(5)
+
+
+class SerialMapper(object):
+
+    @staticmethod
+    def start_worker(problem):
+        pass
+
+    @staticmethod
+    def start_mapper(problem, modelargs):
+        # Note: map is an iterator in python 3.x
+        return lambda points: list(map(problem.nllf, points))
+
+    @staticmethod
+    def stop_mapper(mapper):
+        pass
+
+
+# Load the problem in the remote process rather than pickling
+#def _MP_load_problem(*modelargs):
+#    from .fitproblem import load_problem
+#    _MP_set_problem(load_problem(*modelargs))
+
+
+def _MP_set_problem(problem):
+    global _problem
+    nice()
+    _problem = problem
+
+
+def _MP_run_problem(point):
+    global _problem
+    return _problem.nllf(point)
+
+
+class MPMapper(object):
+    pool = None
+
+    @staticmethod
+    def start_worker(problem):
+        pass
+
+    @staticmethod
+    def start_mapper(problem, modelargs, cpus=None):
+        import multiprocessing
+        if cpus is None:
+            cpus = multiprocessing.cpu_count()
+        if MPMapper.pool is not None:
+            MPMapper.pool.terminate()
+        #MPMapper.pool = multiprocessing.Pool(cpus,_MP_load_problem,modelargs)
+        MPMapper.pool = multiprocessing.Pool(cpus, _MP_set_problem, (problem,))
+        mapper = lambda points: MPMapper.pool.map(_MP_run_problem, points)
+        return mapper
+
+    @staticmethod
+    def stop_mapper(mapper):
+        pass
+
+
+def _MPI_set_problem(comm, problem, root=0):
+    global _problem
+    _problem = comm.bcast(problem)
+
+
+def _MPI_run_problem(point):
+    global _problem
+    return _problem.nllf(point)
+
+
+def _MPI_map(comm, points, root=0):
+    import numpy as np
+    from mpi4py import MPI
+    # Send number of points and number of variables per point
+    npoints, nvars = comm.bcast(
+        points.shape if comm.rank == root else None, root=root)
+    if npoints == 0:
+        raise StopIteration
+
+    # Divvy points equally across all processes
+    whole = points if comm.rank == root else None
+    idx = np.arange(comm.size)
+    size = np.ones(comm.size, idx.dtype) * \
+        (npoints // comm.size) + (idx < npoints % comm.size)
+    offset = np.cumsum(np.hstack((0, size[:-1])))
+    part = np.empty((size[comm.rank], nvars), dtype='d')
+    comm.Scatterv((whole, (size * nvars, offset * nvars), MPI.DOUBLE),
+                  (part, MPI.DOUBLE),
+                  root=root)
+
+    # Evaluate models assigned to each processor
+    partial_result = np.array([_MPI_run_problem(pi) for pi in part],
+                                 dtype='d')
+
+    # Collect results
+    result = np.empty(npoints, dtype='d') if comm.rank == root else None
+    comm.Barrier()
+    comm.Gatherv((partial_result, MPI.DOUBLE),
+                 (result, (size, offset), MPI.DOUBLE),
+                 root=root)
+    comm.Barrier()
+    return result
+
+
+class MPIMapper(object):
+
+    @staticmethod
+    def start_worker(problem):
+        global _problem
+        _problem = problem
+        from mpi4py import MPI
+        root = 0
+        # If master, then return to main program
+        if MPI.COMM_WORLD.rank == root:
+            return
+        # If slave, then set problem and wait in map loop
+        #_MPI_set_problem(MPI.COMM_WORLD, None, root=root)
+        try:
+            while True:
+                _MPI_map(MPI.COMM_WORLD, None, root=root)
+        except StopIteration:
+            pass
+        MPI.Finalize()
+        sys.exit(0)
+
+    @staticmethod
+    def start_mapper(problem, modelargs):
+        # Slave started from start_worker, so it never gets here
+        # Slave expects _MPI_set_problem followed by a series
+        # of map requests
+        from mpi4py import MPI
+        #_MPI_set_problem(MPI.COMM_WORLD, problem)
+        return lambda points: _MPI_map(MPI.COMM_WORLD, points)
+
+    @staticmethod
+    def stop_mapper(mapper):
+        from mpi4py import MPI
+        import numpy as np
+        # Send an empty point list to stop the iteration
+        try:
+            mapper(np.empty((0, 0), 'd'))
+            raise RuntimeError("expected StopIteration")
+        except StopIteration:
+            pass
+        MPI.Finalize()
+
+
+class AMQPMapper(object):
+
+    @staticmethod
+    def start_worker(problem):
+        #sys.stderr = open("bumps-%d.log"%os.getpid(),"w")
+        #print >>sys.stderr,"worker is starting"; sys.stdout.flush()
+        from amqp_map.config import SERVICE_HOST
+        from amqp_map.core import connect, start_worker as serve
+        server = connect(SERVICE_HOST)
+        #os.system("echo 'serving' > /tmp/map.%d"%(os.getpid()))
+        # print "worker is serving"; sys.stdout.flush()
+        serve(server, "bumps", problem.nllf)
+        #print >>sys.stderr,"worker ended"; sys.stdout.flush()
+
+    @staticmethod
+    def start_mapper(problem, modelargs):
+        import sys
+        import multiprocessing
+        import subprocess
+        from amqp_map.config import SERVICE_HOST
+        from amqp_map.core import connect, Mapper
+
+        server = connect(SERVICE_HOST)
+        mapper = Mapper(server, "bumps")
+        cpus = multiprocessing.cpu_count()
+        pipes = []
+        for _ in range(cpus):
+            cmd = [sys.argv[0], "--worker"] + modelargs
+            # print "starting",sys.argv[0],"in",os.getcwd(),"with",cmd
+            pipe = subprocess.Popen(cmd, universal_newlines=True,
+                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            pipes.append(pipe)
+        for pipe in pipes:
+            if pipe.poll() > 0:
+                raise RuntimeError("subprocess returned %d\nout: %s\nerr: %s"
+                    % (pipe.returncode, pipe.stdout, pipe.stderr))
+        #os.system(" ".join(cmd+["&"]))
+        import atexit
+
+        def exit_fun():
+            for p in pipes:
+                p.terminate()
+        atexit.register(exit_fun)
+
+        # print "returning mapper",mapper
+        return mapper
+
+    @staticmethod
+    def stop_mapper(mapper):
+        for pipe in mapper.pipes:
+            pipe.terminate()
diff --git a/bumps/monitor.py b/bumps/monitor.py
new file mode 100644
index 0000000..a05124d
--- /dev/null
+++ b/bumps/monitor.py
@@ -0,0 +1,123 @@
+# This program is in the public domain
+"""
+Progress monitors.
+
+Process monitors accept a :class:`bumps.history.History` object each cycle
+and perform some sort of work.
+"""
+from __future__ import print_function
+
+__all__ = ['Monitor', 'Logger', 'TimedUpdate']
+
+from numpy import inf
+
+
+class Monitor(object):
+    """
+    Base class for monitors.
+    """
+    def config_history(self, history):
+        """
+        Indicate which fields are needed by the monitor and for what duration.
+        """
+        pass
+
+    def __call__(self, history):
+        """
+        Give the monitor a new piece of history to work with.
+        """
+        pass
+
+
+def _getfield(history, field):
+    """
+    Return the last value in the trace, or None if there is no
+    last value or no trace.
+    """
+    trace = getattr(history, field, [])
+    try:
+        return trace[0]
+    except IndexError:
+        return None
+
+
+class Logger(Monitor):
+    """
+    Keeps a record of all values for the desired fields.
+
+    *fields* is a list of history fields to store.
+
+    *table* is an object with a *store(field=value,...)* method, which gets
+    the current value of each field every time the history is updated.
+
+    Call :meth:`config_history` with the :class:`bumps.history.History`
+    object before starting so that the correct fields are stored.
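+
+    A minimal sketch of a compatible *table* object (an illustration, not
+    a class provided by this module)::
+
+        class ListTable(object):
+            def __init__(self):
+                self.rows = []
+            def store(self, **kw):
+                self.rows.append(kw)
+
+        log = Logger(fields=['step', 'value'], table=ListTable())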
+    """
+    def __init__(self, fields=(), table=None):
+        self.fields = fields
+        self.table = table
+
+    def config_history(self, history):
+        """
+        Make sure history records each logged field.
+        """
+        kwargs = dict((key, 1) for key in self.fields)
+        history.requires(**kwargs)
+
+    def __call__(self, history):
+        """
+        Record the next piece of history.
+        """
+        record = dict((f, _getfield(history, f)) for f in self.fields)
+        self.table.store(**record)
+
+
+class TimedUpdate(Monitor):
+    """
+    Indicate progress every n seconds.
+
+    The process should provide time, value, point, and step to the
+    history update. Call :meth:`config_history` with the
+    :class:`bumps.history.History` object before starting so that
+    these fields are stored.
+
+    *progress* is the number of seconds between progress updates, which
+    report information such as elapsed time or step number.
+
+    *improvement* is the number of seconds to go before showing
+    improvements to value.
+
+    By default, the updater only prints step number and improved value.
+    Subclass TimedUpdate with replaced :meth:`show_progress` and
+    :meth:`show_improvement` to trigger GUI updates or show parameter
+    values.
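+
+    A minimal sketch of such a subclass (illustrative only)::
+
+        class VerboseUpdate(TimedUpdate):
+            def show_progress(self, history):
+                print("step", history.step[0], "time", history.time[0])
+            def show_improvement(self, history):
+                print("step", history.step[0], "value", history.value[0])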
+    """
+    def __init__(self, progress=60, improvement=5):
+        self.progress_delta = progress
+        self.improvement_delta = improvement
+        self.progress_time = -inf
+        self.improvement_time = -inf
+        self.value = inf
+        self.improved = False
+
+    def config_history(self, history):
+        history.requires(time=1, value=1, point=1, step=1)
+
+    def show_improvement(self, history):
+        print("step", history.step, "value", history.value)
+
+    def show_progress(self, history):
+        pass
+
+    def __call__(self, history):
+        t = history.time[0]
+        v = history.value[0]
+        if v < self.value:
+            self.value = v
+            self.improved = True
+        if t > self.progress_time + self.progress_delta:
+            self.progress_time = t
+            self.show_progress(history)
+        if self.improved and t > self.improvement_time + self.improvement_delta:
+            self.improved = False
+            self.improvement_time = t
+            self.show_improvement(history)
diff --git a/bumps/mono.py b/bumps/mono.py
new file mode 100644
index 0000000..c4d0663
--- /dev/null
+++ b/bumps/mono.py
@@ -0,0 +1,115 @@
+"""
+Monotonic spline modeling.
+"""
+
+from __future__ import division
+
+__all__ = ['monospline', 'hermite', 'count_inflections', 'plot_inflections']
+
+import numpy as np
+from numpy import (diff, hstack, sqrt, searchsorted, asarray,
+                   nonzero, linspace, isnan)
+
+
+def monospline(x, y, xt):
+    r"""
+    Monotonic cubic hermite interpolation.
+
+    Returns $p(x_t)$ where $p(x_i) = y_i$ and $p(x)$ is monotonically
+    increasing if $y_i \leq y_{i+1}$ for all $i$.  Also works for
+    decreasing values $y$, resulting in decreasing $p(x)$.  If $y$ is not
+    monotonic, then $p(x)$ may peak higher than any $y$, so this function
+    is not suitable as a strict bound on the interpolated function when
+    the $y$ values are unconstrained.
+
+    http://en.wikipedia.org/wiki/Monotone_cubic_interpolation
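+
+    A minimal usage sketch (data values are illustrative)::
+
+        x = np.array([0., 1., 2., 3.])
+        y = np.array([0., 1., 1., 2.])
+        xt = np.linspace(0., 3., 50)
+        yt = monospline(x, y, xt)   # monotone between the knots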
+    """
+    with np.errstate(all='ignore'):
+        x = hstack((x[0] - 1, x, x[-1] + 1))
+        y = hstack((y[0], y, y[-1]))
+        dx = diff(x)
+        dy = diff(y)
+        dx[abs(dx) < 1e-10] = 1e-10
+        delta = dy / dx
+        m = (delta[1:] + delta[:-1]) / 2
+        m = hstack((0, m, 0))
+        alpha, beta = m[:-1] / delta, m[1:] / delta
+        d = alpha ** 2 + beta ** 2
+
+        # print "ma",m
+        for i in range(len(m) - 1):
+            if isnan(delta[i]):
+                m[i] = delta[i + 1]
+            elif dy[i] == 0 or alpha[i] == 0 or beta[i] == 0:
+                m[i] = m[i + 1] = 0
+            elif d[i] > 9:
+                tau = 3. / sqrt(d[i])
+                m[i] = tau * alpha[i] * delta[i]
+                m[i + 1] = tau * beta[i] * delta[i]
+                # if isnan(m[i]) or isnan(m[i+1]):
+                #    print i,"isnan",tau,d[i], alpha[i],beta[i],delta[i]
+            # elif isnan(m[i]):
+            #    print i,"isnan",delta[i],dy[i]
+                #m[ dy[1:]*dy[:-1]<0 ] = 0
+        # if np.any(isnan(m)|isinf(m)):
+        #    print "mono still has bad values"
+        #    print "m",m
+        #    print "delta",delta
+        #    print "dx,dy",list(zip(dx,dy))
+        #    m[isnan(m)|isinf(m)] = 0
+
+    return hermite(x, y, m, xt)
+
+
+def hermite(x, y, m, xt):
+    """
+    Computes the cubic hermite polynomial $p(x_t)$.
+
+    The polynomial goes through all points $(x_i,y_i)$ with slope
+    $m_i$ at the point.
+    """
+    with np.errstate(all='ignore'):
+        x, y, m, xt = [asarray(v, 'd') for v in (x, y, m, xt)]
+        idx = searchsorted(x[1:-1], xt)
+        h = x[idx + 1] - x[idx]
+        h[h <= 1e-10] = 1e-10
+        s = (y[idx + 1] - y[idx]) / h
+        v = xt - x[idx]
+        c3, c2, c1, c0 = ((m[idx] + m[idx + 1] - 2 * s) / h ** 2,
+                          (3 * s - 2 * m[idx] - m[idx + 1]) / h,
+                          m[idx],
+                          y[idx])
+    return ((c3 * v + c2) * v + c1) * v + c0
+
+
+# TODO: move inflection point code to data.py
+def count_inflections(x, y):
+    """
+    Count the number of inflection points in a curve.
+    """
+    with np.errstate(all='ignore'):
+        m = (y[2:] - y[:-2]) / (x[2:] - x[:-2])
+        b = y[2:] - m * x[2:]
+        delta = y[1:-1] - (m * x[1:-1] + b)
+        delta = delta[nonzero(delta)]  # ignore points on the line
+
+    sign_change = (delta[1:] * delta[:-1]) < 0
+    return sum(sign_change)
+
+
+def plot_inflections(x, y):
+    """
+    Plot inflection points in a curve.
+    """
+    m = (y[2:] - y[:-2]) / (x[2:] - x[:-2])
+    b = y[2:] - m * x[2:]
+    delta = y[1:-1] - (m * x[1:-1] + b)
+    t = linspace(x[0], x[-1], 400)
+    import pylab
+    ax1 = pylab.subplot(211)
+    pylab.plot(t, monospline(x, y, t), '-b', x, y, 'ob')
+    pylab.subplot(212, sharex=ax1)
+    delta_x = x[1:-1]
+    pylab.stem(delta_x, delta)
+    pylab.plot(delta_x[delta < 0], delta[delta < 0], 'og')
+    pylab.axis([x[0], x[-1], min(min(delta), 0), max(max(delta), 0)])
diff --git a/bumps/mystic/__init__.py b/bumps/mystic/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/bumps/mystic/condition.py b/bumps/mystic/condition.py
new file mode 100644
index 0000000..0b4b681
--- /dev/null
+++ b/bumps/mystic/condition.py
@@ -0,0 +1,249 @@
+# This program is public domain
+# Author: Paul Kienzle
+"""
+Conditional expression manager
+
+Conditional expressions are formed from combinations of
+and (&), or (|), exclusive or (^) and not  (!), as well
+as primitive tests.   The resulting expressions can be
+evaluated on particular inputs, with the inputs passed
+down to the primitive tests.  Primitive tests should be
+subclassed from Condition, with the __call__  method
+defined to return the truth value of the condition
+given the inputs.
+
+For example, the following creates a test whether a
+number is in the open interval (0,1)::
+
+    >>> class gt(Condition):
+    ...    def __init__(self, base): self.base = base
+    ...    def __call__(self, test): return test>self.base
+    ...    def __str__(self): return "x>%g"%self.base
+    >>> class lt(Condition):
+    ...    def __init__(self, base): self.base = base
+    ...    def __call__(self, test): return test<self.base
+    ...    def __str__(self): return "x<%g"%self.base
+    >>> test = lt(1) & gt(0)
+    >>> print(test)
+    (x<1 and x>0)
+    >>> test(0.5)
+    True
+    >>> test(1)
+    False
+
+No assumptions are made about the structure of the arguments
+to the primitives, but all primitives in an expression should
+accept the same arguments.
+
+The constants true and false are predefined as primitives
+which can take any arguments::
+
+    >>> true()
+    True
+    >>> false("this",string="ignored")
+    False
+
+You can find the individual terms of the expression using
+the method primitives:
+
+    >>> (true&false).primitives() == set([true,false])
+    True
+
+In many instances you not only want to know that an expression
+is true or false, but why it is true or false. The status method
+on expressions does this, returning not only the truth status as
+a boolean, but also a list of primitives and Not(primitives)
+saying which conditions are responsible.  For example::
+
+    >>> class iters(Condition):
+    ...    def __init__(self, base): self.base = base
+    ...    def __call__(self, state): return state['iters']>self.base
+    ...    def __str__(self): return "iters>%d"%self.base
+    >>> class stepsize(Condition):
+    ...    def __init__(self, base): self.base = base
+    ...    def __call__(self, state): return state['stepsize']<self.base
+    ...    def __str__(self): return "stepsize<%g"%self.base
+    >>> converge = stepsize(0.001)
+    >>> diverge = iters(100)
+    >>> result,why = converge.status(dict(stepsize=21.2,iters=20))
+    >>> print("%s %s"%(result, ", ".join(str(c) for c in why)))
+    False not stepsize<0.001
+    >>> result,why = diverge.status(dict(stepsize=21.2,iters=129))
+    >>> print("%s %s"%(result, ", ".join(str(c) for c in why)))
+    True iters>100
+
+Note that status will be less efficient than direct evaluation
+because it has to test all branches and construct the result list.
+Normally the And and Or calculations can short circuit, and only
+compute what they need to guarantee the resulting truth value.
+"""
+
+class Condition(object):
+    """
+    Condition abstract base class.
+
+    Subclasses should define __call__(self, *args, **kw)
+    which returns True if the condition is satisfied, and
+    False otherwise.
+    """
+    def __and__(self, condition):
+        return And(self, condition)
+    def __or__(self, condition):
+        return Or(self, condition)
+    def __xor__(self, condition):
+        return Xor(self, condition)
+    def __invert__(self):
+        return Not(self)
+    def __call__(self, *args, **kw):
+        raise NotImplementedError
+    def _negate(self):
+        return _Bar(self)
+    def status(self, *args, **kw):
+        """
+        Evaluate the condition, returning both the status and a list of
+        conditions.  If the status is true, then the conditions are those
+        that contribute to the true status.  If the status is false, then
+        the conditions are those that contribute to the false status.
+        """
+        stat = self.__call__(*args, **kw)
+        if stat:
+            return stat, [self]
+        else:
+            return stat, [Not(self)]
+    def primitives(self):
+        """
+        Return a list of terms in the condition.
+        """
+        return set([self])
+
+class And(Condition):
+    """
+    True if both conditions are satisfied.
+    """
+    def __init__(self, left, right):
+        self.left,self.right = left,right
+    def __call__(self, *args, **kw):
+        return self.left(*args, **kw) and self.right(*args, **kw)
+    def __str__(self):
+        return "(%s and %s)"%(self.left,self.right)
+    def _negate(self):
+        return Or(self.left._negate(),self.right._negate())
+    def status(self, *args, **kw):
+        lstat,lcond = self.left.status(*args, **kw)
+        rstat,rcond = self.right.status(*args, **kw)
+        if lstat and rstat:
+            return True,lcond+rcond
+        elif lstat:
+            return False,rcond
+        elif rstat:
+            return False,lcond
+        else:
+            return False,lcond+rcond
+    def primitives(self):
+        return self.left.primitives() | self.right.primitives()
+
+
+class Or(Condition):
+    """
+    True if either condition is satisfied
+    """
+    def __init__(self, left, right):
+        self.left,self.right = left,right
+    def __call__(self, *args, **kw):
+        return self.left(*args, **kw) or self.right(*args, **kw)
+    def __str__(self):
+        return "(%s or %s)"%(self.left,self.right)
+    def _negate(self):
+        return And(self.left._negate(),self.right._negate())
+    def status(self, *args, **kw):
+        lstat,lcond = self.left.status(*args, **kw)
+        rstat,rcond = self.right.status(*args, **kw)
+        if lstat and rstat:
+            return True,lcond+rcond
+        elif lstat:
+            return True,lcond
+        elif rstat:
+            return True,rcond
+        else:
+            return False,lcond+rcond
+    def primitives(self):
+        return self.left.primitives() | self.right.primitives()
+
+class Xor(Condition):
+    """
+    True if only one condition is satisfied
+    """
+    def __init__(self, left, right):
+        self.left,self.right = left,right
+    def __call__(self, *args, **kw):
+        l,r = self.left(*args, **kw),self.right(*args, **kw)
+        return (l or r) and not (l and r)
+    def __str__(self):
+        return "(%s xor %s)"%(self.left,self.right)
+    def _negate(self):
+        return Xor(self.left,self.right._negate())
+    def status(self, *args, **kw):
+        lstat,lcond = self.left.status(*args, **kw)
+        rstat,rcond = self.right.status(*args, **kw)
+        if lstat ^ rstat:
+            return True,lcond+rcond
+        else:
+            return False,lcond+rcond
+    def primitives(self):
+        return self.left.primitives() | self.right.primitives()
+
+class Not(Condition):
+    """
+    True if condition is not satisfied
+    """
+    def __init__(self, condition):
+        self.condition = condition
+    def __call__(self, *args, **kw):
+        return not self.condition(*args, **kw)
+    def __str__(self):
+        return "not "+str(self.condition)
+    def _negate(self):
+        return self.condition
+    def status(self, *args, **kw):
+        stat,cond = self.condition.status(*args, **kw)
+        return not stat, cond
+    def __eq__(self, other):
+        return isinstance(other,Not) and self.condition == other.condition
+    def __ne__(self, other):
+        return not isinstance(other,Not) or self.condition != other.condition
+    def primitives(self):
+        return self.condition.primitives()
+
+class _Bar(Condition):
+    """
+    This is an internal condition structure created solely to handle
+    negated primitives when computing status.  It should not be used
+    externally.
+    """
+    def __init__(self, condition):
+        self.condition = condition
+    def __call__(self, *args, **kw):
+        return not self.condition(*args, **kw)
+    def _negate(self):
+        return self.condition
+    def status(self, *args, **kw):
+        stat,cond = self.condition.status(*args,**kw)
+        return not stat, cond
+    def __str__(self):
+        return "not "+str(self.condition)
+    def primitives(self):
+        return self.condition.primitives()
+
+class Constant(Condition):
+    """
+    Constants true and false.
+    """
+    def __init__(self, value):
+        self._value = value
+    def __call__(self, *args, **kw):
+        return self._value
+    def __str__(self):
+        return str(self._value)
+
+true = Constant(True)
+false = Constant(False)
diff --git a/bumps/mystic/examples/__init__.py b/bumps/mystic/examples/__init__.py
new file mode 100644
index 0000000..f5c55fb
--- /dev/null
+++ b/bumps/mystic/examples/__init__.py
@@ -0,0 +1,31 @@
+"""
+Sample models and functions prepared for use in mystic
+
+
+Functions
+=========
+
+    corana 1d,2d,3d,4d     -- Corana's function
+
+
+Models
+======
+
+    dense_circle     -- 2d array representation of a circle
+    sparse_circle
+    minimal_circle
+    decay            -- Bevington & Robinson's model of dual exponential decay
+
+"""
+
+# base classes
+#from .model import Fitness, Function
+
+# models
+#from .decay import decay
+#from .circle import dense_circle, sparse_circle, minimal_circle
+
+# functions
+#from .corana import corana1d, corana2d, corana3d, corana4d
+
+# end of file
diff --git a/bumps/mystic/examples/circle.py b/bumps/mystic/examples/circle.py
new file mode 100644
index 0000000..0b39bb8
--- /dev/null
+++ b/bumps/mystic/examples/circle.py
@@ -0,0 +1,86 @@
+"""
+2d array representation of a circle
+
+References::
+    None
+"""
+
+import numpy as np
+from numpy import arange
+from numpy import random, sin, cos, pi, inf, sqrt
+
+random.seed(123)
+
+def circle(coeffs,interval=0.02):
+    """
+    generate a 2D array representation of a circle of given coeffs
+    coeffs = (x,y,r)
+    """
+    xc,yc,r = coeffs
+    theta = arange(0, 2*pi, interval)
+    return r*cos(theta)+xc, r*sin(theta)+yc
+
+def genpoints(coeffs, npts=20):
+    """
+    Generate a 2D dataset of npts enclosed in circle of given coeffs,
+    where coeffs = (x,y,r).
+
+    NOTE: if npts is None, constrain all points to circle of given radius
+    """
+    xo,yo,R = coeffs
+    # For uniform areal density, radial position varies as R*sqrt(u), u ~ U[0,1)
+    r,theta = R*sqrt(random.rand(npts)), 2*pi*(random.rand(npts))
+    x,y = r*cos(theta)+xo, r*sin(theta)+yo
+    return x,y
+
+def gendensity(coeffs,density=0.1):
+    xo,yo,R = coeffs
+    npts = int(density*pi*R**2)
+    return genpoints(coeffs,npts)
+
+from .model import Fitness
+class MinimumCircle(Fitness):
+    def __init__(self, data=None, limits=None, start=None):
+        self.x,self.y = data
+        self.dy = None
+        self.limits = limits
+        self.start = start
+
+    def _residuals(self, p):
+        x,y = self.x,self.y
+        xc,yc,r = p
+        d = sqrt((x-xc)**2 + (y-yc)**2)
+        d[d<r] = 0
+        return d
+
+    def profile(self, p):
+        return circle(p)
+
+    def residuals(self, p):
+        """
+        Residuals is used by Levenburg-Marquardt, so fake the
+        penalty terms of the normal cost function for use here.
+        """
+        resid = self._residuals(p)
+        # Throw r in the residual so that it is minimized, punish the circle
+        # if there are too many points outside.
+        xc,yc,r = p
+        d = np.concatenate((resid, [r], [np.sum(resid > 0)]))
+        return d
+
+    def __call__(self, p):
+        xc,yc,r = p
+        resid = self._residuals(p)
+        # Penalties are the number
+        # Add additional penalties for each point outside the circle
+        return sum(resid>0) + sum(resid) + abs(r)
+
+# prepared instances
+Po = [0,0,1]
+Plo=[-inf,-inf,0]
+Phi=[inf,inf,inf]
+dense_circle = MinimumCircle(data=gendensity([3,4,20],density=5),
+                             limits=(Plo,Phi), start=Po)
+sparse_circle = MinimumCircle(data=gendensity([3,-4,32],density=0.5),
+                              limits=(Plo,Phi), start=Po)
+minimal_circle = MinimumCircle(data=gendensity([0,0,10],density=5),
+                               limits=(Plo,Phi), start=Po)
diff --git a/bumps/mystic/examples/corana.py b/bumps/mystic/examples/corana.py
new file mode 100644
index 0000000..cd303e8
--- /dev/null
+++ b/bumps/mystic/examples/corana.py
@@ -0,0 +1,63 @@
+"""
+Corana's function
+
+References::
+    [1] Storn, R. and Price, K. Differential Evolution - A Simple and Efficient
+    Heuristic for Global Optimization over Continuous Spaces. Journal of Global
+    Optimization 11: 341-359, 1997.
+
+    [2] Storn, R. and Price, K.
+    (Same title as above, but as a technical report.)
+    http://www.icsi.berkeley.edu/~storn/deshort1.ps
+"""
+from math import pow
+
+from numpy import sign, floor, inf
+
+def corana(coeffs):
+    """
+    evaluates the Corana function for a list of coeffs
+
+    minimum is f(x)=0.0 at xi=0.0
+    """
+    d = [1., 1000., 10., 100.]
+    x = coeffs
+    r = 0
+    for xj,dj in zip(x,d):
+        zj =  floor( abs(xj/0.2) + 0.49999 ) * sign(xj) * 0.2
+        if abs(xj-zj) < 0.05:
+            r += 0.15 * pow(zj - 0.05*sign(zj), 2) * dj
+        else:
+            r += dj * xj ** 2
+    return r
+
+
+def corana1d(x):
+    """Corana in 1D; coeffs = (x,0,0,0)"""
+    return corana([x[0], 0, 0, 0])
+
+def corana2d(x):
+    """Corana in 2D; coeffs = (x,0,y,0)"""
+    return corana([x[0], 0, x[1], 0])
+
+def corana3d(x):
+    """Corana in 3D; coeffs = (x,0,y,z)"""
+    return corana([x[0], 0, x[1], x[2]])
+
+Po = [1,1,1,1]
+Plo = [-inf,-inf,-inf,-inf]
+Phi = [inf,inf,inf,inf]
+
+def s(V,*args):
+    retval = []
+    for i in args: retval.append(V[i])
+    return retval
+from .model import Function
+corana1d = Function(f=corana1d, limits=(s(Plo,0),s(Phi,0)),
+                    start=s(Po,0))
+corana2d = Function(f=corana2d, limits=(s(Plo,0,2),s(Phi,0,2)),
+                    start=s(Po,0,2))
+corana3d = Function(f=corana3d, limits=(s(Plo,0,2,3),s(Phi,0,2,3)),
+                    start=s(Po,0,2,3))
+corana4d = Function(f=corana, limits=(Plo,Phi), start=Po)
+# End of file
diff --git a/bumps/mystic/examples/decay.py b/bumps/mystic/examples/decay.py
new file mode 100644
index 0000000..761c6be
--- /dev/null
+++ b/bumps/mystic/examples/decay.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+"""
+Bevington & Robinson's model of dual exponential decay
+
+References::
+    [5] Bevington & Robinson (1992).
+    Data Reduction and Error Analysis for the Physical Sciences,
+    Second Edition, McGraw-Hill, Inc., New York.
+"""
+
+import numpy as np
+from numpy import exp, inf, sqrt
+
+def dual_exponential(t,a):
+    """
+    Computes dual exponential decay.
+
+        y = a1 + a2 exp(-t/a4) + a3 exp(-t/a5)
+    """
+    a1,a2,a3,a4,a5 = a
+    t = np.asarray(t)
+    return a1 + a2*exp(-t/a4) + a3*exp(-t/a5)
+
+# data from Chapter 8 of [5].
+data = np.array([[15, 775], [30, 479], [45, 380], [60, 302],
+[75, 185], [90, 157], [105,137], [120, 119], [135, 110],
+[150, 89], [165, 74], [180, 61], [195, 66], [210, 68],
+[225, 48], [240, 54], [255, 51], [270, 46], [285, 55],
+[300, 29], [315, 28], [330, 37], [345, 49], [360, 26],
+[375, 35], [390, 29], [405, 31], [420, 24], [435, 25],
+[450, 35], [465, 24], [480, 30], [495, 26], [510, 28],
+[525, 21], [540, 18], [555, 20], [570, 27], [585, 17],
+[600, 17], [615, 14], [630, 17], [645, 24], [660, 11],
+[675, 22], [690, 17], [705, 12], [720, 10], [735, 13],
+[750, 16], [765, 9], [780, 9], [795, 14], [810, 21],
+[825, 17], [840, 13], [855, 12], [870, 18], [885, 10]])
+
+
+x = data[:,0]
+y = data[:,1]
+dy = sqrt(data[:,1])
+Po = [1,1,1,1,1]
+Plo = [-inf,0,0,0,0]
+Phi = [inf,inf,inf,inf,inf]
+
+from .model import Fitness
+decay = Fitness(f=dual_exponential, data=(x,y,dy),
+                limits=(Plo,Phi), start=Po)
diff --git a/bumps/mystic/examples/model.py b/bumps/mystic/examples/model.py
new file mode 100644
index 0000000..5cc7cfd
--- /dev/null
+++ b/bumps/mystic/examples/model.py
@@ -0,0 +1,77 @@
+import numpy as np
+
+class Function(object):
+    def __init__(self, f=None, limits=None, start=None):
+        """
+        f=callable is the function
+        limits=(lo,hi) are the limits
+        start=vector is the start point
+        """
+        self.f, self.limits, self.start = f, limits, start
+        self.__name__ = f.__name__
+
+    def __call__(self,p):
+        return self.f(p)
+
+    def response_surface(self, p=None, dims=[0,1]):
+        if p is None: p = self.start
+        plot_response_surface(self, p, dims)
+
+class Fitness(object):
+    def __init__(self, f=None, data=None, limits=None, start=None):
+        self.f, self.limits, self.start = f, limits, start
+        if len(data) == 2:
+            self.x, self.y = data
+            self.dy = None
+        else:
+            self.x, self.y, self.dy = data
+
+    def profile(self, p):
+        return self.x, self.f(self.x, p)
+
+    def residuals(self, p):
+        _, y = self.profile(p)
+        return (y - self.y)/self.dy
+
+    def __call__(self, p):
+        return np.sum(self.residuals(p)**2)
+
+    def plot(self, p=None):
+        """
+        Plot a profile for the given p
+        """
+        import pylab
+        if self.dy is not None:
+            pylab.errorbar(self.x, self.y, yerr=self.dy, fmt='x')
+        else:
+            pylab.plot(self.x, self.y, 'x')
+        if p is None: p = self.start
+        x,y = self.profile(p)
+        pylab.plot(x,y)
+
+    def response_surface(self, p=None, dims=[0,1]):
+        if p is None: p = self.start
+        plot_response_surface(self, p, dims)
+
+def plot_response_surface(f, p, dims=[0,1]):
+    """
+    Plot a line or a slice around a point in a n-D function
+    """
+    import pylab
+    if len(dims) == 1:
+        xi = dims[0]
+        x = pylab.linspace(-10,10,40) - p[xi]
+        def value(v):
+            p[xi] = v
+            return f(p)
+        z = [value(v) for v in x]
+        pylab.plot(x,z)
+    else:
+        xi,yi = dims
+        x = pylab.linspace(-10,10,40) - p[xi]
+        y = pylab.linspace(-10,10,40) - p[yi]
+        def value(pt):
+            p[xi] = pt[0]
+            p[yi] = pt[1]
+            return f(p)
+        z = np.array([[value((v,w)) for v in x] for w in y])
+        pylab.pcolor(x,y,z)
diff --git a/bumps/mystic/examples/simple.py b/bumps/mystic/examples/simple.py
new file mode 100644
index 0000000..6ebeef8
--- /dev/null
+++ b/bumps/mystic/examples/simple.py
@@ -0,0 +1,2 @@
+def f(x):
+    return (x[0]-3)**2+(x[1]-5)**2
diff --git a/bumps/mystic/optimizer/__init__.py b/bumps/mystic/optimizer/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/bumps/mystic/optimizer/de.py b/bumps/mystic/optimizer/de.py
new file mode 100644
index 0000000..62d3dbb
--- /dev/null
+++ b/bumps/mystic/optimizer/de.py
@@ -0,0 +1,263 @@
+# This code is public domain
+# Author: Paul Kienzle
+
+## Differential Evolution Solver Class
+## Based on algorithms developed by Dr. Rainer Storn & Kenneth Price
+## Influenced by
+##    Lester E. Godwin (godwin at pushcopr.com)  1998: C++ version
+##    James R. Phillips (zunzun at zunzun.com)   2002: Python conversion
+##    Patrick Hung                            2006: cleanup
+##    Mike McKerns (mmckerns at caltech.edu)     2008: parallel version, bounds
+##    Paul Kienzle                            2009: rewrite
+
+"""
+Differential evolution optimizer.
+
+This module contains a collection of optimization routines based on
+Storn and Price's differential evolution algorithm.
+
+Minimal function interface to optimization routines::
+
+    x = diffev(f,xo)
+
+Stepping interface::
+
+    DifferentialEvolution
+
+
+References
+==========
+
+[1] Storn, R. and Price, K. Differential Evolution - A Simple and Efficient
+Heuristic for Global Optimization over Continuous Spaces. Journal of Global
+Optimization 11: 341-359, 1997.
+
+[2] Price, K., Storn, R., and Lampinen, J. - Differential Evolution,
+A Practical Approach to Global Optimization. Springer, 1st Edition, 2005
+
+"""
+
+# Symbols required for simple interface
+__all__ = ['de','stop']
+
+import numpy as np
+
+from .. import stop
+from .. import solver
+from ..util import choose_without_replacement
+
+CROSSOVER = 'c_exp','c_bin'
+MUTATE = 'best1','best1u','best2','randtobest1','rand1','rand2'
+
+#############################################################################
+#  Code below are the different crossovers/mutation strategies
+#############################################################################
+def c_exp(ndim, CR):
+    """
+    Select a sequence of dimensions.
+
+    The length of the sequence follows the geometric distribution for 1-CR.
+
+    This is equivalent to the number of flips of a weighted coin needed
+    to see the first head.
+
+    The sequence starts at a random position and wraps if necessary.
+    """
+    # Note: this is different from Patrick Hung's version in that it forces
+    # at least one success.
+    L = min(abs(np.random.geometric(1-CR)),ndim)
+    idx = np.zeros(ndim,'bool')
+    n = np.random.randint(ndim)
+    idx[np.arange(n,n+L)%ndim] = True
+    return idx
+
+def c_bin(ndim, CR):
+    """
+    Select random dimensions.
+
+    The probability of selecting any dimension is CR.  At least one dimension
+    will be selected.
+    """
+    n = np.random.randint(ndim)
+    idx = np.random.rand(ndim) < CR
+    idx[n] = True
+    return idx
+
+
+def best1(F, best, pop, idx, dims):
+    """
+    Differential evolution mutation T = best + F*(r1-r2)
+    """
+    r1,r2 = _candidates(pop, 2, exclude=idx)
+    return best[dims] + F*(r1[dims]-r2[dims])
+
+def best1u(F, best, pop, idx, dims):
+    """
+    Differential evolution mutation T = best + U*(r1-r2),  U ~ Uniform[0,F]
+    """
+    r1,r2 = _candidates(pop,2,exclude=idx)
+    return best[dims] + F*np.random.rand()*(r1[dims]-r2[dims])
+
+def best2(F, best, pop, idx, dims):
+    """
+    Differential evolution mutation T = best + F*(r1+r2-r3-r4)
+    """
+    r1,r2,r3,r4 = _candidates(pop, 4, exclude=idx)
+    return best[dims] + F*(r1[dims]+r2[dims]-r3[dims]-r4[dims])
+
+def randtobest1(F, best, pop, idx, dims):
+    """
+    Differential evolution mutation T = F*(best-old + r1-r2)
+    """
+    r1,r2 = _candidates(pop,2,exclude=idx)
+    return F*(best[dims]-pop[idx][dims] + r1[dims]-r2[dims])
+
+def rand1(F, best, pop, idx, dims):
+    """
+    Differential evolution mutation T = r0 + F*(r1-r2)
+    """
+    r0,r1,r2 = _candidates(pop, 3, exclude=idx)
+    return r0[dims] + F*(r1[dims]-r2[dims])
+
+def rand1u(F, best, pop, idx, dims):
+    """
+    Differential evolution mutation T = r0 + U*(r1-r2), U ~ Uniform[0,F]
+    """
+    r0,r1,r2 = _candidates(pop, 3, exclude=idx)
+    return r0[dims] + F*np.random.rand()*(r1[dims]-r2[dims])
+
+def rand2(F, best, pop, idx, dims):
+    """
+    Differential evolution mutation T = r0 + F*(r1+r2-r3-r4)
+    """
+    r0,r1,r2,r3,r4 = _candidates(pop, 5, exclude=idx)
+    return r0[dims] + F*(r1[dims]+r2[dims]-r3[dims]-r4[dims])
+
+############################################################
+
+
+def _candidates(pop, n, exclude=None):
+    """
+    Select *n* random candidates from *pop*, not including the
+    candidate at index *exclude*.
+    """
+    selection = choose_without_replacement(len(pop)-1, n)
+    selection[selection>=exclude] += 1
+    return pop[selection]
+
+##########################################################################
+
+class DifferentialEvolution(solver.Strategy):
+    """
+    Differential evolution optimization.
+
+    *CR*  float in [0-1]
+        Crossover rate.
+    *F* float in (0,inf)
+        Crossover step size.
+    *npop* float
+        The size of the population is npop times the number of dimensions
+        in the problem.
+    *crossover* func(ndim, CR) -> index vector
+        Crossover selection.  Returns the index vector of dimensions which
+        should be mutated.
+    *mutate* fn(F, best, pop, idx, dims) -> new[dims]
+        Mutation strategy.  Selects the crossover population members and
+        returns the mutated portion of the trial point in the new population.
+        *F* is the scale factor, *best* is the best point seen so far, *pop*
+        is the current population, *idx* is the vector being updated and
+        *dims* is the set of dimensions to update.
+
+    Available crossover functions (c_exp is default)::
+
+        c_exp:  start at dimension n and continue until U[0,1] >= CR
+        c_bin:  select dimension i if U[0,1] < CR
+
+    Available mutation functions (best1u is default)::
+
+        best1u: T = best + U(F)*(r1-r2),  U(F) ~ Uniform in [0,F]
+        best1:  T = best + F*(r1-r2)
+        best2:  T = best + F*(r1+r2-r3-r4)
+        randtobest1:  T = F*(best-old) + F*(r1-r2)
+        rand1:  T = r0 + F*(r1-r2)
+        rand2:  T = r0 + F*(r1+r2-r3-r4)
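+
+    A minimal construction sketch (the parameter values are illustrative,
+    not recommended settings)::
+
+        strategy = DifferentialEvolution(CR=0.9, F=0.8, npop=10,
+                                         crossover=c_bin, mutate=rand1)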
+    """
+    requires = [('mystic','0.9')]
+
+    def __init__(self, CR=0.5, F=2.0, npop=3,
+                 crossover=c_exp, mutate=best1u):
+        self.crossover = crossover
+        self.mutate = mutate
+        self.CR, self.F = CR, F
+        self.npop = npop
+
+    def default_termination_conditions(self, problem):
+        success = stop.Cf(tol=1e-7,scaled=False)
+        #maxiter = 100
+        maxiter = len(problem.getp())*200
+        #maxfun  = self.npop*maxiter
+        failure = stop.Steps(maxiter)
+        return success,failure
+
+    def config_history(self, history):
+        """
+        Indicates how much history is required.
+        """
+        history.requires(value=1, population_points=2, population_values=2)
+
+    def start(self, problem):
+        """
+        Generate the initial population.
+
+        Returns a matrix *P* of points to be evaluated.
+        """
+        # Generate a random population
+        current = problem.getp()
+        ndim = len(current)
+        population = problem.randomize(int(self.npop * ndim))
+        population[0] = current
+
+        # Return the population
+        return population
+
+    def step(self, history):
+        """
+        Generate the next population.
+
+        Returns a matrix *P* of points to be evaluated.
+
+        *history* contains the previous history of the computation,
+        including any fields placed by :meth:`update`.
+        """
+
+        best = history.point[0]
+        pop = history.population_points[0]
+        pop_size,ndim = pop.shape
+
+        trial = pop.copy()
+        for idx,vec in enumerate(trial):
+            dims = self.crossover(ndim, self.CR)
+            vec[dims] = self.mutate(self.F, best, pop, idx, dims)
+        return trial
+
+    def update(self, history):
+        """
+        Update population, keeping old points that are better than
+        the trial points.
+        """
+        #print "result",history.step[0]
+        #for i,v in enumerate(history.population_values[0]):
+        #    print history.population_points[0][i],'=',v
+        if len(history.population_points) > 1:
+            oldpop = history.population_points[1]
+            oldval = history.population_values[1]
+            newpop = history.population_points[0]
+            newval = history.population_values[0]
+
+            worse = newval > oldval
+            newpop[worse] = oldpop[worse]
+            newval[worse] = oldval[worse]
+
+#minimizer_function(strategy=DifferentialEvolution,
+#                   success=stop.Df(1e-5,n=10),
+#                   failure=stop.Steps(100))
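+
+
+if __name__ == "__main__":
+    # Illustrative sketch (not part of the upstream API): one greedy DE
+    # generation built from the pieces above -- rand1-style mutation via
+    # _candidates(), a per-dimension crossover mask, and keep-the-better
+    # selection as in DifferentialEvolution.update().
+    rng = np.random.RandomState(0)
+    fn = lambda x: np.sum((x - 3.0)**2, axis=-1)    # toy quadratic, minimum at x=3
+    CR, F = 0.5, 0.8
+    pop = rng.uniform(-5.0, 5.0, size=(12, 4))      # 12 members, 4 dimensions
+    val = fn(pop)
+    trial = pop.copy()
+    for idx in range(len(pop)):
+        r0, r1, r2 = _candidates(pop, 3, exclude=idx)
+        dims = rng.rand(pop.shape[1]) < CR          # dimensions to mutate
+        trial[idx, dims] = (r0 + F*(r1 - r2))[dims]
+    tval = fn(trial)
+    keep = tval < val                               # greedy selection
+    pop[keep], val[keep] = trial[keep], tval[keep]
+    print("best value after one generation: %g" % val.min())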
diff --git a/bumps/mystic/optimizer/diffev_compat.py b/bumps/mystic/optimizer/diffev_compat.py
new file mode 100644
index 0000000..b605cd3
--- /dev/null
+++ b/bumps/mystic/optimizer/diffev_compat.py
@@ -0,0 +1,144 @@
+from __future__ import print_function
+
+from ..solver import Minimizer
+from ...parameter import Parameter
+from .. import stop
+
+from .de import DifferentialEvolution, best1
+
+class Function(object):
+    def __init__(self, f, ndim=None, po=None, bounds=None, args=()):
+        if bounds is not None and po is not None:
+            self.parameters = [Parameter(value=v,bounds=b)
+                               for v,b in zip(po,bounds)]
+        elif bounds is not None:
+            self.parameters = [Parameter(b) for b in bounds]
+        elif po is not None:
+            self.parameters = [Parameter(v) for v in po]
+        elif ndim is not None:
+            self.parameters = [Parameter() for _ in range(ndim)]
+        else:
+            raise TypeError("Need ndim, po or bounds to get problem dimension")
+        if ((ndim is not None and ndim != len(self.parameters))
+            or (po is not None and len(po) != len(self.parameters))
+            or (bounds is not None and len(bounds) != len(self.parameters))):
+            raise ValueError("Inconsistent dimensions for ndim, po and bounds")
+        if po is None:
+            po = [p.start_value() for p in self.parameters]
+
+        self.f = f
+        self.bounds = bounds
+        self.po = po
+        self.args = args
+
+    def guess(self):
+        if self.po is not None:
+            return self.po
+        else:
+            return [p.start_value() for p in self.parameters]
+    def __call__(self, p):
+        return self.f(p, *self.args)
+
+
+def diffev(func, x0=None, npop=10, args=(), bounds=None,
+           ftol=5e-3, gtol=None,
+           maxiter=None, maxfun=None, CR=0.9, F=0.8,
+           full_output=0, disp=1, retall=0, callback=None):
+    """\
+Minimize a function using differential evolution.
+
+Inputs::
+
+    *func* -- the callable function to be minimized.
+    *x0* -- the initial guess, or None for an entirely random population.
+    *npop* -- points per dimension.  Population size is npop*nparameters.
+
+Additional Inputs::
+
+    *args* -- extra arguments for func.
+    *bounds* -- list of bounds (min,max), one pair for each parameter.
+    *ftol* -- acceptable relative error in func(xopt) for convergence.
+    *gtol* -- maximum number of iterations to run without improvement.
+    *maxiter* -- the maximum number of iterations to perform.
+    *maxfun* -- the maximum number of function evaluations.
+    *CR* -- the probability of cross-parameter mutations
+    *F* -- multiplier for impact of mutations on trial solution.
+    *full_output* -- non-zero if fval and warnflag outputs are desired.
+    *disp* -- non-zero to print convergence messages.
+    *retall* -- non-zero to return list of solutions at each iteration.
+    *callback* -- function(xk) to call after each iteration.
+
+Returns:: (xopt, {fopt, iter, funcalls, status}, [allvecs])
+
+    *xopt* -- the best point
+    *fopt* -- value of function at the best point: fopt = func(xopt)
+    *iter* -- number of iterations
+    *funcalls* -- number of function calls
+    *status* -- termination status
+        0 : Function converged within tolerance.
+        1 : Maximum number of function evaluations.
+        2 : Maximum number of iterations.
+    *allvecs* -- a list of solutions at each iteration
+"""
+
+    problem = Function(f=func, args=args, po=x0, bounds=bounds)
+    strategy = DifferentialEvolution(CR=CR, F=F, npop=npop, mutate=best1)
+    ndim = len(problem.guess())
+
+    # Determine success and failure conditions
+    if gtol: # Improvement window specified
+        # look for ftol improvement over gtol generations
+        success = stop.Df(tol=ftol,n=gtol,scaled=False)
+    else:
+        # look for f < ftol.
+        success = stop.Cf(tol=ftol,scaled=False)
+    if maxiter is None: maxiter = ndim*100
+    if maxfun is None: maxfun = npop*maxiter
+    failure = stop.Calls(maxfun)|stop.Steps(maxiter)
+
+    monitors = []
+    #if callback is not None:
+    #    monitors.append(CallbackMonitor(callback))
+    #if retall:
+    #    population_monitor = StepMonitor('population_values')
+    #    monitors.append(population_monitor)
+    minimize = Minimizer(problem=problem, strategy=strategy,
+                         monitors=monitors, success=success, failure=failure)
+
+    # Preserve history for output (must be after solver.reset())
+    hist = minimize.history
+    hist.requires(point=1,value=1,calls=1,step=1)
+
+
+    # Run the solver
+    minimize()
+
+
+    # Generate return values (must be after call to minimize)
+    if hist.calls[0] > maxfun:
+        status = 1
+        msg = "Warning: Maximum number of function evaluations exceeded."
+    elif hist.step[0] > maxiter:
+        status = 2
+        msg = "Warning: Maximum number of iterations exceeded."
+    else:
+        status = 0
+        msg = """Optimization terminated successfully.
+    Best point: %s
+    Best value: %g
+    Iterations: %d
+    Function evaluations: %d"""%(hist.point[0],hist.value[0],
+                                 hist.step[0],hist.calls[0])
+    if disp: print(msg)
+    if not full_output and not retall:
+        ret = hist.point[0]
+    elif full_output:
+        ret = (hist.point[0], hist.value[0],
+            hist.step[0], hist.calls[0], status)
+        if retall:
+            raise NotImplementedError("retall not implemented")
+            #ret += (population_monitor.population_points,)
+    else:
+        raise NotImplementedError("retall not implemented")
+        #ret = hist.point[0], population_monitor.population_points
+    return ret
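+
+
+if __name__ == "__main__":
+    # Illustrative sketch (not part of the upstream API): the Function wrapper
+    # above turns a plain callable plus a starting point and bounds into the
+    # problem object that diffev() hands to the Minimizer.  It assumes the
+    # surrounding bumps package is importable so that Parameter is available.
+    def rosen(p):
+        return (1.0 - p[0])**2 + 100.0*(p[1] - p[0]**2)**2
+    wrapped = Function(rosen, po=[-1.0, 1.0], bounds=[(-2, 2), (-2, 2)])
+    print(wrapped.guess())           # [-1.0, 1.0]
+    print(wrapped(wrapped.guess()))  # 4.0, the Rosenbrock value at (-1, 1)
+    # diffev(rosen, x0=[-1.0, 1.0], bounds=[(-2, 2), (-2, 2)]) drives the same
+    # machinery through the DifferentialEvolution strategy; see the docstring
+    # above for the full argument list and return convention.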
diff --git a/bumps/mystic/solver.py b/bumps/mystic/solver.py
new file mode 100644
index 0000000..3327f02
--- /dev/null
+++ b/bumps/mystic/solver.py
@@ -0,0 +1,331 @@
+"""
+Generic minimizers
+
+The general optimization algorithm is as follows::
+
+    fit = Minimizer(problem=Problem(),
+                 strategy=Strategy(),
+                 monitors=[],
+                 success=Df(1e-5) & Dx(1e-5),
+                 failure=Calls(10000) | Steps(100))
+    population = fit.start()
+    while True:
+        result = list(map(fit.problem, population))
+        fit.update(population, result)
+        if fit.isdone(): break
+        population = fit.step()
+
+    if fit.successful:
+        print("Converged")
+    else:
+        print("Stopped with", ", ".join(str(c) for c in fit.failure_cond))
+
+Variations are possible, such as the multistep process where the
+program first generates an initial population, then each time it
+is run generates an updated population based on the results of
+submitting the previous population to a batch queue.
+
+History traces
+==============
+
+Stopping conditions use history traces to evaluate whether the program
+should continue.  When adding new stopping conditions, the programmer
+must define a config_history method to indicate which values are needed.
+For example::
+
+    def config_history(self, history):
+        history.requires(points=3)
+
+When checking convergence, the programmer must still check that enough
+history is available for the test. For example,
+::
+
+    def __call__(self, history):
+        from numpy.linalg import norm
+        if len(history.value) < 3: return False
+        return norm(history.value[0] - history.value[2]) < 0.001
+
+The optimizer will make sure that config_history is called for each
+condition before the fit starts::
+
+    for t in success.primitives()|failure.primitives():
+        t.config_history()
+
+Each optimizer can control which values it wants to monitor.  For
+consistency and ease of use, monitor names should be chosen from
+the standard names below.  As new optimizers are created, the
+list of standard names may expand.
+
+Fixed properties of history::
+
+    *ndim* (int)
+         problem dimension
+    *lower_bound*, *upper_bound* ([float])
+         bounds constraints
+
+Accumulated properties::
+
+    *step* (int)
+         iteration number
+    *calls* (int)
+        cumulative number of function evaluations
+    *time* (seconds)
+        cumulative wall clock time
+    *cpu_time* (seconds)
+        cumulative CPU time
+    *value* (float)
+        best function value
+    *point* (vector)
+        parameter values for the best function value
+    *gradient* (vector)
+        del f: gradient at best point (if available)
+    *hessian* (matrix)
+        del**2 F: Hessian at best point (if available)
+    *population_values* (vector)
+        function values of the current population (if available)
+    *population_points* (matrix)
+        parameter values for the current population (if available)
+"""
+
+# TODO: Ordered fits
+#
+# Want a list of strategies and a parameter subset associated with each
+# strategy.  The fit is nested, with the outermost parameters are set
+# to a particular value, then the inner parameters are optimized for
+# those values before an alternative set of outer parameters is considered.
+#
+# Will probably want the inner strategy to be some sort of basin hopping
+# method with the new value jumping around based on the best value for
+# those parameters.  This is complicated because the first fit should be
+# done using a more global method.
+#
+# In the context of simultaneous fitting, it would be natural to partition
+# the parameters so that the dependent parameters of the slowest models
+# are varied last.  This can be done automatically if each model has
+# a cost estimate associated with it.
+#
+# TODO: Surrogate models
+#
+# Want to use surrogate models for the expensive models when far from the
+# minimum, only calculating the real model when near the minimum.
+
+from __future__ import print_function
+
+import time
+import os
+
+import numpy as np
+
+def default_mapper(f, x):
+    return list(map(f, x))
+
+def cpu_time():
+    """Current cpu time for this process"""
+    user_time,sys_time,_,_,_ = os.times()
+    return user_time+sys_time
+
+class Minimizer:
+    """
+    Perform a minimization.
+    """
+    def __init__(self, problem=None, strategy=None, monitors=[],
+                 history=None, success=None, failure=None):
+        self.problem = problem
+        self.strategy = strategy
+        # Ask strategy to fill in the default termination conditions
+        # in case the user doesn't supply them.
+        defsucc, deffail = strategy.default_termination_conditions(problem)
+        self.success = success if success is not None else defsucc
+        self.failure = failure if failure is not None else deffail
+        self.monitors = monitors
+        self.history = history
+        self.reset()
+
+    def minimize(self, mapper=default_mapper, abort_test=None, resume=False):
+        """
+        Run the solver to completion, returning the best point.
+
+        Note: only used stand-alone, not within fit service
+        """
+        self.time = time.time()
+        self.remote_time = -cpu_time()
+        population = self.step() if resume else self.start()
+        try:
+            while True:
+                result = mapper(self.problem, population)
+                #print "map result",result
+                self.update(population, result)
+                #print self.history.step, self.history.value
+                if self.isdone(): break         #STOPHERE combine
+                if abort_test is not None and abort_test(): break
+                population = self.step()
+        except KeyboardInterrupt:
+            pass
+        return self.history.point[0]
+
+    __call__ = minimize
+
+    def reset(self):
+        """
+        Clear the solver history.
+        """
+        self.history.clear()
+        self.history.provides(calls=1, time=1, cpu_time=1, step=1,
+                   point=1, value=1,
+                   population_points=0, population_values=0)
+        for c in self.success.primitives()|self.failure.primitives():
+            c.config_history(self.history)
+        for m in self.monitors:
+            m.config_history(self.history)
+        self.strategy.config_history(self.history)
+
+    def start(self):
+        """
+        Start the optimization by generating an initial population.
+        """
+        # Reset the timers so we know how long the fit takes.
+        # We are cheating by initializing remote_time to -cpu_time, then
+        # later adding cpu_time back to remote_time to get the total cost
+        # of local and remote computation.
+
+        if len(self.problem.getp()) == 0:
+            raise ValueError("Problem has no fittable parameters")
+
+        return self.strategy.start(self.problem)
+
+    def update(self, points, values):
+        """
+        Collect statistics on time and resources
+        """
+        if hasattr(values,'__cpu_time__'):
+            self.remote_time += values.__cpu_time__
+
+        # Locate the best member of the population
+        values = np.asarray(values)
+        #print("values",values,file=sys.stderr)
+
+        # Update the history
+        self.history.update(
+            time = time.time() - self.time,
+            cpu_time = cpu_time() + self.remote_time,
+            population_points = points,
+            population_values = values,
+        )
+        self.history.accumulate(step=1,calls=len(points))
+
+        self.strategy.update(self.history)
+
+        minidx = np.argmin(values)
+        self.history.update(
+            point = points[minidx],
+            value = values[minidx],
+        )
+
+        # Tell all the monitors that the history has been updated
+        for m in self.monitors:
+            m(self.history)
+
+    def step(self):
+        """
+        Generate the next population to evaluate.
+        """
+        return self.strategy.step(self.history)
+
+    def isdone(self):
+        """
+        Check if the fit has converged according to the criteria proposed
+        by the user and/or the optimizer.
+
+        Returns True if either the fit converged or is forced to stop for
+        some other reason.
+
+        Sets the following attributes::
+
+            *successful* (boolean)
+                True if the fit converged
+            *success_cond* ([Condition])
+                Reasons for success or lack of success
+            *failed* (boolean)
+                True if the fit should stop
+            *failure_cond* ([Condition])
+                Reasons for failure or lack of failure
+
+        Note that success and failure can occur at the same time if
+        for example the convergence criteria are met but the resource
+        limits were exceeded.
+        """
+        self.successful, self.success_cond = self.success.status(self.history)
+        self.failed, self.failure_cond = self.failure.status(self.history)
+        return self.successful or self.failed
+
+    def termination_condition(self):
+        if self.successful:
+            return "succeeded with "+", ".join(str(c) for c in self.success_cond)
+        else:
+            return ("failed with "+", ".join(str(c) for c in self.failure_cond)
+                    +" and "+", ".join(str(c) for c in self.success_cond))
+
+class Strategy:
+    """
+    Optimization strategy to use.
+
+    The doc string for the strategy will be used in the construction of
+    the doc strings for the simple optimizer interfaces, with a description
+    of the standard optimizer options at the end.   The following template
+    works well::
+
+        Name of optimization strategy
+
+        Brief description of the strategy
+
+        Optimizer parameters::
+
+            *argument* is the description of the argument
+
+        Additional details about the solver.
+
+    The __init__ arguments for the strategy will be passed in through
+    the simple optimizer interface.  Be sure to make them all keyword
+    arguments.
+    """
+    def config_history(self, history):
+        """
+        Specify which information needs to be preserved in history.
+
+        For example, parallel tempering needs to save its level values::
+
+            history.requires(levels=2)
+        """
+        pass
+
+    def update(self, history):
+        """
+        Update history with optimizer specific state information.
+
+        Note: standard history items (step, calls, value, point,
+        population_values, population_points, time, cpu_time) are
+        already recorded.  Additional items should be recorded
+        directly in the trace.  For example::
+
+            history.levels.put([1,2,3])
+        """
+        pass
+
+    def start(self, problem):
+        """
+        Generate the initial population.
+
+        Returns a matrix *P* of points to be evaluated.
+        """
+        raise NotImplementedError
+
+    def step(self, history):
+        """
+        Generate the next population.
+
+        Returns a matrix *P* of points to be evaluated.
+
+        *history* contains the previous history of the computation,
+        including any fields placed by :meth:`update`.
+        """
+        raise NotImplementedError
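+
+
+# Illustrative sketch (not part of the upstream API): the smallest useful
+# Strategy is a pure random search.  It follows the contract described above:
+# start() turns the problem into an initial population, step() proposes the
+# next one, and the Minimizer handles evaluation, history and termination.
+class _RandomSearchExample(Strategy):
+    def __init__(self, npop=10):
+        self.npop = npop
+
+    def default_termination_conditions(self, problem):
+        # Same stop-condition module the differential evolution strategy uses.
+        from . import stop
+        return (stop.Cf(tol=1e-7, scaled=False),
+                stop.Steps(100*len(problem.getp())))
+
+    def start(self, problem):
+        self._problem = problem              # keep a handle for later steps
+        return problem.randomize(self.npop)
+
+    def step(self, history):
+        return self._problem.randomize(self.npop)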
diff --git a/bumps/mystic/stop.py b/bumps/mystic/stop.py
new file mode 100644
index 0000000..8f48b0f
--- /dev/null
+++ b/bumps/mystic/stop.py
@@ -0,0 +1,874 @@
+# This program is in the public domain
+# Author: Paul Kienzle
+"""
+Termination conditions for solvers.
+
+In order to decide when to stop fitting, the user needs to specify
+stop conditions based on the optimizer history.  The test can use
+the most recent value, the last n values or the entire computation.
+See :mod:`monitor` for details.
+
+Conditions can be composed, creating complicated criteria for
+termination.  We may want to stop when we have found a minimum
+and know its location.  We may be content just knowing the
+minimum, and not worrying about the uncertainty in its location.
+For some searches we want to be sure that we are examining a
+broad range of the search space.  Here are some examples::
+
+    import mystic.termination as stop
+
+    # Stop when we know the location of the minimum; fail if run too long
+    success = stop.Dx(0.001) & stop.Df(5)
+    failure = stop.Steps(100)
+
+
+    # Stop when we know the value of the minimum; fail if run too long
+    success = stop.Dx(0.001) | stop.Df(5)
+    failure = stop.Steps(100)
+
+    # GA may want to run for a while, but only with a diverse population
+    success = stop.Df(0.001,n=5) & stop.Steps(15)
+    failure = stop.Steps(100) | stop.Dx(0.001)
+
+When testing a conditional expression, a list of all conditions which
+match is returned, or [] if no conditions match.
+
+
+Predefined Conditions
+---------------------
+
+Dx: difference in x for step size test
+
+    || (x_k - x_{k-1})/scale || < tolerance
+
+Df: difference in f for improvement rate test
+
+    | (f_k - f_{k-1})/scale | < tolerance
+
+Rx: range in x for population distribution test
+
+    max || (y - <y>)/ scale || < tolerance
+    for y in population
+
+Rf: range in f for value distribution test
+
+    (max f(y) - min f(y))/scale < tolerance
+    for y in population
+
+Cx: constant x for target point
+
+    ||(x_k - Z)/scale|| < tolerance
+
+Cf: constant f for target value
+
+    |f_k - A|/scale < tolerance
+
+Steps: specific number of iterations
+
+    k >= steps
+
+Calls: specific number of function calls
+
+    n >= calls
+
+Time: wall clock time
+
+    t_k >= time
+
+CPU: CPU time
+
+    t(CPU)_k >= time
+
+Worse: fit is diverging
+
+    (f_k - f_{k-1})/scale < -tolerance
+
+Grad: fit is flat
+
+    || del f_k || < tolerance
+
+Feasible: value is in the feasible region ** Not implemented **
+
+    f_k satisfies soft constraints
+
+Invalid: values are not well defined ** Not implemented **
+
+    isinf(y) or isinf(f(y)) or isnan(y) or isnan(f(y))
+    for y in population
+
+
+Distances and scaling
+=====================
+
+The following distance functions are predefined:
+
+    norm_p(p): (sum |x_i|^p )^(1/p)  (generalized p-norm)
+    norm_1:    sum |x_i|             (Manhattan distance)
+    norm_2:    sqrt sum |x_i|^2      (Euclidean distance)
+    norm_inf:  max |x_i|             (Chebyshev distance)
+    norm_min:  min |x_i|             (not a true norm)
+
+
+The predefined scale factors in essence test for
+percentage changes rather than absolute changes.
+"""
+
+import math
+
+import numpy as np
+from numpy import inf, isinf
+
+from .condition import Condition
+
+# ==== Norms ====
+def norm_1(x):
+    """1-norm: sum(|x_i|)"""
+    return np.sum(abs(x))
+def norm_2(x):
+    """2-norm: sqrt(sum(|x_i|^2))"""
+    return math.sqrt(np.sum(abs(x)**2))
+def norm_inf(x):
+    """inf-norm: max(|x_i|)"""
+    return max(abs(x))
+def norm_min(x):
+    """min-norm: min(|x_i|); this is not a true norm"""
+    return min(abs(x))
+def norm_p(p):
+    """p-norm: sum(|x_i|^p)^(1/p)"""
+    if isinf(p):
+        if p < 0:
+            return norm_min
+        else:
+            return norm_inf
+    elif p == 1:
+        return norm_1
+    elif p == 2:
+        return norm_2
+    else:
+        return lambda x: np.sum(abs(x)**p)**(1.0/p)
+
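+# Illustrative sketch (not part of the upstream API): what the norms above
+# compute for a small vector.
+def _norm_examples():
+    v = np.array([3.0, -4.0])
+    assert norm_1(v) == 7.0       # |3| + |-4|
+    assert norm_2(v) == 5.0       # sqrt(9 + 16)
+    assert norm_inf(v) == 4.0     # largest component magnitude
+    assert norm_min(v) == 3.0     # smallest component magnitude (not a true norm)
+    return norm_p(3)(v)           # (27 + 64)**(1/3.) ~= 4.498
+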
+# ==== Conditions ====
+class Dx(Condition):
+    """
+    Improvement in x.
+
+    This condition measures the improvement over the last n iterations
+    in terms of how much the value of x has changed::
+
+        norm((x[k]- x[k-n])/scale) < tol
+
+    where x[k] is the best parameter set for iteration step k.
+
+    The scale factor to use if scaled is upper bound - lower bound
+    if the parameter is bounded, or 1/2 (|x[k]| + |x[k-n]|)
+    if the parameter is unbounded, with protection against a scale
+    factor of zero.
+
+    Parameters::
+
+        *tol* (float = 0.001)
+            tolerance to test against
+        *norm* ( f(vector): float  =  norm_2)
+            norm to use to measure the size of x.  Predefined norms
+            include norm_1, norm_2, norm_inf, norm_min and norm_p(p)
+        *n* (int = 1)
+            number of steps back in history to compare
+        *scaled* (boolean = True)
+            whether to use raw or scaled differences in the norm
+
+    Returns::
+
+        *condition* (f(history) : boolean)
+            a callable returning true if the condition is met
+    """
+    def __init__(self, tol=0.001, norm=norm_2, n=1, scaled=True):
+        self.tol = tol
+        self.norm = norm
+        self.n = n
+        self.scaled = scaled
+    def _scaled_condition(self, history):
+        x1,x2 = history.point[0], history.point[self.n]
+        scale = history.upper_bound - history.lower_bound
+        scale[isinf(scale)] = ((abs(x1)+abs(x2))/2)[isinf(scale)]
+        scale[scale == 0] = 1
+        return self.norm((x2-x1)/scale)
+    def _raw_condition(self, history):
+        x1,x2 = history.point[0], history.point[self.n]
+        return self.norm(x2-x1)
+    def config_history(self, history):
+        """
+        Needs the previous n points from history.
+        """
+        if self.tol > 0:
+            history.requires(point=self.n+1)
+    def _subcall(self, history):
+        """
+        Returns True if the tolerance is met.
+        """
+        if self.tol == 0 or len(history.point) < self.n+1:
+            return False  # Cannot succeed until at least n generations
+        elif self.scaled:
+            return self._scaled_condition(history)
+        else:
+            return self._raw_condition(history)
+    def __call__(self, history):
+        return self._subcall(history) < self.tol
+    def completeness(self, history):
+        return self._subcall(history)/self.tol
+    def __str__(self):
+        if self.scaled:
+            return "||(x[k] - x[k-%d])/range|| < %g"%(self.n,self.tol)
+        else:
+            return "||x[k] - x[k-%d]|| < %g"%(self.n,self.tol)
+
+
+class Df(Condition):
+    """
+    Improvement in F(x)
+
+    This condition measures the improvement over the last n iterations
+    in terms of how much the value of the function has changed::
+
+        | (F[k] - F[k-1])/scale | < tol
+
+    where F[k] is the value for the best parameter set for iteration step k.
+
+    The scale factor to use is 1/2 (|F(k)| + |F(k-n)|) with protection
+    against zero.
+
+    Parameters::
+
+        *tol* (float = 0.001)
+            tolerance to test against
+        *n* (int = 1)
+            number of steps back in history to compare
+        *scaled* (boolean = True)
+            whether to use raw or scaled differences in the norm
+
+    Returns::
+
+        *condition* (f(history) : boolean)
+            a callable returning true if the condition is met
+    """
+    def __init__(self, tol=0.001, n=1, scaled=True):
+        self.tol = tol
+        self.n = n
+        self.scaled = scaled
+    def _scaled_condition(self, history):
+        f1,f2 = history.value[0], history.value[self.n]
+        scale = (abs(f1)+abs(f2))/2
+        if scale == 0: scale = 1
+        #print "Df",f1,f2,abs(float(f2-f1)/scale),self.tol
+        return abs(float(f2-f1)/scale)
+    def _raw_condition(self, history):
+        f1,f2 = history.value[0], history.value[self.n]
+        return abs(f2-f1)
+    def config_history(self, history):
+        """
+        Needs the previous n points from history.
+        """
+        if self.tol > 0:
+            history.requires(value=self.n+1)
+    def __call__(self, history):
+        """
+        Returns True if the tolerance is met.
+        """
+        if self.tol == 0 or len(history.value) < self.n+1:
+            return False  # Cannot succeed until at least n generations
+        elif self.scaled:
+            return self._scaled_condition(history) < self.tol
+        else:
+            return self._raw_condition(history) < self.tol
+    def __str__(self):
+        if self.scaled:
+            return "|F[k]-F[k-%d]| / (|F[k]|+|F[k-%d]|)/2 < %g" % (
+                self.n,self.n,self.tol)
+        else:
+            return "|F[k]-F[k-%d]| < %g" % (self.n,self.tol)
+
+class Worse(Condition):
+    """
+    Worsening of F(x)
+
+    This condition measures whether the fit is diverging.  You may want
+    to use this for non-greedy optimizers which can get worse over time::
+
+        (F[k] - F[k-1])/scale < -tol
+
+    where F[k] is the value for the best parameter set for iteration step k.
+
+    The scale factor to use is 1/2 (|F(k)| + |F(k-n)|) with protection
+    against zero.
+
+    Parameters::
+
+        *tol* (float = 0)
+            tolerance to test against
+        *n* (int = 1)
+            number of steps back in history to compare
+        *scaled* (boolean = True)
+            whether to use raw or scaled differences in the norm
+
+    Returns::
+
+        *condition* (f(history) : boolean)
+            a callable returning true if the condition is met
+    """
+    def __init__(self, tol=0, n=1, scaled=True):
+        self.tol = tol
+        self.n = n
+        self.scaled = scaled
+    def _scaled_condition(self, history):
+        f1,f2 = history.value[0], history.value[self.n]
+        scale = (abs(f1)+abs(f2))/2
+        if scale == 0: scale = 1
+        return float(f2-f1)/scale
+    def _raw_condition(self, history):
+        f1,f2 = history.value[0], history.value[self.n]
+        return f2-f1
+    def config_history(self, history):
+        """
+        Needs the previous n points from history.
+        """
+        history.requires(value=self.n+1)
+    def __call__(self, history):
+        """
+        Returns True if the tolerance is met.
+        """
+        if self.tol == 0 or len(history.value) < self.n+1:
+            return False  # Cannot succeed until at least n generations
+        elif self.scaled:
+            return self._scaled_condition(history) < -self.tol
+        else:
+            return self._raw_condition(history) < -self.tol
+    def __str__(self):
+        if self.scaled:
+            return "F[k]-F[k-%d] / (|F[k]|+|F[k-%d]|)/2 < -%g" % (
+                self.n,self.n,self.tol)
+        else:
+            return "F[k]-F[k-%d] < -%g" % (self.n,self.tol)
+
+
+class Grad(Condition):
+    """
+    Flat function value
+
+    This condition measures whether the fit surface is flat near the best
+    value.  This only works for fits which compute the gradient.
+
+        || del F[k]/scale || < tol
+
+    where F[k] is the value for the best parameter set for iteration step k.
+
+    The scale factor to use is |F(k)| with protection against zero.
+
+    Parameters::
+
+        *tol* (float = 0.001)
+            tolerance to test against
+        *norm* ( f(vector): float  =  norm_2)
+            norm to use to measure the size of x.  Predefined norms
+            include norm_1, norm_2, norm_inf, norm_min and norm_p(p)
+        *scaled* (boolean = True)
+            whether to use raw or scaled differences in the norm
+
+    Returns::
+
+        *condition* (f(history) : boolean)
+            a callable returning true if the condition is met
+    """
+    def __init__(self, tol=0.001, norm=norm_2, scaled=True):
+        self.tol = tol
+        self.norm = norm
+        self.scaled = scaled
+    def _scaled_condition(self, history):
+        df = history.gradient[0]
+        f = history.value[0]
+        scale = abs(f)
+        if scale == 0: scale = 1
+        return self.norm(df/float(scale))
+    def _raw_condition(self, history):
+        df = history.gradient[0]
+        return self.norm(df)
+    def config_history(self, history):
+        """
+        Needs the previous n points from history.
+        """
+        if self.tol > 0:
+            history.requires(gradient=1, value=1)
+    def __call__(self, history):
+        """
+        Returns True if the tolerance is met.
+        """
+        if self.tol == 0 or len(history.gradient) < 1:
+            return False  # Cannot succeed until at least n generations
+        elif self.scaled:
+            return self._scaled_condition(history) < self.tol
+        else:
+            return self._raw_condition(history) < self.tol
+    def __str__(self):
+        if self.scaled:
+            return "|| del F[k]/F[k] || < %g" % (self.tol)
+        else:
+            return "|| del F[k] || < %g" % (self.tol)
+
+
+class r_best:
+    """
+    Measure of population radius based on distance from the best.
+
+        max ||(y - x[k])/scale|| for y in population
+
+    scipy.optimize.fmin uses r_best(norm_inf) as its measure of radius.
+    """
+    def __init__(self, norm):
+        self.norm = norm
+    def __call__(self, population, best, scale):
+        P = np.asarray(population)
+        r = max(self.norm((p - best)/scale) for p in P)
+        return r
+
+class r_centroid:
+    """
+    Measure of population radius based on distance from the centroid.
+
+        max ||(y - <y>)/scale|| for y in population
+    """
+    def __init__(self, norm):
+        self.norm = norm
+    def __call__(self, population, best, scale):
+        P = np.asarray(population)
+        c_i = np.mean(P,axis=0)
+        r = max(self.norm((p - c_i)/scale) for p in P)
+        return r
+
+def r_boundingbox(population, best, scale):
+    """
+    Measure of population radius based on the volume of the bounding box.
+
+        (product (max(y_i) - min(y_i))/scale)**(1/k)  for i in dimensions-k
+    """
+    P = np.asarray(population)
+    lo = np.min(P, axis=0)
+    hi = np.max(P, axis=0)
+    r = np.prod((hi-lo)/scale)**(1.0/len(hi))
+    return r
+
+class r_hull:
+    """
+    Measure of population radius based on maximum diameter in convex hull.
+
+        1/2 max || (y1 - y2)/scale || for y1,y2 in population
+    """
+    def __init__(self, norm):
+        self.norm = norm
+    def __call__(self, population, best, scale):
+        r = 0
+        for i,y1 in enumerate(population):
+            for y2 in population[i+1:]:
+                d = self.norm((y2 - y1)/scale)
+                if d > r: r = d
+        return r/2
+
+class Rx(Condition):
+    """
+    Domain size
+
+    This condition measures the size of the population domain.  Some
+    algorithms are done when the domain size shrinks while others have
+    failed if the domain size shrinks.
+
+    There are a number of ways of measuring the domain size::
+
+        r_best(norm) : radius from best point
+
+            max ||(y - x[k])/scale|| for y in population
+
+        r_centroid(norm) : radius from centroid
+
+            max ||(y - <y>)/scale|| for y in population
+
+        r_boundingbox : radius from bounding box
+
+            (product (max(y_i) - min(y_i))/scale)**(1/k)  for i in dimensions-k
+
+        r_hull(norm) : radius from convex hull
+
+            1/2 max || (y1 - y2)/scale || for y1,y2 in population
+
+    scale is determined from the fit bounds (max-min) or the
+    values sum(|y_i|)/n, with protection against zero values.
+
+    Parameters::
+
+        *tol* (float = 0)
+            tolerance to test against; 0 disables the test
+        *radius* (function(history,best,scale): float = r_centroid(norm_2))
+            measure of domain size
+        *scaled* (boolean = False)
+            whether to use raw or scaled differences in the norm
+
+    Returns::
+
+        *condition* (f(history) : boolean)
+            a callable returning true if the condition is met
+    """
+    def __init__(self, tol=0, radius=r_centroid(norm_2), scaled=False):
+        self.tol = tol
+        self.radius = radius
+        self.scaled = scaled
+    def _scaled_condition(self, history):
+        P = np.asarray(history.population_points[0])
+        scale = history.upper_bound - history.lower_bound
+        idx = isinf(scale)
+        if any(idx):
+            typical = np.sum(abs(P), axis=0)/P.shape[0]
+            scale[idx] = typical[idx]
+        scale[scale == 0] = 1
+        r = self.radius(P, history.point[0], scale)
+        #print "Rx=%g, scale=%g"%(r,scale)
+        return r
+    def _raw_condition(self, history):
+        P = np.asarray(history.population_points[0])
+        r = self.radius(P, history.point[0], scale=1.)
+        #print "Rx=%g"%r
+        return r
+    def config_history(self, history):
+        """
+        Needs the previous n points from history.
+        """
+        if self.tol > 0:
+            history.requires(population_points=1,point=1)
+    def __call__(self, history):
+        """
+        Returns True if the tolerance is met.
+        """
+        if self.tol == 0 or len(history.population_points) < 1:
+            return False
+        elif self.scaled:
+            return self._scaled_condition(history) < self.tol
+        else:
+            return self._raw_condition(history) < self.tol
+    def __str__(self):
+        if self.scaled:
+            return "radius(population/scale) < %g" % (self.tol)
+        else:
+            return "radius(population) < %g" % (self.tol)
+
+class Rf(Condition):
+    """
+    Range size
+
+    This condition measures the size of the population range::
+
+        (max f(y) - min f(y))/scale < tol
+
+    for y in the current population
+    scale is mean(|f(y)|)
+
+    Parameters::
+
+        *tol* (float = 0.001)
+            tolerance to test against
+        *scaled* (boolean = True)
+            whether to use raw or scaled differences in the norm
+
+    Returns::
+
+        *condition* (f(history) : boolean)
+            a callable returning true if the condition is met
+    """
+    def __init__(self, tol=0, scaled=True):
+        self.tol = tol
+        self.scaled = scaled
+    def _scaled_condition(self, history):
+        Pf = np.asarray(history.population_values[0])
+        scale = np.mean(abs(Pf))
+        if scale == 0: scale = 1
+        r = float(np.max(Pf) - np.min(Pf))/scale
+        #print "Rf = %g, scale=%g"%(r,scale)
+        return r
+    def _raw_condition(self, history):
+        P = np.asarray(history.population_values[0])
+        r = np.max(P) - np.min(P)
+        #print "Rf = %g"%r
+        return r
+    def config_history(self, history):
+        """
+        Needs the previous n points from history.
+        """
+        if self.tol > 0:
+            history.requires(population_values=1)
+    def __call__(self, history):
+        """
+        Returns True if the tolerance is met.
+        """
+        if self.tol == 0 or len(history.population_values) < 1:
+            return False
+        elif self.scaled:
+            return self._scaled_condition(history) < self.tol
+        else:
+            return self._raw_condition(history) < self.tol
+    def __str__(self):
+        if self.scaled:
+            return "(max(F(p)) - min(F(p))/mean(|F(p)|) < %g" % (self.tol)
+        else:
+            return "max(F(p)) - min(F(p)) < %g" % (self.tol)
+
+
+class Cx(Condition):
+    """
+    Target point
+
+    This condition measures the distance from the best point
+    to some target point::
+
+       ||(x_k - Z)/scale|| < tol
+
+    scale is the fit range if given, else |Z_i|, or 1 if Z_i = 0
+
+    Parameters::
+
+        *tol* (float = 0.001)
+            tolerance to test against
+        *point* (array = 0)
+            target point
+        *norm* ( f(vector): float  =  norm_2)
+            norm to use to measure the size of x.  Predefined norms
+            include norm_1, norm_2, norm_inf, norm_min and norm_p(p)
+        *scaled* (boolean = True)
+            whether to use raw or scaled differences in the norm
+
+    Returns::
+
+        *condition* (f(history) : boolean)
+            a callable returning true if the condition is met
+    """
+    def __init__(self, tol=0.001, point=0, norm=norm_2, scaled=True):
+        self.tol = tol
+        self.point = point
+        self.norm = norm
+        self.scaled = scaled
+    def _scaled_condition(self, history):
+        x = history.point[0]
+        scale = history.upper_bound - history.lower_bound
+        scale[isinf(scale)] = abs(self.point)[isinf(scale)]
+        scale[scale == 0] = 1
+        return self.norm((x - self.point)/scale)
+    def _raw_condition(self, history):
+        x = history.point[0]
+        return self.norm(x - self.point)
+    def config_history(self, history):
+        """
+        Needs the previous point from history.
+        """
+        if self.tol > 0:
+            history.requires(point=1)
+    def __call__(self, history):
+        """
+        Returns True if the tolerance is met.
+        """
+        if self.tol == 0 or len(history.point) < 1:
+            return False
+        elif self.scaled:
+            return self._scaled_condition(history) < self.tol
+        else:
+            return self._raw_condition(history) < self.tol
+    def __str__(self):
+        if self.scaled:
+            return "||(x[k] - Z)/range|| < %g"%(self.tol)
+        else:
+            return "||x[k] - Z|| < %g"%(self.tol)
+
+
+class Cf(Condition):
+    """
+    Target value
+
+    This condition measures the distance from the best value
+    to some target value::
+
+       |(f_k - A)/scale| < tol
+
+    scale is |A| or 1 if A=0
+
+    Parameters::
+
+        *tol* (float = 0.001)
+            tolerance to test against
+        *value* (float = 0)
+            target value
+        *scaled* (boolean = True)
+            whether to use raw or scaled differences in the norm
+
+    Returns::
+
+        *condition* (f(history) : boolean)
+            a callable returning true if the condition is met
+    """
+    def __init__(self, tol=0.001, value=0, scaled=True):
+        self.tol = tol
+        self.value = value
+        if value == 0: scaled = False
+        self.scaled = scaled
+    def _scaled_condition(self, history):
+        value = history.value[0]
+        return abs(float(value - self.value)/self.value)
+    def _raw_condition(self, history):
+        value = history.value[0]
+        return abs(value - self.value)
+    def config_history(self, history):
+        """
+        Needs the previous point from history.
+        """
+        if self.tol > 0:
+            history.requires(value=1)
+    def __call__(self, history):
+        """
+        Returns True if the tolerance is met.
+        """
+        if self.tol == 0 or len(history.value) < 1:
+            return False
+        elif self.scaled:
+            return self._scaled_condition(history) < self.tol
+        else:
+            return self._raw_condition(history) < self.tol
+    def __str__(self):
+        if self.scaled:
+            return "|(F[k] - A)/A| < %g"%(self.tol)
+        else:
+            return "|F[k] - A| < %g"%(self.tol)
+
+
+
+class Steps(Condition):
+    """
+    Specific number of iterations
+
+    This condition tests the number of iterations of a fit::
+
+        k >= steps
+
+    Parameters::
+
+        *steps* int (inf)
+            total number of steps
+
+    Returns::
+
+        *condition* (f(history) : boolean)
+            a callable returning true if the condition is met
+    """
+    def __init__(self, steps=np.inf):
+        self.steps = steps
+    def __call__(self, history):
+        if len(history.step) < 1: return False
+        return history.step[0] >= self.steps
+    def config_history(self, history):
+        history.requires(step=1)
+    def __str__(self):
+        return "steps >= %d"%self.steps
+
+class Calls(Condition):
+    """
+    Specific number of function calls
+
+    This condition tests the number of function evaluations::
+
+        n_k >= calls
+
+    Parameters::
+
+        *calls* int (inf)
+            total number of function calls
+
+    Returns::
+
+        *condition* (f(history) : boolean)
+            a callable returning true if the condition is met
+    """
+    def __init__(self, calls=np.inf):
+        self.calls = calls
+    def __call__(self, history):
+        if len(history.calls) < 1: return False
+        return history.calls[0] >= self.calls
+    def config_history(self, history):
+        history.requires(calls=1)
+    def __str__(self):
+        return "calls >= %d"%self.calls
+
+class Time(Condition):
+    """
+    Wall clock time.
+
+    This condition tests wall clock time::
+
+        t_k >= time
+
+    Parameters::
+
+        *time* float (inf)
+            Time since start of job in seconds
+
+    Returns::
+
+        *condition* (f(history) : boolean)
+            a callable returning true if the condition is met
+    """
+    def __init__(self, time=inf):
+        self.time = time
+    def __call__(self, history):
+        if len(history.time) < 1: return False
+        return history.time[0] >= self.time
+    def config_history(self, history):
+        history.requires(time=1)
+    def __str__(self):
+        return "time >= %g"%self.time
+
+
+class CPU(Condition):
+    """
+    CPU time.
+
+    This condition tests CPU time::
+
+        t(CPU)_k >= time
+
+    Parameters::
+
+        *time* float (inf)
+            time since start of job in seconds
+
+    Returns::
+
+        *condition* (f(history) : boolean)
+            a callable returning true if the condition is met
+    """
+    def __init__(self, time=np.inf):
+        self.time = time
+    def __call__(self, history):
+        if len(history.cpu_time) < 1: return False
+        return history.cpu_time[0] >= self.time
+    def config_history(self, history):
+        history.requires(cpu_time=1)
+    def __str__(self):
+        return "cpu_time >= %g"%self.time
+
+"""
+class Feasible: value can be used ** Not implemented **
+
+    f_k satisfies soft constraints
+
+class Invalid: values are not well defined ** Not implemented **
+
+    isinf(y) or isinf(f(y)) or isnan(y) or isnan(f(y))
+
+    for y in population
+"""
+
+
+def parse_condition(cond):
+    import math
+    from . import stop
+    # Merge the stop and math namespaces; dict.update() returns None, so the
+    # merged dict must be built before calling eval.
+    namespace = dict(math.__dict__)
+    namespace.update(stop.__dict__)
+    return eval(cond, namespace)
+
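+# Illustrative sketch (not part of the upstream API): conditions compose with
+# & and | exactly as in the module docstring, and str() on a primitive shows
+# the test it performs.
+def _composition_example():
+    success = Df(tol=1e-5, n=10) & Cf(tol=1e-9, scaled=False)
+    failure = Calls(10000) | Steps(100)
+    parsed = parse_condition("Calls(10000) | Steps(100)")
+    return success, failure, parsed, str(Steps(100))   # "steps >= 100"
+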
diff --git a/bumps/mystic/util.py b/bumps/mystic/util.py
new file mode 100644
index 0000000..30516a8
--- /dev/null
+++ b/bumps/mystic/util.py
@@ -0,0 +1,81 @@
+# This code is in the public domain.
+# choose_without_replacement is copyright (2007) Anne Archibald
+"""
+Utility functions
+
+choose_without_replacement(m,n,repeats=None)
+
+    sample from a set without replacement
+
+runlength(v)
+
+    return the values and lengths of runs in a vector
+
+countunique(A)
+
+    return the element and frequency of each unique value in an array
+"""
+
+import numpy as np
+
+# Author: Anne Archibald
+# Re: [Numpy-discussion] Generating random samples without repeats
+# Fri, 19 Sep 2008 12:19:22 -0700
+def choose_without_replacement(m,n,repeats=None):
+    """
+    Choose n nonnegative integers less than m without replacement
+
+    Returns an array of shape n, or (n,repeats).
+    """
+    if repeats is None:
+        r = 1
+    else:
+        r = repeats
+    if n>m:
+        raise ValueError("Cannot find %d nonnegative integers less than %d" %(n,m))
+    elif n>m/2:
+        res = np.sort(np.random.rand(m,r).argsort(axis=0)[:n,:],axis=0)
+    else:
+        res = np.random.random_integers(0,m-1,size=(n,r))
+        while True:
+            res = np.sort(res,axis=0)
+            w = np.nonzero(np.diff(res,axis=0)==0)
+            nr = len(w[0])
+            if nr==0:
+                break
+            res[w] = np.random.random_integers(0,m-1,size=nr)
+
+    if repeats is None:
+        return res[:,0]
+    else:
+        return res
+
+
+def runlength(v):
+    """
+    Return the run lengths for repeated values in a vector v.
+
+    See also countunique.
+    """
+    if len(v) == 0: return [],[]
+    diffs = np.diff(v)
+    steps = np.where(diffs!=0)[0]+1
+    ends = np.hstack([[0],steps,[len(v)]])
+    vals = v[ends[:-1]]
+    lens = np.diff(ends)
+    return vals, lens
+
+def countunique(A):
+    """
+    Returns the unique elements in an array and their frequency.
+    """
+    return runlength(np.sort(A.flatten()))
+
+def zscore(A, axis=None):
+    """
+    Convert an array of data to zscores.
+
+    Use *axis* to limit the calculation of mean and standard deviation to
+    a particular axis.
+    """
+    return (A - np.mean(A,axis=axis)) / np.std(A,axis=axis,ddof=1)
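+
+
+if __name__ == "__main__":
+    # Illustrative sketch (not part of the upstream API) showing the helpers
+    # above on small inputs.
+    print(choose_without_replacement(10, 3))          # e.g. [2 5 9]
+    print(runlength(np.array([1, 1, 2, 2, 2, 3])))    # (array([1, 2, 3]), array([2, 3, 1]))
+    print(countunique(np.array([[3, 1], [1, 3]])))    # (array([1, 3]), array([2, 2]))
+    print(zscore(np.array([1.0, 2.0, 3.0])))          # [-1.  0.  1.]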
diff --git a/bumps/names.py b/bumps/names.py
new file mode 100644
index 0000000..d4fe4cb
--- /dev/null
+++ b/bumps/names.py
@@ -0,0 +1,46 @@
+"""
+Exported names.
+
+In model definition scripts, rather than importing symbols one by one,
+you can simply perform::
+
+    from bumps.names import *
+
+This is bad style for libraries and applications, but convenient for
+model scripts.
+
+The following symbols are defined:
+
+- *np* for the `numpy <http://docs.scipy.org/doc/numpy/reference>`_ array package
+- *sys* for the python `sys <https://docs.python.org/2/library/sys.html>`_ module
+- *inf* for infinity
+- :mod:`pmath <bumps.pmath>` for parameter expressions like *2\*pmath.sin(M.theta)*
+- :class:`Parameter <bumps.parameter.Parameter>` for defining parameters
+- :class:`FreeVariables <bumps.parameter.FreeVariables>` for defining shared parameters
+- :class:`Distribution <bumps.bounds.Distribution>` for indicating prior
+  probability for a model parameter
+- :class:`Curve <bumps.curve.Curve>` for defining models from functions
+- :class:`PoissonCurve <bumps.curve.PoissonCurve>` for modelling data with Poisson uncertainty
+- :class:`PDF <bumps.pdfwrapper.PDF>` for fitting a probability distribution directly
+- :func:`FitProblem <bumps.fitproblem.FitProblem>` for defining the fit (see
+  :class:`BaseFitProblem <bumps.fitproblem.BaseFitProblem>` or
+  :class:`MultiFitProblem <bumps.fitproblem.MultiFitProblem>` for details,
+  depending on whether you are fitting a single model or multiple models
+  simultaneously).
+"""
+
+#__all__ = [ 'sys', 'np', 'inf', 'pmath',
+#    'Parameter', 'FreeVariables', 'Distribution', 'PDF', 'Curve', 'PoissonCurve',
+#        'FitProblem', 'MultiFitProblem' ]
+
+import sys
+import numpy as np
+from numpy import inf
+
+from . import pmath
+from .parameter import Parameter, FreeVariables
+from .bounds import Distribution
+from .pdfwrapper import PDF, VectorPDF, DirectProblem
+from .curve import Curve, PoissonCurve
+from .fitproblem import FitProblem, MultiFitProblem
+
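+# A minimal model-script sketch using these names (illustrative only; the
+# exact Curve and FitProblem signatures live in bumps.curve and
+# bumps.fitproblem and are assumed here rather than shown in this module):
+#
+#     from bumps.names import *
+#
+#     def line(x, m, b):
+#         return m*x + b
+#
+#     x = np.linspace(0, 1, 10)
+#     y = 2*x + 1
+#
+#     M = Curve(line, x, y, dy=0.1, m=1, b=0)
+#     M.m.range(-5, 5)
+#     M.b.range(-5, 5)
+#
+#     problem = FitProblem(M)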
diff --git a/bumps/numdifftools/LICENSE.txt b/bumps/numdifftools/LICENSE.txt
new file mode 100644
index 0000000..6d7b472
--- /dev/null
+++ b/bumps/numdifftools/LICENSE.txt
@@ -0,0 +1,27 @@
+Copyright (c) 2014, Per A. Brodtkorb, John D'Errico
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of the {organization} nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/bumps/numdifftools/README.txt b/bumps/numdifftools/README.txt
new file mode 100644
index 0000000..a4ce041
--- /dev/null
+++ b/bumps/numdifftools/README.txt
@@ -0,0 +1,3 @@
+This is a stripped-down version of the numdifftools package
+by Per A. Brodtkorb and John D'Errico which includes only
+the numerical derivatives package.
diff --git a/bumps/numdifftools/__init__.py b/bumps/numdifftools/__init__.py
new file mode 100644
index 0000000..6628b40
--- /dev/null
+++ b/bumps/numdifftools/__init__.py
@@ -0,0 +1,5 @@
+# 2016-04-19 PAK minimal imports
+from .info import __doc__
+from .core import *
+__version__ = '0.9.14'
+
diff --git a/bumps/numdifftools/core.py b/bumps/numdifftools/core.py
new file mode 100644
index 0000000..482a048
--- /dev/null
+++ b/bumps/numdifftools/core.py
@@ -0,0 +1,1365 @@
+#!/usr/bin/env python
+"""numerical differentiation functions:
+
+Derivative, Gradient, Jacobian, and Hessian
+
+Author:      Per A. Brodtkorb
+
+Created:     01.08.2008
+Copyright:   (c) pab 2008
+Licence:     New BSD
+
+Based on matlab functions derivest.m gradest.m hessdiag.m, hessian.m
+and jacobianest.m version 1.0 released 12/27/2006 by  John D'Errico
+(e-mail: woodchips at rochester.rr.com)
+
+Also based on the python functions approx_fprime, approx_fprime_cs,
+approx_hess_cs, approx_hess1, approx_hess2 and approx_hess3 in the
+statsmodels.tools.numdiff module released in 2014 written by Josef Perktold.
+
+"""
+
+from __future__ import division, print_function
+import numpy as np
+from collections import namedtuple
+from numpy import linalg
+from scipy import misc
+import warnings
+
+# 2016-04-19 PAK use relative imports
+from .multicomplex import bicomplex
+from .extrapolation import Richardson, dea3, convolve
+
+__all__ = ('dea3', 'Derivative', 'Jacobian', 'Gradient', 'Hessian', 'Hessdiag',
+           'MinStepGenerator', 'MaxStepGenerator', 'Richardson')
+# NOTE: we only do double precision internally so far
+_TINY = np.finfo(float).tiny
+_EPS = np.finfo(float).eps
+EPS = np.MachAr().eps
+_SQRT_J = (1j + 1.0) / np.sqrt(2.0)  # = 1j**0.5
+
+_CENTRAL_WEIGHTS_AND_POINTS = {
+    (1, 3): (np.array([-1, 0, 1]) / 2.0, np.arange(-1, 2)),
+    (1, 5): (np.array([1, -8, 0, 8, -1]) / 12.0, np.arange(-2, 3)),
+    (1, 7): (np.array([-1, 9, -45, 0, 45, -9, 1]) / 60.0, np.arange(-3, 4)),
+    (1, 9): (np.array([3, -32, 168, -672, 0, 672, -168, 32, -3]) / 840.0,
+             np.arange(-4, 5)),
+    (2, 3): (np.array([1, -2.0, 1]), np.arange(-1, 2)),
+    (2, 5): (np.array([-1, 16, -30, 16, -1]) / 12.0, np.arange(-2, 3)),
+    (2, 7): (np.array([2, -27, 270, -490, 270, -27, 2]) / 180.0,
+             np.arange(-3, 4)),
+    (2, 9): (np.array([-9, 128, -1008, 8064, -14350,
+                      8064, -1008, 128, -9]) / 5040.0,
+             np.arange(-4, 5))}
+
+
+def fornberg_weights_all(x, x0, M=1):
+    """
+    Return finite difference weights_and_points for derivatives of all orders.
+
+    Parameters
+    ----------
+    x : vector, length n
+        x-coordinates for grid points
+    x0 : scalar
+        location where approximations are to be accurate
+    M : scalar integer
+        highest derivative that we want to find weights_and_points for
+
+    Returns
+    -------
+    C :  array, shape n x m+1
+        contains coefficients for the j'th derivative in column j (0 <= j <= m)
+
+    See also:
+    ---------
+    fornberg_weights
+
+    Reference
+    ---------
+    B. Fornberg (1998)
+    "Calculation of weights_and_points in finite difference formulas",
+    SIAM Review 40, pp. 685-691.
+
+    http://www.scholarpedia.org/article/Finite_difference_method
+    """
+    N = len(x)
+    if M >= N:
+        raise ValueError('length(x) must be larger than m')
+
+    c1, c4 = 1, x[0] - x0
+    C = np.zeros((N, M + 1))
+    C[0, 0] = 1
+    for n in range(1, N):
+        m = np.arange(0, min(n, M) + 1)
+        c2, c5, c4 = 1, c4, x[n] - x0
+        for v in range(n):
+            c3 = x[n] - x[v]
+            c2, c6, c7 = c2 * c3, m * C[v, m-1], C[v, m]
+            C[v, m] = (c4 * c7 - c6) / c3
+        C[n, m] = c1 * (c6 - c5 * c7) / c2
+        c1 = c2
+    return C
+
+
+def fornberg_weights(x, x0, m=1):
+    """
+    Return weights for finite difference approximation of the m'th derivative
+    U^m(x0), evaluated at x0, based on n values of U at x[0], x[1],... x[n-1]:
+
+        U^m(x0) = sum weights[i] * U(x[i])
+
+    Parameters
+    ----------
+    x : vector
+        abscissas used for the evaluation for the derivative at x0.
+    x0 : scalar
+        location where approximations are to be accurate
+    m : integer
+        order of derivative. Note for m=0 this can be used to evaluate the
+        interpolating polynomial itself.
+
+    Notes
+    -----
+    The x values can be arbitrarily spaced but must be distinct and len(x) > m.
+
+    The Fornberg algorithm is much more stable numerically than regular
+    Vandermonde systems for large values of n.
+
+    See also
+    --------
+    fornberg_weights_all
+    """
+    return fornberg_weights_all(x, x0, m)[:, -1]
+
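+# A quick illustrative check (assuming numpy is imported as np, as at the top
+# of this module): the Fornberg weights for the symmetric three-point grid
+# reproduce the classic central-difference stencil [-1/2, 0, 1/2]:
+#
+#     >>> np.allclose(fornberg_weights(np.array([-1., 0., 1.]), 0, m=1),
+#     ...             [-0.5, 0.0, 0.5])
+#     True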
+
+def _make_exact(h):
+    """Make sure h is an exact representable number
+
+    This is important when calculating numerical derivatives and is
+    accomplished by adding 1 and then subtracting 1.
+    """
+    return (h + 1.0) - 1.0
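+
+# Illustrative note: _make_exact(0.1) == (1.0 + 0.1) - 1.0, which is roughly
+# 0.10000000000000009 -- the nearest step that survives the round trip through
+# an add/subtract with 1, so the step later used in the difference formulas
+# does not pick up an extra representation error.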
+
+
+def default_scale(method='forward', n=1, order=2):
+    # is_odd = (n % 2) == 1
+    high_order = int(n > 1 or order >= 4)
+    order2 = max(order // 2-1, 0)
+    n4 = n // 4
+    return (dict(multicomplex=1.35, complex=1.35).get(method, 2.5) +
+            int((n - 1)) * dict(multicomplex=0, complex=0.0).get(method, 1.3) +
+            order2 * dict(central=3, forward=2, backward=2).get(method, 0) +
+            # is_odd * dict(complex=2.65*int(n//2)).get(method, 0) +
+            (n % 4 == 1) * high_order * dict(complex=3.65 + n4 * (5 + 1.5**n4)
+                                             ).get(method, 0) +
+            (n % 4 == 3) * dict(complex=3.65*2 + n4 * (5 + 2.1**n4)
+                                ).get(method, 0) +
+            (n % 4 == 2) * dict(complex=3.65 + n4 * (5 + 1.7**n4)
+                                ).get(method, 0) +
+            (n % 4 == 0) * dict(complex=(n//4) * (10 + 1.5*int(n > 10))
+                                ).get(method, 0))
+
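+# For orientation, two values that follow directly from the expression above:
+# default_scale('central', n=1, order=2) == 2.5 and
+# default_scale('complex', n=1, order=2) == 1.35 (the higher-order and
+# complex-specific correction terms are all zero in those two cases).
+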
+
+def valarray(shape, value=np.NaN, typecode=None):
+    """Return an array of all value."""
+    if typecode is None:
+        typecode = bool
+    out = np.ones(shape, dtype=typecode) * value
+
+    if not isinstance(out, np.ndarray):
+        out = np.asarray(out)
+    return out
+
+
+def nom_step(x=None):
+    """Return nominal step."""
+    if x is None:
+        return 1.0
+    return np.maximum(np.log1p(np.abs(x)), 1.0)
+
+
+def _default_base_step(x, scale, epsilon=None):
+    if epsilon is None:
+        h = EPS ** (1. / scale) * nom_step(x)
+    else:
+        h = valarray(x.shape, value=epsilon)
+    return h
+
+
+class MinStepGenerator(object):
+
+    """
+    Generates a sequence of steps
+
+    where steps = base_step * step_ratio ** (np.arange(num_steps) + offset)
+
+    Parameters
+    ----------
+    base_step : float, array-like, optional
+        Defines the base step; if None, then base_step is set to
+        EPS**(1/scale)*max(log(1+|x|), 1), where x is supplied at runtime
+        through the __call__ method.
+    step_ratio : real scalar, optional, default 2
+        Ratio between sequential steps generated.  Note: Ratio > 1.
+        If None, then step_ratio is 2 for n=1, otherwise 1.6.
+    num_steps : scalar integer, optional, default n + order - 1 + num_extrap
+        defines number of steps generated. It should be larger than
+        n + order - 1
+    offset : real scalar, optional, default 0
+        offset to the base step
+    scale : real scalar, optional
+        scale used in base step. If not None it will override the default
+        computed with the default_scale function.
+    """
+
+    def __init__(self, base_step=None, step_ratio=2, num_steps=None,
+                 offset=0, scale=None, num_extrap=0, use_exact_steps=True,
+                 check_num_steps=True):
+        self.base_step = base_step
+        self.num_steps = num_steps
+        self.step_ratio = step_ratio
+        self.offset = offset
+        self.scale = scale
+        self.check_num_steps = check_num_steps
+        self.use_exact_steps = use_exact_steps
+        self.num_extrap = num_extrap
+
+    def __repr__(self):
+        class_name = self.__class__.__name__
+        kwds = ['%s=%s' % (name, str(getattr(self, name)))
+                for name in self.__dict__.keys()]
+        return """%s(%s)""" % (class_name, ','.join(kwds))
+
+    def _default_scale(self, method, n, order):
+        scale = self.scale
+        if scale is None:
+            scale = default_scale(method, n, order)
+        return scale
+
+    def _default_base_step(self, xi, method, n, order=2):
+        scale = self._default_scale(method, n, order)
+        base_step = _default_base_step(xi, scale, self.base_step)
+        if self.use_exact_steps:
+            base_step = _make_exact(base_step)
+        return base_step
+
+    def _min_num_steps(self, method, n, order):
+        num_steps = n + order - 1
+
+        if method in ['central', 'central2', 'complex', 'multicomplex']:
+            step = 2
+            if method == 'complex':
+                step = 4 if n > 2 or order >= 4 else 2
+            num_steps = (n + order-1) // step
+        return max(int(num_steps), 1)
+
+    def _default_num_steps(self, method, n, order):
+        min_num_steps = self._min_num_steps(method, n, order)
+        if self.num_steps is not None:
+            num_steps = int(self.num_steps)
+            if self.check_num_steps:
+                num_steps = max(num_steps, min_num_steps)
+            return num_steps
+        return min_num_steps + int(self.num_extrap)
+
+    def _default_step_ratio(self, n):
+        if self.step_ratio is None:
+            step_ratio = {1: 2.0}.get(n, 1.6)
+        else:
+            step_ratio = float(self.step_ratio)
+        if self.use_exact_steps:
+            step_ratio = _make_exact(step_ratio)
+        return step_ratio
+
+    def __call__(self, x, method='central', n=1, order=2):
+        xi = np.asarray(x)
+        base_step = self._default_base_step(xi, method, n, order)
+        step_ratio = self._default_step_ratio(n)
+
+        num_steps = self._default_num_steps(method, n, order)
+        offset = self.offset
+        for i in range(num_steps-1, -1, -1):
+            h = (base_step * step_ratio**(i + offset))
+            if (np.abs(h) > 0).all():
+                yield h
+
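+# A small usage sketch (illustrative; the exact floats are perturbed slightly
+# by _make_exact and depend on the machine epsilon):
+#
+#     gen = MinStepGenerator(base_step=0.1, step_ratio=2, num_steps=3)
+#     steps = list(gen(1.0, method='central', n=1, order=2))
+#     # -> approximately [0.4, 0.2, 0.1], largest step first
+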
+
+class MinMaxStepGenerator(object):
+    """
+    Generates a sequence of steps
+
+    where
+        steps = logspace(log10(step_min), log10(step_max), num_steps)
+
+    Parameters
+    ----------
+    step_min : float, array-like, optional
+       Defines the minimum step. Default value is:
+           EPS**(1/scale)*max(log(1+|x|), 1)
+       where x is supplied at runtime through the __call__ method.
+    step_max : real scalar, optional
+        maximum step generated. Default value is:
+            exp(log(step_min) * scale / (scale + 1.5))
+    num_steps : scalar integer, optional
+        defines number of steps generated.
+    scale : real scalar, optional
+        scale used in the base step. If None, a default scale is computed
+        from the method and derivative order when the generator is called.
+    """
+
+    def __init__(self, step_min=None, step_max=None, num_steps=10, scale=None,
+                 num_extrap=0):
+        self.step_min = step_min
+        self.num_steps = num_steps
+        self.step_max = step_max
+        self.scale = scale
+        self.num_extrap = num_extrap
+
+    def __repr__(self):
+        class_name = self.__class__.__name__
+        kwds = ['%s=%s' % (name, str(getattr(self, name)))
+                for name in self.__dict__.keys()]
+        return """%s(%s)""" % (class_name, ','.join(kwds))
+
+    def __call__(self, x, method='forward', n=1, order=2):
+        scale = self.scale
+        if scale is None:
+            # fall back to the module-level default scale heuristic
+            scale = default_scale(method, n, order)
+        xi = np.asarray(x)
+        step_min, step_max = self.step_min, self.step_max
+        delta = _default_base_step(xi, scale, step_min)
+        if step_min is None:
+            step_min = (10 * EPS)**(1. / scale)
+        if step_max is None:
+            step_max = np.exp(np.log(step_min) * scale / (scale + 1.5))
+        steps = np.logspace(0, np.log10(step_max) - np.log10(step_min),
+                            self.num_steps)[::-1]
+
+        for step in steps:
+            h = _make_exact(delta * step)
+            if (np.abs(h) > 0).all():
+                yield h
+
+
+class MaxStepGenerator(MinStepGenerator):
+    """
+    Generates a sequence of steps
+
+    where
+        steps = base_step * step_ratio ** (-np.arange(num_steps) + offset)
+        base_step = step_max * step_nom
+
+    Parameters
+    ----------
+    step_max : float, array-like, optional, default 2
+       Defines the maximum step.
+    step_ratio : real scalar, optional, default 2
+        Ratio between sequential steps generated.
+        Note: Ratio > 1
+    num_steps : scalar integer, optional, default  n + order - 1 + num_extrap
+        defines number of steps generated. It should be larger than
+        n + order - 1
+    step_nom :  default maximum(log1p(abs(x)), 1)
+        Nominal step.
+    offset : real scalar, optional, default 0
+        offset to the base step: step_max * step_nom
+    """
+
+    def __init__(self, step_max=2.0, step_ratio=2.0, num_steps=15,
+                 step_nom=None, offset=0, num_extrap=0,
+                 use_exact_steps=False, check_num_steps=True):
+        self.base_step = None
+        self.step_max = step_max
+        self.step_ratio = step_ratio
+        self.num_steps = num_steps
+        self.step_nom = step_nom
+        self.offset = offset
+        self.num_extrap = num_extrap
+        self.check_num_steps = check_num_steps
+        self.use_exact_steps = use_exact_steps
+
+    def _default_step_nom(self, x):
+        if self.step_nom is None:
+            return nom_step(x)
+        return valarray(x.shape, value=self.step_nom)
+
+    def _default_base_step(self, xi, method, n):
+        base_step = self.base_step
+        if base_step is None:
+            base_step = self.step_max * self._default_step_nom(xi)
+        if self.use_exact_steps:
+            base_step = _make_exact(base_step)
+        return base_step
+
+    def __call__(self, x, method='forward', n=1, order=None):
+        xi = np.asarray(x)
+
+        offset = self.offset
+
+        base_step = self._default_base_step(xi, method, n)
+        step_ratio = self._default_step_ratio(n)
+
+        num_steps = self._default_num_steps(method, n, order)
+        for i in range(num_steps):
+            h = base_step * step_ratio**(-i + offset)
+            if (np.abs(h) > 0).all():
+                yield h
+
+
+
+
+_cmn_doc = """
+    Calculate %(derivative)s with finite difference approximation
+
+    Parameters
+    ----------
+    f : function
+       function of one array f(x, `*args`, `**kwds`)
+    step : float, array-like or StepGenerator object, optional
+       Defines the spacing used in the approximation.
+       Default is MinStepGenerator(base_step=step, step_ratio=None)
+       if step is given or method is in ['complex', 'multicomplex'],
+       otherwise MaxStepGenerator(step_ratio=None, num_extrap=14).
+       The results are extrapolated if the StepGenerator generates more than
+       3 steps.
+    method : {'central', 'complex', 'multicomplex', 'forward', 'backward'}
+        defines the method used in the approximation%(extra_parameter)s
+    full_output : bool, optional
+        If `full_output` is False, only the derivative is returned.
+        If `full_output` is True, then (der, r) is returned, where `der` is
+        the derivative and `r` is a Results object.
+
+    Call Parameters
+    ---------------
+    x : array_like
+       value at which function derivative is evaluated
+    args : tuple
+        Arguments for function `f`.
+    kwds : dict
+        Keyword arguments for function `f`.
+    %(returns)s
+    Notes
+    -----
+    Complex methods are usually the most accurate, provided the function to
+    differentiate is analytic. The complex-step methods also require fewer
+    steps than the other methods and can work very close to the support of
+    a function.
+    The complex-step derivative has truncation error O(steps**2) for `n=1` and
+    O(steps**4) for `n` larger, so the truncation error can be eliminated by
+    choosing steps to be very small.
+    In particular, the first-order complex-step derivative avoids the problem
+    of round-off error with small steps because there is no subtraction.
+    However, this method fails if f(x) does not support complex numbers or
+    involves non-analytic functions such as abs, max or min.
+    Central difference methods are almost as accurate and have no restriction
+    on the type of function. For this reason the 'central' method is the
+    default, but sometimes one can only allow evaluation in the forward or
+    backward direction.
+
+    For all methods one should be careful not to decrease the step size too
+    much, because of round-off errors.
+    %(extra_note)s
+    Reference
+    ---------
+    Ridout, M.S. (2009) Statistical applications of the complex-step method
+        of numerical differentiation. The American Statistician, 63, 66-74
+
+    K.-L. Lai, J.L. Crassidis, Y. Cheng, J. Kim (2005), New complex step
+        derivative approximations with application to second-order
+        kalman filtering, AIAA Guidance, Navigation and Control Conference,
+        San Francisco, California, August 2005, AIAA-2005-5944.
+
+    Lyness, J. M., Moler, C. B. (1966). Vandermonde Systems and Numerical
+                     Differentiation. *Numerische Mathematik*.
+
+    Lyness, J. M., Moler, C. B. (1969). Generalized Romberg Methods for
+                     Integrals of Derivatives. *Numerische Mathematik*.
+    %(example)s
+    %(see_also)s
+    """
+
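+# The "no subtraction" property of the first-order complex-step method noted
+# in the docstring template above can be seen in a one-line sketch
+# (illustrative only, assuming an analytic f; here f = exp):
+#
+#     h = 1e-20
+#     np.imag(np.exp(1.0 + 1j * h)) / h   # ~ 2.718281828459045 == exp(1)
+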
+
+class _Derivative(object):
+
+    info = namedtuple('info', ['error_estimate', 'final_step', 'index'])
+
+    def __init__(self, f, step=None, method='central',  order=2, n=1,
+                 full_output=False):
+        self.f = f
+        self.n = n
+        self.order = order
+        self.method = method
+        self.full_output = full_output
+        self.richardson_terms = 2
+        self.step = self._make_generator(step)
+
+    def _make_generator(self, step):
+        if hasattr(step, '__call__'):
+            return step
+        if step is None and self.method not in ['complex', 'multicomplex']:
+            return MaxStepGenerator(step_ratio=None, num_extrap=14)
+        return MinStepGenerator(base_step=step, step_ratio=None, num_extrap=0)
+
+    def _get_arg_min(self, errors):
+        shape = errors.shape
+        try:
+            arg_mins = np.nanargmin(errors, axis=0)
+            min_errors = np.nanmin(errors, axis=0)
+        except ValueError as msg:
+            warnings.warn(str(msg))
+            ix = np.arange(shape[1])
+            return ix
+
+        for i, min_error in enumerate(min_errors):
+            idx = np.flatnonzero(errors[:, i] == min_error)
+            arg_mins[i] = idx[idx.size // 2]
+        ix = np.ravel_multi_index((arg_mins, np.arange(shape[1])), shape)
+        return ix
+
+    def _add_error_to_outliers(self, der, trim_fact=10):
+        try:
+            median = np.nanmedian(der, axis=0)
+            p75 = np.nanpercentile(der, 75, axis=0)
+            p25 = np.nanpercentile(der, 25, axis=0)
+            iqr = np.abs(p75-p25)
+        except ValueError as msg:
+            warnings.warn(str(msg))
+            return 0 * der
+
+        a_median = np.abs(median)
+        outliers = (((abs(der) < (a_median / trim_fact)) +
+                    (abs(der) > (a_median * trim_fact))) * (a_median > 1e-8) +
+                    ((der < p25-1.5*iqr) + (p75+1.5*iqr < der)))
+        errors = outliers * np.abs(der - median)
+        return errors
+
+    def _get_best_estimate(self, der, errors, steps, shape):
+        errors += self._add_error_to_outliers(der)
+        ix = self._get_arg_min(errors)
+        final_step = steps.flat[ix].reshape(shape)
+        err = errors.flat[ix].reshape(shape)
+        return der.flat[ix].reshape(shape), self.info(err, final_step, ix)
+
+    @property
+    def _method_order(self):
+        step = self._richardson_step()
+        # Make sure it is even and at least 2 or 4
+        order = max((self.order // step) * step, step)
+        return order
+
+    def _complex_high_order(self):
+        return self.method == 'complex' and (self.n > 1 or self.order >= 4)
+
+    def _richardson_step(self):
+        # complex_step = 4 if self.n % 2 == 0 else 2
+        complex_step = 4 if self._complex_high_order() else 2
+
+        return dict(central=2, central2=2, complex=complex_step,
+                    multicomplex=2).get(self.method, 1)
+
+    def _set_richardson_rule(self, step_ratio, num_terms=2):
+        order = self._method_order
+        step = self._richardson_step()
+        self._richardson_extrapolate = Richardson(step_ratio=step_ratio,
+                                                  step=step, order=order,
+                                                  num_terms=num_terms)
+
+    def _wynn_extrapolate(self, der, steps):
+        der, errors = dea3(der[0:-2], der[1:-1], der[2:], symmetric=False)
+        return der, errors, steps[2:]
+
+    def _extrapolate(self, results, steps, shape):
+        der, errors, steps = self._richardson_extrapolate(results, steps)
+        if len(der) > 2:
+            # der, errors, steps = self._richardson_extrapolate(results, steps)
+            der, errors, steps = self._wynn_extrapolate(der, steps)
+        der, info = self._get_best_estimate(der, errors, steps, shape)
+        return der, info
+
+    def _get_function_name(self):
+        name = '_%s' % self.method
+        even_derivative_order = self._is_even_derivative()
+        if even_derivative_order and self.method in ('central', 'complex'):
+            name = name + '_even'
+            if self.method in ('complex') and self._is_fourth_derivative():
+                name = name + '_higher'
+        else:
+            if self._complex_high_order() and self._is_odd_derivative():
+                name = name + '_odd'
+                if self._is_third_derivative():
+                    name = name + '_higher'
+            elif self.method == 'multicomplex' and self.n > 1:
+                if self.n == 2:
+                    name = name + '2'
+                else:
+                    raise ValueError('Multicomplex method only support first '
+                                     'and second order derivatives.')
+        return name
+
+    def _get_functions(self):
+        name = self._get_function_name()
+        return getattr(self, name), self.f
+
+    def _get_steps(self, xi):
+        method, n, order = self.method, self.n, self._method_order
+        return [step for step in self.step(xi, method, n, order)]
+
+    def _is_odd_derivative(self):
+        return self.n % 2 == 1
+
+    def _is_even_derivative(self):
+        return self.n % 2 == 0
+
+    def _is_third_derivative(self):
+        return self.n % 4 == 3
+
+    def _is_fourth_derivative(self):
+        return self.n % 4 == 0
+
+    def _eval_first_condition(self):
+        even_derivative = self._is_even_derivative()
+        return ((even_derivative and self.method in ('central', 'central2')) or
+                self.method in ['forward', 'backward'] or
+                self.method == 'complex' and self._is_fourth_derivative())
+
+    def _eval_first(self, f, x, *args, **kwds):
+        if self._eval_first_condition():
+            return f(x, *args, **kwds)
+        return 0.0
+
+    def _vstack(self, sequence, steps):
+        # sequence = np.atleast_2d(sequence)
+        original_shape = np.shape(sequence[0])
+        f_del = np.vstack([np.ravel(r) for r in sequence])
+        h = np.vstack([np.ravel(np.ones(original_shape) * step)
+                       for step in steps])
+        if f_del.size != h.size:
+            raise ValueError('fun did not return data of correct size ' +
+                             '(it must be vectorized)')
+        return f_del, h, original_shape
+
+    def _compute_step_ratio(self, steps):
+        if len(steps) < 2:
+            return 1
+        return np.unique(steps[0]/steps[1]).mean()
+
+    def __call__(self, x, *args, **kwds):
+        xi = np.asarray(x)
+        results = self._derivative(xi, args, kwds)
+        derivative, info = self._extrapolate(*results)
+        if self.full_output:
+            return derivative, info
+        return derivative
+
+
+class Derivative(_Derivative):
+    __doc__ = _cmn_doc % dict(
+        derivative='n-th derivative',
+        extra_parameter="""
+    order : int, optional
+        defines the order of the error term in the Taylor approximation used.
+        For 'central' and 'complex' methods, it must be an even number.
+    n : int, optional
+        Order of the derivative.""",
+        extra_note="""
+    Higher order approximation methods will generally be more accurate, but may
+    also suffer more from numerical problems. First order methods are usually
+    not recommended.
+    """, returns="""
+    Returns
+    -------
+    der : ndarray
+       array of derivatives
+    """, example="""
+    Example
+    -------
+    >>> import numpy as np
+    >>> from bumps import numdifftools as nd
+
+    # 1'st derivative of exp(x), at x == 1
+
+    >>> fd = nd.Derivative(np.exp)
+    >>> np.allclose(fd(1), 2.71828183)
+    True
+
+    >>> d2 = fd([1, 2])
+    >>> np.allclose(d2, [ 2.71828183,  7.3890561 ])
+    True
+
+    >>> def f(x):
+    ...     return x**3 + x**2
+
+    >>> df = nd.Derivative(f)
+    >>> np.allclose(df(1), 5)
+    True
+    >>> ddf = nd.Derivative(f, n=2)
+    >>> np.allclose(ddf(1), 8)
+    True
+    """, see_also="""
+    See also
+    --------
+    Gradient,
+    Hessian
+    """)
+    """
+    Find the n-th derivative of a function at a point.
+
+    Given a function, use a finite difference formula with the chosen step
+    spacing to compute the `n`-th derivative at the point(s) `x` supplied
+    when the instance is called.
+
+    Parameters
+    ----------
+    f : function
+        Input function.
+    step : float, array-like or StepGenerator, optional
+        Spacing used in the approximation.
+    method : {'central', 'complex', 'multicomplex', 'forward', 'backward'}
+        Method of estimation.  (Default 'central')
+    n : int, optional (Default 1)
+        Order of the derivative.
+    order : int, optional (Default 2)
+        defines the order of the basic method used.
+        For 'central' methods, it must be an even number, e.g. 2 or 4.
+
+    Notes
+    -----
+    Making the step size too small can result in round-off error.
+
+    Note on order: higher order methods will generally be more accurate,
+    but may also suffer more from numerical problems.  First order methods
+    are usually not recommended.
+
+    Complex methods are usually the most accurate, provided the function to
+    differentiate is analytic.  The complex-step methods also require fewer
+    steps than the other methods and can work very close to the support of
+    a function.  Central difference methods are almost as accurate and have
+    no restriction on the type of function, but sometimes one can only allow
+    evaluation in the forward or backward direction.
+
+
+    """
+    @property
+    def n(self):
+        return self._n
+
+    @n.setter
+    def n(self, n):
+        self._n = n
+
+        if n == 0:
+            self._derivative = self._derivative_zero_order
+        else:
+            self._derivative = self._derivative_nonzero_order
+
+    @staticmethod
+    def _fd_matrix(step_ratio, parity, nterms):
+        """
+        Return matrix for finite difference and complex step derivation.
+
+        Parameters
+        ----------
+        step_ratio : real scalar
+            ratio between steps in unequally spaced difference rule.
+        parity : scalar, integer
+            0 (one sided, all terms included but zeroth order)
+            1 (only odd terms included)
+            2 (only even terms included)
+            3 (only every 4'th order terms included starting from order 2)
+            4 (only every 4'th order terms included starting from order 4)
+            5 (only every 4'th order terms included starting from order 1)
+            6 (only every 4'th order terms included starting from order 3)
+        nterms : scalar, integer
+            number of terms
+        """
+        try:
+            step = [1, 2, 2, 4, 4, 4, 4][parity]
+        except Exception as e:
+            msg = '%s. Parity must be 0, 1, 2, 3, 4, 5 or 6! (%d)' % (str(e),
+                                                                      parity)
+            raise ValueError(msg)
+        inv_sr = 1.0 / step_ratio
+        offset = [1, 1, 2, 2, 4, 1, 3][parity]
+        c0 = [1.0, 1.0, 1.0, 2.0, 24.0, 1.0, 6.0][parity]
+        c = c0/misc.factorial(np.arange(offset, step * nterms + offset, step))
+        [i, j] = np.ogrid[0:nterms, 0:nterms]
+        return np.atleast_2d(c[j] * inv_sr ** (i * (step * j + offset)))
+
+    def _flip_fd_rule(self):
+        n = self.n
+        return ((self._is_even_derivative() and (self.method == 'backward')) or
+                (self.method == 'complex' and (n % 8 in [3, 4, 5, 6])))
+
+    def _get_finite_difference_rule(self, step_ratio):
+        """
+        Generate finite differencing rule in advance.
+
+        The rule is for a nominal unit step size, and will
+        be scaled later to reflect the local step size.
+
+        Member methods used
+        -------------------
+        _fd_matrix
+
+        Member variables used
+        ---------------------
+        n
+        order
+        method
+        """
+        method = self.method
+        if method in ('multicomplex', ) or self.n == 0:
+            return np.ones((1,))
+
+        order, method_order = self.n - 1, self._method_order
+        parity = 0
+        if (method.startswith('central') or
+                (method.startswith('complex') and self.n == 1 and
+                 method_order < 4)):
+            parity = (order % 2) + 1
+        elif self.method == 'complex':
+            if self._is_odd_derivative():
+                parity = 6 if self._is_third_derivative() else 5
+            else:
+                parity = 4 if self._is_fourth_derivative() else 3
+
+        step = self._richardson_step()
+        num_terms, ix = (order + method_order) // step, order // step
+        fd_mat = self._fd_matrix(step_ratio, parity, num_terms)
+        fd_rule = linalg.pinv(fd_mat)[ix]
+
+        if self._flip_fd_rule():
+            fd_rule *= -1
+        return fd_rule
+
+    def _apply_fd_rule(self, fd_rule, sequence, steps):
+        """
+        Return derivative estimates of f at x0 for a sequence of stepsizes h
+
+        Member variables used
+        ---------------------
+        n
+        """
+        f_del, h, original_shape = self._vstack(sequence, steps)
+
+        ne = h.shape[0]
+        if ne < fd_rule.size:
+            raise ValueError('num_steps (%d) must be larger than '
+                             'fd_rule.size (%d) = n + order - 1 = %d + %d - 1'
+                             ' for method %s' % (ne, fd_rule.size, self.n,
+                                                 self.order, self.method))
+        nr = (fd_rule.size-1)
+        f_diff = convolve(f_del, fd_rule[::-1], axis=0, origin=nr//2)
+
+        der_init = f_diff / (h ** self.n)
+        ne = max(ne - nr, 1)
+        return der_init[:ne], h[:ne], original_shape
+
+    def _derivative_zero_order(self, xi, args, kwds):
+        steps = [np.zeros_like(xi)]
+        results = [self.f(xi, *args, **kwds)]
+        self._set_richardson_rule(2, 0)
+        return self._vstack(results, steps)
+
+    def _derivative_nonzero_order(self, xi, args, kwds):
+        diff, f = self._get_functions()
+        steps = self._get_steps(xi)
+        fxi = self._eval_first(f, xi, *args, **kwds)
+        results = [diff(f, fxi, xi, h, *args, **kwds) for h in steps]
+        step_ratio = self._compute_step_ratio(steps)
+
+        self._set_richardson_rule(step_ratio, self.richardson_terms)
+        fd_rule = self._get_finite_difference_rule(step_ratio)
+        return self._apply_fd_rule(fd_rule, results, steps)
+
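+    # The difference operators below return the raw finite-difference
+    # numerator for a single step h; the division by h**n and the
+    # Richardson/Wynn extrapolation over all steps happen later in
+    # _apply_fd_rule and _extrapolate.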
+    @staticmethod
+    def _central_even(f, f_x0i, x0i, h, *args, **kwds):
+        return (f(x0i + h, *args, **kwds) +
+                f(x0i - h, *args, **kwds)) / 2.0 - f_x0i
+
+    @staticmethod
+    def _central(f, f_x0i, x0i, h, *args, **kwds):
+        return (f(x0i + h, *args, **kwds) -
+                f(x0i - h, *args, **kwds)) / 2.0
+
+    @staticmethod
+    def _forward(f, f_x0i, x0i, h, *args, **kwds):
+        return (f(x0i + h, *args, **kwds) - f_x0i)
+
+    @staticmethod
+    def _backward(f, f_x0i, x0i, h, *args, **kwds):
+        return (f_x0i - f(x0i - h, *args, **kwds))
+
+    @staticmethod
+    def _complex(f, fx, x, h, *args, **kwds):
+        return f(x + 1j * h, *args, **kwds).imag
+
+    @staticmethod
+    def _complex_odd(f, fx, x, h, *args, **kwds):
+        ih = h * _SQRT_J
+        return ((_SQRT_J/2.) * (f(x + ih, *args, **kwds) -
+                                f(x - ih, *args, **kwds))).imag
+
+    @staticmethod
+    def _complex_odd_higher(f, fx, x, h, *args, **kwds):
+        ih = h * _SQRT_J
+        return ((3 * _SQRT_J) * (f(x + ih, *args, **kwds) -
+                                 f(x - ih, *args, **kwds))).real
+
+    @staticmethod
+    def _complex_even(f, fx, x, h, *args, **kwds):
+        ih = h * _SQRT_J
+        return (f(x + ih, *args, **kwds) +
+                f(x - ih, *args, **kwds)).imag
+
+    @staticmethod
+    def _complex_even_higher(f, fx, x, h, *args, **kwds):
+        ih = h * _SQRT_J
+        return 12.0 * (f(x + ih, *args, **kwds) +
+                       f(x - ih, *args, **kwds) - 2 * fx).real
+
+    @staticmethod
+    def _multicomplex(f, fx, x, h, *args, **kwds):
+        z = bicomplex(x + 1j * h, 0)
+        return f(z, *args, **kwds).imag
+
+    @staticmethod
+    def _multicomplex2(f, fx, x, h, *args, **kwds):
+        z = bicomplex(x + 1j * h, h)
+        return f(z, *args, **kwds).imag12
+
+
+class Gradient(Derivative):
+    def __init__(self, f, step=None, method='central', order=2,
+                 full_output=False):
+        super(Gradient, self).__init__(f, step=step, method=method, n=1,
+                                       order=order, full_output=full_output)
+    __doc__ = _cmn_doc % dict(
+        derivative='Gradient',
+        extra_parameter="""
+    order : int, optional
+        defines the order of the error term in the Taylor approximation used.
+        For 'central' and 'complex' methods, it must be an even number.""",
+        returns="""
+    Returns
+    -------
+    grad : array
+        gradient
+    """, extra_note="""
+    Higher order approximation methods will generally be more accurate, but may
+    also suffer more from numerical problems. First order methods are usually
+    not recommended.
+    """, example="""
+    Example
+    -------
+    >>> import numpy as np
+    >>> from bumps import numdifftools as nd
+    >>> fun = lambda x: np.sum(x**2)
+    >>> dfun = nd.Gradient(fun)
+    >>> dfun([1,2,3])
+    array([ 2.,  4.,  6.])
+
+    # At [x,y] = [1,1], compute the numerical gradient
+    # of the function sin(x-y) + y*exp(x)
+
+    >>> sin = np.sin; exp = np.exp
+    >>> z = lambda xy: sin(xy[0]-xy[1]) + xy[1]*exp(xy[0])
+    >>> dz = nd.Gradient(z)
+    >>> grad2 = dz([1, 1])
+    >>> grad2
+    array([ 3.71828183,  1.71828183])
+
+    # At the global minimizer (1,1) of the Rosenbrock function,
+    # compute the gradient. It should be essentially zero.
+
+    >>> rosen = lambda x : (1-x[0])**2 + 105.*(x[1]-x[0]**2)**2
+    >>> rd = nd.Gradient(rosen)
+    >>> grad3 = rd([1,1])
+    >>> np.allclose(grad3,[0, 0])
+    True""", see_also="""
+    See also
+    --------
+    Derivative, Hessian, Jacobian
+    """)
+
+    @staticmethod
+    def _central(f, fx, x, h, *args, **kwds):
+        n = len(x)
+        increments = np.identity(n) * h
+        partials = [(f(x + hi, *args, **kwds) - f(x - hi, *args, **kwds)) / 2.0
+                    for hi in increments]
+        return np.array(partials).T
+
+    @staticmethod
+    def _backward(f, fx, x, h, *args, **kwds):
+        n = len(x)
+        increments = np.identity(n) * h
+        partials = [(fx - f(x - hi, *args, **kwds)) for hi in increments]
+        return np.array(partials).T
+
+    @staticmethod
+    def _forward(f, fx, x, h, *args, **kwds):
+        n = len(x)
+        increments = np.identity(n) * h
+        partials = [(f(x + hi, *args, **kwds) - fx) for hi in increments]
+        return np.array(partials).T
+
+    @staticmethod
+    def _complex(f, fx, x, h, *args, **kwds):
+        # From Guilherme P. de Freitas, numpy mailing list
+        # http://mail.scipy.org/pipermail/numpy-discussion/2010-May/050250.html
+        n = len(x)
+        increments = np.identity(n) * 1j * h
+        partials = [f(x + ih, *args, **kwds).imag for ih in increments]
+        return np.array(partials).T
+
+    @staticmethod
+    def _complex_odd(f, fx, x, h, *args, **kwds):
+        n = len(x)
+        increments = np.identity(n) * _SQRT_J * h
+        partials = [((_SQRT_J/2.) * (f(x + ih, *args, **kwds) -
+                                     f(x - ih, *args, **kwds))).imag
+                    for ih in increments]
+        return np.array(partials).T
+
+    @staticmethod
+    def _multicomplex(f, fx, x, h, *args, **kwds):
+        n = len(x)
+        increments = np.identity(n) * 1j * h
+        partials = [f(bicomplex(x + hi, 0), *args, **kwds).imag
+                    for hi in increments]
+        return np.array(partials).T
+
+    def __call__(self, x, *args, **kwds):
+        return super(Gradient, self).__call__(np.atleast_1d(x), *args, **kwds)
+
+
+class Jacobian(Gradient):
+    __doc__ = _cmn_doc % dict(
+        derivative='Jacobian',
+        extra_parameter="""
+    order : int, optional
+        defines the order of the error term in the Taylor approximation used.
+        For 'central' and 'complex' methods, it must be an even number.""",
+        returns="""
+    Returns
+    -------
+    jacob : array
+        Jacobian
+    """, extra_note="""
+    Higher order approximation methods will generally be more accurate, but may
+    also suffer more from numerical problems. First order methods are usually
+    not recommended.
+
+    If f returns a 1d array, it returns a Jacobian. If a 2d array is returned
+    by f (e.g., with a value for each observation), it returns a 3d array
+    with the Jacobian of each observation with shape xk x nobs x xk. I.e.,
+    the Jacobian of the first observation would be [:, 0, :]
+    """, example="""
+    Example
+    -------
+    >>> import numpy as np
+    >>> from bumps import numdifftools as nd
+
+    # Nonlinear least squares
+
+    >>> xdata = np.reshape(np.arange(0,1,0.1),(-1,1))
+    >>> ydata = 1+2*np.exp(0.75*xdata)
+    >>> fun = lambda c: (c[0]+c[1]*np.exp(c[2]*xdata) - ydata)**2
+
+    >>> Jfun = nd.Jacobian(fun)
+    >>> val = Jfun([1,2,0.75])
+    >>> np.allclose(val, np.zeros((10,3)))
+    True
+
+    >>> fun2 = lambda x : x[0]*x[1]*x[2] + np.exp(x[0])*x[1]
+    >>> Jfun3 = nd.Jacobian(fun2)
+    >>> Jfun3([3.,5.,7.])
+    array([ 135.42768462,   41.08553692,   15.        ])
+    """, see_also="""
+    See also
+    --------
+    Derivative, Hessian, Gradient
+    """)
+
+
+class Hessdiag(Derivative):
+    def __init__(self, f, step=None, method='central', order=2,
+                 full_output=False):
+        super(Hessdiag, self).__init__(f, step=step, method=method, n=2,
+                                       order=order, full_output=full_output)
+    __doc__ = _cmn_doc % dict(
+        derivative='Hessian diagonal',
+        extra_parameter="""order : int, optional
+        defines the order of the error term in the Taylor approximation used.
+        For 'central' and 'complex' methods, it must be an even number.""",
+        returns="""
+    Returns
+    -------
+    hessdiag : array
+        hessian diagonal
+    """, extra_note="""
+    Higher order approximation methods will generally be more accurate, but may
+    also suffer more from numerical problems. First order methods are usually
+    not recommended.
+    """, example="""
+    Example
+    -------
+    >>> import numpy as np
+    >>> from bumps import numdifftools as nd
+    >>> fun = lambda x : x[0] + x[1]**2 + x[2]**3
+    >>> Hfun = nd.Hessdiag(fun, full_output=True)
+    >>> hd, info = Hfun([1,2,3])
+    >>> np.allclose(hd, [  0.,   2.,  18.])
+    True
+
+    >>> info.error_estimate < 1e-11
+    array([ True,  True,  True], dtype=bool)
+    """, see_also="""
+    See also
+    --------
+    Derivative, Hessian, Jacobian, Gradient
+    """)
+
+    @staticmethod
+    def _central2(f, fx, x, h, *args, **kwds):
+        """Eq. 8"""
+        n = len(x)
+        increments = np.identity(n) * h
+        partials = [(f(x + 2*hi, *args, **kwds) +
+                    f(x - 2*hi, *args, **kwds) + 2*fx -
+                    2*f(x + hi, *args, **kwds) -
+                    2*f(x - hi, *args, **kwds)) / 4.0
+                    for hi in increments]
+        return np.array(partials)
+
+    @staticmethod
+    def _central_even(f, fx, x, h, *args, **kwds):
+        """Eq. 9"""
+        n = len(x)
+        increments = np.identity(n) * h
+        partials = [(f(x + hi, *args, **kwds) +
+                     f(x - hi, *args, **kwds)) / 2.0 - fx
+                    for hi in increments]
+        return np.array(partials)
+
+    @staticmethod
+    def _backward(f, fx, x, h, *args, **kwds):
+        n = len(x)
+        increments = np.identity(n) * h
+        partials = [(fx - f(x - hi, *args, **kwds)) for hi in increments]
+        return np.array(partials)
+
+    @staticmethod
+    def _forward(f, fx, x, h, *args, **kwds):
+        n = len(x)
+        increments = np.identity(n) * h
+        partials = [(f(x + hi, *args, **kwds) - fx) for hi in increments]
+        return np.array(partials)
+
+    @staticmethod
+    def _multicomplex2(f, fx, x, h, *args, **kwds):
+        n = len(x)
+        increments = np.identity(n) * h
+        partials = [f(bicomplex(x + 1j * hi, hi), *args, **kwds).imag12
+                    for hi in increments]
+        return np.array(partials)
+
+    @staticmethod
+    def _complex_even(f, fx, x, h, *args, **kwargs):
+        n = len(x)
+        increments = np.identity(n) * h * (1j+1) / np.sqrt(2)
+        partials = [(f(x + hi, *args, **kwargs) +
+                     f(x - hi, *args, **kwargs)).imag
+                    for hi in increments]
+        return np.array(partials)
+
+    def __call__(self, x, *args, **kwds):
+        return super(Hessdiag, self).__call__(np.atleast_1d(x), *args, **kwds)
+
+
+class Hessian(_Derivative):
+    def __init__(self, f, step=None, method='central', full_output=False):
+        order = dict(backward=1, forward=1, complex=2).get(method, 2)
+        super(Hessian, self).__init__(f, n=2, step=step, method=method,
+                                      order=order, full_output=full_output)
+
+    __doc__ = _cmn_doc % dict(
+        derivative='Hessian',
+        extra_parameter="",
+        returns="""
+    Returns
+    -------
+    hess : ndarray
+       array of partial second derivatives, Hessian
+    """, extra_note="""
+    Computes the Hessian according to method as:
+    'forward' :eq:`7`, 'central' :eq:`9` and 'complex' :eq:`10`:
+
+    .. math::
+        (f(x + d_j e_j + d_k e_k) - f(x + d_j e_j) -
+         f(x + d_k e_k) + f(x)) / (d_j d_k)
+        :label: 7
+
+    .. math::
+        (f(x + d_j e_j + d_k e_k) - f(x + d_j e_j - d_k e_k) -
+         f(x - d_j e_j + d_k e_k) + f(x - d_j e_j - d_k e_k)) / (4 d_j d_k)
+        :label: 9
+
+    .. math::
+        imag(f(x + i d_j e_j + d_k e_k) - f(x + i d_j e_j - d_k e_k))
+        / (2 d_j d_k)
+        :label: 10
+
+    where :math:`e_j` is a vector with element :math:`j` equal to one and the
+    rest zero, and :math:`d_j` is a scalar spacing :math:`steps_j`.
+    """, example="""
+    Example
+    -------
+    >>> import numpy as np
+    >>> from bumps import numdifftools as nd
+
+    # Rosenbrock function, minimized at [1,1]
+
+    >>> rosen = lambda x : (1.-x[0])**2 + 105*(x[1]-x[0]**2)**2
+    >>> Hfun = nd.Hessian(rosen)
+    >>> h = Hfun([1, 1])
+    >>> h
+    array([[ 842., -420.],
+           [-420.,  210.]])
+
+    # cos(x-y), at (0,0)
+
+    >>> cos = np.cos
+    >>> fun = lambda xy : cos(xy[0]-xy[1])
+    >>> Hfun2 = nd.Hessian(fun)
+    >>> h2 = Hfun2([0, 0])
+    >>> h2
+    array([[-1.,  1.],
+           [ 1., -1.]])""", see_also="""
+    See also
+    --------
+    Derivative, Gradient, Jacobian, Hessdiag
+    """)
+
+    def _complex_high_order(self):
+        return False
+
+    def _derivative(self, xi, args, kwds):
+        xi = np.atleast_1d(xi)
+        diff, f = self._get_functions()
+        steps = self._get_steps(xi)
+
+        fxi = self._eval_first(f, xi, *args, **kwds)
+        results = [diff(f, fxi, xi, h, *args, **kwds) for h in steps]
+        step_ratio = self._compute_step_ratio(steps)
+        self._set_richardson_rule(step_ratio, self.richardson_terms)
+        return self._vstack(results, steps)
+
+    @staticmethod
+    def _complex_even(f, fx, x, h, *args, **kwargs):
+        """
+        Calculate Hessian with complex-step derivative approximation
+
+        The stepsize is the same for the complex and the finite difference part
+        """
+        n = len(x)
+        # h = _default_base_step(x, 3, base_step, n)
+        ee = np.diag(h)
+        hes = 2. * np.outer(h, h)
+
+        for i in range(n):
+            for j in range(i, n):
+                hes[i, j] = (f(x + 1j * ee[i] + ee[j], *args, **kwargs) -
+                             f(x + 1j * ee[i] - ee[j], *args, **kwargs)
+                             ).imag / hes[j, i]
+                hes[j, i] = hes[i, j]
+        return hes
+
+    @staticmethod
+    def _multicomplex2(f, fx, x, h, *args, **kwargs):
+        """Calculate Hessian with bicomplex-step derivative approximation"""
+        n = len(x)
+        ee = np.diag(h)
+        hess = np.outer(h, h)
+        for i in range(n):
+            for j in range(i, n):
+                zph = bicomplex(x + 1j * ee[i, :], ee[j, :])
+                hess[i, j] = (f(zph, *args, **kwargs)).imag12 / hess[j, i]
+                hess[j, i] = hess[i, j]
+        return hess
+
+    @staticmethod
+    def _central_even(f, fx, x, h, *args, **kwargs):
+        """Eq 9."""
+        n = len(x)
+        # h = _default_base_step(x, 4, base_step, n)
+        ee = np.diag(h)
+        hess = np.outer(h, h)
+
+        for i in range(n):
+            hess[i, i] = (f(x + 2*ee[i, :], *args, **kwargs) - 2*fx +
+                          f(x - 2*ee[i, :], *args, **kwargs)
+                          ) / (4. * hess[i, i])
+            for j in range(i+1, n):
+                hess[i, j] = (f(x + ee[i, :] + ee[j, :], *args, **kwargs) -
+                              f(x + ee[i, :] - ee[j, :], *args, **kwargs) -
+                              f(x - ee[i, :] + ee[j, :], *args, **kwargs) +
+                              f(x - ee[i, :] - ee[j, :], *args, **kwargs)
+                              ) / (4. * hess[j, i])
+                hess[j, i] = hess[i, j]
+        return hess
+
+    @staticmethod
+    def _central2(f, fx, x, h, *args, **kwargs):
+        """Eq. 8"""
+        n = len(x)
+        # NOTE: Ridout suggests using eps**(1/4)*theta
+        # h = _default_base_step(x, 3, base_step, n)
+        ee = np.diag(h)
+        dtype = np.result_type(fx)
+        g = np.empty(n, dtype=dtype)
+        gg = np.empty(n, dtype=dtype)
+        for i in range(n):
+            g[i] = f(x + ee[i], *args, **kwargs)
+            gg[i] = f(x - ee[i], *args, **kwargs)
+
+        hess = np.empty((n, n), dtype=dtype)
+        np.outer(h, h, out=hess)
+        for i in range(n):
+            for j in range(i, n):
+                hess[i, j] = (f(x + ee[i, :] + ee[j, :], *args, **kwargs) -
+                              g[i] - g[j] + fx +
+                              f(x - ee[i, :] - ee[j, :], *args, **kwargs) -
+                              gg[i] - gg[j] + fx) / (2 * hess[j, i])
+                hess[j, i] = hess[i, j]
+
+        return hess
+
+    @staticmethod
+    def _forward(f, fx, x, h, *args, **kwargs):
+        """Eq. 7"""
+        n = len(x)
+        ee = np.diag(h)
+
+        dtype = np.result_type(fx)
+        g = np.empty(n, dtype=dtype)
+        for i in range(n):
+            g[i] = f(x + ee[i, :], *args, **kwargs)
+
+        hess = np.empty((n, n), dtype=dtype)
+        np.outer(h, h, out=hess)
+        for i in range(n):
+            for j in range(i, n):
+                hess[i, j] = (f(x + ee[i, :] + ee[j, :], *args, **kwargs) -
+                              g[i] - g[j] + fx) / hess[j, i]
+                hess[j, i] = hess[i, j]
+        return hess
+
+    def _backward(self, f, fx, x, h, *args, **kwargs):
+        return self._forward(f, fx, x, -h, *args, **kwargs)
+
+
+def test_docstrings():
+    import doctest
+    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
+
+
+if __name__ == '__main__':  # pragma: no cover
+    test_docstrings()
diff --git a/bumps/numdifftools/extrapolation.py b/bumps/numdifftools/extrapolation.py
new file mode 100644
index 0000000..6084b47
--- /dev/null
+++ b/bumps/numdifftools/extrapolation.py
@@ -0,0 +1,346 @@
+"""
+Created on 28. aug. 2015
+
+@author: pab
+"""
+from __future__ import division, print_function
+import numpy as np
+from scipy import linalg
+from scipy.ndimage.filters import convolve1d
+import warnings
+EPS = np.finfo(float).eps
+_EPS = EPS
+_TINY = np.finfo(float).tiny
+
+
+def convolve(sequence, rule, **kwds):
+    """Wrapper around scipy.ndimage.convolve1d that allows complex input."""
+    if np.iscomplexobj(sequence):
+        return (convolve1d(sequence.real, rule, **kwds) + 1j *
+                convolve1d(sequence.imag, rule, **kwds))
+    return convolve1d(sequence, rule, **kwds)
+
+
+class Dea(object):
+    """
+    LIMEXP  is the maximum number of elements the
+    epsilon table data can contain. The epsilon table
+    is stored in the first (LIMEXP+2) entries of EPSTAB.
+
+
+    LIST OF MAJOR VARIABLES
+    -----------------------
+    E0,E1,E2,E3 - DOUBLE PRECISION
+                  The 4 elements on which the computation of
+                  a new element in the epsilon table is based.
+    NRES   - INTEGER
+             Number of extrapolation results actually
+             generated by the epsilon algorithm in prior
+             calls to the routine.
+    NEWELM - INTEGER
+             Number of elements to be computed in the
+             new diagonal of the epsilon table. The
+             condensed epsilon table is computed. Only
+             those elements needed for the computation of
+             the next diagonal are preserved.
+    RES    - DOUBLE PRECISION
+             New element in the new diagonal of the
+             epsilon table.
+    ERROR  - DOUBLE PRECISION
+             An estimate of the absolute error of RES.
+             Routine decides whether RESULT=RES or
+             RESULT=SVALUE by comparing ERROR with
+             ABSERR from the previous call.
+    RES3LA - DOUBLE PRECISION
+             Vector of DIMENSION 3 containing at most
+             the last 3 results.
+    """
+    def __init__(self, limexp=3):
+        self.limexp = 2 * (limexp // 2) + 1
+        self.epstab = np.zeros(limexp+5)
+        self.ABSERR = 10.
+        self._n = 0
+        self._nres = 0
+        if (limexp < 3):
+            raise ValueError('LIMEXP IS LESS THAN 3')
+
+    def _compute_error(self, RES3LA, NRES, RES):
+        fact = [6.0, 2.0, 1.0][min(NRES-1, 2)]
+        error = fact * np.abs(RES - RES3LA[:NRES]).sum()
+        return error
+
+    def _shift_table(self, EPSTAB, N, NEWELM, NUM):
+        i_0 = 1 if ((NUM // 2) * 2 == NUM - 1) else 0
+        i_n = 2 * NEWELM + 2
+        EPSTAB[i_0:i_n:2] = EPSTAB[i_0 + 2:i_n + 2:2]
+
+        if (NUM != N):
+            i_n = NUM - N
+            EPSTAB[:N + 1] = EPSTAB[i_n:i_n + N + 1]
+        return EPSTAB
+
+    def _update_RES3LA(self, RES3LA, RESULT, NRES):
+        if NRES > 2:
+            RES3LA[:2] = RES3LA[1:]
+            RES3LA[2] = RESULT
+        else:
+            RES3LA[NRES] = RESULT
+
+    def __call__(self, SVALUE):
+
+        EPSTAB = self.epstab
+        RES3LA = EPSTAB[-3:]
+        RESULT = SVALUE
+        N = self._n
+        NRES = self._nres
+        EPSTAB[N] = SVALUE
+        if (N == 0):
+            ABSERR = abs(RESULT)
+        elif (N == 1):
+            ABSERR = 6.0 * abs(RESULT - EPSTAB[0])
+        else:
+            ABSERR = self.ABSERR
+            EPSTAB[N + 2] = EPSTAB[N]
+            NEWELM = N // 2
+            NUM = N
+            K1 = N
+            for I in range(NEWELM):
+                E0 = EPSTAB[K1 - 2]
+                E1 = EPSTAB[K1 - 1]
+                E2 = RES = EPSTAB[K1 + 2]
+                DELTA2, DELTA3 = E2 - E1, E1 - E0
+                ERR2, ERR3 = abs(DELTA2), abs(DELTA3)
+                TOL2 = max(abs(E2), abs(E1)) * _EPS
+                TOL3 = max(abs(E1), abs(E0)) * _EPS
+                converged = (ERR2 <= TOL2 and ERR3 <= TOL3)
+                if converged:
+                    ABSERR = ERR2 + ERR3
+                    RESULT = RES
+                    break
+                if (I != 0):
+                    E3 = EPSTAB[K1]
+                    DELTA1 = E1 - E3
+                    ERR1 = abs(DELTA1)
+                    TOL1 = max(abs(E1), abs(E3)) * _EPS
+                    converged = (ERR1 <= TOL1 or ERR2 <= TOL2 or
+                                 ERR3 <= TOL3)
+                    if not converged:
+                        SS = 1.0 / DELTA1 + 1.0 / DELTA2 - 1.0 / DELTA3
+                else:
+                    converged = (ERR2 <= TOL2 or ERR3 <= TOL3)
+                    if not converged:
+                        SS = 1.0 / DELTA2 - 1.0 / DELTA3
+                EPSTAB[K1] = E1
+                if (converged or abs(SS * E1) <= 1e-04):
+                    N = 2 * I
+                    if (NRES == 0):
+                        ABSERR = ERR2 + ERR3
+                        RESULT = RES
+                    else:
+                        RESULT = RES3LA[min(NRES-1, 2)]
+                    break
+                RES = E1 + 1.0 / SS
+                EPSTAB[K1] = RES
+                K1 = K1 - 2
+                if (NRES == 0):
+                    ABSERR = ERR2 + abs(RES - E2) + ERR3
+                    RESULT = RES
+                    continue
+                ERROR = self._compute_error(RES3LA, NRES, RES)
+
+                if (ERROR > 10.0 * ABSERR):
+                    continue
+                ABSERR = ERROR
+                RESULT = RES
+            else:
+                ERROR = self._compute_error(RES3LA, NRES, RES)
+
+            # 50
+            if (N == self.limexp - 1):
+                N = 2 * (self.limexp // 2) - 1
+            EPSTAB = self._shift_table(EPSTAB, N, NEWELM, NUM)
+            self._update_RES3LA(RES3LA, RESULT, NRES)
+
+            ABSERR = max(ABSERR, 10.0*_EPS * abs(RESULT))
+            NRES = NRES + 1
+
+        N += 1
+        self._n = N
+        self._nres = NRES
+        # EPSTAB[-3:] = RES3LA
+        self.ABSERR = ABSERR
+        return RESULT, ABSERR
+
+
+def test_dea():
+    def linfun(i):
+        return np.linspace(0, np.pi/2., 2**i+1)
+    dea = Dea(limexp=11)
+    print('NO. PANELS      TRAP. APPROX          APPROX W/EA           ABSERR')
+    for k in np.arange(10):
+        x = linfun(k)
+        val = np.trapz(np.sin(x), x)
+        vale, err = dea(val)
+        print('%5d %20.8f  %20.8f  %20.8f' % (len(x)-1, val, vale, err))
+
+
+def dea3(v0, v1, v2, symmetric=False):
+    """
+    Extrapolate a slowly convergent sequence
+
+    Parameters
+    ----------
+    v0, v1, v2 : array-like
+        3 values of a convergent sequence to extrapolate
+
+    Returns
+    -------
+    result : array-like
+        extrapolated value
+    abserr : array-like
+        absolute error estimate
+
+    Description
+    -----------
+    DEA3 attempts to extrapolate nonlinearly to a better estimate
+    of the sequence's limiting value, thus improving the rate of
+    convergence. The routine is based on the epsilon algorithm of
+    P. Wynn, see [1]_.
+
+     Example
+     -------
+     # integrate sin(x) from 0 to pi/2
+
+     >>> import numpy as np
+     >>> from bumps import numdifftools as nd
+     >>> Ei= np.zeros(3)
+     >>> linfun = lambda i : np.linspace(0, np.pi/2., 2**(i+5)+1)
+     >>> for k in np.arange(3):
+     ...    x = linfun(k)
+     ...    Ei[k] = np.trapz(np.sin(x),x)
+     >>> [En, err] = nd.dea3(Ei[0], Ei[1], Ei[2])
+     >>> truErr = Ei-1.
+     >>> (truErr, err, En)
+     (array([ -2.00805680e-04,  -5.01999079e-05,  -1.25498825e-05]),
+     array([ 0.00020081]), array([ 1.]))
+
+     See also
+     --------
+     dea
+
+     Reference
+     ---------
+     .. [1] C. Brezinski (1977)
+            "Acceleration de la convergence en analyse numerique",
+            "Lecture Notes in Math.", vol. 584,
+            Springer-Verlag, New York, 1977.
+    """
+    E0, E1, E2 = np.atleast_1d(v0, v1, v2)
+    abs, max = np.abs, np.maximum  # @ReservedAssignment
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore")  # ignore division by zero and overflow
+        delta2, delta1 = E2 - E1, E1 - E0
+        err2, err1 = abs(delta2), abs(delta1)
+        tol2, tol1 = max(abs(E2), abs(E1)) * _EPS, max(abs(E1), abs(E0)) * _EPS
+        delta1[err1 < _TINY] = _TINY
+        delta2[err2 < _TINY] = _TINY  # avoid division by zero and overflow
+        ss = 1.0 / delta2 - 1.0 / delta1 + _TINY
+        smalle2 = (abs(ss * E1) <= 1.0e-3)
+        converged = (err1 <= tol1) & (err2 <= tol2) | smalle2
+        result = np.where(converged, E2 * 1.0, E1 + 1.0 / ss)
+        abserr = err1 + err2 + np.where(converged, tol2 * 10, abs(result-E2))
+    if symmetric and len(result) > 1:
+        return result[:-1], abserr[1:]
+    return result, abserr
+
+
+class Richardson(object):
+    """
+    Extrapolates a sequence with Richardson's method
+
+    Notes
+    -----
+    Suppose you have a series expansion that goes like this
+
+    L = f(h) + a0 * h^p_0 + a1 * h^p_1 + a2 * h^p_2 + ...
+
+    where p_i = order + step * i, and f(h) -> L as h -> 0, but f(0) != L.
+
+    If we evaluate the right hand side for different stepsizes h
+    we can fit a polynomial to that sequence of approximations.
+    This is exactly what this class does.
+
+    Example
+    -------
+    >>> import numpy as np
+    >>> from bumps import numdifftools as nd
+    >>> n = 3
+    >>> Ei = np.zeros((n,1))
+    >>> h = np.zeros((n,1))
+    >>> linfun = lambda i : np.linspace(0, np.pi/2., 2**(i+5)+1)
+    >>> for k in np.arange(n):
+    ...    x = linfun(k)
+    ...    h[k] = x[1]
+    ...    Ei[k] = np.trapz(np.sin(x),x)
+    >>> En, err, step = nd.Richardson(step=1, order=1)(Ei, h)
+    >>> truErr = Ei-1.
+    >>> (truErr, err, En)
+    (array([[ -2.00805680e-04],
+           [ -5.01999079e-05],
+           [ -1.25498825e-05]]), array([[ 0.00320501]]), array([[ 1.]]))
+
+    """
+    def __init__(self, step_ratio=2.0, step=1, order=1, num_terms=2):
+        self.num_terms = num_terms
+        self.order = order
+        self.step = step
+        self.step_ratio = step_ratio
+
+    def _r_matrix(self, num_terms):
+        step = self.step
+        i, j = np.ogrid[0:num_terms+1, 0:num_terms]
+        r_mat = np.ones((num_terms + 1, num_terms + 1))
+        r_mat[:, 1:] = (1.0 / self.step_ratio) ** (i*(step*j + self.order))
+        return r_mat
+
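+    # _get_richardson_rule below returns the first row of pinv(r_mat); used as
+    # convolution weights over successive step estimates, it cancels the
+    # leading num_terms error terms of the expansion described in the class
+    # docstring.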
+    def _get_richardson_rule(self, sequence_length=None):
+        if sequence_length is None:
+            sequence_length = self.num_terms + 1
+        num_terms = min(self.num_terms, sequence_length - 1)
+        if num_terms > 0:
+            r_mat = self._r_matrix(num_terms)
+            return linalg.pinv(r_mat)[0]
+        return np.ones((1,))
+
+    def _estimate_error(self, new_sequence, old_sequence, steps, rule):
+        m, _n = new_sequence.shape
+
+        if m < 2:
+            return (np.abs(new_sequence) * EPS + steps) * 10.0
+        cov1 = np.sum(rule**2)  # 1 spare dof
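+        # 12.7062047361747 is the two-sided 97.5% quantile of Student's t with
+        # one degree of freedom, i.e. a ~95% confidence scale factor for the
+        # single spare degree of freedom noted above.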
+        fact = np.maximum(12.7062047361747 * np.sqrt(cov1), EPS * 10.)
+        err = np.abs(np.diff(new_sequence, axis=0)) * fact
+        tol = np.maximum(np.abs(new_sequence[1:]),
+                         np.abs(new_sequence[:-1])) * EPS * fact
+        converged = err <= tol
+        abserr = err + np.where(converged, tol * 10,
+                                abs(new_sequence[:-1]-old_sequence[1:])*fact)
+        # abserr = err1 + err2 + np.where(converged, tol2 * 10, abs(result-E2))
+        # abserr = s * fact + np.abs(new_sequence) * EPS * 10.0
+        return abserr
+
+    def extrapolate(self, sequence, steps):
+        return self.__call__(sequence, steps)
+
+    def __call__(self, sequence, steps):
+        ne = sequence.shape[0]
+        rule = self._get_richardson_rule(ne)
+        nr = rule.size - 1
+        m = ne - nr
+        new_sequence = convolve(sequence, rule[::-1], axis=0, origin=(nr // 2))
+        abserr = self._estimate_error(new_sequence, sequence, steps, rule)
+        return new_sequence[:m], abserr[:m], steps[:m]
+
+
+if __name__ == '__main__':
+    pass
diff --git a/bumps/numdifftools/info.py b/bumps/numdifftools/info.py
new file mode 100644
index 0000000..fae3bcc
--- /dev/null
+++ b/bumps/numdifftools/info.py
@@ -0,0 +1,93 @@
+"""
+Introduction to Numdifftools
+============================
+
+Numdifftools is a suite of tools written in Python to solve automatic numerical
+differentiation problems in one or more variables. Finite differences are used
+in an adaptive manner, coupled with a Richardson extrapolation methodology to
+provide a maximally accurate result. The user can configure many options,
+such as changing the order of the method or of the extrapolation, and can
+specify whether `complex`, `multicomplex`, `central`, `forward` or
+`backward` differences are used. The methods provided are:
+
+*Derivative:*
+    Computes the derivative of order 1 through 10 of any scalar function.
+
+*Gradient:*
+    Computes the gradient vector of a scalar function of one or more variables.
+
+*Jacobian:*
+    Computes the Jacobian matrix of a vector valued function of one or more
+    variables.
+
+*Hessian:*
+    Computes the Hessian matrix of all 2nd partial derivatives of a scalar
+    function of one or more variables.
+
+*Hessdiag:*
+    Computes only the diagonal elements of the Hessian matrix.
+
+All of these methods also produce error estimates on the result.
+
+Numdifftools also provides an easy-to-use interface to derivatives calculated
+with AlgoPy. AlgoPy stands for Algorithmic Differentiation in Python.
+The purpose of AlgoPy is the evaluation of higher-order derivatives in the
+`forward` and `reverse` mode of Algorithmic Differentiation (AD) of functions
+that are implemented as Python programs.
+
+Documentation is at: http://numdifftools.readthedocs.org/
+
+Code and issue tracker is at https://github.com/pbrod/numdifftools.
+
+Latest stable release is at http://pypi.python.org/pypi/Numdifftools.
+
+To test if the toolbox is working paste the following in an interactive
+python session::
+
+   import numdifftools as nd
+   nd.test(coverage=True, doctests=True)
+
+Getting Started
+---------------
+Compute 1'st and 2'nd derivative of exp(x), at x == 1::
+
+    >>> import numpy as np
+    >>> from bumps import numdifftools as nd
+    >>> fd = nd.Derivative(np.exp)        # 1'st derivative
+    >>> fdd = nd.Derivative(np.exp, n=2)  # 2'nd derivative
+    >>> np.allclose(fd(1), 2.7182818284590424)
+    True
+    >>> np.allclose(fdd(1), 2.7182818284590424)
+    True
+
+Nonlinear least squares::
+
+    >>> xdata = np.reshape(np.arange(0,1,0.1),(-1,1))
+    >>> ydata = 1+2*np.exp(0.75*xdata)
+    >>> fun = lambda c: (c[0]+c[1]*np.exp(c[2]*xdata) - ydata)**2
+    >>> Jfun = nd.Jacobian(fun)
+    >>> np.allclose(np.abs(Jfun([1,2,0.75])), 0) # should be numerically zero
+    True
+
+Compute gradient of sum(x**2)::
+
+    >>> fun = lambda x: np.sum(x**2)
+    >>> dfun = nd.Gradient(fun)
+    >>> dfun([1,2,3])
+    array([ 2.,  4.,  6.])
+
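+Compute the Hessian of a scalar function (a sketch following the same
+interface as the examples above; the bilinear test function is only
+illustrative)::
+
+    fun = lambda x: x[0] * x[1]
+    Hfun = nd.Hessian(fun)
+    Hfun([1., 2.])   # approximately [[0., 1.], [1., 0.]]
+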
+See also
+--------
+scipy.misc.derivative
+
+"""
+
+# 2016-04-19 PAK remove docs on algopy interface
+
+def test_docstrings():
+    import doctest
+    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
+
+
+if __name__ == '__main__':
+    test_docstrings()
diff --git a/bumps/numdifftools/multicomplex.py b/bumps/numdifftools/multicomplex.py
new file mode 100644
index 0000000..8141ab6
--- /dev/null
+++ b/bumps/numdifftools/multicomplex.py
@@ -0,0 +1,370 @@
+"""
+Created on 22. apr. 2015
+
+@author: pab
+
+References
+----------
+A METHODOLOGY FOR ROBUST OPTIMIZATION OF
+LOW-THRUST TRAJECTORIES IN MULTI-BODY
+ENVIRONMENTS
+Gregory Lantoine (2010)
+Phd thesis, Georgia Institute of Technology
+
+USING MULTICOMPLEX VARIABLES FOR AUTOMATIC
+COMPUTATION OF HIGH-ORDER DERIVATIVES
+Gregory Lantoine, Ryan P. Russell, and Thierry Dargent
+ACM Transactions on Mathematical Software, Vol. 38, No. 3, Article 16,
+April 2012, 21 pages.
+
+M.E. Luna-Elizarraras, M. Shapiro, D.C. Struppa, A. Vajiac (2012)
+CUBO A Mathematical Journal
+Vol. 14, No 2, (61-80). June 2012.
+
+Computation of higher-order derivatives using the multi-complex
+step method
+Adriaen Verheyleweghen, (2014)
+Project report, NTNU
+
+"""
+from __future__ import division
+import numpy as np
+
+_TINY = np.finfo(float).machar.tiny
+
+
+def c_atan2(x, y):
+    a, b = np.real(x), np.imag(x)
+    c, d = np.real(y), np.imag(y)
+    return np.arctan2(a, c) + 1j * (c * b - a * d) / (a**2 + c**2)
+
+
+def c_max(x, y):
+    return np.where(x.real < y.real, y, x)
+
+
+def c_min(x, y):
+    return np.where(x.real > y.real, y, x)
+
+
+def c_abs(z):
+    return np.where(np.real(z) >= 0, z, -z)
+
+
+class bicomplex(object):
+    """
+    BICOMPLEX(z1, z2)
+
+    Creates an instance of a bicomplex object.
+    zeta = z1 + j*z2, where z1 and z2 are complex numbers.
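+
+    Example: approximate the second derivative of exp at x = 1 using a
+    bicomplex (multicomplex) step, following the references above (a sketch;
+    h is the step size)::
+
+        import numpy as np
+        h = 1e-6
+        z = bicomplex(1.0 + 1j * h, h)
+        z.exp().imag12 / h ** 2    # approximately exp(1) = 2.71828...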
+    """
+
+    def __init__(self, z1, z2):
+        z1, z2 = np.broadcast_arrays(z1, z2)
+        self.z1 = np.asanyarray(z1, dtype=np.complex128)
+        self.z2 = np.asanyarray(z2, dtype=np.complex128)
+
+    @property
+    def shape(self):
+        return self.z1.shape
+
+    @property
+    def size(self):
+        return self.z1.size
+
+    def mod_c(self):
+        """Complex modulus"""
+        r12, r22 = self.z1*self.z1, self.z2*self.z2
+        r = np.sqrt(r12 + r22)
+        return r
+        # return np.where(r == 0, np.sqrt(r12 - r22), r)
+
+    def norm(self):
+        z1, z2 = self.z1, self.z2
+        return np.sqrt(z1.real**2 + z2.real**2 + z1.imag**2 + z2.imag**2)
+
+    @property
+    def real(self):
+        return self.z1.real
+
+    @property
+    def imag(self):
+        return self.z1.imag
+
+    @property
+    def imag1(self):
+        return self.z1.imag
+
+    @property
+    def imag2(self):
+        return self.z2.real
+
+    @property
+    def imag12(self):
+        return self.z2.imag
+
+    @staticmethod
+    def asarray(other):
+        z1, z2 = other.z1, other.z2
+        return np.vstack((np.hstack((z1, -z2)),
+                          np.hstack((z2, z1))))
+
+    @staticmethod
+    def _coerce(other):
+        if not isinstance(other, bicomplex):
+            return bicomplex(other, np.zeros(np.shape(other)))
+        return other
+
+    @staticmethod
+    def mat2bicomp(arr):
+        shape = np.array(arr.shape)
+        shape[:2] = shape[:2] // 2
+        z1 = arr[:shape[0]]
+        z2 = arr[shape[0]:]
+        slices = tuple([slice(None, None, 1)] + [slice(n) for n in shape[1:]])
+        return bicomplex(z1[slices], z2[slices])
+
+    def __array_wrap__(self, result):
+        if isinstance(result, bicomplex):
+            return result
+        shape = result.shape
+        result = np.atleast_1d(result)
+        z1 = np.array([cls.z1 for cls in result.ravel()])
+        z2 = np.array([cls.z2 for cls in result.ravel()])
+        return bicomplex(z1.reshape(shape), z2.reshape(shape))
+
+    def __repr__(self):
+        name = self.__class__.__name__
+        return """%s(z1=%s, z2=%s)""" % (name, str(self.z1), str(self.z2))
+
+    def __lt__(self, other):
+        other = self._coerce(other)
+        return self.z1.real < other.z1.real
+
+    def __le__(self, other):
+        other = self._coerce(other)
+        return self.z1.real <= other.z1.real
+
+    def __gt__(self, other):
+        other = self._coerce(other)
+        return self.z1.real > other.z1.real
+
+    def __ge__(self, other):
+        other = self._coerce(other)
+        return self.z1.real >= other.z1.real
+
+    def __eq__(self, other):
+        other = self._coerce(other)
+        return (self.z1 == other.z1) * (self.z2 == other.z2)
+
+    def __getitem__(self, index):
+        return bicomplex(self.z1[index], self.z2[index])
+
+    def __setitem__(self, index, value):
+        value = self._coerce(value)
+        if index in ['z1', 'z2']:
+            setattr(self, index, value)
+        else:
+            self.z1[index] = value.z1
+            self.z2[index] = value.z2
+
+    def __abs__(self):
+        z1, z2 = self.z1, self.z2
+        mask = self >= 0
+        return bicomplex(np.where(mask, z1, -z1), np.where(mask, z2, -z2))
+
+    def __neg__(self):
+        return bicomplex(-self.z1, -self.z2)
+
+    def __add__(self, other):
+        other = self._coerce(other)
+        return bicomplex(self.z1 + other.z1, self.z2 + other.z2)
+
+    def __sub__(self, other):
+        other = self._coerce(other)
+        return bicomplex(self.z1 - other.z1, self.z2 - other.z2)
+
+    def __rsub__(self, other):
+        return - self.__sub__(other)
+
+    def __div__(self, other):
+        """elementwise division"""
+        return self * other ** -1  # np.exp(-np.log(other))
+
+    __truediv__ = __div__
+
+    def __rdiv__(self, other):
+        """elementwise division"""
+        return other * self ** -1
+
+    def __mul__(self, other):
+        """elementwise multiplication"""
+        other = self._coerce(other)
+        return bicomplex(self.z1 * other.z1 - self.z2 * other.z2,
+                         (self.z1 * other.z2 + self.z2 * other.z1))
+
+    def _pow_singular(self, other):
+        z1, z2 = self.z1, self.z2
+        z01 = 0.5 * (z1 - 1j * z2) ** other
+        z02 = 0.5 * (z1 + 1j * z2) ** other
+        return bicomplex(z01 + z02, (z01 - z02) * 1j)
+
+    def __pow__(self, other):
+        out = (self.log()*other).exp()
+        non_invertible = self.mod_c() == 0
+        if non_invertible.any():
+            out[non_invertible] = self[non_invertible]._pow_singular(other)
+        return out
+
+    def __rpow__(self, other):
+        return (np.log(other) * self).exp()
+
+    __radd__ = __add__
+    __rmul__ = __mul__
+
+    def __len__(self):
+        return len(self.z1)
+
+    def conjugate(self):
+        return bicomplex(self.z1, -self.z2)
+
+    def flat(self, index):
+        return bicomplex(self.z1.flat[index], self.z2.flat[index])
+
+    def dot(self, other):
+        other = self._coerce(other)
+        if self.size == 1 or other.size == 1:
+            return self * other
+        return self.mat2bicomp(self.asarray(self).dot(self.asarray(other).T))
+
+    def logaddexp(self, other):
+        other = self._coerce(other)
+        return self + np.log1p(np.exp(other-self))
+
+    def logaddexp2(self, other):
+        other = self._coerce(other)
+        return self + np.log2(1+np.exp2(other-self))
+
+    def sin(self):
+        z1 = np.cosh(self.z2) * np.sin(self.z1)
+        z2 = np.sinh(self.z2) * np.cos(self.z1)
+        return bicomplex(z1, z2)
+
+    def cos(self):
+        z1 = np.cosh(self.z2) * np.cos(self.z1)
+        z2 = -np.sinh(self.z2) * np.sin(self.z1)
+        return bicomplex(z1, z2)
+
+    def tan(self):
+        return self.sin() / self.cos()
+
+    def cot(self):
+        return self.cos() / self.sin()
+
+    def sec(self):
+        return 1. / self.cos()
+
+    def csc(self):
+        return 1. / self.sin()
+
+    def cosh(self):
+        z1 = np.cosh(self.z1) * np.cos(self.z2)
+        z2 = np.sinh(self.z1) * np.sin(self.z2)
+        return bicomplex(z1, z2)
+
+    def sinh(self):
+        z1 = np.sinh(self.z1) * np.cos(self.z2)
+        z2 = np.cosh(self.z1) * np.sin(self.z2)
+        return bicomplex(z1, z2)
+
+    def tanh(self):
+        return self.sinh() / self.cosh()
+
+    def coth(self):
+        return self.cosh() / self.sinh()
+
+    def sech(self):
+        return 1. / self.cosh()
+
+    def csch(self):
+        return 1. / self.sinh()
+
+    def exp2(self):
+        return np.exp(self * np.log(2))
+
+    def sqrt(self):
+        return self.__pow__(0.5)
+
+    def log10(self):
+        return self.log()/np.log(10)
+
+    def log2(self):
+        return self.log()/np.log(2)
+
+    def log1p(self):
+        return bicomplex(np.log1p(self.mod_c()), self.arg_c1p())
+
+    def expm1(self):
+        expz1 = np.expm1(self.z1)
+        return bicomplex(expz1 * np.cos(self.z2), expz1 * np.sin(self.z2))
+
+    def exp(self):
+        expz1 = np.exp(self.z1)
+        return bicomplex(expz1 * np.cos(self.z2), expz1 * np.sin(self.z2))
+
+    def log(self):
+        mod_c = self.mod_c()
+#         if (mod_c == 0).any():
+#             raise ValueError('mod_c is zero -> number not invertable!')
+        return bicomplex(np.log(mod_c + _TINY), self.arg_c())
+
+#     def _log_m(self, m=0):
+#         return np.log(self.mod_c() + _TINY) + 1j * \
+#             (self.arg_c() + 2 * m * np.pi)
+#
+#     def _log_mn(self, m=0, n=0):
+#         arg_c = self.arg_c()
+#         log_m = np.log(self.mod_c() + _TINY) + 1j * (2 * m * np.pi)
+#         return bicomplex(log_m, arg_c + 2 * n * np.pi)
+
+    def arcsin(self):
+        J = bicomplex(0, 1)
+        return -J * ((J*self + (1-self**2)**0.5).log())
+        # return (np.pi/2 - self.arccos())
+
+    def arccos(self):
+        return (np.pi/2 - self.arcsin())
+        # J = bicomplex(0, 1)
+        # return J * ((self - J * (1-self**2)**0.5).log())
+
+    def arctan(self):
+        J = bicomplex(0, 1)
+        arg1, arg2 = 1 - J * self, 1 + J * self
+        tmp = J * (arg1.log() - arg2.log()) * 0.5
+        return bicomplex(tmp.z1, tmp.z2)
+
+    def arccosh(self):
+        return ((self + (self**2-1)**0.5).log())
+
+    def arcsinh(self):
+        return ((self + (self**2+1)**0.5).log())
+
+    def arctanh(self):
+        return 0.5 * (((1+self)/(1-self)).log())
+
+    def _arg_c(self, z1, z2):
+        sign = np.where((z1.real == 0) * (z2.real == 0), 0,
+                        np.where(0 <= z2.real, 1, -1))
+        # clip to avoid nans for complex args
+        arg = z2 / (z1 + _TINY).clip(min=-1e150, max=1e150)
+        arg_c = np.arctan(arg) + sign * np.pi * (z1.real <= 0)
+        return arg_c
+
+    def arg_c1p(self):
+        z1, z2 = 1+self.z1, self.z2
+        return self._arg_c(z1, z2)
+
+    def arg_c(self):
+        return self._arg_c(self.z1, self.z2)
+
+
diff --git a/bumps/openmp_ext.py b/bumps/openmp_ext.py
new file mode 100644
index 0000000..6a02f52
--- /dev/null
+++ b/bumps/openmp_ext.py
@@ -0,0 +1,69 @@
+"""
+Compile openmp extensions with distutils.
+
+:func:`openmp_ext` returns a replacement *build_ext* command that adds
+OpenMP command line parameters to the C compiler for your system.  Use
+this in setup.py for any modules that need to be compiled with OpenMP.
+"""
+import sys
+from distutils.command.build_ext import build_ext
+
+__all__ = ['openmp_ext']
+
+
+def openmp_ext(default=True):
+    """
+    Enable openmp.
+
+    Add the following to setup.py::
+
+        setup(..., cmdclass={'build_ext': openmp_ext()}, ...)
+
+    Enable openmp using "--with-openmp" as a setup parameter, or disable
+    it using "--without-openmp".  If no option is specified, the developer
+    *default* value will be used.
+
+    On OS X you will need to specify an openmp compiler::
+
+        CC=openmp-cc CXX=openmp-c++ python setup.py --with-openmp
+
+    Note: when using openmp, you should not use multiprocessing parallelism;
+    otherwise python will hang.  This is a known bug in the current version
+    of python and gcc.  If your modeling code is compiled with openmp, you
+    can set OMP_NUM_THREADS=1 in the environment to suppress openmp threading
+    when you are running --parallel fits in batch.
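+
+    A fuller setup.py sketch (the package, extension and source names are
+    placeholders)::
+
+        from distutils.core import setup, Extension
+        from bumps.openmp_ext import openmp_ext
+
+        setup(
+            name="mypackage",
+            ext_modules=[Extension("mymodule", ["mymodule.c"])],
+            cmdclass={'build_ext': openmp_ext(default=True)},
+        )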
+    """
+    with_openmp = default
+    if '--with-openmp' in sys.argv:
+        with_openmp = True
+        sys.argv.remove('--with-openmp')
+    elif '--without-openmp' in sys.argv:
+        with_openmp = False
+        sys.argv.remove('--without-openmp')
+
+    if not with_openmp:
+        return build_ext
+
+    compile_opts = {
+        'msvc': ['/openmp'],
+        'mingw32': ['-fopenmp'],
+        'unix': ['-fopenmp'],
+    }
+    link_opts = {
+        'mingw32': ['-fopenmp'],
+        'unix': ['-lgomp'],
+    }
+
+    class OpenMPExt(build_ext):
+
+        def build_extensions(self):
+            c = self.compiler.compiler_type
+            if c in compile_opts:
+                for e in self.extensions:
+                    e.extra_compile_args = compile_opts[c]
+            if c in link_opts:
+                for e in self.extensions:
+                    e.extra_link_args = link_opts[c]
+            build_ext.build_extensions(self)
+
+    return OpenMPExt
diff --git a/bumps/options.py b/bumps/options.py
new file mode 100644
index 0000000..c3c0ae8
--- /dev/null
+++ b/bumps/options.py
@@ -0,0 +1,432 @@
+"""
+Option parser for bumps command line
+"""
+
+import sys
+from .fitters import FITTERS, FIT_AVAILABLE_IDS, FIT_ACTIVE_IDS, FIT_DEFAULT_ID
+
+class ParseOpts:
+    """
+    Options parser.
+
+    Subclass should define *MINARGS*, *FLAGS*, *VALUES* and *USAGE*.
+
+    *MINARGS* is the minimum number of positional arguments.
+
+    *FLAGS* is a set of arguments that may be present or absent.
+
+    *VALUES* is a set of arguments that take values.  Value checking
+    can be done in the setter for each argument in the set.  Default
+    values should be set in the corresponding object attribute.
+
+    *USAGE* is the help string to display for option "help".
+
+    The constructor will invoke the command line parser, leaving the
+    values set by the command line as attribute values.   Flag options
+    will be True or False.
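+
+    A minimal sketch of a subclass (the option names are illustrative)::
+
+        class MyOpts(ParseOpts):
+            MINARGS = 1
+            FLAGS = set(("verbose",))
+            VALUES = set(("output",))
+            USAGE = "usage: myprog [--verbose] [--output=path] modelfile"
+            output = "results"   # default value for --output
+
+        opts = MyOpts(sys.argv)
+        print(opts.verbose, opts.output, opts.args)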
+    """
+    MINARGS = 0
+    FLAGS = set()
+    VALUES = set()
+    USAGE = ""
+
+    def __init__(self, args):
+        self._parse(args)
+
+    def _parse(self, args):
+        if self.VALUES & self.FLAGS:
+            raise TypeError("option used as both a flag and a value: %s"%
+                            ",".join(self.VALUES&self.FLAGS))
+        flagargs = [v
+                    for v in sys.argv[1:]
+                    if v.startswith('--') and '=' not in v]
+        flags = set(v[2:] for v in flagargs)
+        if 'help' in flags or '-h' in sys.argv[1:] or '-?' in sys.argv[1:]:
+            print(self.USAGE)
+            sys.exit()
+        unknown = flags - self.FLAGS
+        if any(unknown):
+            raise ValueError("Unknown options --%s.  Use -? for help."
+                             % ", --".join(unknown))
+        for f in self.FLAGS:
+            setattr(self, f, (f in flags))
+
+        valueargs = [v
+                     for v in sys.argv[1:]
+                     if v.startswith('--') and '=' in v]
+        for f in valueargs:
+            idx = f.find('=')
+            name = f[2:idx]
+            value = f[idx + 1:]
+            if name not in self.VALUES:
+                raise ValueError(
+                    "Unknown option --%s. Use -? for help." % name)
+            setattr(self, name, value)
+
+        positionargs = [v for v in sys.argv[1:] if not v.startswith('--')]
+        self.args = positionargs
+
+
+# === Fitter option parsing ===
+
+class ChoiceList(object):
+
+    def __init__(self, *choices):
+        self.choices = choices
+
+    def __call__(self, value):
+        if value not in self.choices:
+            raise ValueError('invalid option "%s": use %s'
+                             % (value, '|'.join(self.choices)))
+        else:
+            return value
+
+
+def yesno(value):
+    if value.lower() in ('true', 'yes', 'on', '1'):
+        return True
+    elif value.lower() in ('false', 'no', 'off', '0'):
+        return False
+    raise ValueError('invalid option "%s": use yes|no' % value)
+
+
+def parse_int(value):
+    float_value = float(value)
+    if int(float_value) != float_value:
+        raise ValueError("integer expected")
+    return int(float_value)
+
+
+FIT_FIELDS = dict(
+    starts=("Starts", parse_int),
+    steps=("Steps", parse_int),
+    samples=("Samples", parse_int),
+    xtol=("x tolerance", float),
+    ftol=("f(x) tolerance", float),
+    stop=("Stopping criteria", str),
+    thin=("Thinning", parse_int),
+    burn=("Burn-in Steps", parse_int),
+    pop=("Population", float),
+    init=("Initializer", ChoiceList("eps", "lhs", "cov", "random")),
+    CR=("Crossover ratio", float),
+    F=("Scale", float),
+    nT=("# Temperatures", parse_int),
+    Tmin=("Min temperature", float),
+    Tmax=("Max temperature", float),
+    radius=("Simplex radius", float),
+    )
+
+# Make sure all settings are parseable
+for fit in FITTERS:
+    assert all(opt in FIT_FIELDS for opt,_ in fit.settings), \
+        "Fitter %s contains unknown settings"%fit.id
+del fit
+
+class FitConfig(object):
+    """
+    Fit settings configuration object.
+
+    The command line parser will define a FitConfig object which contains
+    the fitter that was given on the command line and all its options.  For
+    embedded bumps, which does not use the bumps command line parser, a
+    new FitConfig object can be created with its own selected options.
+
+    Attributes
+    ----------
+
+    *ids = [id, id, ...]* is a list of available fitters in "preferred" order.
+    Depending on usage, you may want to sort them, or alternatively, sort
+    by long name with *[id for _, id in sorted((v, k) for k, v in self.names.items())]*
+
+    *fitters = {id: fitclass}* maps ids to fitters.
+
+    *names = {id: name}* maps ids to long names.
+
+    *settings = {id: [(option, default), ...]}* maps ids to default settings.
+    The order of the settings is the preferred order to present the settings
+    to the user in a GUI dialog for example.
+
+    *values = {id: {option: value, ...}}* maps ids to the settings for
+    each fitter.  Note that in the GUI, different fitters may have their
+    settings recorded and preserved even when not selected.
+
+    *active_ids = [id, id, ...]* is the list of fitters to show the user in
+    a GUI dialog for example.  The other fitters should still be available from
+    the command line.
+
+    *default_id = id* is the fitter to use by default.
+
+    *selected_id = id* is the fitter that was selected, either by command line
+    or by GUI.
+
+    *selected_values = {option: value}* returns the settings for the current
+    fitter.
+
+    *selected_name = name* returns the name of the selected fitter.
+
+    *selected_fitter = FitClass* returns the class of the selected fitter.
+
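+    Example (a sketch for embedded use; "amoeba" and "dream" must be among
+    the available fitter ids)::
+
+        config = FitConfig(default="amoeba", active=["amoeba", "dream"])
+        config.selected_id = "dream"
+        config.values["dream"]["burn"] = 1000
+        fit_class = config.selected_fitter   # the selected FitBase subclass
+        options = config.selected_values     # {"burn": 1000, ...}
+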
+    """
+    def __init__(self, default=FIT_DEFAULT_ID, active=FIT_ACTIVE_IDS):
+        # Keep a private copy of the configure settings rather than modifying
+        # the global defaults
+        self.ids = [fit.id for fit in FITTERS]
+        # FITTERS is a list of FitBase classes
+        # Each class has:
+        #     fit.id: the short name used on the command line
+        #     fit.name: the long name used in the GUI
+        #     fit.settings: available options: [(key,default value), ...]
+        self.fitters = dict((fit.id, fit) for fit in FITTERS)
+        self.names = dict((fit.id, fit.name) for fit in FITTERS)
+        self.settings = dict((fit.id,fit.settings) for fit in FITTERS)
+        self.values = dict((fit.id, dict(fit.settings)) for fit in FITTERS)
+        if not all(k in self.ids for k in active):
+            raise ValueError("Some active fitters are not available")
+        if default not in active:
+            raise ValueError("default fitter is not active")
+        self.active_ids = active
+        self.default_id = default
+        self.selected_id = default
+
+    def set_from_cli(self, opts):
+        """
+        Use the BumpsOpts command line parser values to set the selected
+        fitter and its configuration options.
+        """
+        fitter = opts.fit
+        self.selected_id = fitter
+        # Convert the supplied options to the correct types and save them
+        # in self.values
+        for field, reset_value in self.settings[fitter]:
+            value = getattr(opts, field, None)
+            parse = FIT_FIELDS[field][1]
+            if value is not None:
+                try:
+                    self.values[fitter][field] = parse(value)
+                except Exception as exc:
+                    raise ValueError("error in --%s: %s" % (field, str(exc)))
+                    # print("options=%s"%(str(self.options)))
+
+    @property
+    def selected_values(self):
+        return self.values[self.selected_id]
+
+    @property
+    def selected_name(self):
+        return self.names[self.selected_id]
+
+    @property
+    def selected_fitter(self):
+        return self.fitters[self.selected_id]
+
+# FitConfig singleton for the common case in which only one config is needed.
+# There may be other use cases, such as saving the fit config along with the
+# rest of the state so that on resume the fit options are restored, but in that
+# case the application will not be using the singleton.
+FIT_CONFIG = FitConfig()
+
+# === Bumps options parsing ===
+class BumpsOpts(ParseOpts):
+    """
+    Option parser for bumps.
+    """
+    MINARGS = 1
+    FLAGS = set(("preview", "chisq", "profile", "time_model",
+                 "simulate", "simrandom", "shake", "worker",
+                 "batch", "noshow", "overwrite", "parallel", "stepmon",
+                 "err", "cov", "entropy",
+                 "remote", "staj", "edit", "mpi", "keep_best",
+                 # passed in when app is a frozen image
+                 "multiprocessing-fork",
+                 # passed when not running bumps, but instead using a
+                 # bundled application as a python distribution with domain
+                 # specific models pre-defined.
+                 "i",
+                 ))
+    VALUES = set(("plot", "store", "resume", "fit", "noise", "seed", "pars",
+                  "resynth", "transport", "notify", "queue", "time",
+                  "m", "c", "p", "view",
+                  ))
+    # Add in parameters from the fitters
+    VALUES |= set(FIT_FIELDS.keys())
+    pars = None
+    notify = ""
+    queue = "http://reflectometry.org/queue"
+    resynth = "0"
+    noise = "5"
+    starts = "1"
+    seed = ""
+    time = "inf"
+    view = None
+    PLOTTERS = "linear", "log", "residuals"
+    USAGE = """\
+Usage: bumps [options] modelfile [modelargs]
+
+The modelfile is a Python script (i.e., a series of Python commands)
+which sets up the data, the models, and the fittable parameters.
+The model arguments are available in the modelfile as sys.argv[1:].
+Model arguments may not start with '-'.
+
+Options:
+
+    --preview
+        display model but do not perform a fitting operation
+    --pars=filename
+        initial parameter values; fit results are saved as <modelname>.par
+    --plot=log      [%(plotter)s]
+        type of plot to display
+    --simulate
+        simulate a dataset using the initial problem parameters
+    --simrandom
+        simulate a dataset using random problem parameters
+    --shake
+        set random parameters before fitting
+    --noise=5%%
+        percent noise to add to the simulated data
+    --seed=integer
+        random number seed
+    --err
+        show uncertainty estimate from curvature at the minimum
+    --cov
+        show the covariance matrix for the model when done
+    --entropy
+        compute entropy for the model when done [dream only]
+    --staj
+        output staj file when done
+    --edit
+        start the gui
+    --view=linear|log
+        one of the predefined problem views; reflectometry also has fresnel,
+        logfresnel, q4 and residuals
+
+    --store=path
+        output directory for plots and models
+    --overwrite
+        if store already exists, replace it
+    --resume=path    [dream]
+        resume a fit from previous stored state
+    --parallel
+        run fit using multiprocessing for parallelism
+    --mpi
+        run fit using MPI for parallelism (use command "mpirun -n cpus ...")
+    --batch
+        batch mode; save output in .mon file and don't show plots after fit
+    --noshow
+        semi-batch; send output to console but don't show plots after fit
+    --remote
+        queue fit to run on remote server
+    --notify=user@email
+        remote fit notification
+    --queue=http://reflectometry.org
+        remote job queue
+    --time=inf
+        run for a maximum number of hours
+
+    --fit=amoeba    [%(fitter)s]
+        fitting engine to use; see manual for details
+    --steps=400    [%(fitter)s]
+        number of fit iterations after any burn-in time
+    --samples=1e4   [dream]
+        set steps so the target number of samples is drawn
+    --xtol=1e-4     [de, amoeba]
+        minimum population diameter
+    --ftol=1e-4     [de, amoeba]
+        minimum population flatness
+    --pop=10        [dream, de, rl, ps]
+        population size
+    --burn=100      [dream, pt]
+        number of burn-in iterations before accumulating stats
+    --thin=1        [dream]
+        number of fit iterations between steps
+    --nT=25
+    --Tmin=0.1
+    --Tmax=10       [pt]
+        temperatures vector; use a higher maximum temperature and a larger
+        nT if your fit is getting stuck in local minima
+    --CR=0.9        [de, rl, pt]
+        crossover ratio for population mixing
+    --starts=1      [newton, rl, amoeba]
+        number of times to run the fit from random starting points.
+    --keep_best
+        when running with multiple starts, restart from a point near the
+        last minimum rather than using a completely random starting point.
+    --init=eps      [dream]
+        population initialization method:
+          eps:    ball around initial parameter set
+          lhs:    latin hypercube sampling
+          cov:    normally distributed according to covariance matrix
+          random: uniformly distributed within parameter ranges
+    --stepmon
+        show details for each step
+    --resynth=0
+        run resynthesis error analysis for n generations
+
+    --time_model
+        run the model --steps times in order to estimate total run time.
+    --profile
+        run the python profiler on the model; use --steps to run multiple
+        models for better statistics
+    --chisq
+        print the model description and chisq value and exit
+    -m/-c/-p command
+        run the python interpreter with bumps on the path:
+            m: command is a module such as bumps.cli, run as __main__
+            c: command is a python one-line command
+            p: command is the name of a python script
+    -i
+        start the interactive interpreter
+    -?/-h/--help
+        display this help
+""" % {'fitter': '|'.join(sorted(FIT_AVAILABLE_IDS)),
+       'plotter': '|'.join(PLOTTERS),
+       }
+
+#    --transport=mp  {amqp|mp|mpi}
+#        use amqp/multiprocessing/mpi for parallel evaluation
+
+    _plot = 'log'
+
+    def _set_plot(self, value):
+        if value not in set(self.PLOTTERS):
+            raise ValueError("unknown plot type %s; use %s"
+                             % (value, "|".join(self.PLOTTERS)))
+        self._plot = value
+    plot = property(fget=lambda self: self._plot, fset=_set_plot)
+    store = None
+    resume = None
+    _fit = FIT_DEFAULT_ID
+    @property
+    def fit(self):
+        return self._fit
+    @fit.setter
+    def fit(self, value):
+        if value not in FIT_AVAILABLE_IDS:
+            raise ValueError("unknown fitter %s; use %s"
+                             % (value, "|".join(sorted(FIT_AVAILABLE_IDS))))
+        self._fit = value
+    fit_config = FIT_CONFIG
+    TRANSPORTS = 'amqp', 'mp', 'mpi', 'celery'
+    _transport = 'mp'
+
+    def _set_transport(self, value):
+        if value not in self.TRANSPORTS:
+            raise ValueError("unknown transport %s; use %s"
+                             % (value, "|".join(self.TRANSPORTS)))
+        self._transport = value
+    transport = property(
+        fget=lambda self: self._transport, fset=_set_transport)
+    meshsteps = 40
+
+
+def getopts():
+    """
+    Process command line options.
+
+    Option values will be stored as attributes in the returned object.
+    """
+    opts = BumpsOpts(sys.argv)
+    opts.resynth = int(opts.resynth)
+    opts.seed = int(opts.seed) if opts.seed != "" else None
+    opts.fit_config.set_from_cli(opts)
+    return opts
+
+
diff --git a/bumps/parameter.py b/bumps/parameter.py
new file mode 100644
index 0000000..fd93875
--- /dev/null
+++ b/bumps/parameter.py
@@ -0,0 +1,907 @@
+# This program is public domain
+# Author: Paul Kienzle
+"""
+Fitting parameter objects.
+
+Parameters are a big part of the interface between the model and the fitting
+engine.  By saving and retrieving values and ranges from the parameter, the
+fitting engine does not need to be aware of the structure of the model.
+
+Users can also perform calculations with parameters, tying together different
+parts of the model, or different models.
+"""
+#__all__ = [ 'Parameter']
+from six.moves import reduce
+import warnings
+from copy import copy
+
+from numpy import inf, isinf, isfinite
+
+from . import bounds as mbounds
+
+# TODO: avoid evaluation of subexpressions if parameters do not change.
+# This is especially important if the subexpression invokes an expensive
+# calculation via a parameterized function.  This will require a restructuring
+# of the parameter class.  The park-1.3 solution is viable: given a parameter
+# set, figure out which order the expressions need to be evaluated by
+# building up a dependency graph.  With a little care, we can check which
+# parameters have actually changed since the last calculation update, and
+# restrict the dependency graph to just them.
+# TODO: support full aliasing, so that floating point model attributes can
+# be aliased to a parameter.  The same technique as subexpressions applies:
+# when the parameter is changed, the model will be updated and will need
+# to be re-evaluated.
+
+
+class BaseParameter(object):
+    """
+    Root of the parameter class, defining arithmetic on parameters
+    """
+
+    # Parameters are fixed unless told otherwise
+    fixed = True
+    fittable = False
+    discrete = False
+    _bounds = mbounds.Unbounded()
+    name = None
+
+    # Parameters may be dependent on other parameters, and the
+    # fit engine will need to access them.
+    def parameters(self):
+        return [self]
+
+    def pmp(self, *args):
+        """
+        Allow the parameter to vary as value +/- percent.
+
+        pmp(*percent*) -> [value*(1-percent/100), value*(1+percent/100)]
+
+        pmp(*plus*, *minus*) -> [value*(1+minus/100), value*(1+plus/100)]
+
+        In the *plus/minus* form, one of the numbers should be plus and the
+        other minus, but it doesn't matter which.
+
+        The resulting range is converted to "nice" numbers.
+        """
+        self.bounds = mbounds.Bounded(*mbounds.pmp(self.value, *args))
+        return self
+
+    def pm(self, *args):
+        """
+        Allow the parameter to vary as value +/- delta.
+
+        pm(*delta*) -> [value-delta, value+delta]
+
+        pm(*plus*, *minus*) -> [value+minus, value+plus]
+
+        In the *plus/minus* form, one of the numbers should be plus and the
+        other minus, but it doesn't matter which.
+
+        The resulting range is converted to "nice" numbers.
+        """
+        self.bounds = mbounds.Bounded(*mbounds.pm(self.value, *args))
+        return self
+
+    def dev(self, std, mean=0, limits=None, sigma=None, mu=None):
+        """
+        Allow the parameter to vary according to a normal distribution, with
+        deviations from the mean added to the overall cost function for the
+        model.
+
+        If *mean* is None, then it defaults to the current parameter value.
+
+        If *limits* are provided, then use a truncated normal distribution.
+
+        Note: *sigma* and *mu* have been replaced by *std* and *mean*, but
+        are left in for backward compatibility.
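+
+        Example: a Gaussian prior of 125 +/- 5 on a parameter (a sketch;
+        the parameter name is illustrative)::
+
+            thickness = Parameter(125, name="thickness").dev(5, mean=125)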
+        """
+        if sigma is not None or mu is not None:
+            # CRUFT: remove sigma and mu parameters
+            warnings.warn(DeprecationWarning("use std,mean instead of mu,sigma in Parameter.dev"))
+            if sigma is not None: std = sigma
+            if mu is not None: mean = mu
+        if mean is None:
+            mean = self.value # Note: value is an attribute of the derived class
+        if limits is None:
+            self.bounds = mbounds.Normal(mean, std)
+        else:
+            self.bounds = mbounds.BoundedNormal(mean, std, limits)
+        return self
+
+    def pdf(self, dist):
+        """
+        Allow the parameter to vary according to any continuous scipy.stats
+        distribution.
+        """
+        self.bounds = mbounds.Distribution(dist)
+        return self
+
+    def range(self, low, high):
+        """
+        Allow the parameter to vary within the given range.
+        """
+        self.bounds = mbounds.init_bounds((low, high))
+        return self
+
+    def soft_range(self, low, high, std):
+        """
+        Allow the parameter to vary within the given range, or with Gaussian
+        probability, stray from the range.
+        """
+        self.bounds = mbounds.SoftBounded(low, high, std)
+
+    @property
+    def bounds(self):
+        """Fit bounds"""
+        # print "getting bounds for",self,self._bounds
+        return self._bounds
+
+    @bounds.setter
+    def bounds(self, b):
+        # print "setting bounds for",self
+        if self.fittable:
+            self.fixed = (b is None)
+        self._bounds = b
+
+    # Functional form of parameter value access
+    def __call__(self):
+        return self.value
+
+    # Parameter algebra: express relationships between parameters
+    def __gt__(self, other):
+        return ConstraintGT(self, other)
+
+    def __ge__(self, other):
+        return ConstraintGE(self, other)
+    def __le__(self, other):
+        return ConstraintLE(self, other)
+
+    def __lt__(self, other):
+        return ConstraintLT(self, other)
+
+    # def __eq__(self, other):
+    #     return ConstraintEQ(self, other)
+
+    # def __ne__(self, other):
+    #     return ConstraintNE(self, other)
+
+    def __add__(self, other):
+        return OperatorAdd(self, other)
+
+    def __sub__(self, other):
+        return OperatorSub(self, other)
+
+    def __mul__(self, other):
+        return OperatorMul(self, other)
+
+    def __div__(self, other):
+        return OperatorDiv(self, other)
+
+    def __pow__(self, other):
+        return OperatorPow(self, other)
+
+    def __radd__(self, other):
+        return OperatorAdd(other, self)
+
+    def __rsub__(self, other):
+        return OperatorSub(other, self)
+
+    def __rmul__(self, other):
+        return OperatorMul(other, self)
+
+    def __rdiv__(self, other):
+        return OperatorDiv(other, self)
+
+    def __rpow__(self, other):
+        return OperatorPow(other, self)
+
+    def __abs__(self):
+        return _abs(self)
+
+    def __neg__(self):
+        return self * -1
+
+    def __pos__(self):
+        return self
+
+    def __float__(self):
+        return float(self.value)
+
+    __truediv__ = __div__
+    __rtruediv__ = __rdiv__
+
+    def nllf(self):
+        """
+        Return -log(P) for the current parameter value.
+        """
+        return self.bounds.nllf(self.value)
+
+    def residual(self):
+        """
+        Return the z score equivalent for the current parameter value.
+
+        That is, given the value of the parameter in the underlying
+        distribution, find the equivalent value in the standard normal.
+        For a Gaussian, this is the z score, in which you subtract the
+        mean and divide by the standard deviation to get the number of
+        sigmas away from the mean.  For other distributions, you need to
+        compute the cdf of value in the parameter distribution and invert
+        it using the ppf from the standard normal distribution.
+        """
+        return self.bounds.residual(self.value)
+
+    def valid(self):
+        """
+        Return true if the parameter is within the valid range.
+        """
+        return not isinf(self.nllf())
+
+    def format(self):
+        """
+        Format the parameter, value and range as a string.
+        """
+        return "%s=%g in %s" % (self, self.value, self.bounds)
+
+    def __str__(self):
+        name = self.name if self.name is not None else '?'
+        return name
+
+    def __repr__(self):
+        return "Parameter(%s)" % self
+
+
+class Constant(BaseParameter):
+    """
+    An unmodifiable value.
+    """
+    fittable = False
+    fixed = True
+
+    @property
+    def value(self):
+        return self._value
+
+    def __init__(self, value, name=None):
+        self._value = value
+        self.name = name
+
+
+class Parameter(BaseParameter):
+    """
+    A parameter is a symbolic value.
+
+    It can be fixed or it can vary within bounds.
+
+    p = Parameter(3).pmp(10)    # 3 +/- 10%
+    p = Parameter(3).pmp(-5,10) # 3 in [2.85,3.3] rounded to 2 digits
+    p = Parameter(3).pm(2)      # 3 +/- 2
+    p = Parameter(3).pm(-1,2)   # 3 in [2,5]
+    p = Parameter(3).range(0,5) # 3 in [0,5]
+
+    It has hard limits on the possible values, and a range that should live
+    within those hard limits.  The value should lie within the range for
+    it to be valid.  Some algorithms may drive the value outside the range
+    in order to satisfy soft constraints.
+
+    Other properties can decorate the parameter, such as tip for tool tip
+    and units for units.
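+
+    Example (a sketch; *units* and *tip* are free-form attributes chosen
+    by the model)::
+
+        width = Parameter(25, name="width", units="nm", tip="layer width")
+        width.range(0, 100)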
+    """
+    fittable = True
+
+    @classmethod
+    def default(cls, value, **kw):
+        """
+        Create a new parameter with the *value* and *kw* attributes, or return
+        the existing parameter if *value* is already a parameter.
+
+        The attributes are the same as those for Parameter, or whatever
+        subclass *cls* of Parameter is being created.
+        """
+        # Need to constrain the parameter to fit within fixed limits and
+        # to receive a name if a name has not already been provided.
+        if isinstance(value, BaseParameter):
+            return value
+        else:
+            return cls(value, **kw)
+
+    def set(self, value):
+        """
+        Set a new value for the parameter, ignoring the bounds.
+        """
+        self.value = value
+
+    def clip_set(self, value):
+        """
+        Set a new value for the parameter, clipping it to the bounds.
+        """
+        low, high = self.bounds.limits
+        self.value = min(max(value, low), high)
+
+    def __init__(self, value=None, bounds=None, fixed=None, name=None, **kw):
+        # UI niceties:
+        # 1. check if we are started with value=range or bounds=range; if we
+        # are given bounds, then assume this is a fitted parameter, otherwise
+        # the parameter defaults to fixed; if value is not set, use the
+        # midpoint of the range.
+        if bounds is None:
+            try:
+                lo, hi = value
+                warnings.warn(DeprecationWarning("parameters can no longer be initialized with a fit range"))
+                bounds = lo, hi
+                value = None
+            except TypeError:
+                pass
+        if fixed is None:
+            fixed = (bounds is None)
+        bounds = mbounds.init_bounds(bounds)
+        if value is None:
+            value = bounds.start_value()
+
+        # Store whatever values the user needs to associate with the parameter
+        # Models should set units and tool tips so the user interface has
+        # something to work with.
+        limits = kw.get('limits', (-inf, inf))
+        for k, v in kw.items():
+            setattr(self, k, v)
+
+        # Initialize bounds, with limits clipped to the hard limits for the
+        # parameter
+        def clip(x, a, b):
+            return min(max(x, a), b)
+        self.bounds = bounds
+        self.bounds.limits = (clip(self.bounds.limits[0], *limits),
+                              clip(self.bounds.limits[1], *limits))
+        self.value = value
+        self.fixed = fixed
+        self.name = name
+
+    def randomize(self, rng=None):
+        """
+        Set a random value for the parameter.
+        """
+        self.value = self.bounds.rand(rng if rng is not None else mbounds.RNG)
+
+    def feasible(self):
+        """
+        Value is within the limits defined by the model
+        """
+        return self.limits[0] <= self.value <= self.limits[1]
+
+
+class Reference(Parameter):
+    """
+    Create an adaptor so that a model attribute can be treated as if it
+    were a parameter.  This allows only direct access, wherein the
+    storage for the parameter value is provided by the underlying model.
+
+    Indirect access, wherein the storage is provided by the parameter, cannot
+    be supported since the parameter has no way to detect that the model
+    is asking for the value of the attribute.  This means that model
+    attributes cannot be assigned to parameter expressions without some
+    trigger to update the values of the attributes in the model.
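+
+    Example (a sketch; the model object and its *scale* attribute are
+    hypothetical)::
+
+        class Sample(object):
+            scale = 1.0
+
+        sample = Sample()
+        scale = Reference(sample, 'scale', name="sample scale").range(0, 10)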
+    """
+
+    def __init__(self, obj, attr, **kw):
+        self.obj = obj
+        self.attr = attr
+        kw.setdefault('name', ".".join([obj.__class__.__name__, attr]))
+        Parameter.__init__(self, **kw)
+
+    @property
+    def value(self):
+        return getattr(self.obj, self.attr)
+
+    @value.setter
+    def value(self, value):
+        setattr(self.obj, self.attr, value)
+
+
+class ParameterSet(object):
+    """
+    A parameter that depends on the model.
+    """
+
+    def __init__(self, reference, names=None):
+        """
+        Create a parameter set, with one parameter for each model name.
+
+        *names* is the list of model names.
+
+        *reference* is the underlying :class:`parameter.Parameter` that will
+        be set when the model is selected.
+
+        *parameters* will be created, with one parameter per model.
+        """
+        self.names = names
+        self.reference = reference
+        self.parameters = [copy(reference) for _ in names]
+        # print self.reference, self.parameters
+        for p, n in zip(self.parameters, names):
+            p.name = " ".join((n, p.name))
+        # Reference is no longer directly fittable
+        self.reference.fittable = False
+
+    # Make the parameter set act like a list
+    def __getitem__(self, i):
+        """
+        Return the underlying parameter for the model index.  Index can
+        either be an integer or a model name.
+        """
+        # Try looking up the free variable by model name rather than model
+        # index. If this fails, assume index is a model index.
+        try:
+            i = self.names.index(i)
+        except ValueError:
+            pass
+        return self.parameters[i]
+
+    def __setitem__(self, i, v):
+        try:
+            i = self.names.index(i)
+        except ValueError:
+            pass
+        self.parameters[i] = v
+
+    def __iter__(self):
+        return iter(self.parameters)
+
+    def __len__(self):
+        return len(self.parameters)
+
+    def set_model(self, index):
+        """
+        Set the underlying model parameter to the value of the nth model.
+        """
+        self.reference.value = self.parameters[index].value
+
+    @property
+    def values(self):
+        return [p.value for p in self.parameters]
+
+    @values.setter
+    def values(self, values):
+        for p, v in zip(self.parameters, values):
+            p.value = v
+
+    def range(self, *args, **kw):
+        """
+        Like :meth:`parameter.Parameter.range`, but applied to all models.
+        """
+        for p in self.parameters:
+            p.range(*args, **kw)
+
+    def pm(self, *args, **kw):
+        """
+        Like :meth:`parameter.Parameter.pm`, but applied to all models.
+        """
+        for p in self.parameters:
+            p.pm(*args, **kw)
+
+    def pmp(self, *args, **kw):
+        """
+        Like :meth:`parameter.Parameter.pmp`, but applied to all models.
+        """
+        for p in self.parameters:
+            p.pmp(*args, **kw)
+
+
+class FreeVariables(object):
+    """
+    A collection of parameter sets for a group of models.
+
+    *names* is the set of model names.
+
+    The parameters themselves are specified as key=value pairs, with key
+    being the attribute name which is used to retrieve the parameter set
+    and value being a :class:`Parameter` containing the parameter that is
+    shared between the models.
+
+    In order to evaluate the log likelihood of all models simultaneously,
+    the fitting program will need to call set_model with the model index
+    for each model in turn in order to substitute the values from the free
+    variables into the model.  This allows us to share a common sample
+    across multiple data sets, with each dataset having its own values for
+    some of the sample parameters.  The alternative is to copy the entire
+    sample structure, sharing references to common parameters and creating
+    new parameters for each model for the free parameters.  Setting up
+    these copies was inconvenient.
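+
+    Example (a sketch; the model names and the shared *background*
+    parameter are illustrative)::
+
+        background = Parameter(1e-8, name="background")
+        free = FreeVariables(names=["data1", "data2"], background=background)
+        free.background.range(0, 1e-6)   # each model gets its own fitted copy
+        free.set_model(0)                # push model 0's value into *background*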
+    """
+
+    def __init__(self, names=None, **kw):
+        if names is None:
+            raise TypeError("FreeVariables needs name=[model1, model2, ...]")
+        self.names = names
+
+        # Create slots to hold the free variables
+        self._parametersets = dict((k, ParameterSet(v, names=names))
+                                   for k, v in kw.items())
+
+    # Shouldn't need explicit __getstate__/__setstate__ but mpi4py pickle
+    # chokes without it.
+    def __getstate__(self):
+        return self.__dict__
+
+    def __setstate__(self, state):
+        self.__dict__ = state
+
+    def __getattr__(self, k):
+        """
+        Return the parameter set for the given free parameter.
+        """
+        try:
+            return self._parametersets[k]
+        except KeyError:
+            raise AttributeError('FreeVariables has no attribute %r' % k)
+
+    def parameters(self):
+        """
+        Return the set of free variables for all the models.
+        """
+        return dict((k, v.parameters) for k, v in self._parametersets.items())
+
+    def set_model(self, i):
+        """
+        Set the reference parameters for model *i*.
+        """
+        for p in self._parametersets.values():
+            p.set_model(i)
+
+
+# Current implementation computes values on the fly, so you only
+# need to plug the values into the parameters and the parameters
+# are automatically updated.
+#
+# This will not work well for wrapped models.  In those cases you
+# want to do a number of optimizations, such as only updating the
+#
+
+# ==== Comparison operators ===
+class Constraint:
+    """
+    Abstract base class for constraints.
+    """
+
+    def __bool__(self):
+        """
+        Returns True if the condition is satisfied
+        """
+        raise NotImplementedError
+    __nonzero__ = __bool__
+
+    def __str__(self):
+        """
+        Text description of the constraint
+        """
+        raise NotImplementedError
+
+
+def _gen_constraint(name, op):
+    """
+    Generate a constraint class from a comparison operator.
+    """
+    return '''\
+class Constraint%(name)s(Constraint):
+    """
+    Constraint operator %(op)s
+    """
+    def __init__(self, a, b):
+        self.a, self.b = a,b
+    def __bool__(self):
+        return float(self.a) %(op)s float(self.b)
+    __nonzero__ = __bool__
+    def __str__(self):
+        return "(%%s %(op)s %%s)"%%(self.a,self.b)
+''' % dict(name=name, op=op)
+
+exec(_gen_constraint('GT', '>'))
+exec(_gen_constraint('GE', '>='))
+exec(_gen_constraint('LE', '<='))
+exec(_gen_constraint('LT', '<'))
+exec(_gen_constraint('EQ', '=='))
+exec(_gen_constraint('NE', '!='))
+
+
+# ==== Arithmetic operators ===
+def _gen_binop(name, op):
+    """
+    Generate a parameter operator class from an arithmetic operator.
+    """
+    return '''\
+class Operator%(name)s(BaseParameter):
+    """
+    Parameter operator %(op)s
+    """
+    def __init__(self, a, b):
+        self.a, self.b = a,b
+        pars = []
+        if isinstance(a,BaseParameter): pars += a.parameters()
+        if isinstance(b,BaseParameter): pars += b.parameters()
+        self._parameters = pars
+        self.name = str(self)
+    def parameters(self):
+        return self._parameters
+    @property
+    def value(self):
+        return float(self.a) %(op)s float(self.b)
+    @property
+    def dvalue(self):
+        return float(self.a)
+    def __str__(self):
+        return "(%%s %(op)s %%s)"%%(self.a,self.b)
+''' % dict(name=name, op=op)
+
+exec(_gen_binop('Add', '+'))
+exec(_gen_binop('Sub', '-'))
+exec(_gen_binop('Mul', '*'))
+exec(_gen_binop('Div', '/'))
+exec(_gen_binop('Pow', '**'))
+
+
+def substitute(a):
+    """
+    Return structure a with values substituted for all parameters.
+
+    The function traverses lists, tuples and dicts recursively.  Things
+    which are not parameters are returned directly.
+    """
+    if isinstance(a, BaseParameter):
+        return float(a.value)
+    elif isinstance(a, tuple):
+        return tuple(substitute(v) for v in a)
+    elif isinstance(a, list):
+        return [substitute(v) for v in a]
+    elif isinstance(a, dict):
+        return dict((k, substitute(v)) for k, v in a.items())
+    else:
+        return a
+
+
+class Function(BaseParameter):
+    """
+    Delayed function evaluator.
+
+    f.value evaluates the function with the values of the
+    parameter arguments at the time f.value is referenced rather
+    than when the function was invoked.
+    """
+    __slots__ = ['op', 'args', 'kw']
+
+    def __init__(self, op, *args, **kw):
+        self.name = kw.pop('name', None)
+        self.op, self.args, self.kw = op, args, kw
+
+    def parameters(self):
+        # Figure out which arguments to the function are parameters
+        #deps = [p for p in self.args if isinstance(p,BaseParameter)]
+        deps = flatten((self.args, self.kw))
+        # Find out which other parameters these parameters depend on.
+        res = []
+        for p in deps:
+            res.extend(p.parameters())
+        return res
+
+    def _value(self):
+        # Expand args and kw, replacing instances of parameters
+        # with their values
+        return self.op(*substitute(self.args), **substitute(self.kw))
+    value = property(_value)
+
+    def __getstate__(self):
+        return self.name, self.op, self.args, self.kw
+
+    def __setstate__(self, state):
+        self.name, self.op, self.args, self.kw = state
+
+    def __str__(self):
+        if self.name is not None:
+            name = self.name
+        else:
+            args = [str(v) for v in self.args]
+            kw = [str(k) + "=" + str(v) for k, v in self.kw.items()]
+            name = self.op.__name__ + "(" + ", ".join(args + kw) + ")"
+        return "%s:%g" % (name, self.value)
+
+
+def function(op):
+    """
+    Convert a function into a delayed evaluator.
+
+    The value of the function is computed from the values of the parameters
+    at the time that the function value is requested rather than when the
+    function is created.
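+
+    Example (a sketch): wrap numpy's sin so that it is re-evaluated from the
+    current parameter values whenever the result is requested::
+
+        import numpy as np
+        angle = Parameter(30, name="angle")
+        sin_angle = function(np.sin)(angle * np.pi / 180)
+        sin_angle.value   # approximately 0.5; tracks later changes to angle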
+    """
+    # Note: @functools.wraps(op) does not work with numpy ufuncs
+    # Note: @decorator does not work with builtins like abs
+    def function_generator(*args, **kw):
+        return Function(op, *args, **kw)
+    function_generator.__name__ = op.__name__
+    function_generator.__doc__ = op.__doc__
+    return function_generator
+_abs = function(abs)
+
+
+def flatten(s):
+    if isinstance(s, (tuple, list)):
+        return reduce(lambda a, b: a + flatten(b), s, [])
+    elif isinstance(s, set):
+        raise TypeError("parameter flattening cannot order sets")
+    elif isinstance(s, dict):
+        return reduce(lambda a, b: a + flatten(s[b]), sorted(s.keys()), [])
+    elif isinstance(s, BaseParameter):
+        return [s]
+    elif s is None:
+        return []
+    else:
+        raise TypeError("don't understand type %s for %r" % (type(s), s))
+
+
+def format(p, indent=0):
+    """
+    Format parameter set for printing.
+
+    Note that this only says how the parameters are arranged, not how they
+    relate to each other.
+    """
+    if isinstance(p, dict) and p != {}:
+        res = []
+        for k in sorted(p.keys()):
+            if k.startswith('_'):
+                continue
+            s = format(p[k], indent + 2)
+            label = " " * indent + "." + k
+            if s.endswith('\n'):
+                res.append(label + "\n" + s)
+            else:
+                res.append(label + " = " + s + '\n')
+        if '_index' in p:
+            res.append(format(p['_index'], indent))
+        return "".join(res)
+    elif isinstance(p, list) and p != []:
+        res = []
+        for k, v in enumerate(p):
+            s = format(v, indent + 2)
+            label = " " * indent + "[%d]" % k
+            if s.endswith('\n'):
+                res.append(label + '\n' + s)
+            else:
+                res.append(label + ' = ' + s + '\n')
+        return "".join(res)
+    # elif isinstance(p, tuple) and p != ():
+    #    return "".join(format(v, indent) for v in p)
+
+    elif isinstance(p, Parameter):
+        if p.fixed:
+            bounds = ""
+        else:
+            bounds = ", bounds=(%g,%g)" % p.bounds.limits
+        return "Parameter(%g, name='%s'%s)" % (p.value, str(p), bounds)
+    elif isinstance(p, BaseParameter):
+        return str(p)
+    else:
+        return "None"
+
+
+def summarize(pars, sorted=False):
+    """
+    Return a stylized list of parameter names and values with range bars
+    suitable for printing.
+
+    If sorted, then print the parameters sorted alphabetically by name.
+    """
+    output = []
+    if sorted:
+        # The flag shadows the builtin sorted(), and Python 3 has no cmp;
+        # sort a copy of the list by name instead.
+        pars = list(pars)
+        pars.sort(key=lambda p: p.name)
+    for p in pars:
+        if not isfinite(p.value):
+            bar = "*invalid* "
+        else:
+            position = int(p.bounds.get01(p.value) * 9.999999999)
+            bar = ['.'] * 10
+            if position < 0:
+                bar[0] = '<'
+            elif position > 9:
+                bar[9] = '>'
+            else:
+                bar[position] = '|'
+        output.append("%40s %s %10g in %s" %
+                      (p.name, "".join(bar), p.value, p.bounds))
+    return "\n".join(output)
+
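+# Rough sketch of the summarize() output format (names, values and bounds are
+# hypothetical, and the column spacing is only approximate):
+#
+#                                    width ...|...... 3.5 in (0,10)
+#                                   center <.........  -1.2 in (0,5)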
+
+def unique(s):
+    """
+    Return the unique set of parameters
+
+    The ordering is stable.  The same parameters/dependencies will always
+    return the same ordering, with the first occurrence first.
+    """
+    # Walk structures such as dicts and lists
+    pars = flatten(s)
+    # print "====== flattened"
+    # print "\n".join("%s:%s"%(id(p),p) for p in pars)
+    # Also walk parameter expressions
+    pars = pars + flatten([p.parameters() for p in pars])
+    # print "====== extended"
+    # print "\n".join("%s:%s"%(id(p),p) for p in pars)
+
+    # TODO: implement n log n rather than n^2 uniqueness algorithm
+    # problem is that the sorting has to be unique across a pickle.
+    result = []
+    for p in pars:
+        if not any(p is q for q in result):
+            result.append(p)
+
+    # print "====== unique"
+    # print "\n".join("%s:%s"%(id(p),p) for p in result)
+    # Return the complete set of parameters
+    return result
+
+
+def fittable(s):
+    """
+    Return the list of fittable parameters in no particular order.
+
+    Note that some fittable parameters may be fixed during the fit.
+    """
+    return [p for p in unique(s) if p.fittable]
+
+
+def varying(s):
+    """
+    Return the list of fitted parameters in the model.
+
+    This is the set of parameters that will vary during the fit.
+    """
+    return [p for p in unique(s) if not p.fixed]
+
+
+def randomize(s):
+    """
+    Set random values to the parameters in the parameter set, with
+    values chosen according to the bounds.
+    """
+    for p in s:
+        p.value = p.bounds.random(1)[0]
+
+
+def current(s):
+    return [p.value for p in s]
+
+# ========= trash ===================
+
+
+class IntegerParameter(Parameter):
+    discrete = True
+
+    def _get_value(self):
+        return self._value
+
+    def _set_value(self, value):
+        self._value = int(value)
+    value = property(_get_value, _set_value)
+
+
+class Alias(object):
+    """
+    Parameter alias.
+
+    Rather than modifying a model to contain a parameter slot,
+    allow the parameter to exist outside the model. The resulting
+    parameter will have the full parameter semantics, including
+    the ability to replace a fixed value with a parameter expression.
+
+    # TODO: how is this any different from Reference above?
+    """
+
+    def __init__(self, obj, attr, p=None, name=None):
+        self.obj = obj
+        self.attr = attr
+        if name is None:
+            name = ".".join([obj.__class__.__name__, attr])
+        self.p = Parameter.default(p, name=name)
+
+    def update(self):
+        setattr(self.obj, self.attr, self.p.value)
+
+    def parameters(self):
+        return self.p.parameters()
diff --git a/bumps/partemp.py b/bumps/partemp.py
new file mode 100644
index 0000000..0572c7f
--- /dev/null
+++ b/bumps/partemp.py
@@ -0,0 +1,343 @@
+# This program is public domain
+# Author: Paul Kienzle
+"""
+Parallel tempering for continuous function optimization and uncertainty analysis.
+
+The program performs Markov chain Monte Carlo exploration of a probability
+density function using a combination of random and differential evolution
+updates.
+"""
+from __future__ import division, print_function
+
+__all__ = ["parallel_tempering"]
+
+import numpy as np
+from numpy import asarray, zeros, ones, exp, diff, std, inf, \
+    array, nonzero, sqrt, zeros_like
+from numpy.linalg import norm
+from numpy.random import rand, randn, randint, permutation
+
+
+def every_ten(step, x, fx, P, E):
+    # Default monitor: report the best value and best point every tenth step.
+    if step % 10 == 0:
+        print(step, fx, x)
+
+
+def parallel_tempering(nllf, p, bounds, T=None, steps=1000,
+                       CR=0.9, burn=1000,
+                       monitor=every_ten,
+                       logfile=None):
+    r"""
+    Perform a MCMC walk using multiple temperatures in parallel.
+
+    :Parameters:
+
+    *nllf* : function(vector) -> float
+        Negative log likelihood function to be minimized.  $\chi^2/2$ is a
+        good choice for curve fitting with no prior restraints on the possible
+        input parameters.
+    *p* : vector
+        Initial value
+    *bounds* : vector, vector
+        Box constraints on the parameter values.  No support for indefinite
+        or semi-definite programming at present
+    *T* : vector | 0 < T[0] < T[1] < ...
+        Temperature vector.  Something like logspace(-1,1,10) will give
+        you 10 logarithmically spaced temperatures between 0.1 and 10.  The
+        maximum temperature T[-1] determines the size of the barriers that
+        can be easily jumped.  Note that the number of temperature values
+        sets the amount of parallelism available in the algorithm: more
+        temperatures can gather statistics more quickly, though the chain
+        will not necessarily converge any faster.
+    *steps* = 1000 : int
+        Length of the accumulation vector.  The returned history will store
+        this many values for each temperature.  These values can be used in
+        a weighted histogram to determine parameter uncertainty.
+    *burn* = 1000 : int | [0,inf)
+        Number of iterations to perform in addition to steps.  Only the
+        last *steps* points will be preserved for each temperature, so the
+        *burn* value should be of the same order as *steps* to be sure that
+        the full history is acquired.
+    *CR* = 0.9 : float | [0,1]
+        Cross-over ratio.  This is the differential evolution crossover
+        ratio to use when computing step size and direction.  Use a small
+        value to step through the dimensions one at a time, or a large value
+        to step through all at once.
+    *monitor* = every_ten : function(step, x, fx, P, E) -> None
+        Function called at every iteration with the step number, the
+        best point and the best value, plus the current population points
+        and their energies.
+    *logfile* = None : string
+        Name of the file which will log the history of every accepted step.
+        Note that this includes all of the burn steps, so it can get very
+        large.
+
+    :Returns:
+
+    *history* : History
+        Structure containing *best*, *best_point* and *buffer*.  *best* is
+        the best nllf value seen and *best_point* is the parameter vector
+        which yielded *best*.  The list *buffer* contains lists of tuples
+        (step, temperature, nllf, x) for each temperature.
+    """
+    N = len(T)
+    history = History(logfile=logfile, streams=N, size=steps)
+    bounder = ReflectBounds(*bounds)
+    #stepper = RandStepper(bounds, tol=0.2/T[-1])
+    stepper = Stepper(bounds, history)
+    dT = diff(1. / asarray(T))
+    P = asarray([p] * N)   # Points
+    E = ones(N) * nllf(p)  # Values
+    history.save(step=0, temperature=T, energy=E, point=P)
+    total_accept = zeros(N)
+    total_swap = zeros(N - 1)
+    with np.errstate(over='ignore'):
+        for step in range(1, steps + burn):
+            # Take a step
+            R = rand()
+            if step < 20 or R < 0.2:
+                #action = 'jiggle'
+                Pnext = [stepper.jiggle(p, 0.01 * t / T[-1]) for p, t in zip(P, T)]
+            elif R < 0.4:
+                #action = 'direct'
+                Pnext = [stepper.direct(p, i) for i, p in enumerate(P)]
+            else:
+                #action = 'diffev'
+                Pnext = [stepper.diffev(p, i, CR=CR) for i, p in enumerate(P)]
+
+            # Test constraints
+            Pnext = asarray([bounder.apply(p) for p in Pnext])
+
+            # Temperature dependent Metropolis update
+            Enext = asarray([nllf(p) for p in Pnext])
+            accept = exp(-(Enext - E) / T) > rand(N)
+            # print step,action
+            # print "dP"," ".join("%.6f"%norm((pn-p)/stepper.step) for pn,p in zip(P,Pnext))
+            # print "dE"," ".join("%.1f"%(en-e) for en,e in zip(E,Enext))
+            # print "En"," ".join("%.1f"%e for e in Enext)
+            # print "accept",accept
+            E[accept] = Enext[accept]
+            P[accept] = Pnext[accept]
+            total_accept += accept
+
+            # Accumulate history for population based methods
+            history.save(step, temperature=T, energy=E, point=P, changed=accept)
+            # print "best",history.best
+
+            # Swap chains across temperatures
+            # Note that we are shuffling from high to low so that if a good
+            # point is found at a high temperature we push it immediately as
+            # low as we can go rather than risk losing it at the next high
+            # temperature step.
+            swap = zeros(N - 1)
+            for i in range(N - 2, -1, -1):
+                # print "swap",E[i+1]-E[i],dT[i],exp((E[i+1]-E[i])*dT[i])
+                if exp((E[i + 1] - E[i]) * dT[i]) > rand():
+                    swap[i] = 1
+                    E[i + 1], E[i] = E[i], E[i + 1]
+                    P[i + 1], P[i] = P[i] + 0, P[i + 1] + 0
+            total_swap += swap
+            #assert nllf(P[0]) == E[0]
+
+            # Monitoring
+            monitor(step, history.best_point, history.best, P, E)
+            interval = 100
+            if 0 and step % interval == 0:
+                print("max r",
+                      max(["%.1f" % norm(p - P[0]) for p in P[1:]]))
+                # print "min AR",argmin(total_accept),min(total_accept)
+                # print "min SR",argmin(total_swap),min(total_swap)
+                print("AR", total_accept)
+                print("SR", total_swap)
+                print("s(d)", [int(std([p[i] for p in P]))
+                               for i in (3, 7, 11, -1)])
+                total_accept *= 0
+                total_swap *= 0
+
+    return history
+
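+# Illustrative usage sketch (the Rosenbrock-style nllf below is hypothetical
+# and not part of this module):
+#
+#     import numpy as np
+#     def rosen_nllf(p):
+#         return np.sum(100.*(p[1:] - p[:-1]**2)**2 + (1. - p[:-1])**2)
+#     history = parallel_tempering(rosen_nllf, np.zeros(2),
+#                                  bounds=(np.full(2, -5.), np.full(2, 5.)),
+#                                  T=np.logspace(-1, 1, 10),
+#                                  steps=500, burn=500)
+#     print(history.best, history.best_point)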
+
+class History(object):
+
+    def __init__(self, streams=None, size=1000, logfile=None):
+        # Allocate buffers
+        self.size = size
+        self.buffer = [[] for _ in range(streams)]
+        # Prepare log file
+        if logfile is not None:
+            self.log = open(logfile, 'w')
+            print("# Step Temperature Energy Point", file=self.log)
+        else:
+            self.log = None
+        # Track the optimum
+        self.best = inf
+
+    def save(self, step, temperature, energy, point, changed=None):
+        if changed is None:
+            changed = ones(len(temperature), 'b')
+        for i, a in enumerate(changed):
+            if a:
+                self._save_point(
+                    step, i, temperature[i], energy[i], point[i] + 0)
+
+    def _save_point(self, step, i, T, E, P):
+        # Save in buffer
+        S = self.buffer[i]
+        if len(S) >= self.size:
+            S.pop(0)
+        if len(S) > 0:
+            # print "P",P
+            # print "S",S[-1][3]
+            assert norm(P - S[-1][3]) != 0
+        S.append((step, T, E, P))
+        # print "stream",i,"now len",len(S)
+        # Track of the optimum
+        if E < self.best:
+            self.best = E
+            self.best_point = P
+        # Log to file
+        if self.log:
+            point_str = " ".join("%.6g" % v for v in P)
+            print(step, T, E, point_str, file=self.log)
+            self.log.flush()
+
+    def draw(self, stream, k):
+        """
+        Return a list of k items drawn from the given stream.
+
+        If the stream is too short, fewer than k items may be returned.
+        """
+        S = self.buffer[stream]
+        n = len(S)
+        return [S[i] for i in choose(n, k)] if n > k else S[:]
+
+
+class Stepper(object):
+
+    def __init__(self, bounds, history):
+        low, high = bounds
+        self.offset = low
+        self.step = (high - low)
+        self.history = history
+
+    def diffev(self, p, stream, CR=0.8, noise=0.05):
+        if len(self.history.buffer[stream]) < 20:
+            # print "jiggling",stream,stream,len(self.history.buffer[stream])
+            return self.jiggle(p, 1e-6)
+        # Ideas incorporated from DREAM by Vrugt
+        N = len(p)
+        # Select the number of vector pair differences to use in the update
+        # using k ~ discrete U[1,max pairs]
+        k = randint(4) + 1
+
+        # Select 2*k members at random
+        parents = [v[3] for v in self.history.draw(stream, 2 * k)]
+        k = len(parents) // 2  # May not have enough parents
+        pop = array(parents)
+        # print "k",k
+        # print "parents",parents
+        # print "pop",pop
+
+        # Select the dims to update based on the crossover ratio, making
+        # sure at least one significant dim is selected
+        while True:
+            vars = nonzero(rand(N) < CR)[0]  # nonzero returns a tuple of index arrays
+            if len(vars) == 0:
+                vars = [randint(N)]
+            step = np.sum(pop[:k] - pop[k:], axis=0)
+            if norm(step[vars]) > 0:
+                break
+
+        # Weight the size of the jump inversely proportional to the
+        # number of contributions, both from the parameters being
+        # updated and from the population defining the step direction.
+        gamma = 2.38 / sqrt(2 * len(vars) * k)
+
+        # Apply that step with F scaling and noise
+        eps = 1 + noise * (2 * rand(N) - 1)
+        # print "j",j
+        # print "gamma",gamma
+        # print "step",step.shape
+        # print "vars",vars.shape
+        delta = zeros_like(p)
+        delta[vars] = gamma * (eps * step)[vars]
+        assert norm(delta) != 0
+        return p + delta
+
+    def direct(self, p, stream):
+        if len(self.history.buffer[stream]) < 20:
+            # print "jiggling",stream,len(self.history.buffer[stream])
+            return self.jiggle(p, 1e-6)
+        pair = self.history.draw(stream, 2)
+        delta = pair[0][3] - pair[1][3]
+        if norm(delta) == 0:
+            print("direct should never return identical points!!")
+            return self.random(p)
+        assert norm(delta) != 0
+        return p + delta
+
+    def jiggle(self, p, noise):
+        delta = randn(len(p)) * self.step * noise
+        assert norm(delta) != 0
+        return p + delta
+
+    def random(self, p):
+        delta = rand(len(p)) * self.step + self.offset
+        assert norm(delta) != 0
+        return p + delta
+
+    def subspace_jiggle(self, p, noise, k):
+        n = len(self.step)
+        if n < k:
+            idx = slice(None)
+            k = n
+        else:
+            idx = choose(n, k)
+        delta = zeros_like(p)
+        delta[idx] = randn(k) * self.step[idx] * noise
+        assert norm(delta) != 0
+        return p + delta
+
+
+class ReflectBounds(object):
+    """
+    Reflect parameter values into bounded region
+    """
+
+    def __init__(self, low, high):
+        self.low, self.high = [asarray(v, 'd') for v in (low, high)]
+
+    def apply(self, y):
+        """
+        Update x so all values lie within bounds
+
+        Returns x for convenience.  E.g., y = bounds.apply(x+0)
+        """
+        minn, maxn = self.low, self.high
+        # Reflect points which are out of bounds
+        idx = y < minn
+        y[idx] = 2 * minn[idx] - y[idx]
+        idx = y > maxn
+        y[idx] = 2 * maxn[idx] - y[idx]
+
+        # Randomize points which are still out of bounds
+        idx = (y < minn) | (y > maxn)
+        y[idx] = minn[idx] + rand(sum(idx)) * (maxn[idx] - minn[idx])
+        return y
+
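+# Illustrative sketch: reflect a point back into the unit box.
+#
+#     rb = ReflectBounds(low=zeros(2), high=ones(2))
+#     print(rb.apply(array([-0.25, 1.10])))   # -> approximately [0.25, 0.90]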
+
+def choose(n, k):
+    """
+    Return an array of k things selected from a pool of n without replacement.
+    """
+    # At k == n/4, need to draw an extra 15% to get k unique draws
+    if k > n / 4 or n < 100:
+        idx = permutation(n)[:k]
+    else:
+        s = set(randint(n, size=k))
+        while len(s) < k:
+            s.add(randint(n))
+        idx = array([si for si in s])
+    if len(set(idx)) != len(idx):
+        print("choose(n,k) contains dups!!", n, k)
+    return idx
diff --git a/bumps/pdfwrapper.py b/bumps/pdfwrapper.py
new file mode 100644
index 0000000..73208ae
--- /dev/null
+++ b/bumps/pdfwrapper.py
@@ -0,0 +1,294 @@
+"""
+Build a bumps model from a function.
+
+The :class:`PDF` class uses introspection to convert a negative log
+likelihood function nllf(m1,m2,...) into a :class:`bumps.fitproblem.Fitness`
+class that has fittable parameters m1, m2, ....
+
+There is no attempt to manage data or uncertainties, except that an
+additional plot function can be provided to display the current value
+of the function in whatever way is meaningful.
+
+The note regarding user defined functions in :mod:`bumps.curve` applies
+here as well.
+"""
+import inspect
+
+import numpy as np
+
+from .parameter import Parameter
+from .fitproblem import Fitness
+from .bounds import init_bounds
+
+
+class PDF(object):
+    """
+    Build a model from a function.
+
+    This model can be fitted with any of the bumps optimizers.
+
+    *fn* is a function that returns the negative log likelihood of seeing
+    its input parameters.
+
+    The fittable parameters are derived from the parameter names in the
+    function definition, with *name* prepended to each parameter.
+
+    The optional *plot* function takes the same arguments as *fn*, with an
+    additional *view* argument which may be set from the bumps command
+    line.  If provided, it should give a visual indication of the
+    function value and uncertainty on the current matplotlib.pyplot figure.
+
+    Additional keyword arguments are treated as the initial values for
+    the parameters, or initial ranges if par=(min,max).  Otherwise, the
+    default is taken from the function definition (if the function uses
+    par=value to define the parameter) or is set to zero if no default is
+    given in the function.
+    """
+    def __init__(self, fn, name="", plot=None, dof=1, **kw):
+        self.dof = dof
+        # Make every name a parameter; initialize the parameters
+        # with the default value if function is defined with keyword
+        # initializers; override the initializers with any keyword
+        # arguments specified in the fit function constructor.
+        labels, vararg, varkw, values = inspect.getargspec(fn)
+        if vararg or varkw:
+            raise TypeError(
+                "Function cannot have *args or **kwargs in declaration")
+        # Parameters default to zero
+        init = dict((p, 0.) for p in labels)
+        # If the function provides default values, use those
+        if values:
+            init.update(zip(labels[-len(values):], values))
+        # Regardless, use any values specified in the constructor, but first
+        # check that they exist as function parameters.
+        invalid = set(kw.keys()) - set(labels)
+        if invalid:
+            raise TypeError("Invalid initializers: %s" %
+                            ", ".join(sorted(invalid)))
+        init.update(kw)
+
+        # Build parameters out of ranges and initial values
+        pars = dict((p, Parameter.default(init[p], name=name + p))
+                    for p in labels)
+
+        # Make parameters accessible as model attributes
+        for k, v in pars.items():
+            if hasattr(self, k):
+                raise TypeError("Parameter cannot be named %s" % k)
+            setattr(self, k, v)
+
+        # Remember the function, parameters, and number of parameters
+        self._function = fn
+        self._labels = labels
+        self._plot = plot
+
+    def parameters(self):
+        return dict((p, getattr(self, p)) for p in self._labels)
+    parameters.__doc__ = Fitness.parameters.__doc__
+
+    def nllf(self):
+        kw = dict((p, getattr(self, p).value) for p in self._labels)
+        return self._function(**kw)
+    nllf.__doc__ = Fitness.__call__.__doc__
+
+    def chisq(self):
+        return self.nllf()/self.dof
+    #chisq.__doc__ = Fitness.chisq.__doc__
+
+    def chisq_str(self):
+        return "%g" % self.chisq()
+    #chisq_str.__doc__ = Fitness.chisq_str.__doc__
+
+    __call__ = chisq
+
+    def plot(self, view=None):
+        if self._plot:
+            kw = dict((p, getattr(self, p).value) for p in self._labels)
+            self._plot(view=view, **kw)
+    plot.__doc__ = Fitness.plot.__doc__
+
+    def numpoints(self):
+        return len(self._labels) + 1
+    numpoints.__doc__ = Fitness.numpoints.__doc__
+
+    def residuals(self):
+        return np.array([self.chisq()])
+    residuals.__doc__ = Fitness.residuals.__doc__
+
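+# Illustrative usage sketch (the quadratic nllf below is hypothetical):
+#
+#     def nllf(a=1., b=2.):
+#         return (a - 3.)**2 + (b + 1.)**2
+#     model = PDF(nllf, a=(0., 10.))   # fit range for a; b keeps its default
+#     print(model.chisq_str())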
+
+class VectorPDF(object):
+    """
+    Build a model from a function.
+
+    This model can be fitted with any of the bumps optimizers.
+
+    *fn* is a function that returns the negative log likelihood of seeing
+    its input parameters.
+
+    Vector *p* of length *n* defines the initial value. Unlike :class:`PDF`,
+    *VectorPDF* operates on a parameter vector *p* rather than individual
+    parameters *p1*, *p2*, etc.  Default parameter values *p* must be
+    provided in order to determine the number of parameters.
+
+    *labels* are the names of the individual parameters.  If not present,
+    the name for parameter *k* defaults to *pk*.  Each label is prefixed by
+    *name*.
+
+    The optional *plot* function takes the same arguments as *fn*, with an
+    additional *view* argument which may be set from the bumps command
+    line.  If provided, it should give a visual indication of the
+    function value and uncertainty on the current matplotlib.pyplot figure.
+
+    Additional keyword arguments are treated as the initial values for
+    the named parameters, or initial ranges if par=(min,max); they override
+    the corresponding entries in *p*.
+    """
+    def __init__(self, fn, p, name="", plot=None, dof=1, labels=None, **kw):
+        self.dof = dof
+        if labels is None:
+            labels = ["p"+str(k) for k, _ in enumerate(p)]
+        init = dict(zip(labels, p))
+        init.update(kw)
+
+        # Build parameters out of ranges and initial values
+        pars = dict((k, Parameter.default(init[k], name=name + k))
+                    for k in labels)
+
+        # Make parameters accessible as model attributes
+        for k, v in pars.items():
+            if hasattr(self, k):
+                raise TypeError("Parameter cannot be named %s" % k)
+            setattr(self, k, v)
+
+        # Remember the function, parameters, and number of parameters
+        self._function = fn
+        self._labels = labels
+        self._plot = plot
+
+    def parameters(self):
+        return dict((k, getattr(self, k)) for k in self._labels)
+    parameters.__doc__ = Fitness.parameters.__doc__
+
+    def nllf(self):
+        pvec = np.array([getattr(self, k).value for k in self._labels])
+        return self._function(pvec)
+    nllf.__doc__ = Fitness.__call__.__doc__
+
+    def chisq(self):
+        return self.nllf()/self.dof
+    #chisq.__doc__ = Fitness.chisq.__doc__
+
+    def chisq_str(self):
+        return "%g" % self.chisq()
+    #chisq_str.__doc__ = Fitness.chisq_str.__doc__
+
+    __call__ = chisq
+
+    def plot(self, view=None):
+        if self._plot:
+            values = np.array([getattr(self, k).value for k in self._labels])
+            self._plot(values, view=view)
+    plot.__doc__ = Fitness.plot.__doc__
+
+    def numpoints(self):
+        return len(self._labels) + 1
+    numpoints.__doc__ = Fitness.numpoints.__doc__
+
+    def residuals(self):
+        return np.array([self.chisq()])
+    residuals.__doc__ = Fitness.residuals.__doc__
+
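+# Illustrative usage sketch (the vector nllf below is hypothetical):
+#
+#     def nllf(p):
+#         return np.sum((p - 1.)**2)
+#     model = VectorPDF(nllf, [0., 0., 0.], labels=["a", "b", "c"])
+#     print(model.chisq_str())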
+
+
+class DirectProblem(object):
+    """
+    Build model from negative log likelihood function *f(p)*.
+
+    Vector *p* of length *n* defines the initial value.
+
+    *bounds* defines limiting values for *p* as
+    *[(p1_low, p1_high), (p2_low, p2_high), ...]*.  If all parameters
+    have the same bounds, use *bounds=np.tile([low,high],[n,1])*.
+
+    Unlike :class:`PDF`, no parameter objects are defined for the elements
+    of *p*, so all are fitting parameters.
+    """
+    def __init__(self, f, p0, bounds=None, dof=1, labels=None, plot=None):
+        self.f = f
+        self.n = len(p0)
+        self.p = np.asarray(p0, 'd')
+        self.dof = dof
+        if bounds is not None:
+            self._bounds = np.asarray(bounds, 'd')
+        else:
+            self._bounds = np.tile((-np.inf, np.inf), (self.n, 1)).T
+
+        self._labels = labels if labels else ["p%d" % i for i,_ in enumerate(p0)]
+        self._plot = plot
+
+    def nllf(self, pvec=None):
+        # Nllf is the primary interface from the fitters.  We are going to
+        # make it as cheap as possible by not having to marshall values
+        # through parameter boxes.
+        return self.f(pvec) if pvec is not None else self.f(self.p)
+
+    def model_reset(self):
+        self._parameters = [Parameter(value=self.p[k],
+                                      bounds=self._bounds[:,k],
+                                      name=self._labels[k])
+                            for k in range(len(self.p))]
+
+    def model_update(self):
+        self.p = np.array([p.value for p in self._parameters])
+
+    def model_parameters(self):
+        return self._parameters
+
+    def chisq(self):
+        return self.nllf()/self.dof
+
+    def chisq_str(self):
+        return "%g" % self.chisq()
+    __call__ = chisq
+
+    def setp(self, p):
+        # Note: setp is called by the fitters to update the parameter values.
+        self.p = p
+        for parameter, value in zip(self._parameters, self.p):
+            parameter.value = value
+
+    def getp(self):
+        return self.p
+
+    def show(self):
+        print("[nllf=%g]" % self.nllf())
+        print(self.summarize())
+
+    def summarize(self):
+        return "\n".join("%40s %g"%(name, value)
+                         for name, value in zip(self._labels, self.getp()))
+
+    def labels(self):
+        return self._labels
+
+    def randomize(self, n=None):
+        bounds = [init_bounds(b) for b in self._bounds.T]
+        if n is not None:
+            return np.array([b.random(n) for b in bounds]).T
+        else:
+            # Need to go through setp when updating model.
+            self.setp([b.random(1)[0] for b in bounds])
+
+    def bounds(self):
+        return self._bounds
+
+    def plot(self, p=None, fignum=None, figfile=None, view=None):
+        if p is not None:
+            self.setp(p)
+        if self._plot:
+            # DirectProblem keeps values in self.p rather than as attributes.
+            self._plot(self.getp(), view=view)
+    plot.__doc__ = Fitness.plot.__doc__
+
diff --git a/bumps/plotutil.py b/bumps/plotutil.py
new file mode 100644
index 0000000..3a769b6
--- /dev/null
+++ b/bumps/plotutil.py
@@ -0,0 +1,184 @@
+"""
+Pylab plotting utilities.
+"""
+from __future__ import division
+
+__all__ = ["auto_shift",
+           "coordinated_colors", "dhsv", "next_color",
+           "plot_quantiles", "form_quantiles"]
+
+
+def auto_shift(offset):
+    """
+    Return a y-offset coordinate transform for the current axes.
+
+    Each call to auto_shift increases the y-offset for the next line by
+    the given number of points (with 72 points per inch).
+
+    Example::
+
+        from matplotlib import pyplot as plt
+        from bumps.plotutil import auto_shift
+        trans = auto_shift(5)  # shift each new line up by 5 points
+        plt.plot(x, y, transform=trans)
+    """
+    from matplotlib.transforms import ScaledTranslation
+    import pylab
+    ax = pylab.gca()
+    if ax.lines and hasattr(ax, '_auto_shift'):
+        ax._auto_shift += offset
+    else:
+        ax._auto_shift = 0
+    trans = pylab.gca().transData
+    if ax._auto_shift:
+        trans += ScaledTranslation(0, ax._auto_shift/72.,
+                                   pylab.gcf().dpi_scale_trans)
+    return trans
+
+
+# ======== Color functions ========
+
+def next_color():
+    """
+    Return the next color in the plot color cycle.
+
+    Example::
+
+        from matplotlib import pyplot as plt
+        from bumps.plotutil import next_color, dhsv
+        color = next_color()
+        plt.errorbar(x, y, yerr=dy, fmt='.', color=color)
+        # Draw the theory line with the same color as the data, but darker
+        plt.plot(x, y, '-', color=dhsv(color, dv=-0.2))
+    """
+    import pylab
+    lines = pylab.gca()._get_lines
+    try:
+        base = next(lines.prop_cycler)['color']
+    except Exception:
+        try: # Cruft 1.4-1.6?
+            base = next(lines.color_cycle)
+        except Exception:  # Cruft 1.3 and earlier
+            base = lines._get_next_cycle_color()
+    return base
+
+
+def coordinated_colors(base=None):
+    """
+    Return a set of coordinated colors as c['base|light|dark'].
+
+    If *base* is not provided, use the next color in the color cycle as
+    the base.  Light is bright and pale, dark is dull and saturated.
+    """
+    if base is None:
+        base = next_color()
+    return dict(base=base,
+                light=dhsv(base, dv=+0.3, ds=-0.2),
+                dark=dhsv(base, dv=-0.25, ds=+0.35),
+                )
+
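+# Illustrative sketch (x, y, dy and theory are hypothetical data arrays):
+#
+#     from matplotlib import pyplot as plt
+#     colors = coordinated_colors()
+#     plt.errorbar(x, y, yerr=dy, fmt='.', color=colors['base'])
+#     plt.plot(x, theory, '-', color=colors['dark'])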
+
+def dhsv(color, dh=0., ds=0., dv=0., da=0.):
+    """
+    Modify color on hsv scale.
+
+    *dv* change intensity, e.g., +0.1 to brighten, -0.1 to darken.
+    *dh* change hue
+    *ds* change saturation
+    *da* change transparency
+
+    Color can be any valid matplotlib color.  The hsv scale is [0,1] in
+    each dimension.  Saturation, value and alpha scales are clipped to [0,1]
+    after changing.  The hue scale wraps between red to violet.
+
+    :Example:
+
+    Make sea green 10% darker:
+
+        >>> from bumps.plotutil import dhsv
+        >>> darker = dhsv('seagreen', dv=-0.1)
+        >>> print([int(v*255) for v in darker])
+        [37, 113, 71, 255]
+    """
+    from matplotlib.colors import colorConverter
+    from colorsys import rgb_to_hsv, hsv_to_rgb
+    from numpy import clip, array, fmod
+    r, g, b, a = colorConverter.to_rgba(color)
+    # print "from color",r,g,b,a
+    h, s, v = rgb_to_hsv(r, g, b)
+    s, v, a = [clip(val, 0., 1.) for val in (s + ds, v + dv, a + da)]
+    h = fmod(h + dh, 1.)
+    r, g, b = hsv_to_rgb(h, s, v)
+    # print "to color",r,g,b,a
+    return array((r, g, b, a))
+
+
+# ==== Specialized plotters =====
+
+def plot_quantiles(x, y, contours, color, alpha=None):
+    """
+    Plot quantile curves for a set of lines.
+
+    *x* is the x coordinates for all lines.
+
+    *y* is the y coordinates, one row for each line.
+
+    *contours* is a list of confidence intervals expressed as percents.
+
+    *color* is the color to use for the quantiles.  Quantiles are draw as
+    a filled region with alpha transparency.  Higher probability regions
+    will be covered with multiple contours, which will make them lighter
+    and more saturated.
+
+    *alpha* is the transparency level to use for all fill regions.  The
+    default value, alpha=2./(#contours+1), works pretty well.
+    """
+    _, q = form_quantiles(y, contours)
+    _plot_quantiles(x, q,  color, alpha)
+
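+# Illustrative sketch (random draws stand in for a population of model curves):
+#
+#     import numpy as np
+#     x = np.linspace(0, 1, 50)
+#     y = np.sin(2*np.pi*x) + 0.1*np.random.randn(200, 50)
+#     plot_quantiles(x, y, contours=[68, 95], color='b', alpha=None)
+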
+def _plot_quantiles(x, q, color, alpha):
+    import pylab
+    # print "p",p
+    # print "q",q[:,:,0]
+    # print "y",y[:,0]
+    if alpha is None:
+        alpha = 2. / (len(q) + 1)
+    edgecolor = dhsv(color, ds=-(1 - alpha), dv=(1 - alpha))
+    for lo, hi in q:
+        pylab.fill_between(x, lo, hi,
+                           facecolor=color, edgecolor=edgecolor,
+                           alpha=alpha, hold=True)
+
+def form_quantiles(y, contours):
+    """
+    Return quantiles and values for a list of confidence intervals.
+
+    *contours* is a list of confidence intervals [a, b,...] expressed as
+    percents.
+
+    Returns:
+
+    *quantiles* is a list of intervals [[a_low, a_high], [b_low, b_high], ...]
+    in [0,1].
+
+    *values* is a list of intervals [[A_low, A_high], ...] with one entry in
+    A for each column of y (i.e., for each point in x).
+    """
+    from numpy import reshape
+    from scipy.stats.mstats import mquantiles
+    p = _convert_contours_to_probabilities(reversed(sorted(contours)))
+    q = mquantiles(y, prob=p, axis=0)
+    p = reshape(p, (-1, 2))  # pair (low, high) probabilities for each contour
+    q = reshape(q, (-1, 2, len(y[0])))
+    return p, q
+
+def _convert_contours_to_probabilities(contours):
+    """
+    Given confidence intervals [a, b,...] as percents, return probability
+    in [0,1] for each interval as [a_low, a_high, b_low, b_high, ...].
+    """
+    from numpy import hstack
+    # lower quantile for ci in percent = (100 - ci)/2
+    # upper quantile = 100 - lower quantile = 100 - (100-ci)/2 = (100 + ci)/2
+    # divide by an additional 100 to get proportion from 0 to 1
+    return hstack([(100.0 - p, 100.0 + p) for p in contours]) / 200.0
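+
+# Worked example: contours=[68] maps to probabilities (100-68)/200 = 0.16 and
+# (100+68)/200 = 0.84, i.e. the 16% and 84% quantiles bracketing a 68% interval.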
diff --git a/bumps/plugin.py b/bumps/plugin.py
new file mode 100644
index 0000000..844b1cc
--- /dev/null
+++ b/bumps/plugin.py
@@ -0,0 +1,112 @@
+"""
+Bumps plugin architecture.
+
+With sophisticated models, developers need to be able to provide tools
+such as model builders and data viewers.
+
+Some of these will be tools for the GUI, such as views.  Others will be
+tools to display results.
+
+This file defines the interface that can be defined by your own application
+so that it interacts with models of your type.  Define your own model
+package with a module plugin.py.
+
+Create a main program which looks like::
+
+
+    if __name__ == "__main__":
+        import multiprocessing
+        multiprocessing.freeze_support()
+
+        import bumps.cli
+        import mypackage.plugin
+        bumps.cli.install_plugin(mypackage.plugin)
+        bumps.cli.main()
+
+You should be able to use this as a driver program for your application.
+
+Note: the plugin architecture is likely to change radically as more models
+are added to the system, particularly so that we can accommodate simultaneous
+fitting of data taken using different experimental techniques.  For now,
+only one plugin at a time is supported.
+"""
+
+__all__ = [
+    'new_model',
+    'load_model',
+    'calc_errors',
+    'show_errors',
+    'data_view',
+    'model_view',
+]
+
+# TODO: refl1d wants to do the following after cli.getopts()
+#
+#    from refl1d.probe import Probe
+#    Probe.view = opts.plot
+#
+# It also wants to modify the opts so that more plotters are available,
+# such as Fresnel.
+
+
+def new_model():
+    """
+    Return a new empty model or None.
+
+    Called in response to >File >New from the GUI.  Creates a new empty
+    model.  Also triggered if GUI is started without a model.
+    """
+    return None
+
+
+def load_model(filename):
+    """
+    Return a model stored within a file.
+
+    This routine is for specialized model descriptions not defined by script.
+
+    If the filename does not contain a model of the appropriate type (e.g.,
+    because the extension is incorrect), then return None.
+
+    No need to load pickles or script models.  These will be attempted if
+    load_model returns None.
+    """
+    return None
+
+
+def calc_errors(problem, sample):
+    """
+    Gather data needed to display uncertainty in the model and the data.
+
+    Returns an object to be passed later to :func:`show_errors`.
+    """
+    return None
+
+
+def show_errors(errs):
+    """
+    Display the model with uncertainty on the current figure.
+
+    *errs* is the data returned from :func:`calc_errors`.
+    """
+    pass
+
+
+def data_view():
+    """
+    Panel factory for the data tab in the GUI.
+
+    If your model has an adequate show() function this should not be
+    necessary.
+    """
+    from .gui.data_view import DataView
+    return DataView
+
+
+def model_view():
+    """
+    Panel factory for the model tab in the GUI.
+
+    Return None if not present.
+    """
+    return None
diff --git a/bumps/pmath.py b/bumps/pmath.py
new file mode 100644
index 0000000..8caafd5
--- /dev/null
+++ b/bumps/pmath.py
@@ -0,0 +1,93 @@
+"""
+Standard math functions for parameter expressions.
+"""
+from six.moves import reduce, builtins
+import math
+from .parameter import function
+__all__ = [
+    'exp', 'log', 'log10', 'sqrt',
+    'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'atan2',
+    'sind', 'cosd', 'tand', 'asind', 'acosd', 'atand', 'atan2d',
+    'sinh', 'cosh', 'tanh',
+    'degrees', 'radians',
+    'sum', 'prod',
+]
+
+def _cosd(v):
+    """Return the cosine of x (measured in in degrees)."""
+    return math.cos(math.radians(v))
+
+
+def _sind(v):
+    """Return the sine of x (measured in in degrees)."""
+    return math.sin(math.radians(v))
+
+
+def _tand(v):
+    """Return the tangent of x (measured in in degrees)."""
+    return math.tan(math.radians(v))
+
+
+def _acosd(v):
+    """Return the arc cosine (measured in in degrees) of x."""
+    return math.degrees(math.acos(v))
+
+
+def _asind(v):
+    """Return the arc sine (measured in in degrees) of x."""
+    return math.degrees(math.asin(v))
+
+
+def _atand(v):
+    """Return the arc tangent (measured in in degrees) of x."""
+    return math.degrees(math.atan(v))
+
+
+def _atan2d(dy, dx):
+    """Return the arc tangent (measured in in degrees) of y/x.
+    Unlike atan(y/x), the signs of both x and y are considered."""
+    return math.degrees(math.atan2(dy, dx))
+
+def _prod(s):
+    """Return the product of a sequence of numbers."""
+    return reduce(lambda x, y: x * y, s, 1)
+
+exp = function(math.exp)
+log = function(math.log)
+log10 = function(math.log10)
+sqrt = function(math.sqrt)
+
+degrees = function(math.degrees)
+radians = function(math.radians)
+
+sin = function(math.sin)
+cos = function(math.cos)
+tan = function(math.tan)
+asin = function(math.asin)
+acos = function(math.acos)
+atan = function(math.atan)
+atan2 = function(math.atan2)
+
+sind = function(_sind)
+cosd = function(_cosd)
+tand = function(_tand)
+asind = function(_asind)
+acosd = function(_acosd)
+atand = function(_atand)
+atan2d = function(_atan2d)
+
+sinh = function(math.sinh)
+cosh = function(math.cosh)
+tanh = function(math.tanh)
+
+sum = function(builtins.sum)
+prod = function(_prod)
+
+min = function(builtins.min)
+max = function(builtins.max)
+
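+# Illustrative sketch (assumes bumps.parameter.Parameter accepts value= and
+# name= keywords; the 'angle' parameter is hypothetical):
+#
+#     from bumps.parameter import Parameter
+#     angle = Parameter(value=30., name='angle')
+#     expr = sind(angle)        # delayed evaluation of sin(30 degrees)
+#     angle.value = 90.
+#     print(expr.value)         # -> 1.0, evaluated when .value is read
+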
+# Define pickler for numpy ufuncs
+#import copy_reg
+#def udump(f): return f.__name__
+#def uload(name): return getattr(np, name)
+#copy_reg.pickle(np.ufunc, udump, uload)
diff --git a/bumps/pymcfit.py b/bumps/pymcfit.py
new file mode 100644
index 0000000..57383da
--- /dev/null
+++ b/bumps/pymcfit.py
@@ -0,0 +1,153 @@
+r"""
+Bumps wrapper for PyMC models.
+"""
+from __future__ import print_function
+
+__all__ = ["PyMCProblem"]
+
+import numpy as np
+from numpy import inf, array, asarray
+
+# pyMC model attributes:
+# - deterministics                     [Intermediate variables]
+# - stochastics (with observed=False)  [Fitted parameters]
+# - data (stochastic variables with observed=True)
+# - variables                          [stochastics + deterministics + data]
+# - potentials
+# - containers
+# - nodes
+# - all_objects
+# - status: Not useful for the Model base class, but may be used by subclasses.
+
+class PyMCProblem(object):
+    def __init__(self, input):
+        from pymc.Model import Model
+        from pymc.Node import ZeroProbability
+
+        self.model = Model(input)
+        # sort parameters by name
+        ordered_pairs = sorted((s.__name__,s) for s in self.model.stochastics)
+
+        # Record parameter, shape, size, offset as a list
+        pars = [v for k,v in ordered_pairs]
+        shape = [v.shape for v in pars]
+        size = [(np.prod(s) if s != () else 1) for s in shape]
+        offset = np.cumsum([0]+size[:-1])
+        self.pars = list(zip(pars,shape,size,offset))
+
+        # List of cost functions contains both parameter and data, but not
+        # intermediate deterministic points
+        self.costs = self.model.variables - self.model.deterministics
+
+        # Degrees of freedom is #data - #pars.  *points* counts both the
+        # data and the parameters, so subtract the parameter count twice.
+        points = sum((np.prod(p.shape) if p.shape != () else 1)
+                     for p in self.costs)
+        npars = sum(size)
+        self.dof = points - 2*npars
+
+        self.ZeroProbability = ZeroProbability
+
+    def model_reset(self):
+        pass
+
+    def chisq(self):
+        return self.nllf() # /self.dof
+
+    def chisq_str(self):
+        return "%g"%self.chisq()
+    __call__ = chisq
+
+    def nllf(self, pvec=None):
+        if pvec is not None: self.setp(pvec)
+        try:
+            return -sum(c.logp for c in self.costs)
+        except self.ZeroProbability:
+            return inf
+
+    def setp(self, values):
+        for par, shape, size, offset in self.pars:
+            if shape == ():
+                par.value = values[offset]
+                offset += 1
+            else:
+                par.value = array(values[offset:offset+size]).reshape(shape)
+                offset += size
+
+    def getp(self):
+        return np.hstack([(par.value.flatten() if shape != () else par.value)
+                          for par, shape, size, offset in self.pars])
+
+    def show(self):
+        # maybe print graph of model
+        print("[chisq=%g, nllf=%g]" % (self.chisq(), self.nllf()))
+        print(self.summarize())
+
+    def summarize(self):
+        return "\n".join("%s=%s"%(par.__name__, par.value)
+                         for par, _, _, _ in self.pars)
+
+    def labels(self):
+        ret = []
+        for par, _, _, _  in self.pars:
+            ret.extend(_par_labels(par))
+        return ret
+
+    def randomize(self, N=None):
+        if N is None:
+            self.model.draw_from_prior()
+        else:
+            data = []
+            for _ in range(N):
+                self.model.draw_from_prior()
+                data.append(self.getp())
+            return asarray(data)
+
+    def bounds(self):
+        return np.vstack([_par_bounds(par) for par,_,_,_ in self.pars]).T
+                 
+    def plot(self, p=None, fignum=None, figfile=None):
+        pass
+
+    def __deepcopy__(self, memo):
+        return self
+
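+# Illustrative sketch (assumes the classic pymc 2.x API; the toy model below
+# is hypothetical):
+#
+#     import pymc
+#     x = pymc.Uniform('x', lower=-5., upper=5.)
+#     y = pymc.Normal('y', mu=x, tau=1., value=1.3, observed=True)
+#     problem = PyMCProblem(dict(x=x, y=y))
+#     print(problem.chisq_str())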
+
+def _par_bounds(par):
+    # Delay referencing
+    import pymc.distributions
+
+    UNBOUNDED = lambda p: (-inf, inf)
+    PYMC_BOUNDS = {
+        pymc.distributions.DiscreteUniform:
+            lambda p: (p['lower']-0.5,p['upper']+0.5),
+        pymc.distributions.Uniform:
+            lambda p: (p['lower'],p['upper']),
+        pymc.distributions.Exponential:
+            lambda p: (0,inf),
+    }
+    # pyMC doesn't provide info about bounds on the distributions
+    # so we need a big table.
+    bounds = PYMC_BOUNDS.get(par.__class__, UNBOUNDED)(par.parents)
+
+    ret = np.tile(bounds, par.shape).flatten().reshape(-1,2)
+    return ret
+
+def _par_labels(par):
+    name = par.__name__
+    dims = len(par.shape)
+    if dims == 0:
+        return [name]
+    elif dims == 1:
+        return ["%s[%d]"%(name, i)
+                for i in range(par.shape[0])]
+    elif dims == 2:
+        return ["%s[%d,%d]"%(name, i, j)
+                for i in range(par.shape[0])
+                for j in range(par.shape[1])]
+    elif dims == 3:
+        return ["%s[%d,%d]"%(name, i, j, k)
+                for i in range(par.shape[0])
+                for j in range(par.shape[1])
+                for k in range(par.shape[2])]
+    else:
+        raise ValueError("limited to 3 dims for now")
+
diff --git a/bumps/pytwalk.py b/bumps/pytwalk.py
new file mode 100644
index 0000000..7689cac
--- /dev/null
+++ b/bumps/pytwalk.py
@@ -0,0 +1,710 @@
+"""
+T-walk self adjusting MCMC.
+"""
+
+# Author: By Andres Christen.
+# see:  http://www.cimat.mx/~jac/twalk/
+
+# 2010-04-17 Paul Kienzle
+# * typo fixups
+# * move pylab import to the particular functions
+# * remove scipy dependence
+
+__all__ = ["pytwalk"]
+
+from numpy.random import uniform, normal
+from numpy import ones, zeros, cumsum, shape, mat, cov, mean, ceil, matrix, sqrt
+from numpy import floor, exp, log, sum, pi, arange
+
+# Some auxiliary functions and constants
+# square of the norm.
+
+
+def SqrNorm(x):
+    return sum(x * x)
+
+log2pi = log(2 * pi)
+
+
+class pytwalk:
+
+    """This is the t-walk class.
+
+    Initiates defining the dimension= n and -log of the objective function= U,
+    Supp defines the support, returns True if x within the support, eg:
+
+    Mytwalk = twalk( n=3, U=MyMinusLogf, Supp=MySupportFunction).
+
+    Then do: Mytwalk.Run?
+
+    Other parameter are:
+    ww= the prob. of choosing each kernel, aw, at, n1phi (see inside twalk.py)
+    with default values as in the paper, normally NOT needed to be changed."""
+
+    def __init__(self, n, U=(lambda x: sum(0.5 * x ** 2)), Supp=(lambda x: True),
+                 ww=[0.0000, 0.4918, 0.4918, 0.0082, 0.0082], aw=1.5, at=6.0, n1phi=4.0):
+
+        self.n = n
+        self.U = U
+        self.Supp = Supp
+        self.Output = zeros((1, n + 1))  # No data (MCMC output) yet
+        self.T = 1
+        # To save the acceptance rates of each kernel, and the global acc. rate
+        self.Acc = zeros(6)
+
+        # Kernel probabilities
+        self.Fw = cumsum(ww)
+
+        # Parameters for the proposals
+        self.aw = aw  # For the walk move
+        self.at = at  # For the Traverse move
+
+        # n1phi = 5 ### expected value of parameters to move
+        self.pphi = min(n, n1phi) / (1.0 * n)  # Prob. of choosing each par.
+
+    def Run(self, T, x0, xp0):
+        """Run the twalk.
+
+           Run( T, x0, xp0),
+           T = Number of iterations.
+           x0, xp0, two initial points within the support,
+           ***each entry of x0 and xp0 must be different***.
+        """
+
+        print("twalk: Running the twalk with %d iterations." % T)
+        # Check x0 and xp0 in the support
+        x = x0  # Reference, so we can retrieve the last values used
+        if not(self.Supp(x)):
+            print(
+                "twalk: ERROR, initial point x0 = %12.4g out of support." % x0)
+            return 0
+        u = self.U(x)
+
+        xp = xp0
+        if not(self.Supp(xp)):
+            print(
+                "twalk: ERROR, initial point xp0 = %12.4g out of support." % xp0)
+            return 0
+        up = self.U(xp)
+
+        if any(abs(x0 - xp0) <= 0):
+            print("twalk: ERROR, not all entries of initial values different.")
+            return 0
+
+        # Set the array to place the iterations and the U's ... we do not save
+        # up's
+        self.Output = zeros((T + 1, self.n + 1))
+        self.T = T + 1
+        self.Acc = zeros(6)
+        kercall = zeros(6)  # Times each kernel is called
+
+        # Make local references for less writing
+        n = self.n
+        Output = self.Output
+        Acc = self.Acc
+
+        Output[0, 0:n] = x.copy()
+        Output[0, n] = u
+
+        # Sampling
+        for it in range(T):
+
+            y, yp, ke, A, u_prop, up_prop = self.onemove(x, u, xp, up)
+
+            kercall[ke] += 1
+            kercall[5] += 1
+            if (uniform() < A):
+                x = y.copy()  # Accept the proposal y
+                u = u_prop
+                xp = yp.copy()  # Accept the proposal yp
+                up = up_prop
+
+                Acc[ke] += 1
+                Acc[5] += 1
+
+            # To retrieve the current values
+            self.x = x
+            self.xp = xp
+            self.u = u
+            self.up = up
+
+            Output[it + 1, 0:n] = x.copy()
+            Output[it + 1, n] = u
+
+        if (Acc[5] == 0):
+            print("twalk: WARNING,  all propolsals were rejected!")
+            return 0
+
+        for i in range(6):
+            if kercall[i] != 0:
+                Acc[i] /= kercall[i]
+        return 1
+
+    def onemove(self, x, u, xp, up):
+        """One move of the twalk.  This is basically the raw twalk kernel.
+           It is useful if the twalk is needed inside a more complex MCMC.
+
+           onemove(x, u, xp, up),
+           x, xp, two points WITHIN the support ***each entry of x0 and xp0 must be different***.
+           and the value of the objective at x, and xp
+           u=U(x), up=U(xp).
+
+           It returns: [y, yp, ke, A, u_prop, up_prop]
+           y, yp: the proposed jump
+           ke: The kernel used, 0=nothing, 1=Walk, 2=Traverse, 3=Blow, 4=Hop
+           A: the M-H ratio
+           u_prop, up_prop: The values for the objective func. at the proposed jumps
+        """
+
+        # Make local references for less writing
+        U = self.U
+        Supp = self.Supp
+        Fw = self.Fw
+
+        ker = uniform()  # To choose the kernel to be used
+        ke = 1
+        A = 0
+
+        # Kernel nothing exchange x with xp
+        if ((0.0 <= ker) & (ker < Fw[0])):
+            ke = 0
+            y = xp.copy()
+            up_prop = u
+            yp = x.copy()
+            u_prop = up
+            # A is the MH acceptance ratio
+            A = 1.0
+            # always accepted
+
+        # The Walk move
+        if ((Fw[0] <= ker) & (ker < Fw[1])):
+
+            ke = 1
+
+            dir = uniform()
+
+            if ((0 <= dir) & (dir < 0.5)):  # x as pivot
+
+                yp = self.SimWalk(xp, x)
+
+                y = x.copy()
+                u_prop = u
+
+                if ((Supp(yp)) & (all(abs(yp - y) > 0))):
+                    up_prop = U(yp)
+                    A = exp(up - up_prop)
+                else:
+                    up_prop = None
+                    A = 0
+                    # out of support, not accepted
+
+            else:  # xp as pivot
+
+                y = self.SimWalk(x, xp)
+
+                yp = xp.copy()
+                up_prop = up
+
+                if ((Supp(y)) & (all(abs(yp - y) > 0))):
+                    u_prop = U(y)
+                    A = exp(u - u_prop)
+                else:
+                    u_prop = None
+                    A = 0
+                    # out of support, not accepted
+
+        # The Traverse move
+        if ((Fw[1] <= ker) & (ker < Fw[2])):
+
+            ke = 2
+            dir = uniform()
+
+            if ((0 <= dir) & (dir < 0.5)):  # x as pivot
+
+                beta = self.Simbeta()
+                yp = self.SimTraverse(xp, x, beta)
+
+                y = x.copy()
+                u_prop = u
+
+                if Supp(yp):
+                    up_prop = U(yp)
+                    if (self.nphi == 0):
+                        A = 1  # Nothing moved
+                    else:
+                        A = exp((up - up_prop) + (self.nphi - 2) * log(beta))
+                else:
+                    up_prop = None
+                    A = 0  # out of support, not accepted
+            else:  # xp as pivot
+
+                beta = self.Simbeta()
+                y = self.SimTraverse(x, xp, beta)
+
+                yp = xp.copy()
+                up_prop = up
+
+                if Supp(y):
+                    u_prop = U(y)
+                    if (self.nphi == 0):
+                        A = 1  # Nothing moved
+                    else:
+                        A = exp((u - u_prop) + (self.nphi - 2) * log(beta))
+                else:
+                    u_prop = None
+                    A = 0  # out of support, not accepted
+
+        # The Blow move
+        if ((Fw[2] <= ker) & (ker < Fw[3])):
+
+            ke = 3
+            dir = uniform()
+
+            if ((0 <= dir) & (dir < 0.5)):  # x as pivot
+                yp = self.SimBlow(xp, x)
+
+                y = x.copy()
+                u_prop = u
+                if ((Supp(yp)) & all(yp != x)):
+                    up_prop = U(yp)
+                    W1 = self.GBlowU(yp, xp,  x)
+                    W2 = self.GBlowU(xp, yp,  x)
+                    A = exp((up - up_prop) + (W1 - W2))
+                else:
+                    up_prop = None
+                    A = 0  # out of support, not accepted
+            else:  # xp as pivot
+                y = self.SimBlow(x, xp)
+
+                yp = xp.copy()
+                up_prop = up
+                if ((Supp(y)) & all(y != xp)):
+                    u_prop = U(y)
+                    W1 = self.GBlowU(y,  x, xp)
+                    W2 = self.GBlowU(x,  y, xp)
+                    A = exp((u - u_prop) + (W1 - W2))
+                else:
+                    u_prop = None
+                    A = 0  # out of support, not accepted
+
+        # The Hop move
+        if ((Fw[3] <= ker) & (ker < Fw[4])):
+
+            ke = 4
+            dir = uniform()
+
+            if ((0 <= dir) & (dir < 0.5)):  # x as pivot
+                yp = self.SimHop(xp, x)
+
+                y = x.copy()
+                u_prop = u
+                if ((Supp(yp)) & all(yp != x)):
+                    up_prop = U(yp)
+                    W1 = self.GHopU(yp, xp,  x)
+                    W2 = self.GHopU(xp, yp,  x)
+                    A = exp((up - up_prop) + (W1 - W2))
+                else:
+                    up_prop = None
+                    A = 0  # out of support, not accepted
+            else:  # xp as pivot
+                y = self.SimHop(x, xp)
+
+                yp = xp.copy()
+                up_prop = up
+                if ((Supp(y)) & all(y != xp)):
+                    u_prop = U(y)
+                    W1 = self.GHopU(y,  x, xp)
+                    W2 = self.GHopU(x,  y, xp)
+                    A = exp((u - u_prop) + (W1 - W2))
+                else:
+                    u_prop = None
+                    A = 0  # out of support, not accepted
+
+        return [y, yp, ke, A, u_prop, up_prop]
+
+
+##########################################################################
+# Auxiliaries for the kernels
+
+    # Used by the Walk kernel
+    def SimWalk(self, x, xp):
+        aw = self.aw
+        n = self.n
+
+        phi = (uniform(size=n) < self.pphi)  # parameters to move
+        self.nphi = sum(phi)
+        z = zeros(n)
+
+        for i in range(n):
+            if phi[i]:
+                u = uniform()
+                z[i] = (aw / (1 + aw)) * (aw * u ** 2.0 + 2.0 * u - 1.0)
+
+        return x + (x - xp) * z
+
+    # Used by the Traverse kernel
+    def Simbeta(self):
+        at = self.at
+        if (uniform() < (at - 1.0) / (2.0 * at)):
+            return exp(1.0 / (at + 1.0) * log(uniform()))
+        else:
+            return exp(1.0 / (1.0 - at) * log(uniform()))
+
+    def SimTraverse(self,  x, xp, beta):
+        n = self.n
+
+        phi = (uniform(size=n) < self.pphi)
+        self.nphi = sum(phi)
+
+        rt = x.copy()
+        for i in range(n):
+            if (phi[i]):
+                rt[i] = xp[i] + beta * (xp[i] - x[i])
+
+        return rt
+
+    # Used by the Blow kernel
+    def SimBlow(self, x, xp):
+        n = self.n
+
+        phi = (uniform(size=n) < self.pphi)
+        self.nphi = sum(phi)
+
+        self.sigma = max(phi * abs(xp - x))
+
+        rt = x.copy()
+        for i in range(n):
+            if (phi[i]):
+                rt[i] = x[i] + self.sigma * normal()
+
+        return rt
+
+    def GBlowU(self, h, x, xp):
+        nphi = self.nphi
+
+        if (nphi > 0):
+            return (nphi / 2.0) * log2pi + nphi * log(self.sigma) + 0.5 * SqrNorm(h - xp) / (self.sigma ** 2)
+        else:
+            return 0
+
+    # Used by the Hop kernel
+    def SimHop(self, x, xp):
+        n = self.n
+
+        phi = (uniform(size=n) < self.pphi)
+        self.nphi = sum(phi)
+
+        self.sigma = max(phi * abs(xp - x)) / 3.0
+
+        rt = x.copy()
+        for i in range(n):
+            if (phi[i]):
+                rt[i] = xp[i] + self.sigma * normal()
+
+        return rt
+
+    def GHopU(self, h, x, xp):  # It is actually equal to GBlowU!
+        nphi = self.nphi
+
+        if (nphi > 0):
+            return (nphi / 2.0) * log2pi + nphi * log(self.sigma) + 0.5 * SqrNorm(h - xp) / (self.sigma ** 2)
+        else:
+            return 0
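+    # GBlowU and GHopU both return the negative log-density of a Gaussian
+    # proposal centred at xp with scale self.sigma over the nphi coordinates
+    # that were moved:
+    #     (nphi/2)*log(2*pi) + nphi*log(sigma) + ||h - xp||^2 / (2*sigma^2)
+    # OneMove combines these forward/backward terms with the energy change
+    # in the acceptance ratio exp((u - u_prop) + (W1 - W2)).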
+
+
+##########################################################################
+# Output analysis auxiliary methods
+
+    def IAT(self, par=-1, start=0, end=0, maxlag=0):
+        """Calculate the Integrated Autocorrelation Times of parameters par
+           the default value par=-1 is for the IAT of the U's"""
+        if (end == 0):
+            end = self.T
+
+        if (self.Acc[5] == 0):
+            print("twalk: IAT: WARNING,  all proposals were rejected!")
+            print(
+                "twalk: IAT: Cannot calculate IAT, fixing it to the sample size.")
+            return self.T
+
+        iat = IAT(self.Output, cols=par, maxlag=maxlag, start=start, end=end)
+
+        return iat
+
+    def TS(self, par=-1, start=0, end=0):
+        """Plot time series of parameter par (default = log f) etc."""
+        from pylab import plot, xlabel, ylabel
+
+        if par == -1:
+            par = self.n
+
+        if (end == 0):
+            end = self.T
+
+        if (par == self.n):
+            plot(arange(start, end), -1 * self.Output[start:end, par])
+            ylabel("Log of Objective")
+        else:
+            plot(arange(start, end), self.Output[start:end, par])
+            ylabel("Parameter %d" % par)
+        xlabel("Iteration")
+
+    def Ana(self, par=-1, start=0, end=0):
+        """Output Analysis, TS plots, acceptance rates, IAT etc."""
+        if par == -1:
+            par = self.n
+
+        if (end == 0):
+            end = self.T
+
+        print(
+            "Acceptance rates for the Walk, Traverse, Blow and Hop kernels:" + str(self.Acc[1:5]))
+        print("Global acceptance rate: %7.5f" % self.Acc[5])
+
+        iat = self.IAT(par=par, start=start, end=end)
+        print("Integrated Autocorrelation Time: %7.1f, IAT/n: %7.1f" %
+              (iat, iat / self.n))
+
+        self.TS(par=par, start=start, end=end)
+
+    def Hist(self, par=-1, start=0, end=0, g=(lambda x: x[0]), xlab="g", bins=20):
+        """Basic histograms and output analysis.  If par=-1, use g.
+           The function g provides a transformation to be applied to the data,
+           eg g=(lambda x: abs(x[0]-x[1]) would plot a histogram of the distance
+           between parameters 0 and 1, etc."""
+        from pylab import hist, xlabel
+
+        if (end == 0):
+            end = self.T
+
+        if (par == -1):
+            ser = zeros(end - start)
+            for it in range(end - start):
+                ser[it] = g(self.Output[it + start, :])
+            xlabel(xlab)
+            print("Mean for %s= %f" % (xlab, mean(ser)))
+        else:
+            ser = self.Output[start:end, par]
+            xlabel("Parameter %d" % par)
+            print("Mean for par %d= %f" % (par, mean(ser)))
+
+        hist(ser, bins=bins)
+        print("Do:\nfrom pylab import show\nshow()")
+
+    def Save(self, fnam, start=0, thin=1):
+        """Saves the Output as a text file, starting at start (burn in), with thinning (thin)."""
+        print("Saving output, all pars. plus the U's in file", fnam)
+
+        from numpy import savetxt
+
+        savetxt(fnam, self.Output[start::thin, ])
+
+
+# A simple Random Walk M-H
+    def RunRWMH(self, T, x0, sigma):
+        """Run a simple Random Walk M-H"""
+
+        print(
+            "twalk: This is the Random Walk M-H running with %d iterations." % T)
+        # Local variables
+        x = x0.copy()
+        if not(self.Supp(x)):
+            print("twalk: ERROR, initial point x0 out of support.")
+            return 0
+
+        u = self.U(x)
+        n = self.n
+
+        # Set the array to place the iterations and the U's
+        self.Output = zeros((T + 1, n + 1))
+        self.Acc = zeros(6)
+
+        # Make local references for less writing
+        Output = self.Output
+        U = self.U
+        Supp = self.Supp
+        Acc = self.Acc
+
+        Output[0, 0:n] = x.copy()
+        Output[0, n] = u
+
+        y = x.copy()
+        for it in range(T):
+            y = x + normal(size=n) * sigma  # each entry with std. dev. sigma[i]
+            if Supp(y):  # If it is within the support of the objective
+                uprop = U(y)  # Evaluate the objective
+                if (uniform() < exp(u - uprop)):
+                    x = y.copy()  # Accept the proposal y
+                    u = uprop
+                    Acc[5] += 1
+
+            Output[it + 1, 0:n] = x
+            Output[it + 1, n] = u
+
+        if (Acc[5] == 0):
+            print("twalk: WARNING,  all propolsals were rejected!")
+            return 0
+
+        Acc[5] /= T
+        return 1
+
+
+##########################################################################
+# Auxiliary functions to calculate Integrated autocorrelation times of a
+# time series ####
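+#
+# Sketch of the approach: for each requested column the autocorrelations
+# rho_1, rho_2, ... are estimated up to maxlag, summed in adjacent pairs
+# (the "gammas"), and accumulated only while the gammas remain positive and
+# decreasing (see Cutts), in the spirit of the standard estimate
+# tau = 1 + 2*sum_k rho_k for the integrated autocorrelation time.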
+
+
+# Calculates an autocovariance 2x2 matrix at lag l in column c of matrix Ser with T rows
+# The variances of each series are in the diagonal and the
+# (auto)covariance in the off diag.
+def AutoCov(Ser, c, la, T=0):
+    if (T == 0):
+        T = shape(Ser)[0]  # Number of rows in the matrix (sample size)
+
+    return cov(Ser[0:(T - 1 - la), c], Ser[la:(T - 1), c], bias=1)
+
+
+# Calculates the autocorrelation from lag 0 to lag la of columns cols (list)
+# for matrix Ser
+def AutoCorr(Ser, cols=0, la=1):
+    T = shape(Ser)[0]  # Number of rows in the matrix (sample size)
+
+    ncols = shape(mat(cols))[1]  # Number of columns to analyse (parameters)
+
+    # if ncols == 1:
+    #    cols = [cols]
+
+    # Matrix to hold output
+    Out = matrix(ones((la) * ncols)).reshape(la, ncols)
+
+    for c in range(ncols):
+        for l in range(1, la + 1):
+            Co = AutoCov(Ser, cols[c], l, T)
+            Out[l - 1, c] = Co[0, 1] / (sqrt(Co[0, 0] * Co[1, 1]))
+
+    return Out
+
+
+# Makes an upper band matrix of ones, to add the autocorrelation matrix
+# gamma = auto[2*m+1,c]+auto[2*m+2,c] etc.
+# MakeSumMat(lag) * AutoCorr( Ser, cols=c, la=lag) to make the gamma matrix
+def MakeSumMat(lag):
+    rows = lag // 2  # integer division
+    Out = mat(zeros([rows, lag], dtype=int))
+
+    for i in range(rows):
+        Out[i, 2 * i] = 1
+        Out[i, 2 * i + 1] = 1
+
+    return Out
+
+
+# Finds the cutting time, when the gammas become negative
+def Cutts(Gamma):
+    cols = shape(Gamma)[1]
+    rows = shape(Gamma)[0]
+    Out = mat(zeros([1, cols], dtype=int))
+    Stop = mat(zeros([1, cols], dtype=bool))
+
+    if (rows == 1):
+        return Out
+
+    i = 0
+    # while (not(all(Stop)) & (i < (rows-1))):
+    for i in range(rows - 1):
+        for j in range(cols):  # while Gamma stays positive and decreasing
+            if (((Gamma[i + 1, j] > 0.0) & (Gamma[i + 1, j] < Gamma[i, j])) & (not Stop[0, j])):
+                Out[0, j] = i + 1  # the cutting time for column j is i+1
+            else:
+                Stop[0, j] = True
+        i += 1
+
+    return Out
+
+
+# Automatically find a maxlag for IAT calculations
+def AutoMaxlag(Ser, c, rholimit=0.05, maxmaxlag=20000):
+    Co = AutoCov(Ser, c, la=1)
+    rho = Co[0, 1] / Co[0, 0]  # lag one autocorrelation
+
+    # if the autocorrelation decays like exp(-lag/lam), then at lag = 1
+    # rho = exp(-1/lam), so lam = -1/log(|rho|)
+    lam = -1.0 / log(abs(rho))
+
+    # Our initial guess for maxlag is three times lam (i.e. three times the
+    # mean life)
+    maxlag = int(floor(3.0 * lam)) + 1
+
+    # We take 1% of lam to jump forward and look for the
+    # rholimit threshold
+    jmp = int(ceil(0.01 * lam)) + 1
+
+    T = shape(Ser)[0]  # Number of rows in the matrix (sample size)
+
+    while ((abs(rho) > rholimit) & (maxlag < min(T / 2, maxmaxlag))):
+        Co = AutoCov(Ser, c, la=maxlag)
+        rho = Co[0, 1] / Co[0, 0]
+        maxlag = maxlag + jmp
+        ###print("maxlag=", maxlag, "rho", abs(rho), "\n")
+
+    maxlag = int(floor(1.3 * maxlag))
+    # 30% more
+
+    if (maxlag >= min(T // 2, maxmaxlag)):  # not enough data
+        fixmaxlag = min(min(T // 2, maxlag), maxmaxlag)
+        print("AutoMaxlag: Warning: maxlag= %d > min(T/2,maxmaxlag=%d), fixing it to %d" %
+              (maxlag, maxmaxlag, fixmaxlag))
+        return fixmaxlag
+
+    if (maxlag <= 1):
+        fixmaxlag = 10
+        print("AutoMaxlag: Warning: maxlag= %d ?!, fixing it to %d" %
+              (maxlag, fixmaxlag))
+        return fixmaxlag
+
+    print("AutoMaxlag: maxlag= %d." % maxlag)
+    return maxlag
+
+
+# Find the IAT
+def IAT(Ser, cols=-1,  maxlag=0, start=0, end=0):
+
+    ncols = shape(mat(cols))[1]  # Number of columns to analyse (parameters)
+    if ncols == 1:
+        if (cols == -1):
+            cols = shape(Ser)[1] - 1  # default = last column
+        cols = [cols]
+
+    if (end == 0):
+        end = shape(Ser)[0]
+
+    if (maxlag == 0):
+        for c in cols:
+            maxlag = max(maxlag, AutoMaxlag(Ser[start:end, :], c))
+
+    #print("IAT: Maxlag=", maxlag)
+
+    #Ga = MakeSumMat(maxlag) * AutoCorr( Ser[start:end,:], cols=cols, la=maxlag)
+
+    Ga = mat(zeros((maxlag // 2, ncols)))
+    auto = AutoCorr(Ser[start:end, :], cols=cols, la=maxlag)
+
+    # Instead of producing the maxlag/2 X maxlag MakeSumMat matrix, we
+    # calculate the gammas like this
+    for c in range(ncols):
+        for i in range(maxlag // 2):
+            Ga[i, c] = auto[2 * i, c] + auto[2 * i + 1, c]
+
+    cut = Cutts(Ga)
+    nrows = shape(Ga)[0]
+
+    ncols = shape(cut)[1]
+    Out = -1.0 * mat(ones([1, ncols]))
+
+    if any((cut + 1) == nrows):
+        print("IAT: Warning: Not enough lag to calculate IAT")
+
+    for c in range(ncols):
+        for i in range(cut[0, c] + 1):
+            Out[0, c] += 2 * Ga[i, c]
+
+    return Out
diff --git a/bumps/quasinewton.py b/bumps/quasinewton.py
new file mode 100644
index 0000000..69343f3
--- /dev/null
+++ b/bumps/quasinewton.py
@@ -0,0 +1,844 @@
+# Copyright (C) 2009-2010, University of Maryland
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+# Author: Ismet Sahin
+"""
+BFGS quasi-newton optimizer.
+
+All modules in this file are implemented from the book
+"Numerical Methods for Unconstrained Optimization and Nonlinear Equations" by
+J.E. Dennis and Robert B. Schnabel (Only a few minor modifications are done).
+
+The interface is through the :func:`quasinewton` function.  Here is an
+example call::
+
+    n = 2
+    x0 = array([-0.9, 0.9])
+    fn = lambda p: (1-p[0])**2 + 100*(p[1]-p[0]**2)**2
+    grad = lambda p: array([-2*(1-p[0]) - 400*(p[1]-p[0]**2)*p[0],
+                            200*(p[1]-p[0]**2)])
+    Sx = ones(n)
+    typf = 1                       # typical value of fn near the minimum
+    macheps = finfo(float).eps
+    eta = macheps
+    maxstep = 100
+    gradtol = 1e-6
+    steptol = 1e-12                # do not let steptol be larger than 1e-9
+    itnlimit = 1000
+    result = quasinewton(fn, x0, grad, Sx, typf,
+                         macheps, eta, maxstep, gradtol, steptol, itnlimit)
+    print("status code %d"%result['status'])
+    print("x_min=%s, f(x_min)=%g"%(str(result['x']),result['fx']))
+    print("iterations, function calls, linesearch function calls",
+          result['iterations'],result['evals'],result['linesearch_evals'])
+"""
+from __future__ import print_function
+
+__all__ = ["quasinewton"]
+
+from numpy import inf, sqrt, isnan, isinf, finfo, diag, zeros, ones
+from numpy import array, linalg, inner, outer, dot, amax, maximum
+
+STATUS = {
+    1: "Gradient < tolerance",
+    2: "Step size < tolerance",
+    3: "Invalid point in line search",
+    4: "Iterations exceeded",
+    5: "Max step taken --- function unbounded?",
+    6: "User abort",
+    7: "Iterations exceeded in line search",
+    8: "Line search step size is too small",
+    9: "Singular Hessian",
+}
+
+
+def quasinewton(fn, x0=None, grad=None, Sx=None, typf=1, macheps=None, eta=None,
+                maxstep=100, gradtol=1e-6, steptol=1e-12, itnlimit=2000,
+                abort_test=None, monitor=lambda **kw: True):
+    r"""
+    Run a quasinewton optimization on the problem.
+
+    *fn(x)* is the cost function, which takes a point x and returns a scalar fx.
+
+    *x0* is the initial point
+
+    *grad* is the analytic gradient (if available)
+
+    *Sx* is a scale vector indicating the typical values for parameters in
+    the fitted result. This is used for a variety of things such as setting
+    the step size in the finite difference approximation to the gradient, and
+    controlling numerical accuracy in calculating the Hessian matrix.  If for
+    example some of your model parameters are in the order of 1e-6, then Sx
+    for those parameters should be set to 1e-6. Default: [1, ...]
+
+    *typf* is the typical value for f(x) near the minimum.  This is used along
+    with gradtol to check the gradient stopping condition.  Default: 1
+
+    *macheps* is the minimum value that can be added to 1 to produce a number
+    not equal to 1.  Default: numpy.finfo(float).eps
+
+    *eta* adapts the numerical gradient calculations to machine precision.
+    Default: *macheps*
+
+    *maxstep* is the maximum step size in any gradient step, after normalizing
+    by *Sx*. Default: 100
+
+    *gradtol* is a stopping condition for the fit based on the amount of
+    improvement expected at the next step.  Default: 1e-6
+
+    *steptol* is a stopping condition for the fit based on the size
+    of the step. Default: 1e-12
+
+    *itnlimit* is the maximum number of steps to take before stopping.
+    Default: 2000
+
+    *abort_test* is a function which tests whether the user has requested
+    abort. Default: None.
+
+    *monitor(x,fx,step)* is called every iteration so that a user interface
+    function can monitor the progress of the fit.  Default: lambda \*\*kw: True
+
+
+    Returns the fit result as a dictionary:
+
+    *status* is a status code indicating why the fit terminated.  Turn the
+    status code into a string with *STATUS[result.status]*.  Status values
+    vary from 1 to 9, with 1 and 2 indicating convergence and the remaining
+    codes indicating some form of premature termination.
+
+    *x* is the minimum point
+
+    *fx* is the value fn(x) at the minimum
+
+    *H* is the approximate Hessian matrix, which is the inverse of the
+    covariance matrix
+
+    *L* is the cholesky decomposition of H+D, where D is a small correction
+    to force H+D to be positive definite.  Since H approximates the inverse
+    of the covariance matrix, L can be used to compute parameter uncertainty.
+
+    *iterations* is the number of iterations
+
+    *evals* is the number of function evaluations
+
+    *linesearch_evals* is the number of function evaluations for line search
+    """
+    # print("starting QN")
+    # Fill in default values for any parameters that were not specified.
+    # The cost function fn and the initial point x0 must always be supplied;
+    # the remaining arguments have usable defaults.
+    if x0 is None:
+        raise ValueError("quasinewton: initial point x0 must be supplied")
+    n = len(x0)      # n is also used below when counting function evaluations
+
+    if grad is None:
+        analgrad = 0
+    else:
+        analgrad = 1
+
+    if Sx is None:
+        Sx = ones(n)
+        #Sx = x0 + (x0==0.)
+    elif len(Sx) != n:
+        raise ValueError("sizes of x0 and Sx must be the same")
+
+    if macheps is None:
+        # PAK: use finfo rather than macheps
+        macheps = finfo('d').eps
+
+    if eta is None:
+        eta = macheps
+
+    fcount = 0                    # total function count
+    fcount_ls = 0                # function count due to line search
+
+    # If analytic gradient is available then fn will return both function
+    # value and analytic gradient.  Otherwise, use finite difference method
+    # for estimating the gradient
+    if analgrad == 1:
+        fc = fn(x0)
+        gc = grad(x0)
+        fcount = fcount + 1
+    else:
+        fc = fn(x0)
+        gc = fdgrad(n, x0, fc, fn, Sx, eta)
+        fcount = fcount + n + 1
+
+    # Check if the initial guess is a local minimizer
+    termcode = umstop0(n, x0, fc, gc, Sx, typf, gradtol)
+    consecmax = 0
+
+    # Value to return if we fail early
+    # Approximately x0 is a critical point
+    xf = x0
+    ff = fc
+    H = L = None
+    if termcode == 0:
+        H = inithessunfac(n, fc, typf, Sx)
+
+    # STEP 9.
+    xc = x0
+
+    # Iterate until convergence in the following loop
+    itncount = 0
+    while termcode == 0:  # todo. increase itncount
+        # print("update",itncount)
+        itncount = itncount + 1
+
+        # disp(['Iteration = ' num2str(itncount)])
+        # Find Newton step sN
+        H, L = modelhess(n, Sx, macheps, H)
+        # the vector obtained in the middle
+        middle_step_v = linalg.solve(L, -gc)
+        sN = linalg.solve(L.transpose(), middle_step_v)   # the last step
+        if isnan(sN).any():
+            # print("H",H)
+            # print("L",L)
+            # print("v",middle_step_v)
+            # print("Sx",Sx)
+            # print("gc",gc)
+            termcode = 9
+            break
+
+        # Perform line search (Alg.6.3.1). todo. put param order as in the book
+        # print("calling linesearch",xc,fc,gc,sN,Sx,H,L,middle_step_v)
+        # print("linesearch",xc,fc)
+        retcode, xp, fp, maxtaken, fcnt \
+            = linesearch(fn, n, xc, fc, gc, sN, Sx, maxstep, steptol)
+        fcount += fcnt
+        fcount_ls += fcnt
+        #plot(xp(1), xp(2), 'g.')
+
+        # Evaluate gradient at new point xp
+        if analgrad == 1:
+            gp = grad(xp)
+        else:
+            gp = fdgrad(n, xp, fp, fn, Sx, eta)
+            fcount = fcount + n
+
+        # Check stopping criteria (alg.7.2.1)
+        consecmax = consecmax + 1 if maxtaken else 0
+        termcode = umstop(n, xc, xp, fp, gp, Sx, typf, retcode, gradtol,
+                          steptol, itncount, itnlimit, consecmax)
+
+        if abort_test():
+            termcode = 6
+
+        # STEP 10.6
+        # If termcode is larger than zero, we found a point satisfying one
+        # of the termination criteria, return from here.  Otherwise evaluate
+        # the next Hessian approximation (Alg. 9.4.1).
+        if termcode > 0:
+            xf = xp                                        # x final
+            ff = fp                                        # f final
+
+        elif not monitor(x=xp, fx=fp, step=itncount):
+            termcode = 6
+
+        else:
+            H = bfgsunfac(n, xc, xp, gc, gp, macheps, eta, analgrad, H)
+            xc = xp
+            fc = fp
+            gc = gp
+        # STOPHERE
+
+    result = dict(status=termcode, x=xf, fx=ff, H=H, L=L,
+                  iterations=itncount, evals=fcount, linesearch_evals=fcount_ls)
+    #print("result",result, steptol, macheps)
+    return result
+
+#------------------------------------------------------------------------------
+#@author: Ismet Sahin
+# Alg. 9.4.1
+
+# NOTE:
+# The BFGS Hessian update is skipped when either of the following holds:
+#    (i)  y'*s < sqrt(macheps)*norm(s)*norm(y)
+#    (ii) every component of y - H*s lies within the gradient noise level,
+#         i.e. abs(y - H*s) < tol * max(abs(gc), abs(gp)) elementwise
+
+
+def bfgsunfac(n, xc, xp, gc, gp, macheps, eta, analgrad, H):
+    s = xp - xc
+    y = gp - gc
+    temp1 = inner(y, s)
+    # ISMET : I added condition of having temp1 != 0
+    if temp1 >= sqrt(macheps) * linalg.norm(s) * linalg.norm(y) and temp1 != 0:
+        if analgrad == 1:
+            tol = eta
+        else:
+            tol = sqrt(eta)
+
+        # deal with noise levels in y
+        skipupdate = 1
+        t = dot(H, s)
+        temp_logicals = (abs(y - t) >= tol * maximum(abs(gc), abs(gp)))
+        if sum(temp_logicals):
+            skipupdate = 0
+
+        # do the BFGS update if skipupdate is false
+        if skipupdate == 0:
+            temp2 = dot(s, t)
+            H = H + outer(y, y) / temp1 - outer(t, t) / temp2
+
+    return H
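+# The update performed above is the standard unfactored BFGS formula
+#     H_new = H + (y y') / (y' s) - (H s)(H s)' / (s' H s)
+# with s = xp - xc and y = gp - gc; it is skipped when y's is too small
+# or when y is indistinguishable from H*s at the gradient noise level.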
+
+
+#------------------------------------------------------------------------------
+'''
+ at author: Ismet Sahin
+'''
+
+
+def choldecomp(n, H, maxoffl, macheps):
+    minl = (macheps) ** (0.25) * maxoffl
+
+    if maxoffl == 0:
+        # H is known to be a positive definite matrix
+        maxoffl = sqrt(max(abs(diag(H))))
+
+    minl2 = sqrt(macheps) * maxoffl
+
+    # 3. maxadd is the number (R) specifying the maximum amount added to any
+    # diagonal entry of Hessian matrix H
+    maxadd = 0
+
+    # 4. form column j of L
+    L = zeros((n, n))
+    for j in range(1, n + 1):
+        L[j - 1, j - 1] = H[j - 1, j - 1] - sum(L[j - 1, 0:j - 1] ** 2)
+        minljj = 0
+        for i in range(j + 1, n + 1):
+            L[i - 1, j - 1] = H[j - 1, i - 1] - \
+                sum(L[i - 1, 0:j - 1] * L[j - 1, 0:j - 1])
+            minljj = max(abs(L[i - 1, j - 1]), minljj)
+
+        # 4.4
+        minljj = max(minljj / maxoffl, minl)
+
+        # 4.5
+        if L[j - 1, j - 1] > minljj ** 2:
+            # normal Cholesky iteration
+            L[j - 1, j - 1] = sqrt(L[j - 1, j - 1])
+        else:
+            # augment H[j-1,j-1]
+            if minljj < minl2:
+                minljj = minl2    # occurs only if maxoffl = 0
+
+            maxadd = max(maxadd, minljj ** 2 - L[j - 1, j - 1])
+            L[j - 1, j - 1] = minljj
+
+        # 4.6
+        L[j:n, j - 1] = L[j:n, j - 1] / L[j - 1, j - 1]
+
+    return L, maxadd
+
+#------------------------------------------------------------------------------
+# ALGORITHM 5.6.3
+
+# Ismet Sahin
+
+# function g = fdgrad(n, xc, fc, objfunc, sx, eta)
+# g = fdgrad(@obj_function1, 2, [1 -1]', 10, [1 1], eps)
+
+# NOTATION:
+#    N : Natural number
+#    R : Real number
+#    Rn: nx1 real vector
+#    Rnxm : nxm real matrix
+
+# INPUTS:
+#    n  : the dimension of the gradient vector (N)
+#    xc : the current point at which the value of gradient is computed (Rn)
+#    fc : function value at xc (R)
+#    objfunc : a function handle which is used to compute function values
+#    Sx : a n-dim vector, jth entry specifies the typical value of jth param.
+# (Rn)
+#    eta: equals to 1e-DIGITS where DIGITS is an integer specifying the
+# number of reliable digits (R)
+# OUTPUT:
+#    g : the n-dim finite difference gradient vector (Rn)
+
+# NOTES :
+#    hj : is the constant specifying the step size in the direction of jth
+# coordinate (R)
+#    ej : the unit vector, jth column of the identity matrix (Rn)
+
+# COMMENTS:
+#--- FIND STEP SIZE hj
+#    1.a : sign(x) does not work for us when x = 0 since this makes the step
+# size hj zero which is not allowed. (Step size = 0 => gj = inf.)
+#    1.b : evaluation of the step size
+#    1.c : a trick to reduce error due to finite precision.  The line xc(j) =
+# xc(j) + hj is equivalent to xc = xc + hj * ej where ej is the jth column
+# of identity matrix.
+#
+#--- EVALUATE APPR. GRADIENT
+# First evaluate function at xc + hj * ej and then estimate jth entry of
+# the gradient.
+
+
+def fdgrad(n, xc, fc, fn, Sx, eta):
+
+    # create memory for gradient
+    g = zeros(n)
+
+    sqrteta = sqrt(eta)
+    for j in range(1, n + 1):
+        #--- FIND STEP SIZE hj
+        if xc[j - 1] >= 0:
+            signxcj = 1
+        else:
+            signxcj = -1                # 1.a
+
+        # 1.b
+        hj = sqrteta * max(abs(xc[j - 1]), 1 / Sx[j - 1]) * signxcj
+
+        # 1.c
+        tempj = xc[j - 1]
+        xc[j - 1] = xc[j - 1] + hj
+        hj = xc[j - 1] - tempj
+
+        #--- EVALUATE APPR. GRADIENT
+        fj = fn(xc)
+        # PAK: hack for infeasible region: point the other way
+        if isinf(fj):
+            fj = fc + hj
+        g[j - 1] = (fj - fc) / hj
+        # if isinf(g[j-1]):
+        #    print("fc,fj,hj,Sx,xc",fc,fj,hj,Sx[j-1],xc[j-1])
+
+        # now reset the current
+        xc[j - 1] = tempj
+
+    #print("gradient", g)
+    return g
+
+
+#------------------------------------------------------------------------------
+# @author: Ismet Sahin
+# Example call:
+# H = inithessunfac(2, f, 1, [1 0.1]')
+
+def inithessunfac(n, f, typf, Sx):
+    temp = max(abs(f), typf)
+    H = diag(temp * Sx ** 2)
+    return H
+
+
+#------------------------------------------------------------------------------
+
+def linesearch(cost_func, n, xc, fc, g, p, Sx, maxstep, steptol):
+    """
+ALGORITHM 6.3.1
+
+Ismet Sahin
+
+THE PURPOSE
+
+    is to find a step size which yields the new function value smaller than the
+    current function value, i.e. f(xc + alfa*p) <= f(xc) + alfa * lambda * g'p
+
+CONDITIONS
+
+    g'p < 0
+    alfa < 0.5
+
+NOTATION:
+    N : Natural number
+    R : Real number
+    Rn: nx1 real vector
+    Rnxm : nxm real matrix
+    Str: a string
+
+INPUTS
+    n : dimensionality (N)
+    xc : the current point ( Rn)
+    fc : the function value at xc (R)
+    obj_func : the function handle to evaluate function values (str like :
+       '@costfunction1')
+    g : gradient (Rn)
+    p : the descent direction (Rn)
+    Sx : scale factors (Rn)
+    maxstep : maximum step size allowed (R)
+    steptol : step tolerance in order to break infinite loop in line search (R)
+
+OUTPUTS
+    retcode : integer; 0 means an acceptable new point xp was found,
+       nonzero means the search failed (N).
+    xp : the new point (Rn)
+    fp : function value at xp (R)
+    maxtaken : boolean (N)
+
+NOTES:
+    alfa : is used to prevent function value reductions which are too small.
+       Here we'll use a very small number in order to accept very small
+       reductions but not too small.
+"""
+
+    maxtaken = 0
+
+    # alfa specifies how much function value reduction is allowable.  The
+    # smaller the alfa, the smaller the function value reduction we allow.
+    alfa = 1e-4
+
+    # the magnitude of the Newton step
+    Newtlen = linalg.norm(Sx * p)
+
+    if Newtlen > maxstep:
+        # Newton step is larger than the max acceptable step size (maxstep).
+        # Make it equal or smaller than maxstep
+        p = p * (maxstep / Newtlen)
+        Newtlen = maxstep
+
+    initslope = inner(g, p)
+
+    # "Relative length of p as calculated in the stopping routine"
+    # rellength = amax(abs(p) / maximum(abs(xc), Sx))    # this was a bug
+    rellength = amax(abs(p) / maximum(abs(xc), 1 / Sx))
+
+    minlambda = steptol / rellength
+
+    lambdaM = 1.0
+
+    # In this loop, we try to find an acceptable next point
+    # xp = xc + lambda * p by finding an optimal lambda based on one
+    # dimensional quadratic and cubic models
+    fcount = 0
+    while True:                # 10 starts.
+        # next point candidate
+        xp = xc + lambdaM * p
+        if isnan(xp).any():
+            #print("nan xp")
+            retcode = 1
+            xp, fp = xc, fc
+            break
+        if fcount > 20:
+            #print("too many cycles in linesearch",xp)
+            retcode = 2
+            xp, fp = xc, fc
+            break
+        # function value at xp
+        fp = cost_func(xp)
+        #print("linesearch",fcount,xp,xc,lambdaM,p,fp,fc)
+        if isinf(fp):
+            fp = 2 * fc  # PAK: infeasible region hack
+        fcount = fcount + 1
+        if fp <= fc + alfa * lambdaM * initslope:
+            # satisfactory xp is found
+            retcode = 0
+            if lambdaM == 1.0 and Newtlen > 0.99 * maxstep:
+                maxtaken = 1
+            # return from here
+            break
+        elif lambdaM < minlambda:
+            # step length is too small, so a satisfactory xp cannot be found
+            #print("step",lambdaM,minlambda,steptol,rellength)
+            retcode = 3
+            xp, fp = xc, fc
+            break
+        else:                            # 10.3c starts
+            # reduce lambda by a factor between 0.1 and 0.5
+            if lambdaM == 1.0:
+                # first backtrack with one dimensional quadratic fit
+                lambda_temp = -initslope / (2.0 * (fp - fc - initslope))
+                #print("L1",lambda_temp)
+            else:
+                # perform second and following backtracks with cubic fit
+                Mt = array([[1.0/lambdaM**2, -1.0/lambda_prev**2],
+                            [-lambda_prev/lambdaM**2, lambdaM/lambda_prev**2]])
+                vt = array([[fp - fc - lambdaM * initslope],
+                            [fp_prev - fc - lambda_prev * initslope]])
+                ab = (1.0 / (lambdaM - lambda_prev)) * dot(Mt, vt)
+                # a = ab(1) and b = ab(2)
+                disc = ab[1, 0] ** 2 - 3.0 * ab[0, 0] * initslope
+                #print("Mt,vt,ab,disc",Mt,vt,ab,disc)
+                if ab[0, 0] == 0.0:
+                    # cubic model turn out to be a quadratic
+                    lambda_temp = -initslope / (2.0 * ab[1, 0])
+                    #print("L2",lambda_temp)
+                else:
+                    # the model is a legitimate cubic
+                    lambda_temp = (-ab[1, 0] + sqrt(disc)) / (3.0 * ab[0, 0])
+                    #print("L3",lambda_temp)
+
+                if lambda_temp > 0.5 * lambdaM:
+                    # larger than half of previous lambda is not allowed.
+                    lambda_temp = 0.5 * lambdaM
+                    #print("L4",lambda_temp)
+
+            lambda_prev = lambdaM
+            fp_prev = fp
+            if lambda_temp <= 0.1 * lambdaM:
+                # smaller than 1/10 th of previous lambda is not allowed.
+                lambdaM = 0.1 * lambdaM
+            else:
+                lambdaM = lambda_temp
+
+            #print('lambda = ', lambdaM)
+
+    # return xp, fp, retcode
+    return retcode, xp, fp, maxtaken, fcount
+
+
+#------------------------------------------------------------------------------
+
+# @author: Ismet Sahin
+# ALGORITHM 1.3.1
+def machineeps():
+    macheps = 1.0
+    while (macheps + 1) != 1:
+        macheps = macheps / 2
+
+    macheps = 2 * macheps
+    return macheps
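+# Note: machineeps() computes the same quantity as numpy.finfo(float).eps
+# (about 2.22e-16 for IEEE double precision); quasinewton() above uses
+# finfo directly instead of calling this routine.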
+
+
+#------------------------------------------------------------------------------
+
+def modelhess(n, Sx, macheps, H):
+    """
+ at author: Ismet Sahin.
+Thanks to Christopher Meeting for his help in converting this module from
+Matlab to Python
+
+ALGORITHM 5.5.1
+
+NOTES:
+    Currently we are not implementing steps 1, 14, and 15 (TODO)
+
+This function performs perturbed Cholesky decomposition (CD) as if the input
+Hessian matrix is positive definite.  The code for perturbed CD resides in
+choldecomp.m file which returns the factored lower triangle matrix L and a
+number, maxadd, specifying the largest number added to a diagonal element of
+H during the CD decomposition.  This function checks if the decomposition is
+completed without adding any positive number to the diagonal elements of H,
+i.e. maxadd <= 0.  Otherwise, this function adds the least number to the
+diagonals of H which makes it positive definite based on maxadd and other
+entries in H.
+EXAMPLE INPUT MATRICES (Hessians to try)::
+
+         A1 = [[2,   0,   2.4],
+               [0,   2,   0  ],
+               [2.4, 0,   3  ]]
+
+         A2 = [[2,   0,   2.5],
+               [0,   2,   0  ],
+               [2.5, 0,   3  ]]
+
+         A3 = [[2,   0,   10 ],
+               [0,   2,   0  ],
+               [10,  0,   3  ]]
+"""
+
+    # SCALING
+    scale_needed = 0                        # ISMET uses this parameter
+    if sum(Sx - ones(n)) != 0:
+        # scaling is requested by the user
+        scale_needed = 1
+        Dx = diag(Sx)
+        Dx_inv = diag(1.0 / Sx)
+        H = dot(Dx_inv, dot(H, Dx_inv))
+
+    # STEP I.
+    sqrteps = sqrt(macheps)
+
+    # 2-4.
+    H_diag = diag(H)
+    maxdiag = max(H_diag)
+    mindiag = min(H_diag)
+
+    # 5.
+    maxposdiag = max(0, maxdiag)
+
+    # 6. mu is the amount to be added to diagonal of H before the
+    # Cholesky decomp. If the minimum diagonal is much much smaller than
+    # the maximum diagonal element then adjust mu accordingly otherwise mu = 0.
+    if mindiag <= sqrteps * maxposdiag:
+        mu = 2 * (maxposdiag - mindiag) * sqrteps - mindiag
+        maxdiag = maxdiag + mu
+    else:
+        mu = 0
+
+    # 7. maximum of off-diagonal elements of H
+    diag_infinite = diag(inf * ones(n))
+    maxoff = (H - diag_infinite).max()
+
+    # 8. if maximum off diagonal element is much much larger than the maximum
+    # diagonal element of the Hessian H
+    if maxoff * (1 + 2 * sqrteps) > maxdiag:
+        mu = mu + (maxoff - maxdiag) + 2 * sqrteps * maxoff
+        maxdiag = maxoff * (1 + 2 * sqrteps)
+
+    # 9.
+    if maxdiag == 0:            # if H == 0
+        mu = 1
+        maxdiag = 1
+
+    # 10. mu>0 => need to add mu amount to the diagonal elements: H = H + mu*I
+    if mu > 0:
+        diag_mu = diag(mu * ones(n))
+        H = H + diag_mu
+
+    # 11.
+    maxoffl = sqrt(max(maxdiag, maxoff / n))
+
+    # STEP II. Perform perturbed Cholesky decomposition H + D = LL' where D is
+    # a diagonal matrix which is implicitly added to H if H is not positive
+    # definite. Matrix D has only positive elements. The output variable maxadd
+    # indicates the maximum number added to a diagonal entry of the Hessian,
+    # i.e. the maximum of D. If maxadd is returned 0, then H was indeed pd
+    # and L is the resulting factor.
+    # 12.
+    L, maxadd = choldecomp(n, H, maxoffl, macheps)
+
+    # STEP III.
+    # 13. If maxadd <= 0, we are done H was positive definite.
+    if maxadd > 0:
+        # H was not positive definite
+        # print('WARNING: Hessian is not pd. Max number added to H is ',maxadd)
+        maxev = H[0, 0]
+        minev = H[0, 0]
+        for i in range(1, n + 1):
+            offrow = sum(abs(H[0:i - 1, i - 1])) + sum(abs(H[i - 1, i:n]))
+            maxev = max(maxev, H[i - 1, i - 1] + offrow)
+            minev = min(minev, H[i - 1, i - 1] - offrow)
+
+        sdd = (maxev - minev) * sqrteps - minev
+        sdd = max(sdd, 0)
+        mu = min(maxadd, sdd)
+        H = H + diag(mu * ones(n))
+        L, maxadd = choldecomp(n, H, 0, macheps)
+
+    if scale_needed:                # todo. this calculation can be done faster
+        H = dot(Dx, dot(H, Dx))
+        L = dot(Dx, L)
+
+    return H, L
+
+
+#------------------------------------------------------------------------------
+def umstop(n, xc, xp, f, g, Sx, typf, retcode, gradtol, steptol,
+           itncount, itnlimit, consecmax):
+    """
+#@author: Ismet Sahin
+
+ALGORITHM 7.2.1
+
+Return codes:
+Note that return codes are nonnegative integers. When it is not zero, there is
+a termination condition which is satisfied.
+   0 : None of the termination conditions is satisfied
+   1 : Magnitude of scaled grad is less than gradtol; this is the primary
+       condition. The new point xp is most likely a local minimizer.  If gradtol
+       is too large, then this condition can be satisfied easier and therefore
+       xp may not be a local minimizer
+   2 : Scaled distance between last two points is less than steptol; xp might be
+       a local minimizer.  This condition may also be satisfied if step is
+       chosen too large or the algorithm is far from the minimizer and making
+       small progress
+   3 : The algorithm cannot find a new point giving a smaller function value than
+       the current point.  The current point may be a local minimizer, or the analytic
+       gradient implementation has some mistakes, or finite difference gradient
+       estimation is not accurate, or steptol is too large.
+   4 : Maximum number of iterations has been completed
+   5 : The maximum step length maxstep is taken for the last ten consecutive
+       iterations.  This may happen if the function is not bounded from below,
+       or the function has a finite asymptote in some direction, or maxstep is
+       too small.
+   7 : Too many cycles were taken in the line search
+   8 : The line search step size became too small
+    """
+
+    termcode = 0
+    if retcode == 1:
+        termcode = 3
+    elif retcode == 2:
+        termcode = 7
+    elif retcode == 3:
+        termcode = 8
+    elif retcode > 0:
+        raise ValueError("Unknown linesearch return code")
+    elif max(abs(g) * maximum(abs(xp), 1 / Sx) / max(abs(f), typf)) <= gradtol:
+        # maximum component of scaled gradient is smaller than gradtol.
+        # TODO: make sure not to use a too large typf value which leads to the
+        # satisfaction of this algorithm easily.
+        termcode = 1
+    elif max(abs(xp - xc) / maximum(abs(xp), 1 / Sx)) <= steptol:
+        # maximum component of scaled step is smaller than steptol
+        termcode = 2
+    elif itncount >= itnlimit:
+        # maximum number of iterations are performed
+        termcode = 4
+    elif consecmax == 10:
+        # not more than 10 steps will be taken consecutively.
+        termcode = 5
+
+    return termcode
+
+
+#------------------------------------------------------------------------------
+#@author: Ismet Sahin
+
+# This function checks whether initial conditions are acceptable for
+# continuing unconstrained optimization
+
+# f : the function value at x0, i.e. f = f(x0),  (R)
+# g : the gradient at x0, (Rn)
+
+# termcode = 0 : x0 is not a critical point of f(x), (Z)
+# termcode = 1 : x0 is a critical point of f(x), (Z)
+
+# Note that x0 may be a critical point of the function; in this case, it is
+# either a local minimizer or a saddle point of the function.  If the Hessian
+# at x0 is positive definite than it is indeed a local minimizer.  Instead of
+# checking Hessian, we can also restart the driver program umexample from
+# another point which is close to x0.  If x0 is the local minimizer, the
+# algorithm will approach it.
+
+def umstop0(n, x0, f, g, Sx, typf, gradtol):
+    #consecmax = 0
+    if max(abs(g) * maximum(abs(x0), 1./Sx)/max(abs(f), typf)) <= 1e-3*gradtol:
+        termcode = 1
+    else:
+        termcode = 0
+    return termcode
+
+
+#------------------------------------------------------------------------------
+
+def example_call():
+    print('***********************************')
+
+    # Rosenbrock function
+    fn = lambda p: (1 - p[0])**2 + 100*(p[1] - p[0]**2)**2
+    grad = lambda p: array([-2*(1 - p[0]) - 400*(p[1] - p[0]**2)*p[0],
+                            200*(p[1] - p[0]**2)])
+    x0 = array([2.320894, -0.534223])
+    # x0 = array([2.0,1.0])
+
+    result = quasinewton(fn=fn, x0=x0, grad=grad)
+    #result = quasinewton(fn=fn, x0=x0)
+
+    print('\n\nInitial point x0 = ', x0, ', f(x0) = ', fn(x0))
+    for k in sorted(result.keys()):
+        print(k, "=", result[k])
+
+
+if __name__ == "__main__":
+    example_call()
diff --git a/bumps/random_lines.py b/bumps/random_lines.py
new file mode 100644
index 0000000..78b31e3
--- /dev/null
+++ b/bumps/random_lines.py
@@ -0,0 +1,298 @@
+"""
+The Random Lines algorithm finds the minimum of a function.
+
+Sahin, I. (2013). Minimization over randomly selected lines.  An
+International  Journal Of Optimization And Control: Theories &
+Applications (IJOCTA), 3(2), 111-119.
+http://dx.doi.org/10.11121/ijocta.01.2013.00167
+"""
+
+# Author : Ismet Sahin
+from __future__ import print_function
+
+__all__ = ["random_lines", "particle_swarm"]
+
+from itertools import count
+
+from numpy import zeros, ones, asarray, sqrt, arange, isfinite
+from numpy.random import rand, random_integers
+
+
+def print_every_five(step, x, fx, k):
+    if step % 5 == 0:
+        print(step, ":", fx[k], x[k])
+
+
+def random_lines(cfo, NP, CR=0.9, epsilon=1e-10, abort_test=None, maxiter=1000):
+    """
+    Random lines is a population-based optimizer which uses quadratic
+    fits along randomly oriented directions.
+
+    *cfo* is the cost function object.  This is a dictionary which contains
+    the following keys:
+
+        *cost* is the function to be optimized.  If *parallel_cost* exists,
+        it should accept a list of points, not just a single point on each
+        evaluation.
+
+        *n* is the problem dimension
+
+        *x0* is the initial point
+
+        *x1* and *x2* are lower and upper bounds for each parameter
+
+        *monitor* is a callable which is called each iteration using
+        *callback(step, x, fx, k)*, where *step* is the iteration number,
+        *x* is the population, *fx* is value of the cost function for each
+        member of the population and *k* is the index of the best point in
+        the population.
+
+        *f_opt* is the target value of the optimization
+
+    *NP* is the population size (the number of candidate points maintained).
+
+    *CR* is the cross-over ratio, which is the proportion of dimensions
+    that participate in any random orientation vector.
+
+    *epsilon* is the convergence criterion.
+
+    *abort_test* is a callable which indicates whether an external process
+    has requested the fit to stop.
+
+    *maxiter* is the maximum number of generations
+
+    Returns success, num_evals, f(x_best), x_best.
+    """
+    if 'parallel_cost' in cfo:
+        mapper = lambda v: asarray(cfo['parallel_cost'](v.T), 'd')
+    else:
+        mapper = lambda v: asarray(list(map(cfo['cost'], v.T)), 'd')
+    monitor = cfo.get('monitor', print_every_five)
+
+    n = cfo['n']
+
+    X = rand(n, NP)            # will hold original vectors
+
+    # CREATE FIRST GENERATION WITH LEGAL PARAMETER VALUES AND EVALUATE COSTS
+    # m th member of the population
+    for m in range(0, NP):
+        X[:, m] = cfo['x1'] + (cfo['x2'] - cfo['x1']) * X[:, m]
+    if 'x0' in cfo:
+        X[:, 0] = cfo['x0']
+    f = mapper(X)
+
+    n_feval = NP
+    f_best, i_best = min(zip(f, count()))
+
+    # CHECK INITIAL STOPPING CRITERIA
+    if abs(cfo['f_opt'] - f_best) < epsilon:
+        satisfied_sc = 1
+        x_best = X[:, i_best]
+        return satisfied_sc, n_feval, f_best, x_best
+
+    for L in range(1, maxiter + 1):
+
+        # finding destination vector
+        i_Xj = random_integers(0, NP - 2, NP)
+        i_ge = (i_Xj >= arange(0, NP))
+        i_Xj[i_ge] += 1
+
+        # choosing muk
+        muk = 0.01 + 0.49 * rand(NP)
+        inx = rand(NP) < 0.5
+        muk[inx] = -muk[inx]
+
+        # find xk and fk s
+        Xi = X
+        Xj = X[:, i_Xj]
+        P = Xj - Xi
+        Xk = Xi + (ones((n, 1)) * muk) * P
+        fk = mapper(Xk)
+        n_feval = n_feval + NP
+
+        # find quadratic models
+        if any(muk == 0) or any(muk == 1):
+            satisfied_sc = 0
+            x_best = X[:, i_best]
+            print('muk cannot be zero or one !!!')
+            return satisfied_sc, n_feval, f_best, x_best
+
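+        # Each line through xi and xj is modelled by the 1-D quadratic
+        # q(mu) = a*mu**2 + b*mu + fi fitted to (0, fi), (1, fj) and (muk, fk);
+        # its vertex mustar = -b/(2*a), used below, is the predicted minimizer
+        # along that line.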
+        fi = f
+        fj = f[i_Xj]
+        b = (muk/(muk-1))*fj - ((muk+1)/muk)*fi - (1/(muk*(muk-1)))*fk
+        a = fj - fi - b
+
+        crossovers = []
+        for k in range(0, NP):
+            if (abs(a[k]) < 1e-30
+                or (a[k] < 0 and fk[k] > fi[k] and fk[k] > fj[k])
+                or not isfinite(a[k])):
+                # xi survives
+                continue
+            else:
+                # xi may not survive
+                mustar = -b[k] / (2 * a[k])
+                xstar = Xi[:, k] + mustar * P[:, k]
+
+                # choosing random numbers for crossover
+                rn = rand(n)
+                indi = (rn < 0.5 * (1 - CR))
+                indj = (rn > 0.5 * (1 + CR))
+                xstar[indi] = Xi[indi, k]
+                xstar[indj] = Xj[indj, k]
+
+                # map into feasible set
+                inx = xstar < cfo['x1']
+                xstar[inx] = cfo['x1'][inx]
+                inx = xstar > cfo['x2']
+                xstar[inx] = cfo['x2'][inx]
+
+                crossovers.append((k, xstar))
+
+        if len(crossovers) > 0:
+            idx, xstar = [asarray(v) for v in zip(*crossovers)]
+            fstar = mapper(xstar.T)
+            n_feval += len(crossovers)
+
+            # xi does not survive, xstar replaces it
+            update = fstar < fi[idx]
+            f[idx[update]] = fstar[update]
+            X[:, idx[update]] = xstar[update, :].T
+
+        # CHECKING STOPPING CRITERIA
+        f_best, i_best = min(zip(f, count()))
+        if abs(cfo['f_opt'] - f_best) < epsilon:
+            satisfied_sc = 1
+            x_best = X[:, i_best]
+            return satisfied_sc, n_feval, f_best, x_best
+        if abort_test():
+            break
+
+        monitor(L, X, f, i_best)
+
+    return 1, n_feval, f_best, X[:, i_best]
+
+
+def particle_swarm(cfo, NP, epsilon=1e-10, maxiter=1000):
+    """
+    Particle swarm is a population based optimizer which uses force and
+    momentum to select candidate points.
+
+    *cfo* is the cost function object.  This is a dictionary which contains
+    the following keys:
+
+        *cost* is the function to be optimized.  If *parallel_cost* exists,
+        it should accept a list of points, not just a single point on each
+        evaluation.
+
+        *n* is the problem dimension
+
+        *x0* is the initial point
+
+        *x1* and *x2* are lower and upper bounds for each parameter
+
+        *monitor* is a callable which is called each iteration using
+        *callback(step, x, fx, k)*, where *step* is the iteration number,
+        *x* is the population, *fx* is value of the cost function for each
+        member of the population and *k* is the index of the best point in
+        the population.
+
+        *f_opt* is the target value of the optimization
+
+    *NP* is the population size (the number of candidate points maintained).
+
+    *epsilon* is the convergence criterion.
+
+    *maxiter* is the maximum number of generations
+
+    Returns success, num_evals, f(x_best), x_best.
+    """
+
+    if 'parallel_cost' in cfo:
+        mapper = lambda v: asarray(cfo['parallel_cost'](v.T), 'd')
+    else:
+        mapper = lambda v: asarray(list(map(cfo['cost'], v.T)), 'd')
+    monitor = cfo.get('monitor', print_every_five)
+
+    n = cfo['n']
+    c1 = 2.8
+    c2 = 1.3
+    phi = c1 + c2
+    K = 2 / abs(2 - phi - sqrt(phi * phi - 4 * phi))
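+    # K is the Clerc-Kennedy constriction coefficient,
+    #     K = 2 / |2 - phi - sqrt(phi**2 - 4*phi)|  with phi = c1 + c2 > 4,
+    # which damps the velocity update and keeps the swarm from diverging.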
+
+    X = rand(n, NP)            # will hold original vectors
+    V = zeros((n, NP))
+
+    # CREATE FIRST GENERATION WITH LEGAL PARAMETER VALUES AND EVALUATE COSTS
+    rn1 = rand(n, NP)
+    # m th member of the population
+    for m in range(0, NP):
+        extend = cfo['x2'] - cfo['x1']
+        X[:, m] = cfo['x1'] + extend * X[:, m]
+        V[:, m] = 2 * rn1[:, m] * extend - extend
+
+    if 'x0' in cfo:
+        X[:, 0] = cfo['x0']
+    f = mapper(X)
+
+    n_feval = NP
+    P = X.copy()               # personal best positions (a copy, not a view of X)
+
+    f_best, i_best = min(zip(f, count()))
+    for L in range(2, maxiter + 1):
+
+        rn2 = rand(n, NP)
+        for i in range(0, NP):
+            #r = rand(2)
+            r = rn2[:, i]
+            V[:, i] = V[:, i] + r[0] * c1 * \
+                (P[:, i] - X[:, i]) + r[1] * c2 * (P[:, i_best] - X[:, i])
+            V[:, i] = K * V[:, i]
+
+            X[:, i] = X[:, i] + V[:, i]
+
+        f_temp = mapper(X)
+        idx = f_temp < f
+        f[idx] = f_temp[idx]
+        P[:, idx] = X[:, idx]
+
+        n_feval = n_feval + NP
+
+        # CHECKING STOPPING CRITERIA
+        f_best, i_best = min(zip(f, count()))
+        if abs(cfo['f_opt'] - f_best) < epsilon:
+            satisfied_sc = 1
+            return satisfied_sc, n_feval, f_best, X[:, i_best]
+
+        monitor(L, X, f, i_best)
+
+    satisfied_sc = 0
+    return satisfied_sc, n_feval, f_best, X[:, i_best]
+
+
+def example_call(optimizer=random_lines):
+    from numpy.random import seed
+    seed(1)
+    cost = lambda x: x[0] ** 2 + x[1] ** 2
+    n = 2
+    x1 = -5 * ones(n)
+    x2 = 5 * ones(n)
+    f_opt = 0
+    cfo = {'cost': cost, 'n': n, 'x1': x1, 'x2': x2, 'f_opt': f_opt}
+
+    NP = 10 * n
+    satisfied_sc, n_feval, f_best, x_best = optimizer(cfo, NP)
+    print(satisfied_sc, "n:%d" % n_feval, f_best, x_best)
+
+
+def main():
+    print("=== Random Lines")
+    example_call(random_lines)
+    print("=== Particle Swarm")
+    example_call(particle_swarm)
+
+if __name__ == "__main__":
+    main()
diff --git a/bumps/simplex.py b/bumps/simplex.py
new file mode 100644
index 0000000..0824f47
--- /dev/null
+++ b/bumps/simplex.py
@@ -0,0 +1,352 @@
+#__docformat__ = "restructuredtext en"
+# ******NOTICE***************
+# from optimize.py module by Travis E. Oliphant
+#
+# You may copy and use this module as you see fit with no
+# guarantee implied provided you keep this notice in all copies.
+# *****END NOTICE************
+#
+# Modified by Paul Kienzle to support bounded minimization
+"""
+Downhill simplex optimizer.
+"""
+
+from __future__ import print_function
+
+__all__ = ['simplex']
+__docformat__ = "restructuredtext en"
+__version__ = "0.7"
+
+import numpy as np
+
+
+
+def wrap_function(function, bounds):
+    ncalls = [0]
+    if bounds is not None:
+        lo, hi = [np.asarray(v) for v in bounds]
+
+        def function_wrapper(x):
+            ncalls[0] += 1
+            if np.any((x < lo) | (x > hi)):
+                return np.inf
+            else:
+                # function(x)
+                return function(x)
+    else:
+        def function_wrapper(x):
+            ncalls[0] += 1
+            return function(x)
+    return ncalls, function_wrapper
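+# wrap_function counts the number of calls and, when bounds are given,
+# returns +inf for any point outside the bounds so that out-of-bounds
+# simplex vertices are treated as arbitrarily bad.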
+
+
+class Result:
+
+    """
+    Results from the fit.
+
+    x : ndarray
+        Best parameter set
+    fx : float
+        Best value
+    iters : int
+        Number of iterations
+    calls : int
+        Number of function calls
+    status : boolean
+        True if the fit completed successfully, false if terminated early
+        because of too many iterations.
+    """
+
+    def __init__(self, x, fx, iters, calls, status):
+        self.x, self.fx, self.iters, self.calls = x, fx, iters, calls
+        self.status = status
+
+    def __str__(self):
+        msg = "Converged" if self.status else "Aborted"
+        return ("%s with %g at %s after %d calls"
+                % (msg, self.fx, self.x, self.calls))
+
+
+def dont_abort():
+    return False
+
+
+def simplex(f, x0=None, bounds=None, radius=0.05,
+            xtol=1e-4, ftol=1e-4, maxiter=None,
+            update_handler=None, abort_test=dont_abort):
+    """
+    Minimize a function using Nelder-Mead downhill simplex algorithm.
+
+    This optimizer is also known as Amoeba (from Numerical Recipes) and
+    the Nelder-Mead simplex algorithm.  This is not the simplex algorithm
+    for solving constrained linear systems.
+
+    Downhill simplex is a robust derivative free algorithm for finding
+    minima.  It proceeds by choosing a set of points (the simplex) forming
+    an n-dimensional triangle, and transforming that triangle so that the
+    worst vertex is improved, either by stretching, shrinking or reflecting
+    it about the center of the triangle.  This algorithm is not known for
+    its speed, but for its simplicity and robustness, and is a good algorithm
+    to start your problem with.
+
+    *Parameters*:
+
+        f : callable f(x,*args)
+            The objective function to be minimized.
+        x0 : ndarray
+            Initial guess.
+        bounds : (ndarray,ndarray) or None
+            Bounds on the parameter values for the function.
+        radius: float
+            Size of the initial simplex.  For bounded parameters (those
+            which have finite lower and upper bounds), radius is clipped
+            to a value in (0,0.5] representing the portion of the
+            range to use as the size of the initial simplex.
+
+    *Returns*: Result (`bumps.simplex.Result`)
+
+        x : ndarray
+            Parameter that minimizes function.
+        fx : float
+            Value of function at minimum: ``fopt = func(xopt)``.
+        iters : int
+            Number of iterations performed.
+        calls : int
+            Number of function calls made.
+        success : boolean
+            True if fit completed successfully.
+
+    *Other Parameters*:
+
+        xtol : float
+            Relative error in xopt acceptable for convergence.
+        ftol : number
+            Relative error in func(xopt) acceptable for convergence.
+        maxiter : int=200*N
+            Maximum number of iterations to perform.  Defaults to 200*N.
+        update_handler : callable
+            Called after each iteration, as callback(k,n,xk,fxk),
+            where k is the current iteration, n is the maximum
+            iteration, xk is the simplex and fxk is the value of
+            the simplex vertices.  xk[0],fxk[0] is the current best.
+        abort_test : callable
+            Called after each iteration, as callback(), to see if
+            an external process has requested stop.
+
+    *Notes*
+
+        Uses a Nelder-Mead simplex algorithm to find the minimum of
+        a function of one or more variables.
+
+    """
+    fcalls, func = wrap_function(f, bounds)
+    x0 = np.asfarray(x0).flatten()
+    # print "x0",x0
+    N = len(x0)
+    rank = len(x0.shape)
+    if not -1 < rank < 2:
+        raise ValueError("Initial guess must be a scalar or rank-1 sequence.")
+
+    if maxiter is None:
+        maxiter = N * 200
+
+    rho = 1
+    chi = 2
+    psi = 0.5
+    sigma = 0.5
+
+    if rank == 0:
+        sim = np.zeros((N + 1,), dtype=x0.dtype)
+    else:
+        sim = np.zeros((N + 1, N), dtype=x0.dtype)
+    fsim = np.zeros((N + 1,), float)
+    sim[0] = x0
+    fsim[0] = func(x0)
+
+    # Metropolitan simplex: simplex has vertices at x0 and at
+    # x0 + j*radius for each unit vector j.  Radius is a percentage
+    # change from the initial value, or just the radius if the initial
+    # value is 0.  For bounded problems, the radius is a percentage of
+    # the bounded range in dimension j.
+    val = x0 * (1 + radius)
+    val[val == 0] = radius
+    if bounds is not None:
+        radius = np.clip(radius, 0, 0.5)
+        lo, hi = [np.asarray(v) for v in bounds]
+
+        # Keep the initial simplex inside the bounds
+        x0 = np.select([x0 < lo, x0 > hi], [lo, hi], x0)
+        bounded = ~np.isinf(lo) & ~np.isinf(hi)
+        val[bounded] = x0[bounded] + (hi[bounded] - lo[bounded]) * radius
+        val = np.select([val < lo, val > hi], [lo, hi], val)
+
+        # If the initial point was at or beyond an upper bound, then bounds
+        # projection will put x0 and x0+j*radius at the same point.  We
+        # need to detect these collisions and reverse the radius step
+        # direction when such collisions occur.  The only time the collision
+        # can occur at the lower bound is when upper and lower bound are
+        # identical.  In that case, we are already done.
+        collision = val == x0
+        if np.any(collision):
+            reverse = x0 * (1 - radius)
+            reverse[reverse == 0] = -radius
+            reverse[bounded] = x0[bounded] - \
+                (hi[bounded] - lo[bounded]) * radius
+            val[collision] = reverse[collision]
+
+        # Make tolerance relative for bounded parameters
+        tol = np.ones(x0.shape) * xtol
+        tol[bounded] = (hi[bounded] - lo[bounded]) * xtol
+        xtol = tol
+
+    # Compute values at the simplex vertices
+    for k in range(0, N):
+        y = x0 + 0
+        y[k] = val[k]
+        sim[k + 1] = y
+        fsim[k + 1] = func(y)
+
+    # print sim
+    ind = np.argsort(fsim)
+    fsim = np.take(fsim, ind, 0)
+    # sort so sim[0,:] has the lowest function value
+    sim = np.take(sim, ind, 0)
+    # print sim
+
+    iterations = 1
+    while iterations < maxiter:
+        if np.all(abs(sim[1:] - sim[0]) <= xtol) \
+                and max(abs(fsim[0] - fsim[1:])) <= ftol:
+            # print abs(sim[1:]-sim[0])
+            break
+
+        xbar = np.sum(sim[:-1], 0) / N
+        xr = (1 + rho) * xbar - rho * sim[-1]
+        # print "xbar" ,xbar,rho,sim[-1],N
+        # break
+        fxr = func(xr)
+        doshrink = 0
+
+        if fxr < fsim[0]:
+            xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
+            fxe = func(xe)
+
+            if fxe < fxr:
+                sim[-1] = xe
+                fsim[-1] = fxe
+            else:
+                sim[-1] = xr
+                fsim[-1] = fxr
+        else:  # fsim[0] <= fxr
+            if fxr < fsim[-2]:
+                sim[-1] = xr
+                fsim[-1] = fxr
+            else:  # fxr >= fsim[-2]
+                # Perform contraction
+                if fxr < fsim[-1]:
+                    xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
+                    fxc = func(xc)
+
+                    if fxc <= fxr:
+                        sim[-1] = xc
+                        fsim[-1] = fxc
+                    else:
+                        doshrink = 1
+                else:
+                    # Perform an inside contraction
+                    xcc = (1 - psi) * xbar + psi * sim[-1]
+                    fxcc = func(xcc)
+
+                    if fxcc < fsim[-1]:
+                        sim[-1] = xcc
+                        fsim[-1] = fxcc
+                    else:
+                        doshrink = 1
+
+                if doshrink:
+                    for j in range(1, N + 1):
+                        sim[j] = sim[0] + sigma * (sim[j] - sim[0])
+                        fsim[j] = func(sim[j])
+
+        ind = np.argsort(fsim)
+        sim = np.take(sim, ind, 0)
+        fsim = np.take(fsim, ind, 0)
+        if update_handler is not None:
+            update_handler(iterations, maxiter, sim, fsim)
+        iterations += 1
+        if abort_test():
+            break  # STOPHERE
+
+    status = 0 if iterations < maxiter else 1
+    res = Result(sim[0], fsim[0], iterations, fcalls[0], status)
+    res.next_start = sim[np.random.randint(N)]
+    return res
+
+
+def main():
+    import time
+
+    def rosen(x):  # The Rosenbrock function
+        return np.sum(100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0, axis=0)
+
+    x0 = [0.8, 1.2, 0.7]
+    print("Nelder-Mead Simplex")
+    print("===================")
+    start = time.time()
+    x = simplex(rosen, x0)
+    print(x)
+    print("Time:", time.time() - start)
+
+    x0 = [0] * 3
+    print("Nelder-Mead Simplex")
+    print("===================")
+    print("starting at zero")
+    start = time.time()
+    x = simplex(rosen, x0)
+    print(x)
+    print("Time:", time.time() - start)
+
+    x0 = [0.8, 1.2, 0.7]
+    lo, hi = [0] * 3, [1] * 3
+    print("Bounded Nelder-Mead Simplex")
+    print("===========================")
+    start = time.time()
+    x = simplex(rosen, x0, bounds=(lo, hi))
+    print(x)
+    print("Time:", time.time() - start)
+
+    x0 = [0.8, 1.2, 0.7]
+    lo, hi = [0.999] * 3, [1.001] * 3
+    print("Bounded Nelder-Mead Simplex")
+    print("===========================")
+    print("tight bounds")
+    print("simplex is smaller than 1e-7 in every dimension, but you can't")
+    print("see this without uncommenting the print statement simplex function")
+    start = time.time()
+    x = simplex(rosen, x0, bounds=(lo, hi), xtol=1e-4)
+    print(x)
+    print("Time:", time.time() - start)
+
+    x0 = [0] * 3
+    hi, lo = [-0.999] * 3, [-1.001] * 3
+    print("Bounded Nelder-Mead Simplex")
+    print("===========================")
+    print("tight bounds, x0=0 outside bounds from above")
+    start = time.time()
+    x = simplex(lambda x: rosen(-x), x0, bounds=(lo, hi), xtol=1e-4)
+    print(x)
+    print("Time:", time.time() - start)
+
+    x0 = [0.8, 1.2, 0.7]
+    lo, hi = [-np.inf] * 3, [np.inf] * 3
+    print("Bounded Nelder-Mead Simplex")
+    print("===========================")
+    print("infinite bounds")
+    start = time.time()
+    x = simplex(rosen, x0, bounds=(lo, hi), xtol=1e-4)
+    print(x)
+    print("Time:", time.time() - start)
+
+if __name__ == "__main__":
+    main()
diff --git a/bumps/util.py b/bumps/util.py
new file mode 100644
index 0000000..526aea0
--- /dev/null
+++ b/bumps/util.py
@@ -0,0 +1,290 @@
+"""
+Miscellaneous utility functions.
+"""
+from __future__ import division
+
+__all__ = ["erf", "kbhit", "profile",
+           "pushdir", "push_seed", "redirect_console"]
+
+import sys
+import os
+try:  # CRUFT: python 2.x
+    from cStringIO import StringIO
+except ImportError:
+    from io import StringIO
+
+import numpy as np
+from numpy import ascontiguousarray as _dense
+from scipy.special import erf
+
+
+def parse_errfile(errfile):
+    """
+    Parse dream statistics from a particular fit.
+
+    Returns overall chisq, list of chisq for individual models and
+    a parameter dictionary with attributes for number, name, mean, median,
+    p68 for 68% credible interval and p95 for 95% credible interval.
+
+    The parameter dictionary is keyed by parameter name.
+
+    Usually there is only one errfile in a directory, which can be
+    retrieved using::
+
+        import glob
+        errfile = glob.glob(path+'/*.err')[0]
+    """
+    from .dream.stats import parse_var
+    pars = []
+    chisq = []
+    overall = None
+    with open(errfile) as fid:
+        for line in fid:
+            if line.startswith("[overall"):
+                overall = float(line.split()[1][6:-1])
+                continue
+
+            if line.startswith("[chisq"):
+                chisq.append(float(line.split()[0][7:-1]))
+                continue
+
+            p = parse_var(line)
+            if p is not None:
+                pars.append(p)
+
+    if overall is None:
+        overall = chisq[0]
+    pardict = dict((p.name, p) for p in pars)
+    return overall, chisq, pardict
+
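+# Hypothetical usage sketch (not part of the original module): assuming a
+# completed fit left "fit/model.err" in the store directory, its statistics
+# could be summarized as follows.
+def _parse_errfile_example():  # illustrative only
+    overall, chisq, pars = parse_errfile("fit/model.err")
+    print("overall chisq: %g" % overall)
+    for name, p in sorted(pars.items()):
+        print("%s: mean=%g, 68%% interval=%s" % (name, p.mean, str(p.p68)))
+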
+
+def _c_erf(x):
+    """
+    Error function calculator.
+    """
+    from ._reduction import _erf
+    input = _dense(x, 'd')
+    output = np.empty_like(input)
+    _erf(input, output)
+    return output
+
+
+def _erf_test():
+    assert abs(erf(5) - 1.0) < 1e-10
+    assert erf(0.) == 0.
+    assert (erf(np.array([0., 0.])) == 0.).all()
+    assert abs(erf(3.) - 0.99997790950300136) < 1e-14
+
+
+def profile(fn, *args, **kw):
+    """
+    Profile a function called with the given arguments.
+    """
+    import cProfile
+    import pstats
+
+    result = [None]
+    def call():
+        result[0] = fn(*args, **kw)
+    datafile = 'profile.out'
+    cProfile.runctx('call()', dict(call=call), {}, datafile)
+    stats = pstats.Stats(datafile)
+    # order='calls'
+    order = 'cumulative'
+    # order='pcalls'
+    # order='time'
+    stats.sort_stats(order)
+    stats.print_stats()
+    os.unlink(datafile)
+    return result[0]
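+
+# Illustrative usage (an assumed example, not from the original source):
+#   profile(np.sort, np.random.rand(100000))
+# prints the cumulative-time report and returns the sorted array.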
+
+
+def kbhit():
+    """
+    Check whether a key has been pressed on the console.
+    """
+    try:  # Windows
+        import msvcrt
+        return msvcrt.kbhit()
+    except ImportError:  # Unix
+        import select
+        i, _, _ = select.select([sys.stdin], [], [], 0.0001)
+        return sys.stdin in i
+
+
+class redirect_console(object):
+    """
+    Console output redirection context
+
+    The output can be redirected to a string, to an already opened file
+    (anything with a *write* attribute), or to a filename which will be
+    opened for the duration of the with context.  Unless *stderr* is
+    specified, both standard output and standard error are redirected
+    to the same file.  The open file handle is returned on enter, and
+    (if it was not an already opened file) it is closed on exit.
+
+    If no file is specified, then output is redirected to a StringIO
+    object, which has a getvalue() method which can retrieve the string.
+    The StringIO object is deleted when the context ends, so be sure to
+    retrieve its value within the redirect_console context.
+
+    :Example:
+
+    Show that output is captured in a file:
+
+        >>> from bumps.util import redirect_console
+        >>> print("hello")
+        hello
+        >>> with redirect_console("redirect_out.log"):
+        ...     print("captured")
+        >>> print("hello")
+        hello
+        >>> print(open("redirect_out.log").read()[:-1])
+        captured
+        >>> import os; os.unlink("redirect_out.log")
+
+    Output can also be captured to a string:
+
+        >>> with redirect_console() as fid:
+        ...    print("captured to string")
+        ...    captured_string = fid.getvalue()
+        >>> print(captured_string.strip())
+        captured to string
+
+    """
+    def __init__(self, stdout=None, stderr=None):
+        self.open_files = []
+        self.sys_stdout = []
+        self.sys_stderr = []
+
+        if stdout is None:
+            self.open_files.append(StringIO())
+            self.stdout = self.open_files[-1]
+        elif hasattr(stdout, 'write'):
+            self.stdout = stdout
+        else:
+            self.open_files.append(open(stdout, 'w'))
+            self.stdout = self.open_files[-1]
+
+        if stderr is None:
+            self.stderr = self.stdout
+        elif hasattr(stderr, 'write'):
+            self.stderr = stderr
+        else:
+            self.open_files.append(open(stderr, 'w'))
+            self.stderr = self.open_files[-1]
+
+    def __del__(self):
+        for f in self.open_files:
+            f.close()
+
+    def __enter__(self):
+        self.sys_stdout.append(sys.stdout)
+        self.sys_stderr.append(sys.stderr)
+        sys.stdout = self.stdout
+        sys.stderr = self.stderr
+        return self.open_files[-1]
+
+    def __exit__(self, *args):
+        sys.stdout = self.sys_stdout[-1]
+        sys.stderr = self.sys_stderr[-1]
+        del self.sys_stdout[-1]
+        del self.sys_stderr[-1]
+        return False
+
+
+class pushdir(object):
+    """
+    Change directories for the duration of a with statement.
+
+    :Example:
+
+    Show that the original directory is restored::
+
+        >>> import sys, os
+        >>> original_wd = os.getcwd()
+        >>> with pushdir(sys.path[0]):
+        ...     pushed_wd = os.getcwd()
+        ...     first_site = os.path.abspath(sys.path[0])
+        ...     assert pushed_wd == first_site
+        >>> restored_wd = os.getcwd()
+        >>> assert original_wd == restored_wd
+    """
+    def __init__(self, path):
+        self.path = os.path.abspath(path)
+
+    def __enter__(self):
+        self._cwd = os.getcwd()
+        os.chdir(self.path)
+
+    def __exit__(self, *args):
+        os.chdir(self._cwd)
+
+
+class push_seed(object):
+    """
+    Set the seed value for the random number generator.
+
+    When used in a with statement, the random number generator state is
+    restored after the with statement is complete.
+
+    :Parameters:
+
+    *seed* : int or array_like, optional
+        Seed for RandomState
+
+    :Example:
+
+    Seed can be used directly to set the seed::
+
+        >>> from numpy.random import randint
+        >>> push_seed(24)
+        <...push_seed object at...>
+        >>> print(randint(0,1000000,3))
+        [242082    899 211136]
+
+    Seed can also be used in a with statement, which sets the random
+    number generator state for the enclosed computations and restores
+    it to the previous state on completion::
+
+        >>> with push_seed(24):
+        ...    print(randint(0,1000000,3))
+        [242082    899 211136]
+
+    Using nested contexts, we can demonstrate that state is indeed
+    restored after the block completes::
+
+        >>> with push_seed(24):
+        ...    print(randint(0,1000000))
+        ...    with push_seed(24):
+        ...        print(randint(0,1000000,3))
+        ...    print(randint(0,1000000))
+        242082
+        [242082    899 211136]
+        899
+
+    The restore step is protected against exceptions in the block::
+
+        >>> with push_seed(24):
+        ...    print(randint(0,1000000))
+        ...    try:
+        ...        with push_seed(24):
+        ...            print(randint(0,1000000,3))
+        ...            raise Exception()
+        ...    except Exception:
+        ...        print("Exception raised")
+        ...    print(randint(0,1000000))
+        242082
+        [242082    899 211136]
+        Exception raised
+        899
+    """
+    def __init__(self, seed=None):
+        self._state = np.random.get_state()
+        np.random.seed(seed)
+
+    def __enter__(self):
+        return None
+
+    def __exit__(self, *args):
+        np.random.set_state(self._state)
diff --git a/bumps/wsolve.py b/bumps/wsolve.py
new file mode 100644
index 0000000..6633675
--- /dev/null
+++ b/bumps/wsolve.py
@@ -0,0 +1,452 @@
+r"""
+Weighted linear and polynomial solver with uncertainty.
+
+Given $A \bar x = \bar y \pm \delta \bar y$, solve using *s = wsolve(A,y,dy)*
+
+*wsolve* uses the singular value decomposition for increased accuracy.
+
+The uncertainty in the solution is estimated from the scatter in the data.
+
+The returned model object *s* provides:
+
+    ======== ============================================
+    Member   Description
+    ======== ============================================
+    s.x      solution
+    s.std    uncertainty estimate assuming no correlation
+    s.rnorm  residual norm
+    s.DoF    degrees of freedom
+    s.cov    covariance matrix
+    s.ci(p)  confidence intervals at point p
+    s.pi(p)  prediction intervals at point p
+    s(p)     predicted value at point p
+    ======== ============================================
+
+Example
+=======
+
+Weighted system::
+
+    >>> import numpy as np
+    >>> from bumps import wsolve
+    >>> A = np.matrix("1,2,3;2,1,3;1,1,1",'d').A
+    >>> dy = [0.2,0.01,0.1]
+    >>> y = [ 14.16, 13.01, 6.15]
+    >>> s = wsolve.wsolve(A,y,dy)
+    >>> print(", ".join("%0.2f +/- %0.2f"%(a,b) for a,b in zip(s.x,s.std)))
+    1.05 +/- 0.17, 2.20 +/- 0.12, 2.91 +/- 0.12
+
+
+Note there is a counter-intuitive result that scaling the estimated
+uncertainty in the data does not affect the computed uncertainty in
+the fit.  This is the correct result --- if the data were indeed
+selected from a process with ten times the uncertainty, you would
+expect the scatter in the data to increase by a factor of ten as
+well.  When this new data set is fitted, it will show a computed
+uncertainty increased by the same factor.  Monte Carlo simulations
+bear this out.  The conclusion is that the dataset carries its own
+information about the variance in the data, and the weight vector
+serves only to provide relative weighting between the points.
+"""
+
+__all__ = ['wsolve', 'wpolyfit', 'LinearModel', 'PolynomialModel']
+
+# FIXME: test second example
+#
+# Example 2: weighted overdetermined system  y = x1 + 2*x2 + 3*x3 + e
+#
+#    A = fullfact([3,3,3]); xin=[1;2;3];
+#    y = A*xin; dy = rand(size(y))/50; y+=dy.*randn(size(y));
+#    [x,s] = wsolve(A,y,dy);
+#    dx = s.normr*sqrt(sumsq(inv(s.R'))'/s.df);
+#    res = [xin, x, dx]
+
+
+import numpy as np
+
+
+class LinearModel(object):
+    r"""
+    Model evaluator for linear solution to $Ax = y$.
+
+    Use *s(A)* to compute the predicted value of the linear model *s*
+    at points given on the rows of $A$.
+
+    Computes a confidence interval (range of likely values for the
+    mean at $x$) or a prediction interval (range of likely values
+    seen when measuring at $x$).  The prediction interval gives
+    the width of the distribution at $x$.  This should be the same
+    regardless of the number of measurements you have for the value
+    at $x$.  The confidence interval gives the uncertainty in the
+    mean at $x$.  It should get smaller as you increase the number of
+    measurements.  Error bars in the physical sciences usually show
+    a $1-\alpha$ confidence value with $\alpha = \text{erfc}(1/\sqrt{2})$,
+    representing a $1\sigma$ standard deviation of uncertainty in the mean.
+
+    Confidence intervals for the expected value of the linear system
+    evaluated at a new point $w$ are given by the $t$ distribution for
+    the selected interval $1-\alpha$, the solution $x$, and the number
+    of degrees of freedom $n-p$:
+
+    .. math::
+
+        w^T x \pm t^{\alpha/2}_{n-p} \sqrt{ \text{var}(w) }
+
+    where the variance $\text{var}(w)$ is given by:
+
+    .. math::
+
+        \text{var}(w) = \sigma^2 (w^T (A^TA)^{-1} w)
+
+    Prediction intervals are similar, except the variance term increases to
+    include both the uncertainty in the predicted value and the variance in
+    the data:
+
+    .. math::
+
+        \text{var}(w) = \sigma^2 (1 + w^T (A^TA)^{-1} w)
+    """
+    def __init__(self, x=None, DoF=None, SVinv=None, rnorm=None):
+        # Note: SVinv should be computed from S,V where USV' = A
+        #: solution to the equation $Ax = y$
+        self.x = x
+        #: number of degrees of freedom in the solution
+        self.DoF = DoF
+        #: 2-norm of the residuals $||y-Ax||_2$
+        self.rnorm = rnorm
+        self._SVinv = SVinv
+
+    def __call__(self, A):
+        """
+        Return the prediction for a linear system at points in the rows of A.
+        """
+        return np.dot(np.asarray(A), self.x)
+
+    # covariance matrix invC = A'A  = (USV')'USV' = VSU'USV' = VSSV'
+    # C = inv(A'A) = inv(VSSV') = inv(V')inv(SS)inv(V) = Vinv(SS)V'
+    # diag(inv(A'A)) is sum of the squares of the columns inv(S) V'
+    # and is also the sum of the squares of the rows of V inv(S)
+    @property
+    def cov(self):
+        """covariance matrix [inv(A'A); O(n^3)]"""
+        # FIXME: don't know if we need to scale by C, but it will
+        # at least make things consistent
+        C = self.rnorm**2/self.DoF if self.DoF > 0 else 1
+        return C * np.dot(self._SVinv, self._SVinv.T)
+
+    @property
+    def var(self):
+        """solution variance [diag(cov); O(n^2)]"""
+        C = self.rnorm**2/self.DoF if self.DoF > 0 else 1
+        return C * np.sum(self._SVinv**2, axis=1)
+
+    @property
+    def std(self):
+        """solution standard deviation [sqrt(var); O(n^2)]"""
+        return np.sqrt(self.var)
+
+    @property
+    def p(self):
+        """p-value probability of rejection"""
+        from scipy.stats import chi2  # lazy import in case scipy not present
+        return chi2.sf(self.rnorm ** 2, self.DoF)
+
+    def _interval(self, X, alpha, pred):
+        """
+        Helper for computing prediction/confidence intervals.
+        """
+        # Comments from QR decomposition solution to Ax = y:
+        #
+        #   Rather than A'A we have R from the QR decomposition of A, but
+        #   R'R equals A'A.  Note that R is not upper triangular since we
+        #   have already multiplied it by the permutation matrix, but it
+        #   is invertible.  Rather than forming the product R'R which is
+        #   ill-conditioned, we can rewrite x' inv(A'A) x as the equivalent
+        #      x' inv(R) inv(R') x = t t', for t = x' inv(R)
+        #
+        # We have since switched to an SVD solver, which gives us
+        #
+        #    invC = A' A  = (USV')' USV' = VSU' USV' = V S S V'
+        #    C = inv(A'A) = inv(VSSV') = inv(V') inv(S S) inv(V)
+        #      = V inv(S S) V' = V inv(S) inv(S) V'
+        #
+        # Substituting, we get
+        #
+        #    x' inv(A'A) x = t t', for t = x' V inv(S)
+        #
+        # Since x is a vector, t t' is the inner product sum(t**2).
+        # Note that LAPACK allows us to do this simultaneously for many
+        # different x using sqrt(sum(T**2,axis=1)), with T = X' Vinv(S).
+        #
+        # Note: sqrt(F(1-a;1,df)) = T(1-a/2;df)
+        #
+        from scipy.stats import t  # lazy import in case scipy not present
+        y = np.dot(X, self.x).ravel()
+        s = t.ppf(1-alpha/2, self.DoF) * self.rnorm/np.sqrt(self.DoF)
+        t = np.dot(X, self._SVinv)
+        dy = s * np.sqrt(pred + np.sum(t**2, axis=1))
+        return y, dy
+
+    def ci(self, A, sigma=1):
+        r"""
+        Compute the calculated values and the confidence intervals
+        for the linear model evaluated at $A$.
+
+        *sigma=1* corresponds to a $1-\sigma$ confidence interval
+
+        Confidence intervals are sometimes expressed as $1-\alpha$ values,
+        where $\alpha = \text{erfc}(\sigma/\sqrt{2})$.
+        """
+        from scipy.special import erfc  # lazy import in case scipy not present
+        alpha = erfc(sigma / np.sqrt(2))
+        return self._interval(np.asarray(A), alpha, 0)
+
+    def pi(self, A, p=0.05):
+        r"""
+        Compute the calculated values and the prediction intervals
+        for the linear model evaluated at $A$.
+
+        *p=0.05* corresponds to the 95% prediction interval.
+        """
+        return self._interval(np.asarray(A), p, 1)
+
+
+def wsolve(A, y, dy=1, rcond=1e-12):
+    r"""
+    Given a linear system $y = A x + \delta y$, estimates $x$ and $\delta x$.
+
+    *A* is an n x m array of measurement points.
+
+    *y* is an n x k array or vector of length n of measured values at *A*.
+
+    *dy* is a scalar or an n x 1 array of uncertainties in the values at *A*.
+
+    Returns :class:`LinearModel`.
+    """
+    # The ugliness v[:,N.newaxis] transposes a vector
+    # The ugliness N.dot(a,b) is a*b for a,b matrices
+    # The ugliness vh.T.conj() is the hermitian transpose
+
+    # Make sure inputs are arrays
+    A, y, dy = np.asarray(A), np.asarray(y), np.asarray(dy)
+    if dy.ndim == 1:
+        dy = dy[:, np.newaxis]
+    if y.ndim == 1:
+        y = y[:, np.newaxis]
+
+    # Apply weighting if dy is not a scalar
+    # If dy is a scalar, it cancels out of both sides of the equation
+    # Note: with A,dy arrays instead of matrices, A/dy operates element-wise
+    # Since dy is a column vector, this divides each row of A by the
+    # corresponding element of dy.
+    if dy.ndim == 2:
+        A, y = A/dy, y/dy
+
+    # Singular value decomposition: A = U S V.H
+    # Since A is an array, U, S, VH are also arrays
+    # The zero indicates an economy decomposition, with u nxm rather than nxn
+    u, s, vh = np.linalg.svd(A, 0)
+
+    # FIXME what to do with ill-conditioned systems?
+    #if s[-1]<rcond*s[0]: raise ValueError, "matrix is singular"
+    # s[s<rcond*s[0]] = 0.  # Can't do this because 1/s below will fail
+
+    # Solve: x = V inv(S) U.H y
+    # S diagonal elements => 1/S is inv(S)
+    # A*D, D diagonal multiplies each column of A by the corresponding diagonal
+    # D*A, D diagonal multiplies each row of A by the corresponding diagonal
+    # Computing V*inv(S) is slightly faster than inv(S)*U.H since V is smaller
+    # than U.H.  Similarly, U.H*y is somewhat faster than V*U.H
+    SVinv = vh.T.conj() / s
+    Uy = np.dot(u.T.conj(), y)
+    x = np.dot(SVinv, Uy)
+
+    DoF = y.shape[0] - x.shape[0]
+    rnorm = np.linalg.norm(y - np.dot(A, x))
+
+    return LinearModel(x=x, DoF=DoF, SVinv=SVinv, rnorm=rnorm)
+
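+# Usage sketch (illustrative only, not part of the original module): fit the
+# weighted system from the module docstring and query the returned model.
+def _wsolve_example():  # illustrative only
+    A = np.array([[1., 2., 3.], [2., 1., 3.], [1., 1., 1.]])
+    y, dy = [14.16, 13.01, 6.15], [0.2, 0.01, 0.1]
+    s = wsolve(A, y, dy)
+    print(s.x.ravel())          # solution
+    print(s.std)                # 1-sigma uncertainty in the solution
+    print(s(A).ravel())         # predicted values at the fit points
+    print(s.ci(A, sigma=1))     # values and 1-sigma confidence intervals
+    print(s.pi(A, p=0.05))      # values and 95% prediction intervals
+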
+
+def _poly_matrix(x, degree, origin=False):
+    """
+    Generate the matrix A used to fit a polynomial using a linear solver.
+    """
+    if origin:
+        n = np.array(range(degree, 0, -1))
+    else:
+        n = np.array(range(degree, -1, -1))
+    return np.asarray(x)[:, None] ** n[None, :]
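+
+# For example, _poly_matrix([1, 2], degree=2) produces the Vandermonde-style
+# matrix [[1, 1, 1], [4, 2, 1]] (columns x**2, x**1, x**0); with origin=True
+# the trailing column of ones is omitted.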
+
+
+class PolynomialModel(object):
+    r"""
+    Model evaluator for the best-fit polynomial $p(x) = y \pm \delta y$.
+
+    Use *p(x)* for PolynomialModel *p* to evaluate the polynomial at all
+    points in the vector *x*.
+    """
+
+    def __init__(self, x, y, dy, s, origin=False):
+        self.x, self.y, self.dy = [np.asarray(v) for v in (x, y, dy)]
+        #: True if polynomial goes through the origin
+        self.origin = origin
+        #: polynomial coefficients
+        self.coeff = np.ravel(s.x)
+        if origin:
+            self.coeff = np.hstack((self.coeff, 0))
+        #: polynomial degree
+        self.degree = len(self.coeff) - 1
+        #: number of degrees of freedom in the solution
+        self.DoF = s.DoF
+        #: 2-norm of the residuals $||y-Ax||_2$
+        self.rnorm = s.rnorm
+        self._conf = s
+
+    @property
+    def cov(self):
+        """
+        covariance matrix
+
+        Note that the ones column will be absent if *origin* is True.
+        """
+        return self._conf.cov
+
+    @property
+    def var(self):
+        """solution variance"""
+        return self._conf.var
+
+    @property
+    def std(self):
+        """solution standard deviation"""
+        return self._conf.std
+
+    @property
+    def p(self):
+        """p-value probability of rejection"""
+        return self._conf.p
+
+    def __call__(self, x):
+        """
+        Evaluate the polynomial at x.
+        """
+        return np.polyval(self.coeff, x)
+
+    def der(self, x):
+        """
+        Evaluate the polynomial derivative at x.
+        """
+        return np.polyval(np.polyder(self.coeff), x)
+
+    def ci(self, x, sigma=1):
+        """
+        Evaluate the polynomial and the confidence intervals at x.
+
+        sigma=1 corresponds to a 1-sigma confidence interval
+        """
+        A = _poly_matrix(x, self.degree, self.origin)
+        return self._conf.ci(A, sigma)
+
+    def pi(self, x, p=0.05):
+        """
+        Evaluate the polynomial and the prediction intervals at x.
+
+        p = 1-alpha = 0.05 corresponds to 95% prediction interval
+        """
+        A = _poly_matrix(x, self.degree, self.origin)
+        return self._conf.pi(A, p)
+
+    def __str__(self):
+        # TODO: better polynomial pretty printing using formatnum
+        return "Polynomial(%s)" % self.coeff
+
+
+    def plot(self, ci=1, pi=0):
+        import pylab
+        x = np.linspace(np.min(self.x), np.max(self.x), 200)
+        y = self.__call__(x)
+        pylab.errorbar(self.x, self.y, self.dy, fmt='b.')
+        pylab.plot(x, y, 'b-', hold=True)
+        if ci > 0:
+            _, cdy = self.ci(x, ci)
+            pylab.plot(x, y + cdy, 'b-.', x, y - cdy, 'b-.', hold=True)
+        if pi > 0:
+            py, pdy = self.pi(x, pi)
+            pylab.plot(x, y + pdy, 'b-.', x, y - pdy, 'b-.', hold=True)
+
+def wpolyfit(x, y, dy=1, degree=None, origin=False):
+    r"""
+    Return the polynomial of degree $n$ that minimizes $\sum(p(x_i) - y_i)^2/\sigma_i^2$.
+
+    If *origin* is True, the fit is constrained to pass through the origin.
+
+    Returns :class:`PolynomialModel`.
+    """
+    assert degree is not None, "Missing degree argument to wpolyfit"
+
+    A = _poly_matrix(x, degree, origin)
+    s = wsolve(A, y, dy)
+    return PolynomialModel(x, y, dy, s, origin=origin)
+
+
+def demo():
+    """
+    Fit a random cubic polynomial.
+    """
+    import pylab
+
+    # Make fake data
+    x = np.linspace(-15, 5, 15)
+    th = np.polyval([.2, 3, 1, 5], x)  # polynomial
+    dy = np.sqrt(np.abs(th))        # Poisson uncertainty estimate
+    y = np.random.normal(th, dy)    # but normal generator
+
+    # Fit to a polynomial
+    poly = wpolyfit(x, y, dy=dy, degree=3)
+    poly.plot()
+    pylab.show()
+
+def demo2():
+    import pylab
+    x = [1,2,3]
+    y = [10, 8, 6]
+    dy = [1, 3, 1]
+    poly = wpolyfit(x,y,dy=dy, degree=1)
+    poly.plot()
+    pylab.show()
+
+
+def test():
+    """
+    Check that results are correct for a known problem.
+    """
+    x = np.array([0, 1, 2, 3, 4], 'd')
+    y = np.array([2.5, 7.9, 13.9, 21.1, 44.4], 'd')
+    dy = np.array([1.7, 2.4, 3.6, 4.8, 6.2], 'd')
+    poly = wpolyfit(x, y, dy, 1)
+    px = np.array([1.5], 'd')
+    _, pi = poly.pi(px)  # Same y is returned from pi and ci
+    py, ci = poly.ci(px)
+
+    # Uncomment these to show target values
+    # print "Tp = [%.16g, %.16g]"%(p[0],p[1])
+    # print "Tdp = [%.16g, %.16g]"%(dp[0],dp[1])
+    # print "Tpi,Tci = %.16g, %.16g"%(pi,ci)
+    Tp = np.array([7.787249069840737, 1.503992847461524])
+    Tdp = np.array([1.522338103010216, 2.117633626902384])
+    Tpi, Tci = 7.611128464981324, 2.342860389884832
+
+    perr = np.max(np.abs(poly.coeff - Tp))
+    dperr = np.max(np.abs(poly.std - Tdp))
+    cierr = np.abs(ci - Tci)
+    pierr = np.abs(pi - Tpi)
+    assert perr < 1e-14, "||p-Tp||=%g" % perr
+    assert dperr < 1e-14, "||dp-Tdp||=%g" % dperr
+    assert cierr < 1e-14, "||ci-Tci||=%g" % cierr
+    assert pierr < 1e-14, "||pi-Tpi||=%g" % pierr
+    assert py == poly(px), "direct call to poly function fails"
+
+if __name__ == "__main__":
+#    test()
+#    demo()
+    demo2()
diff --git a/check_examples.py b/check_examples.py
new file mode 100755
index 0000000..4035179
--- /dev/null
+++ b/check_examples.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+import sys
+import os
+
+sys.dont_write_bytecode = True
+
+ROOT = os.path.abspath(os.path.dirname(__file__))
+CLI = "%s %s/bin/bumps %%s %%s" % (sys.executable, ROOT)
+EXAMPLEDIR = os.path.join(ROOT, 'doc', 'examples')
+
+# Add the build dir to the system path
+packages = [ROOT]
+if 'PYTHONPATH' in os.environ:
+    packages.append(os.environ['PYTHONPATH'])
+os.environ['PYTHONPATH'] = os.pathsep.join(packages)
+
+
+class Commands(object):
+
+    @staticmethod
+    def preview(f):
+        return os.system(CLI % (f, '--preview --seed=1'))
+
+    @staticmethod
+    def edit(f):
+        return os.system(CLI % (f, '--edit --seed=1'))
+
+    @staticmethod
+    def chisq(f):
+        return os.system(CLI % (f, '--chisq --seed=1'))
+
+examples = [
+    "peaks/model.py",
+    "curvefit/curve.py",
+    "constraints/model.py",
+    "constraints/gmodel.py",
+    "test_functions/anticor.py",
+    #"test_functions/model.py",
+]
+
+
+def main():
+    if len(sys.argv) == 1 or not hasattr(Commands, sys.argv[1][2:]):
+        print("usage: check_examples.py [--preview|--edit|--chisq]")
+    else:
+        command = getattr(Commands, sys.argv[1][2:])
+        for f in examples:
+            print("\n" + f)
+            if command(os.path.join(EXAMPLEDIR, f)) != 0:
+                # break
+                pass
+
+if __name__ == "__main__":
+    main()
diff --git a/check_fitters.py b/check_fitters.py
new file mode 100755
index 0000000..7394de9
--- /dev/null
+++ b/check_fitters.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+"""
+Run each fitter on the 3 dimensional Rosenbrock function to make sure they
+all converge.
+"""
+from __future__ import print_function
+
+import sys
+import os
+import tempfile
+import shutil
+import glob
+from os.path import join as joinpath, realpath, dirname
+import traceback
+import subprocess
+
+sys.dont_write_bytecode = True
+
+# Ask bumps for a list of available fitters
+ROOT = realpath(dirname(__file__))
+sys.path.insert(0, ROOT)
+from bumps.fitters import FIT_AVAILABLE_IDS
+
+RUNPY = joinpath(ROOT, 'run.py')
+EXAMPLEDIR = joinpath(ROOT, 'doc', 'examples')
+
+def clear_directory(path, recursive=False):
+    """
+    Remove all regular files in a directory.
+
+    If *recursive* is True, removes subdirectories as well.
+
+    This does not remove the directory itself.  Use *shutil.rmtree* if
+    you want to delete the entire tree.
+    """
+    for f in os.listdir(path):
+        target = joinpath(path, f)
+        if not os.path.isdir(target):
+            os.unlink(target)
+        elif recursive:
+            clear_directory(target, recursive)
+            os.rmdir(target)
+
+def run_fit(fit_args, model_args, store, seed=1):
+    command_parts = ([sys.executable, RUNPY] + fit_args + model_args
+                     + ['--store='+store, '--seed=%d'%seed, '--batch'])
+    try:
+        output = subprocess.check_output(command_parts, stderr=subprocess.STDOUT)
+        output = output.strip()
+        if output: print(output.strip())
+    except subprocess.CalledProcessError as exc:
+        output = exc.output.strip()
+        if output: print(output)
+        if "KeyboardInterrupt" in output:
+            raise KeyboardInterrupt()
+        else:
+            raise RuntimeError("fit failed:\n" + " ".join(command_parts))
+
+def check_fit(fitter, store, targets):
+    errfiles = glob.glob(joinpath(store, "*.err"))
+    if not errfiles:
+        raise ValueError("error in %s: no err file created"%fitter)
+    elif len(errfiles) > 1:
+        raise ValueError("error in %s: too many err files created"%fitter)
+    model_index = 0
+    with open(errfiles[0]) as fid:
+        for line in fid:
+            if line.startswith("[chisq="):
+                value = float(line[7:].split("(")[0])
+                assert abs(value-targets[model_index]) < 1e-2, \
+                    "error in %s: expected %.3f but got %.3f" \
+                    % (fitter, targets[model_index], value)
+                model_index += 1
+    assert model_index == len(targets), \
+        "error in %s: not enough models found"%fitter
+
+
+def run_fits(model_args, store, fitters=FIT_AVAILABLE_IDS, seed=1):
+    failed = []
+    for f in fitters:
+        print("====== fitter: %s"%f)
+        try:
+            run_fit(["--fit="+f], model_args, store, seed=seed)
+            check_fit(f, store, [0.0])
+        except Exception as exc:
+            print(exc)
+            failed.append(f)
+        clear_directory(store)
+    return failed
+
+def main():
+    fitters = sys.argv[1:] if len(sys.argv) > 1 else FIT_AVAILABLE_IDS
+    store = tempfile.mkdtemp(prefix="bumps-test-")
+    model = joinpath(EXAMPLEDIR, "test_functions", "model.py")
+    #model_args = [model, '"fk(rosenbrock, 3)"']
+    model_args = [model, 'gauss', '3']
+    seed = 1
+    failed = run_fits(model_args, store, fitters=fitters, seed=seed)
+    shutil.rmtree(store)
+    if failed:
+        print("======")
+        print("Fits failed for: %s"%(", ".join(failed),))
+        sys.exit(1)
+
+if __name__ == "__main__":
+    main()
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 0000000..f0f04a6
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,139 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+PYTHON        ?= python
+SPHINXOPTS    =
+SPHINXBUILD   = $(PYTHON) -m sphinx.__init__
+#sphinx-build
+PAPER         =
+BUILDDIR      = _build
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp \
+        devhelp epub latex pdf text man changes linkcheck doctest
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  clean      to initialize the build area"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  pdf        to make LaTeX files and run them through pdflatex"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	# Cleaning matplotlib plots from build
+	# Cleaning html/pdf from $(BUILDDIR)
+	# Resetting api/ and tutorial/
+	-rm -rf $(BUILDDIR) api tutorial
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/bumpsdocumentation.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/bumpsdocumentation.qhc"
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/bumpsdocumentation"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/bumpsdocumentation"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make pdf' here to do that automatically)."
+
+pdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through (pdf)latex..."
+	make -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+	cp $(BUILDDIR)/latex/Bumps.pdf $(BUILDDIR)/html
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
+
diff --git a/doc/_extensions/README.txt b/doc/_extensions/README.txt
new file mode 100644
index 0000000..d60b0c9
--- /dev/null
+++ b/doc/_extensions/README.txt
@@ -0,0 +1,4 @@
+dollarmath   - replace $tex$ with :math:`tex`
+slink        - templated links, with %(var)s by the value of var
+wx_directive - generate annotated screen shots for wx panels in a frame
+png          - helper module for wx_directive; copy of PyPng 0.11
diff --git a/doc/_extensions/dollarmath.py b/doc/_extensions/dollarmath.py
new file mode 100644
index 0000000..63b410b
--- /dev/null
+++ b/doc/_extensions/dollarmath.py
@@ -0,0 +1,47 @@
+# This program is public domain
+# Author: Paul Kienzle
+r"""
+Allow $math$ markup in text and docstrings, ignoring \$.
+
+The $math$ markup should be separated from the surrounding text by spaces.  To
+embed markup within a word, place backslash-space before and after.  For
+convenience, the final $ can be followed by punctuation (period, comma or
+semicolon).
+"""
+
+import re
+_dollar = re.compile(r"(?:^|(?<=\s))[$]([^\n]*?)(?<![\\])[$](?:$|(?=\s|[.,;:?\\()]))")
+_notdollar = re.compile(r"\\[$]")
+
+def replace_dollar(content):
+    content = _dollar.sub(r":math:`\1`",content)
+    content = _notdollar.sub("$", content)
+    return content
+
+def rewrite_rst(app, docname, source):
+    source[0] = replace_dollar(source[0])
+
+def rewrite_autodoc(app, what, name, obj, options, lines):
+    lines[:] = [replace_dollar(L) for L in lines]
+
+def setup(app):
+    app.connect('source-read', rewrite_rst)
+    app.connect('autodoc-process-docstring', rewrite_autodoc)
+
+
+def test_dollar():
+    assert replace_dollar(u"no dollar")==u"no dollar"
+    assert replace_dollar(u"$only$")==u":math:`only`"
+    assert replace_dollar(u"$first$ is good")==u":math:`first` is good"
+    assert replace_dollar(u"so is $last$")==u"so is :math:`last`"
+    assert replace_dollar(u"and $mid$ too")==u"and :math:`mid` too"
+    assert replace_dollar(u"$first$, $mid$, $last$")==u":math:`first`, :math:`mid`, :math:`last`"
+    assert replace_dollar(u"dollar\$ escape")==u"dollar$ escape"
+    assert replace_dollar(u"dollar \$escape\$ too")==u"dollar $escape$ too"
+    assert replace_dollar(u"emb\ $ed$\ ed")==u"emb\ :math:`ed`\ ed"
+    assert replace_dollar(u"$first$a")==u"$first$a"
+    assert replace_dollar(u"a$last$")==u"a$last$"
+    assert replace_dollar(u"a $mid$dle a")==u"a $mid$dle a"
+
+if __name__ == "__main__":
+    test_dollar()
diff --git a/doc/_extensions/png.py b/doc/_extensions/png.py
new file mode 100755
index 0000000..bcea7e0
--- /dev/null
+++ b/doc/_extensions/png.py
@@ -0,0 +1,3445 @@
+#!/usr/bin/env python
+
+# $URL: http://pypng.googlecode.com/svn/trunk/code/png.py $
+# $Rev: 201 $
+
+# png.py - PNG encoder/decoder in pure Python
+#
+# Copyright (C) 2006 Johann C. Rocholl <johann at browsershots.org>
+# Portions Copyright (C) 2009 David Jones <drj at pobox.com>
+# And probably portions Copyright (C) 2006 Nicko van Someren <nicko at nicko.org>
+#
+# Original concept by Johann C. Rocholl.
+#
+# LICENSE (The MIT License)
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation files
+# (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge,
+# publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#
+# Changelog (recent first):
+# 2009-03-11 David: interlaced bit depth < 8 (writing).
+# 2009-03-10 David: interlaced bit depth < 8 (reading).
+# 2009-03-04 David: Flat and Boxed pixel formats.
+# 2009-02-26 David: Palette support (writing).
+# 2009-02-23 David: Bit-depths < 8; better PNM support.
+# 2006-06-17 Nicko: Reworked into a class, faster interlacing.
+# 2006-06-17 Johann: Very simple prototype PNG decoder.
+# 2006-06-17 Nicko: Test suite with various image generators.
+# 2006-06-17 Nicko: Alpha-channel, grey-scale, 16-bit/plane support.
+# 2006-06-15 Johann: Scanline iterator interface for large input files.
+# 2006-06-09 Johann: Very simple prototype PNG encoder.
+
+# Incorporated into Bangai-O Development Tools by drj on 2009-02-11 from
+# http://trac.browsershots.org/browser/trunk/pypng/lib/png.py?rev=2885
+
+# Incorporated into pypng by drj on 2009-03-12 from
+# //depot/prj/bangaio/master/code/png.py#67
+
+
+"""
+Pure Python PNG Reader/Writer
+
+This Python module implements support for PNG images (see PNG
+specification at http://www.w3.org/TR/2003/REC-PNG-20031110/ ). It reads
+and writes PNG files with all allowable bit depths (1/2/4/8/16/24/32/48/64
+bits per pixel) and colour combinations: greyscale (1/2/4/8/16 bit); RGB,
+RGBA, LA (greyscale with alpha) with 8/16 bits per channel; colour mapped
+images (1/2/4/8 bit).  Adam7 interlacing is supported for reading and
+writing.  A number of optional chunks can be specified (when writing)
+and understood (when reading): ``tRNS``, ``bKGD``, ``gAMA``.
+
+For help, type ``import png; help(png)`` in your python interpreter.
+
+A good place to start is the :class:`Reader` and :class:`Writer` classes.
+
+Requires Python 2.3.  Limited support is available for Python 2.2, but
+not everything works.  Best with Python 2.4 and higher.  Installation is
+trivial, but see the ``README.txt`` file (with the source distribution)
+for details.
+
+This file can also be used as a command-line utility to convert
+`Netpbm <http://netpbm.sourceforge.net/>`_ PNM files to PNG, and the reverse conversion from PNG to
+PNM. The interface is similar to that of the ``pnmtopng`` program from
+Netpbm.  Type ``python png.py --help`` at the shell prompt
+for usage and a list of options.
+
+A note on spelling and terminology
+----------------------------------
+
+Generally British English spelling is used in the documentation.  So
+that's "greyscale" and "colour".  This not only matches the author's
+native language, it's also used by the PNG specification.
+
+The major colour models supported by PNG (and hence by PyPNG) are:
+greyscale, RGB, greyscale--alpha, RGB--alpha.  These are sometimes
+referred to using the abbreviations: L, RGB, LA, RGBA.  In this case
+each letter abbreviates a single channel: *L* is for Luminance or Luma or
+Lightness which is the channel used in greyscale images; *R*, *G*, *B* stand
+for Red, Green, Blue, the components of a colour image; *A* stands for
+Alpha, the opacity channel (used for transparency effects, but higher
+values are more opaque, so it makes sense to call it opacity).
+
+A note on formats
+-----------------
+
+When getting pixel data out of this module (reading) and presenting
+data to this module (writing) there are a number of ways the data could
+be represented as a Python value.  Generally this module uses one of
+three formats called "flat row flat pixel", "boxed row flat pixel", and
+"boxed row boxed pixel".  Basically the concern is whether each pixel
+and each row comes in its own little tuple (box), or not.
+
+Consider an image that is 3 pixels wide by 2 pixels high, and each pixel
+has RGB components:
+
+Boxed row flat pixel::
+
+  list([R,G,B, R,G,B, R,G,B],
+       [R,G,B, R,G,B, R,G,B])
+
+Each row appears as its own list, but the pixels are flattened so that
+three values for one pixel simply follow the three values for the previous
+pixel.  This is the most common format used, because it provides a good
+compromise between space and convenience.  PyPNG regards itself as
+at liberty to replace any sequence type with any sufficiently compatible
+other sequence type; in practice each row is an array (from the array
+module), and the outer list is sometimes an iterator rather than an
+explicit list (so that streaming is possible).
+
+Flat row flat pixel::
+
+  [R,G,B, R,G,B, R,G,B,
+   R,G,B, R,G,B, R,G,B]
+
+The entire image is one single giant sequence of colour values.
+Generally an array will be used (to save space), not a list.
+
+Boxed row boxed pixel::
+
+  list([ (R,G,B), (R,G,B), (R,G,B) ],
+       [ (R,G,B), (R,G,B), (R,G,B) ])
+
+Each row appears in its own list, but each pixel also appears in its own
+tuple.  A serious memory burn in Python.
+
+In all cases the top row comes first, and for each row the pixels are
+ordered from left-to-right.  Within a pixel the values appear in the
+order, R-G-B-A (or L-A for greyscale--alpha).
+
+There is a fourth format, mentioned because it is used internally,
+is close to what lies inside a PNG file itself, and has some support
+from the public API.  This format is called packed.  When packed,
+each row is a sequence of bytes (integers from 0 to 255), just as
+it is before PNG scanline filtering is applied.  When the bit depth
+is 8 this is essentially the same as boxed row flat pixel; when the
+bit depth is less than 8, several pixels are packed into each byte;
+when the bit depth is 16 (the only value more than 8 that is supported
+by the PNG image format) each pixel value is decomposed into 2 bytes
+(and `packed` is a misnomer).  This format is used by the
+:meth:`Writer.write_packed` method.  It isn't usually a convenient
+format, but may be just right if the source data for the PNG image
+comes from something that uses a similar format (for example, 1-bit
+BMPs, or another PNG file).
+
+And now, my famous members
+--------------------------
+"""
+
+# http://www.python.org/doc/2.2.3/whatsnew/node5.html
+from __future__ import generators
+
+__version__ = "$URL: http://pypng.googlecode.com/svn/trunk/code/png.py $ $Rev: 201 $"
+
+from array import array
+try: # See :pyver:old
+    import itertools
+except:
+    pass
+import math
+# http://www.python.org/doc/2.4.4/lib/module-operator.html
+import operator
+import struct
+import sys
+import zlib
+# http://www.python.org/doc/2.4.4/lib/module-warnings.html
+import warnings
+
+
+__all__ = ['Reader', 'Writer', 'write_chunks']
+
+
+# The PNG signature.
+# http://www.w3.org/TR/PNG/#5PNG-file-signature
+_signature = struct.pack('8B', 137, 80, 78, 71, 13, 10, 26, 10)
+
+_adam7 = ((0, 0, 8, 8),
+          (4, 0, 8, 8),
+          (0, 4, 4, 8),
+          (2, 0, 4, 4),
+          (0, 2, 2, 4),
+          (1, 0, 2, 2),
+          (0, 1, 1, 2))
+
+def group(s, n):
+    # See
+    # http://www.python.org/doc/2.6/library/functions.html#zip
+    return zip(*[iter(s)]*n)
+
+def isarray(x):
+    """Same as ``isinstance(x, array)`` except on Python 2.2, where it
+    always returns ``False``.  This helps PyPNG work on Python 2.2.
+    """
+
+    try:
+        return isinstance(x, array)
+    except:
+        return False
+
+try:  # see :pyver:old
+    array.tostring
+except:
+    def tostring(row):
+        l = len(row)
+        return struct.pack('%dB' % l, *row)
+else:
+    def tostring(row):
+        """Convert row of bytes to string.  Expects `row` to be an
+        ``array``.
+        """
+        return row.tostring()
+
+
+def interleave_planes(ipixels, apixels, ipsize, apsize):
+    """
+    Interleave (colour) planes, e.g. RGB + A = RGBA.
+
+    Return an array of pixels consisting of the `ipsize` elements of data
+    from each pixel in `ipixels` followed by the `apsize` elements of data
+    from each pixel in `apixels`.  Conventionally `ipixels` and
+    `apixels` are byte arrays so the sizes are bytes, but it actually
+    works with any arrays of the same type.  The returned array is the
+    same type as the input arrays which should be the same type as each other.
+    """
+
+    itotal = len(ipixels)
+    atotal = len(apixels)
+    newtotal = itotal + atotal
+    newpsize = ipsize + apsize
+    # Set up the output buffer
+    # See http://www.python.org/doc/2.4.4/lib/module-array.html#l2h-1356
+    out = array(ipixels.typecode)
+    # It's annoying that there is no cheap way to set the array size :-(
+    out.extend(ipixels)
+    out.extend(apixels)
+    # Interleave in the pixel data
+    for i in range(ipsize):
+        out[i:newtotal:newpsize] = ipixels[i:itotal:ipsize]
+    for i in range(apsize):
+        out[i+ipsize:newtotal:newpsize] = apixels[i:atotal:apsize]
+    return out
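+
+# For example, interleaving two RGB pixels with two alpha values,
+#   interleave_planes(array('B', [1,2,3, 4,5,6]), array('B', [9, 8]), 3, 1)
+# yields array('B', [1,2,3,9, 4,5,6,8]).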
+
+def check_palette(palette):
+    """Check a palette argument (to the :class:`Writer` class) for validity.
+    Returns the palette as a list if okay; raises an exception otherwise.
+    """
+
+    # None is the default and is allowed.
+    if palette is None:
+        return None
+
+    p = list(palette)
+    if not (0 < len(p) <= 256):
+        raise ValueError("a palette must have between 1 and 256 entries")
+    seen_triple = False
+    for i,t in enumerate(p):
+        if len(t) not in (3,4):
+            raise ValueError(
+              "palette entry %d: entries must be 3- or 4-tuples." % i)
+        if len(t) == 3:
+            seen_triple = True
+        if seen_triple and len(t) == 4:
+            raise ValueError(
+              "palette entry %d: all 4-tuples must precede all 3-tuples" % i)
+        for x in t:
+            if int(x) != x or not(0 <= x <= 255):
+                raise ValueError(
+                  "palette entry %d: values must be integer: 0 <= x <= 255" % i)
+    return p
+
+class Error(Exception):
+    prefix = 'Error'
+    def __str__(self):
+        return self.prefix + ': ' + ' '.join(self.args)
+
+class FormatError(Error):
+    """Problem with input file format.  In other words, PNG file does
+    not conform to the specification in some way and is invalid.
+    """
+
+    prefix = 'FormatError'
+
+class ChunkError(FormatError):
+    prefix = 'ChunkError'
+
+
+class Writer:
+    """
+    PNG encoder in pure Python.
+    """
+
+    def __init__(self, width=None, height=None,
+                 size=None,
+                 greyscale=False,
+                 alpha=False,
+                 bitdepth=8,
+                 palette=None,
+                 transparent=None,
+                 background=None,
+                 gamma=None,
+                 compression=None,
+                 interlace=False,
+                 bytes_per_sample=None, # deprecated
+                 planes=None,
+                 colormap=None,
+                 maxval=None,
+                 chunk_limit=2**20):
+        """
+        Create a PNG encoder object.
+
+        Arguments:
+
+        width, height
+          Image size in pixels, as two separate arguments.
+        size
+          Image size (w,h) in pixels, as single argument.
+        greyscale
+          Input data is greyscale, not RGB.
+        alpha
+          Input data has alpha channel (RGBA or LA).
+        bitdepth
+          Bit depth: from 1 to 16.
+        palette
+          Create a palette for a colour mapped image (colour type 3).
+        transparent
+          Specify a transparent colour (create a ``tRNS`` chunk).
+        background
+          Specify a default background colour (create a ``bKGD`` chunk).
+        gamma
+          Specify a gamma value (create a ``gAMA`` chunk).
+        compression
+          zlib compression level (1-9).
+        interlace
+          Create an interlaced image.
+        chunk_limit
+          Write multiple ``IDAT`` chunks to save memory.
+
+        The image size (in pixels) can be specified either by using the
+        `width` and `height` arguments, or with the single `size`
+        argument.  If `size` is used it should be a pair (*width*,
+        *height*).
+
+        `greyscale` and `alpha` are booleans that specify whether
+        an image is greyscale (or colour), and whether it has an
+        alpha channel (or not).
+
+        `bitdepth` specifies the bit depth of the source pixel values.
+        Each source pixel value must be an integer between 0 and
+        ``2**bitdepth-1``.  For example, 8-bit images have values
+        between 0 and 255.  PNG only stores images with bit depths of
+        1,2,4,8, or 16.  When `bitdepth` is not one of these values,
+        the next highest valid bit depth is selected, and an ``sBIT``
+        (significant bits) chunk is generated that specifies the original
+        precision of the source image.  In this case the supplied pixel
+        values will be rescaled to fit the range of the selected bit depth.
+
+        The details of which bit depth / colour model combinations the
+        PNG file format supports directly, are allowed are somewhat arcane
+        (refer to the PNG specification for full details).  Briefly:
+        "small" bit depths (1,2,4) are only allowed with greyscale and
+        colour mapped images; colour mapped images cannot have bit depth
+        16.
+
+        For colour mapped images (in other words, when the `palette`
+        argument is specified) the `bitdepth` argument must match one of
+        the valid PNG bit depths: 1, 2, 4, or 8.  (It is valid to have a
+        PNG image with a palette and an ``sBIT`` chunk, but the meaning
+        is slightly different; it would be awkward to press the
+        `bitdepth` argument into service for this.)
+
+        The `palette` option, when specified, causes a colour mapped image
+        to be created: the PNG colour type is set to 3; greyscale
+        must not be set; alpha must not be set; transparent must
+        not be set; the bit depth must be 1,2,4, or 8.  When a colour
+        mapped image is created, the pixel values are palette indexes
+        and the `bitdepth` argument specifies the size of these indexes
+        (not the size of the colour values in the palette).
+
+        The palette argument value should be a sequence of 3- or
+        4-tuples.  3-tuples specify RGB palette entries; 4-tuples
+        specify RGBA palette entries.  If both 4-tuples and 3-tuples
+        appear in the sequence then all the 4-tuples must come
+        before all the 3-tuples.  A ``PLTE`` chunk is created; if there
+        are 4-tuples then a ``tRNS`` chunk is created as well.  The
+        ``PLTE`` chunk will contain all the RGB triples in the same
+        sequence; the ``tRNS`` chunk will contain the alpha channel for
+        all the 4-tuples, in the same sequence.  Palette entries
+        are always 8-bit.
+
+        If specified, the `transparent` and `background` parameters must
+        be a tuple with three integer values for red, green, blue, or
+        a simple integer (or singleton tuple) for a greyscale image.
+
+        If specified, the `gamma` parameter must be a positive number
+        (generally, a float).  A ``gAMA`` chunk will be created.  Note that
+        this will not change the values of the pixels as they appear in
+        the PNG file; they are assumed to have already been converted
+        appropriately for the gamma specified.
+
+        The `compression` argument specifies the compression level
+        to be used by the ``zlib`` module.  Higher values are likely
+        to compress better, but will be slower to compress.  The
+        default for this argument is ``None``; this does not mean
+        no compression, rather it means that the default from the
+        ``zlib`` module is used (which is generally acceptable).
+
+        If `interlace` is true then an interlaced image is created
+        (using PNG's so far only interlace method, *Adam7*).  This does not
+        affect how the pixels should be presented to the encoder, rather
+        it changes how they are arranged into the PNG file.  On slow
+        connexions interlaced images can be partially decoded by the
+        browser to give a rough view of the image that is successively
+        refined as more image data appears.
+
+        .. note ::
+
+          Enabling the `interlace` option requires the entire image
+          to be processed in working memory.
+
+        `chunk_limit` is used to limit the amount of memory used whilst
+        compressing the image.  In order to avoid using large amounts of
+        memory, multiple ``IDAT`` chunks may be created.
+        """
+
+        # At the moment the `planes` argument is ignored;
+        # its purpose is to act as a dummy so that
+        # ``Writer(x, y, **info)`` works, where `info` is a dictionary
+        # returned by Reader.read and friends.
+        # Ditto for `colormap`.
+
+        # A couple of helper functions come first.  Best skipped if you
+        # are reading through.
+
+        def isinteger(x):
+            try:
+                return int(x) == x
+            except:
+                return False
+
+        def check_color(c, which):
+            """Checks that a colour argument for transparent or
+            background options is the right form.  Also "corrects" bare
+            integers to 1-tuples.
+            """
+
+            if c is None:
+                return c
+            if greyscale:
+                try:
+                    l = len(c)
+                except TypeError:
+                    c = (c,)
+                if len(c) != 1:
+                    raise ValueError("%s for greyscale must be 1-tuple" %
+                        which)
+                if not isinteger(c[0]):
+                    raise ValueError(
+                        "%s colour for greyscale must be integer" %
+                        which)
+            else:
+                if not (len(c) == 3 and
+                        isinteger(c[0]) and
+                        isinteger(c[1]) and
+                        isinteger(c[2])):
+                    raise ValueError(
+                        "%s colour must be a triple of integers" %
+                        which)
+            return c
+
+        if size:
+            if len(size) != 2:
+                raise ValueError(
+                  "size argument should be a pair (width, height)")
+            if width is not None and width != size[0]:
+                raise ValueError(
+                  "size[0] (%r) and width (%r) should match when both are used."
+                    % (size[0], width))
+            if height is not None and height != size[1]:
+                raise ValueError(
+                  "size[1] (%r) and height (%r) should match when both are used."
+                    % (size[1], height))
+            width,height = size
+        del size
+
+        if width <= 0 or height <= 0:
+            raise ValueError("width and height must be greater than zero")
+        if not isinteger(width) or not isinteger(height):
+            raise ValueError("width and height must be integers")
+        # http://www.w3.org/TR/PNG/#7Integers-and-byte-order
+        if width > 2**32-1 or height > 2**32-1:
+            raise ValueError("width and height cannot exceed 2**32-1")
+
+        if alpha and transparent is not None:
+            raise ValueError(
+                "transparent colour not allowed with alpha channel")
+
+        if bytes_per_sample is not None:
+            warnings.warn('please use bitdepth instead of bytes_per_sample',
+                          DeprecationWarning)
+            if bytes_per_sample not in (0.125, 0.25, 0.5, 1, 2):
+                raise ValueError(
+                    "bytes per sample must be .125, .25, .5, 1, or 2")
+            bitdepth = int(8*bytes_per_sample)
+        del bytes_per_sample
+        if not isinteger(bitdepth) or bitdepth < 1 or 16 < bitdepth:
+            raise ValueError("bitdepth (%r) must be a postive integer <= 16" %
+              bitdepth)
+
+        self.rescale = None
+        if palette:
+            if bitdepth not in (1,2,4,8):
+                raise ValueError("with palette, bitdepth must be 1, 2, 4, or 8")
+            if transparent is not None:
+                raise ValueError("transparent and palette not compatible")
+            if alpha:
+                raise ValueError("alpha and palette not compatible")
+            if greyscale:
+                raise ValueError("greyscale and palette not compatible")
+        else:
+            # No palette, check for sBIT chunk generation.
+            if alpha or not greyscale:
+                if bitdepth not in (8,16):
+                    targetbitdepth = (8,16)[bitdepth > 8]
+                    self.rescale = (bitdepth, targetbitdepth)
+                    bitdepth = targetbitdepth
+                    del targetbitdepth
+            else:
+                assert greyscale
+                assert not alpha
+                if bitdepth not in (1,2,4,8,16):
+                    if bitdepth > 8:
+                        targetbitdepth = 16
+                    elif bitdepth == 3:
+                        targetbitdepth = 4
+                    else:
+                        assert bitdepth in (5,6,7)
+                        targetbitdepth = 8
+                    self.rescale = (bitdepth, targetbitdepth)
+                    bitdepth = targetbitdepth
+                    del targetbitdepth
+
+        if bitdepth < 8 and (alpha or not greyscale and not palette):
+            raise ValueError(
+              "bitdepth < 8 only permitted with greyscale or palette")
+        if bitdepth > 8 and palette:
+            raise ValueError(
+                "bit depth must be 8 or less for images with palette")
+
+        transparent = check_color(transparent, 'transparent')
+        background = check_color(background, 'background')
+
+        # It's important that the true boolean values (greyscale, alpha,
+        # colormap, interlace) are converted to bool because Iverson's
+        # convention is relied upon later on.
+        self.width = width
+        self.height = height
+        self.transparent = transparent
+        self.background = background
+        self.gamma = gamma
+        self.greyscale = bool(greyscale)
+        self.alpha = bool(alpha)
+        self.colormap = bool(palette)
+        self.bitdepth = int(bitdepth)
+        self.compression = compression
+        self.chunk_limit = chunk_limit
+        self.interlace = bool(interlace)
+        self.palette = check_palette(palette)
+
+        self.color_type = 4*self.alpha + 2*(not greyscale) + 1*self.colormap
+        assert self.color_type in (0,2,3,4,6)
+
+        self.color_planes = (3,1)[self.greyscale or self.colormap]
+        self.planes = self.color_planes + self.alpha
+        # :todo: fix for bitdepth < 8
+        self.psize = (self.bitdepth/8) * self.planes
+
+    def make_palette(self):
+        """Create the byte sequences for a ``PLTE`` and if necessary a
+        ``tRNS`` chunk.  Returned as a pair (*p*, *t*).  *t* will be
+        ``None`` if no ``tRNS`` chunk is necessary.
+        """
+
+        p = array('B')
+        t = array('B')
+
+        for x in self.palette:
+            p.extend(x[0:3])
+            if len(x) > 3:
+                t.append(x[3])
+        p = tostring(p)
+        t = tostring(t)
+        if t:
+            return p,t
+        return p,None
+
+    def write(self, outfile, rows):
+        """Write a PNG image to the output file.  `rows` should be
+        an iterable that yields each row in boxed row flat pixel format.
+        The rows should be the rows of the original image, so there
+        should be ``self.height`` rows of ``self.width * self.planes`` values.
+        If `interlace` is specified (when creating the instance), then
+        an interlaced PNG file will be written.  Supply the rows in the
+        normal image order; the interlacing is carried out internally.
+
+        .. note ::
+
+          Interlacing will require the entire image to be in working memory.
+        """
+
+        if self.interlace:
+            fmt = 'BH'[self.bitdepth > 8]
+            a = array(fmt, itertools.chain(*rows))
+            return self.write_array(outfile, a)
+        else:
+            nrows = self.write_passes(outfile, rows)
+            if nrows != self.height:
+                raise ValueError(
+                  "rows supplied (%d) does not match height (%d)" %
+                  (nrows, self.height))
+
+    def write_passes(self, outfile, rows, packed=False):
+        """
+        Write a PNG image to the output file.
+
+        Most users are expected to find the :meth:`write` or
+        :meth:`write_array` method more convenient.
+
+        The rows should be given to this method in the order that
+        they appear in the output file.  For straightlaced images,
+        this is the usual top to bottom ordering, but for interlaced
+        images the rows should have already been interlaced before
+        passing them to this function.
+
+        `rows` should be an iterable that yields each row.  When
+        `packed` is ``False`` the rows should be in boxed row flat pixel
+        format; when `packed` is ``True`` each row should be a packed
+        sequence of bytes.
+
+        """
+
+        # http://www.w3.org/TR/PNG/#5PNG-file-signature
+        outfile.write(_signature)
+
+        # http://www.w3.org/TR/PNG/#11IHDR
+        write_chunk(outfile, 'IHDR',
+                    struct.pack("!2I5B", self.width, self.height,
+                                self.bitdepth, self.color_type,
+                                0, 0, self.interlace))
+
+        # See :chunk:order
+        # http://www.w3.org/TR/PNG/#11gAMA
+        if self.gamma is not None:
+            write_chunk(outfile, 'gAMA',
+                        struct.pack("!L", int(round(self.gamma*1e5))))
+
+        # See :chunk:order
+        # http://www.w3.org/TR/PNG/#11sBIT
+        if self.rescale:
+            write_chunk(outfile, 'sBIT',
+                struct.pack('%dB' % self.planes,
+                            *[self.rescale[0]]*self.planes))
+
+        # :chunk:order: Without a palette (PLTE chunk), ordering is
+        # relatively relaxed.  With one, gAMA chunk must precede PLTE
+        # chunk which must precede tRNS and bKGD.
+        # See http://www.w3.org/TR/PNG/#5ChunkOrdering
+        if self.palette:
+            p,t = self.make_palette()
+            write_chunk(outfile, 'PLTE', p)
+            if t:
+                # tRNS chunk is optional.  Only needed if palette entries
+                # have alpha.
+                write_chunk(outfile, 'tRNS', t)
+
+        # http://www.w3.org/TR/PNG/#11tRNS
+        if self.transparent is not None:
+            if self.greyscale:
+                write_chunk(outfile, 'tRNS',
+                            struct.pack("!1H", *self.transparent))
+            else:
+                write_chunk(outfile, 'tRNS',
+                            struct.pack("!3H", *self.transparent))
+
+        # http://www.w3.org/TR/PNG/#11bKGD
+        if self.background is not None:
+            if self.greyscale:
+                write_chunk(outfile, 'bKGD',
+                            struct.pack("!1H", *self.background))
+            else:
+                write_chunk(outfile, 'bKGD',
+                            struct.pack("!3H", *self.background))
+
+        # http://www.w3.org/TR/PNG/#11IDAT
+        if self.compression is not None:
+            compressor = zlib.compressobj(self.compression)
+        else:
+            compressor = zlib.compressobj()
+
+        # Choose an extend function based on the bitdepth.  The extend
+        # function packs/decomposes the pixel values into bytes and
+        # stuffs them onto the data array.
+        data = array('B')
+        if self.bitdepth == 8 or packed:
+            extend = data.extend
+        elif self.bitdepth == 16:
+            # Decompose into bytes
+            def extend(sl):
+                fmt = '!%dH' % len(sl)
+                data.extend(array('B', struct.pack(fmt, *sl)))
+        else:
+            # Pack into bytes
+            assert self.bitdepth < 8
+            # samples per byte
+            spb = int(8/self.bitdepth)
+            def extend(sl):
+                a = array('B', sl)
+                # Adding padding bytes so we can group into a whole
+                # number of spb-tuples.
+                l = float(len(a))
+                extra = math.ceil(l / float(spb))*spb - l
+                a.extend([0]*int(extra))
+                # Pack into bytes
+                l = group(a, spb)
+                l = map(lambda e: reduce(lambda x,y:
+                                           (x << self.bitdepth) + y, e), l)
+                data.extend(l)
+        if self.rescale:
+            oldextend = extend
+            factor = \
+              float(2**self.rescale[1]-1) / float(2**self.rescale[0]-1)
+            def extend(sl):
+                oldextend(map(lambda x: int(round(factor*x)), sl))
+
+        # Build the first row, testing mostly to see if we need to
+        # change the extend function to cope with NumPy integer types
+        # (they cause our ordinary definition of extend to fail, so we
+        # wrap it).  See
+        # http://code.google.com/p/pypng/issues/detail?id=44
+        enumrows = enumerate(rows)
+        del rows
+
+        # First row's filter type.
+        data.append(0)
+        # :todo: Certain exceptions in the call to ``.next()`` or the
+        # following try would indicate no row data supplied.
+        # Should catch.
+        i,row = enumrows.next()
+        try:
+            # If this fails...
+            extend(row)
+        except:
+            # ... try a version that converts the values to int first.
+            # Not only does this work for the (slightly broken) NumPy
+            # types, there are probably lots of other, unknown, "nearly"
+            # int types it works for.
+            def wrapmapint(f):
+                return lambda sl: f(map(int, sl))
+            extend = wrapmapint(extend)
+            del wrapmapint
+            extend(row)
+
+        for i,row in enumrows:
+            # Add "None" filter type.  Currently, it's essential that
+            # this filter type be used for every scanline as we do not
+            # mark the first row of a reduced pass image; that means we
+            # could accidentally compute the wrong filtered scanline if
+            # we used "up", "average", or "paeth" on such a line.
+            data.append(0)
+            extend(row)
+            if len(data) > self.chunk_limit:
+                compressed = compressor.compress(tostring(data))
+                if len(compressed):
+                    # print >> sys.stderr, len(data), len(compressed)
+                    write_chunk(outfile, 'IDAT', compressed)
+                # Because of our very witty definition of ``extend``,
+                # above, we must re-use the same ``data`` object.  Hence
+                # we use ``del`` to empty this one, rather than create a
+                # fresh one (which would be my natural FP instinct).
+                del data[:]
+        if len(data):
+            compressed = compressor.compress(tostring(data))
+        else:
+            compressed = ''
+        flushed = compressor.flush()
+        if len(compressed) or len(flushed):
+            # print >> sys.stderr, len(data), len(compressed), len(flushed)
+            write_chunk(outfile, 'IDAT', compressed + flushed)
+        # http://www.w3.org/TR/PNG/#11IEND
+        write_chunk(outfile, 'IEND')
+        return i+1
+
+    def write_array(self, outfile, pixels):
+        """
+        Write an array in flat row flat pixel format as a PNG file on
+        the output file.  See also :meth:`write` method.
+        """
+
+        if self.interlace:
+            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
+        else:
+            self.write_passes(outfile, self.array_scanlines(pixels))
+
+    def write_packed(self, outfile, rows):
+        """
+        Write PNG file to `outfile`.  The pixel data comes from `rows`
+        which should be in boxed row packed format.  Each row should be
+        a sequence of packed bytes.
+
+        Technically, this method does work for interlaced images but it
+        is best avoided.  For interlaced images, the rows should be
+        presented in the order that they appear in the file.
+
+        This method should not be used when the source image bit depth
+        is not one naturally supported by PNG; the bit depth should be
+        1, 2, 4, 8, or 16.
+        """
+
+        if self.rescale:
+            raise Error("write_packed method not suitable for bit depth %d" %
+              self.rescale[0])
+        return self.write_passes(outfile, rows, packed=True)
+
+    def convert_pnm(self, infile, outfile):
+        """
+        Convert a PNM file containing raw pixel data into a PNG file
+        with the parameters set in the writer object.  Works for
+        (binary) PGM, PPM, and PAM formats.
+        """
+
+        if self.interlace:
+            pixels = array('B')
+            pixels.fromfile(infile,
+                            (self.bitdepth/8) * self.color_planes *
+                            self.width * self.height)
+            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
+        else:
+            self.write_passes(outfile, self.file_scanlines(infile))
+
+    def convert_ppm_and_pgm(self, ppmfile, pgmfile, outfile):
+        """
+        Convert a PPM and PGM file containing raw pixel data into a
+        PNG outfile with the parameters set in the writer object.
+        """
+        pixels = array('B')
+        pixels.fromfile(ppmfile,
+                        (self.bitdepth/8) * self.color_planes *
+                        self.width * self.height)
+        apixels = array('B')
+        apixels.fromfile(pgmfile,
+                         (self.bitdepth/8) *
+                         self.width * self.height)
+        pixels = interleave_planes(pixels, apixels,
+                                   (self.bitdepth/8) * self.color_planes,
+                                   (self.bitdepth/8))
+        if self.interlace:
+            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
+        else:
+            self.write_passes(outfile, self.array_scanlines(pixels))
+
+    def file_scanlines(self, infile):
+        """
+        Generates boxed rows in flat pixel format, from the input file
+        `infile`.  It assumes that the input file is in a "Netpbm-like"
+        binary format, and is positioned at the beginning of the first
+        pixel.  The number of pixels to read is taken from the image
+        dimensions (`width`, `height`, `planes`) and the number of bytes
+        per value is implied by the image `bitdepth`.
+        """
+
+        # Values per row
+        vpr = self.width * self.planes
+        row_bytes = vpr
+        if self.bitdepth > 8:
+            assert self.bitdepth == 16
+            row_bytes *= 2
+            fmt = '>%dH' % vpr
+            def line():
+                return array('H', struct.unpack(fmt, infile.read(row_bytes)))
+        else:
+            def line():
+                scanline = array('B', infile.read(row_bytes))
+                return scanline
+        for y in range(self.height):
+            yield line()
+
+    def array_scanlines(self, pixels):
+        """
+        Generates boxed rows (flat pixels) from flat rows (flat pixels)
+        in an array.
+        """
+
+        # Values per row
+        vpr = self.width * self.planes
+        stop = 0
+        for y in range(self.height):
+            start = stop
+            stop = start + vpr
+            yield pixels[start:stop]
+
+    def array_scanlines_interlace(self, pixels):
+        """
+        Generator for interlaced scanlines from an array.  `pixels` is
+        the full source image in flat row flat pixel format.  The
+        generator yields each scanline of the reduced passes in turn, in
+        boxed row flat pixel format.
+        """
+
+        # http://www.w3.org/TR/PNG/#8InterlaceMethods
+        # Array type.
+        fmt = 'BH'[self.bitdepth > 8]
+        # Value per row
+        vpr = self.width * self.planes
+        for xstart, ystart, xstep, ystep in _adam7:
+            if xstart >= self.width:
+                continue
+            # Pixels per row (of reduced image)
+            ppr = int(math.ceil((self.width-xstart)/float(xstep)))
+            # number of values in reduced image row.
+            row_len = ppr*self.planes
+            for y in range(ystart, self.height, ystep):
+                if xstep == 1:
+                    offset = y * vpr
+                    yield pixels[offset:offset+vpr]
+                else:
+                    row = array(fmt)
+                    # There's no easier way to set the length of an array
+                    row.extend(pixels[0:row_len])
+                    offset = y * vpr + xstart * self.planes
+                    end_offset = (y+1) * vpr
+                    skip = self.planes * xstep
+                    for i in range(self.planes):
+                        row[i::self.planes] = \
+                            pixels[offset+i:end_offset:skip]
+                    yield row
+
+def write_chunk(outfile, tag, data=''):
+    """
+    Write a PNG chunk to the output file, including length and
+    checksum.
+    """
+
+    # http://www.w3.org/TR/PNG/#5Chunk-layout
+    outfile.write(struct.pack("!I", len(data)))
+    outfile.write(tag)
+    outfile.write(data)
+    checksum = zlib.crc32(tag)
+    checksum = zlib.crc32(data, checksum)
+    outfile.write(struct.pack("!i", checksum))
+
+def write_chunks(out, chunks):
+    """Create a PNG file by writing out the chunks."""
+
+    out.write(_signature)
+    for chunk in chunks:
+        write_chunk(out, *chunk)
+
+def filter_scanline(type, line, fo, prev=None):
+    """Apply a scanline filter to a scanline.  `type` specifies the
+    filter type (0 to 4); `line` specifies the current (unfiltered)
+    scanline as a sequence of bytes; `prev` specifies the previous
+    (unfiltered) scanline as a sequence of bytes. `fo` specifies the
+    filter offset; normally this is size of a pixel in bytes (the number
+    of bytes per sample times the number of channels), but when this is
+    < 1 (for bit depths < 8) then the filter offset is 1.
+    """
+
+    assert 0 <= type < 5
+
+    # The output array.  Which, pathetically, we extend one-byte at a
+    # time (fortunately this is linear).
+    out = array('B', [type])
+
+    def sub():
+        ai = -fo
+        for x in line:
+            if ai >= 0:
+                x = (x - line[ai]) & 0xff
+            out.append(x)
+            ai += 1
+    def up():
+        for i,x in enumerate(line):
+            x = (x - prev[i]) & 0xff
+            out.append(x)
+    def average():
+        ai = -fo
+        for i,x in enumerate(line):
+            if ai >= 0:
+                x = (x - ((line[ai] + prev[i]) >> 1)) & 0xff
+            else:
+                x = (x - (prev[i] >> 1)) & 0xff
+            out.append(x)
+            ai += 1
+    def paeth():
+        # http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth
+        ai = -fo # also used for ci
+        for i,x in enumerate(line):
+            a = 0
+            b = prev[i]
+            c = 0
+
+            if ai >= 0:
+                a = line[ai]
+                c = prev[ai]
+            p = a + b - c
+            pa = abs(p - a)
+            pb = abs(p - b)
+            pc = abs(p - c)
+            if pa <= pb and pa <= pc: Pr = a
+            elif pb <= pc: Pr = b
+            else: Pr = c
+
+            x = (x - Pr) & 0xff
+            out.append(x)
+            ai += 1
+
+    if not prev:
+        # We're on the first line.  Some of the filters can be reduced
+        # to simpler cases which makes handling the line "off the top"
+        # of the image simpler.  "up" becomes "none"; "paeth" becomes
+        # "left" (non-trivial, but true). "average" needs to be handled
+        # specially.
+        if type == 2: # "up" on the first line reduces to "none"
+            type = 0
+            out[0] = 0
+        elif type == 3:
+            prev = [0]*len(line)
+        elif type == 4: # "paeth"
+            type = 1
+    if type == 0:
+        out.extend(line)
+    elif type == 1:
+        sub()
+    elif type == 2:
+        up()
+    elif type == 3:
+        average()
+    else: # type == 4
+        paeth()
+    return out
+
+
+class _readable:
+    """
+    A simple file-like interface for strings and arrays.
+    """
+
+    def __init__(self, buf):
+        self.buf = buf
+        self.offset = 0
+
+    def read(self, n):
+        r = self.buf[self.offset:self.offset+n]
+        if isarray(r):
+            r = r.tostring()
+        self.offset += n
+        return r
+
+
+class Reader:
+    """
+    PNG decoder in pure Python.
+    """
+
+    def __init__(self, _guess=None, **kw):
+        """
+        Create a PNG decoder object.
+
+        The constructor expects exactly one keyword argument. If you
+        supply a positional argument instead, it will guess the input
+        type. You can choose among the following keyword arguments:
+
+        filename
+          Name of input file (a PNG file).
+        file
+          A file-like object (object with a read() method).
+        bytes
+          ``array`` or ``string`` with PNG data.
+
+        """
+        if ((_guess is not None and len(kw) != 0) or
+            (_guess is None and len(kw) != 1)):
+            raise TypeError("Reader() takes exactly 1 argument")
+
+        # Will be the first 8 bytes, later on.  See validate_signature.
+        self.signature = None
+        self.transparent = None
+        # A pair of (len,type) if a chunk has been read but its data and
+        # checksum have not (in other words the file position is just
+        # past the 4 bytes that specify the chunk type).  See preamble
+        # method for how this is used.
+        self.atchunk = None
+
+        if _guess is not None:
+            if isarray(_guess):
+                kw["bytes"] = _guess
+            elif isinstance(_guess, str):
+                kw["filename"] = _guess
+            elif isinstance(_guess, file):
+                kw["file"] = _guess
+
+        if "filename" in kw:
+            self.file = file(kw["filename"], "rb")
+        elif "file" in kw:
+            self.file = kw["file"]
+        elif "bytes" in kw:
+            self.file = _readable(kw["bytes"])
+        else:
+            raise TypeError("expecting filename, file or bytes array")
+
+    def chunk(self, seek=None):
+        """
+        Read the next PNG chunk from the input file; returns type (as a 4
+        character string) and data.  If the optional `seek` argument is
+        specified then it will keep reading chunks until it either runs
+        out of file or finds the type specified by the argument.  Note
+        that in general the order of chunks in PNGs is unspecified, so
+        using `seek` can cause you to miss chunks.
+        """
+
+        self.validate_signature()
+
+        while True:
+            # http://www.w3.org/TR/PNG/#5Chunk-layout
+            if not self.atchunk:
+                self.atchunk = self.chunklentype()
+            length,type = self.atchunk
+            self.atchunk = None
+            data = self.file.read(length)
+            if len(data) != length:
+                raise ChunkError('Chunk %s too short for required %i octets.'
+                  % (type, length))
+            checksum = self.file.read(4)
+            if len(checksum) != 4:
+                raise ValueError('Chunk %s too short for checksum.' % type)
+            if seek and type != seek:
+                continue
+            verify = zlib.crc32(type)
+            verify = zlib.crc32(data, verify)
+            # Whether the output from zlib.crc32 is signed or not varies
+            # according to hideous implementation details, see
+            # http://bugs.python.org/issue1202 .
+            # We coerce it to be positive here (in a way which works on
+            # Python 2.3 and older).
+            verify &= 2**32 - 1
+            verify = struct.pack('!I', verify)
+            if checksum != verify:
+                # print repr(checksum)
+                (a, ) = struct.unpack('!I', checksum)
+                (b, ) = struct.unpack('!I', verify)
+                raise ChunkError(
+                  "Checksum error in %s chunk: 0x%08X != 0x%08X." %
+                  (type, a, b))
+            return type, data
+
+    def chunks(self):
+        """Return an iterator that will yield each chunk as a
+        (*chunktype*, *content*) pair.
+        """
+
+        while True:
+            t,v = self.chunk()
+            yield t,v
+            if t == 'IEND':
+                break
+
+    def undo_filter(self, filter_type, scanline, previous):
+        """Undo the filter for a scanline.  `scanline` is a sequence of
+        bytes that does not include the initial filter type byte.
+        `previous` is the decoded previous scanline (for straightlaced
+        images this is the previous pixel row, but for interlaced
+        images, it is the previous scanline in the reduced image, which
+        in general is not the previous pixel row in the final image).
+        When there is no previous scanline (the first row of a
+        straightlaced image, or the first row in one of the passes in an
+        interlaced image), then this argument should be ``None``.
+
+        The scanline will have the effects of filtering removed, and the
+        result will be returned as a fresh sequence of bytes.
+        """
+
+        # :todo: Would it be better to update scanline in place?
+
+        # Create the result byte array.  It seems that the best way to
+        # create the array to be the right size is to copy from an
+        # existing sequence.  *sigh*
+        # If we fill the result with scanline, then this allows a
+        # micro-optimisation in the "null" and "sub" cases.
+        result = array('B', scanline)
+
+        if filter_type == 0:
+            # And here, we _rely_ on filling the result with scanline,
+            # above.
+            return result
+
+        if filter_type not in (1,2,3,4):
+            raise FormatError('Invalid PNG Filter Type.'
+              '  See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .')
+
+        # Filter unit.  The stride from one pixel to the corresponding
+        # byte from the previous previous.  Normally this is the pixel
+        # size in bytes, but when this is smaller than 1, the previous
+        # byte is used instead.
+        fu = max(1, self.psize)
+
+        # For the first line of a pass, synthesize a dummy previous
+        # line.  An alternative approach would be to observe that on the
+        # first line 'up' is the same as 'null', 'paeth' is the same
+        # as 'sub', with only 'average' requiring any special case.
+        if not previous:
+            previous = array('B', [0]*len(scanline))
+
+        def sub():
+            """Undo sub filter."""
+
+            ai = 0
+            # Loops starts at index fu.  Observe that the initial part
+            # of the result is already filled in correctly with
+            # scanline.
+            for i in range(fu, len(result)):
+                x = scanline[i]
+                a = result[ai]
+                result[i] = (x + a) & 0xff
+                ai += 1
+
+        def up():
+            """Undo up filter."""
+
+            for i in range(len(result)):
+                x = scanline[i]
+                b = previous[i]
+                result[i] = (x + b) & 0xff
+
+        def average():
+            """Undo average filter."""
+
+            ai = -fu
+            for i in range(len(result)):
+                x = scanline[i]
+                if ai < 0:
+                    a = 0
+                else:
+                    a = result[ai]
+                b = previous[i]
+                result[i] = (x + ((a + b) >> 1)) & 0xff
+                ai += 1
+
+        def paeth():
+            """Undo Paeth filter."""
+
+            # Also used for ci.
+            ai = -fu
+            for i in range(len(result)):
+                x = scanline[i]
+                if ai < 0:
+                    a = c = 0
+                else:
+                    a = result[ai]
+                    c = previous[ai]
+                b = previous[i]
+                p = a + b - c
+                pa = abs(p - a)
+                pb = abs(p - b)
+                pc = abs(p - c)
+                if pa <= pb and pa <= pc:
+                    pr = a
+                elif pb <= pc:
+                    pr = b
+                else:
+                    pr = c
+                result[i] = (x + pr) & 0xff
+                ai += 1
+
+        # Call appropriate filter algorithm.  Note that 0 has already
+        # been dealt with.
+        (None, sub, up, average, paeth)[filter_type]()
+        return result
+
+    def deinterlace(self, raw):
+        """
+        Read raw pixel data, undo filters, deinterlace, and flatten.
+        Return in flat row flat pixel format.
+        """
+
+        # print >> sys.stderr, ("Reading interlaced, w=%s, r=%s, planes=%s," +
+        #     " bpp=%s") % (self.width, self.height, self.planes, self.bps)
+        # Values per row (of the target image)
+        vpr = self.width * self.planes
+
+        # Make a result array, and make it big enough.  Interleaving
+        # writes to the output array randomly (well, not quite), so the
+        # entire output array must be in memory.
+        fmt = 'BH'[self.bitdepth > 8]
+        a = array(fmt, [0]*vpr*self.height)
+        source_offset = 0
+
+        for xstart, ystart, xstep, ystep in _adam7:
+            # print >> sys.stderr, "Adam7: start=%s,%s step=%s,%s" % (
+            #     xstart, ystart, xstep, ystep)
+            if xstart >= self.width:
+                continue
+            # The previous (reconstructed) scanline.  None at the
+            # beginning of a pass to indicate that there is no previous
+            # line.
+            recon = None
+            # Pixels per row (reduced pass image)
+            ppr = int(math.ceil((self.width-xstart)/float(xstep)))
+            # Row size in bytes for this pass.
+            row_size = int(math.ceil(self.psize * ppr))
+            for y in range(ystart, self.height, ystep):
+                filter_type = raw[source_offset]
+                source_offset += 1
+                scanline = raw[source_offset:source_offset+row_size]
+                source_offset += row_size
+                recon = self.undo_filter(filter_type, scanline, recon)
+                # Convert so that there is one element per pixel value
+                flat = self.serialtoflat(recon, ppr)
+                if xstep == 1:
+                    assert xstart == 0
+                    offset = y * vpr
+                    a[offset:offset+vpr] = flat
+                else:
+                    offset = y * vpr + xstart * self.planes
+                    end_offset = (y+1) * vpr
+                    skip = self.planes * xstep
+                    for i in range(self.planes):
+                        a[offset+i:end_offset:skip] = \
+                            flat[i::self.planes]
+        return a
+
+    def iterboxed(self, rows):
+        """Iterator that yields each scanline in boxed row flat pixel
+        format.  `rows` should be an iterator that yields the bytes of
+        each row in turn.
+        """
+
+        def asvalues(raw):
+            """Convert a row of raw bytes into a flat row.  Result may
+            or may not share with argument"""
+
+            if self.bitdepth == 8:
+                return raw
+            if self.bitdepth == 16:
+                raw = tostring(raw)
+                return array('H', struct.unpack('!%dH' % (len(raw)//2), raw))
+            assert self.bitdepth < 8
+            width = self.width
+            # Samples per byte
+            spb = 8//self.bitdepth
+            out = array('B')
+            mask = 2**self.bitdepth - 1
+            shifts = map(self.bitdepth.__mul__, reversed(range(spb)))
+            for o in raw:
+                out.extend(map(lambda i: mask&(o>>i), shifts))
+            return out[:width]
+
+        return itertools.imap(asvalues, rows)
+
+    def serialtoflat(self, bytes, width=None):
+        """Convert serial format (byte stream) pixel data to flat row
+        flat pixel.
+        """
+
+        if self.bitdepth == 8:
+            return bytes
+        if self.bitdepth == 16:
+            bytes = tostring(bytes)
+            return array('H',
+              struct.unpack('!%dH' % (len(bytes)//2), bytes))
+        assert self.bitdepth < 8
+        if width is None:
+            width = self.width
+        # Samples per byte
+        spb = 8//self.bitdepth
+        out = array('B')
+        mask = 2**self.bitdepth - 1
+        shifts = map(self.bitdepth.__mul__, reversed(range(spb)))
+        l = width
+        for o in bytes:
+            out.extend(map(lambda i: mask&(o>>i), shifts)[:l])
+            l -= spb
+            if l <= 0:
+                l = width
+        return out
+
+    def iterstraight(self, raw):
+        """Iterator that undoes the effect of filtering, and yields each
+        row in serialised format (as a sequence of bytes).  Assumes input
+        is straightlaced.  `raw` should be an iterable that yields the
+        raw bytes in chunks of arbitrary size."""
+
+        # length of row, in bytes
+        rb = self.row_bytes
+        a = array('B')
+        # The previous (reconstructed) scanline.  None indicates first
+        # line of image.
+        recon = None
+        for some in raw:
+            a.extend(some)
+            while len(a) >= rb + 1:
+                filter_type = a[0]
+                scanline = a[1:rb+1]
+                del a[:rb+1]
+                recon = self.undo_filter(filter_type, scanline, recon)
+                yield recon
+        if len(a) != 0:
+            # :file:format We get here with a file format error: when the
+            # available bytes (after decompressing) do not pack into exact
+            # rows.
+            raise FormatError(
+              'Wrong size for decompressed IDAT chunk.')
+        assert len(a) == 0
+
+    def validate_signature(self):
+        """If signature (header) has not been read then read and
+        validate it; otherwise do nothing.
+        """
+
+        if self.signature:
+            return
+        self.signature = self.file.read(8)
+        if self.signature != _signature:
+            raise FormatError("PNG file has invalid signature.")
+
+    def preamble(self):
+        """
+        Extract the image metadata by reading the initial part of the PNG
+        file up to the start of the ``IDAT`` chunk.  All the chunks that
+        precede the ``IDAT`` chunk are read and either processed for
+        metadata or discarded.
+        """
+
+        self.validate_signature()
+
+        while True:
+            if not self.atchunk:
+                self.atchunk = self.chunklentype()
+                if self.atchunk is None:
+                    raise FormatError(
+                      'This PNG file has no IDAT chunks.')
+            if self.atchunk[1] == 'IDAT':
+                return
+            self.process_chunk()
+
+    def chunklentype(self):
+        """Reads just enough of the input to determine the next
+        chunk's length and type, returned as a (*length*, *type*) pair
+        where *type* is a string.  If there are no more chunks, ``None``
+        is returned.
+        """
+
+        x = self.file.read(8)
+        if not x:
+            return None
+        if len(x) != 8:
+            raise FormatError(
+              'End of file whilst reading chunk length and type.')
+        length,type = struct.unpack('!I4s', x)
+        if length > 2**31-1:
+            raise FormatError('Chunk %s is too large: %d.' % (type,length))
+        return length,type
+
+    def process_chunk(self):
+        """Process the next chunk and its data.  This only processes the
+        following chunk types, all others are ignored: ``IHDR``,
+        ``PLTE``, ``bKGD``, ``tRNS``, ``gAMA``, ``sBIT``.
+        """
+
+        type, data = self.chunk()
+        if type == 'IHDR':
+            # http://www.w3.org/TR/PNG/#11IHDR
+            if len(data) != 13:
+                raise FormatError('IHDR chunk has incorrect length.')
+            (self.width, self.height, self.bitdepth, self.color_type,
+             self.compression, self.filter,
+             self.interlace) = struct.unpack("!2I5B", data)
+
+            # Check that the header specifies only valid combinations.
+            if self.bitdepth not in (1,2,4,8,16):
+                raise Error("invalid bit depth %d" % self.bitdepth)
+            if self.color_type not in (0,2,3,4,6):
+                raise Error("invalid colour type %d" % self.color_type)
+            # Check indexed (palettized) images have 8 or fewer bits
+            # per pixel; check only indexed or greyscale images have
+            # fewer than 8 bits per pixel.
+            if ((self.color_type & 1 and self.bitdepth > 8) or
+                (self.bitdepth < 8 and self.color_type not in (0,3))):
+                raise FormatError("Illegal combination of bit depth (%d)"
+                  " and colour type (%d)."
+                  " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
+                  % (self.bitdepth, self.color_type))
+            if self.compression != 0:
+                raise Error("unknown compression method %d" % self.compression)
+            if self.filter != 0:
+                raise FormatError("Unknown filter method %d,"
+                  " see http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters ."
+                  % self.filter)
+            if self.interlace not in (0,1):
+                raise FormatError("Unknown interlace method %d,"
+                  " see http://www.w3.org/TR/2003/REC-PNG-20031110/#8InterlaceMethods ."
+                  % self.interlace)
+
+            # Derived values
+            # http://www.w3.org/TR/PNG/#6Colour-values
+            colormap =  bool(self.color_type & 1)
+            greyscale = not (self.color_type & 2)
+            alpha = bool(self.color_type & 4)
+            color_planes = (3,1)[greyscale or colormap]
+            planes = color_planes + alpha
+
+            self.colormap = colormap
+            self.greyscale = greyscale
+            self.alpha = alpha
+            self.color_planes = color_planes
+            self.planes = planes
+            self.psize = float(self.bitdepth)/float(8) * planes
+            if int(self.psize) == self.psize:
+                self.psize = int(self.psize)
+            self.row_bytes = int(math.ceil(self.width * self.psize))
+            # Stores PLTE chunk if present, and is used to check
+            # chunk ordering constraints.
+            self.plte = None
+            # Stores tRNS chunk if present, and is used to check chunk
+            # ordering constraints.
+            self.trns = None
+            # Stores sbit chunk if present.
+            self.sbit = None
+        elif type == 'PLTE':
+            # http://www.w3.org/TR/PNG/#11PLTE
+            if self.plte:
+                warnings.warn("Multiple PLTE chunks present.")
+            self.plte = data
+            if len(data) % 3 != 0:
+                raise FormatError(
+                  "PLTE chunk's length should be a multiple of 3.")
+            if len(data) > (2**self.bitdepth)*3:
+                raise FormatError("PLTE chunk is too long.")
+            if len(data) == 0:
+                raise FormatError("Empty PLTE is not allowed.")
+        elif type == 'bKGD':
+            try:
+                if self.colormap:
+                    if not self.plte:
+                        warnings.warn(
+                          "PLTE chunk is required before bKGD chunk.")
+                    self.background = struct.unpack('B', data)
+                else:
+                    self.background = struct.unpack("!%dH" % self.color_planes,
+                      data)
+            except struct.error:
+                raise FormatError("bKGD chunk has incorrect length.")
+        elif type == 'tRNS':
+            # http://www.w3.org/TR/PNG/#11tRNS
+            self.trns = data
+            if self.colormap:
+                if not self.plte:
+                    warnings.warn("PLTE chunk is required before tRNS chunk.")
+                else:
+                    if len(data) > len(self.plte)/3:
+                        # Was warning, but promoted to Error as it
+                        # would otherwise cause pain later on.
+                        raise FormatError("tRNS chunk is too long.")
+            else:
+                if self.alpha:
+                    raise FormatError(
+                      "tRNS chunk is not valid with colour type %d." %
+                      self.color_type)
+                try:
+                    self.transparent = \
+                        struct.unpack("!%dH" % self.color_planes, data)
+                except struct.error:
+                    raise FormatError("tRNS chunk has incorrect length.")
+        elif type == 'gAMA':
+            try:
+                self.gamma = struct.unpack("!L", data)[0] / 100000.0
+            except struct.error:
+                raise FormatError("gAMA chunk has incorrect length.")
+        elif type == 'sBIT':
+            self.sbit = data
+            if (self.colormap and len(data) != 3 or
+                not self.colormap and len(data) != self.planes):
+                raise FormatError("sBIT chunk has incorrect length.")
+
+    def read(self):
+        """
+        Read the PNG file and decode it.  Returns (`width`, `height`,
+        `pixels`, `metadata`).
+
+        May use excessive memory.
+
+        `pixels` are returned in boxed row flat pixel format.
+        """
+
+        def iteridat():
+            """Iterator that yields all the ``IDAT`` chunks as strings."""
+            while True:
+                try:
+                    type, data = self.chunk()
+                except ValueError, e:
+                    raise ChunkError(e.args[0])
+                if type == 'IEND':
+                    # http://www.w3.org/TR/PNG/#11IEND
+                    break
+                if type != 'IDAT':
+                    continue
+                # type == 'IDAT'
+                # http://www.w3.org/TR/PNG/#11IDAT
+                if self.colormap and not self.plte:
+                    warnings.warn("PLTE chunk is required before IDAT chunk")
+                yield data
+
+        def iterdecomp(idat):
+            """Iterator that yields decompressed strings.  `idat` should
+            be an iterator that yields the ``IDAT`` chunk data.
+            """
+
+            # Currently, with no max_length parameter to decompress, this
+            # routine will do one yield per IDAT chunk.  So not very
+            # incremental.
+            d = zlib.decompressobj()
+            # Feed each IDAT chunk to the decompressor as it arrives and
+            # yield whatever output it produces; any remaining buffered
+            # output is flushed after the final chunk.
+            for data in idat:
+                # :todo: add a max_length argument here to limit output
+                # size.
+                yield array('B', d.decompress(data))
+            yield array('B', d.flush())
+
+        self.preamble()
+        raw = iterdecomp(iteridat())
+
+        if self.interlace:
+            raw = array('B', itertools.chain(*raw))
+            arraycode = 'BH'[self.bitdepth>8]
+            # Like :meth:`group` but producing an array.array object for
+            # each row.
+            pixels = itertools.imap(lambda *row: array(arraycode, row),
+                       *[iter(self.deinterlace(raw))]*self.width*self.planes)
+        else:
+            pixels = self.iterboxed(self.iterstraight(raw))
+        meta = dict()
+        for attr in 'greyscale alpha planes bitdepth interlace'.split():
+            meta[attr] = getattr(self, attr)
+        meta['size'] = (self.width, self.height)
+        for attr in 'gamma transparent background'.split():
+            a = getattr(self, attr, None)
+            if a is not None:
+                meta[attr] = a
+        return self.width, self.height, pixels, meta
+
+
+    def read_flat(self):
+        """
+        Read a PNG file and decode it into flat row flat pixel format.
+        Returns (*width*, *height*, *pixels*, *metadata*).
+
+        May use excessive memory.
+
+        `pixels` are returned in flat row flat pixel format.
+
+        See also the :meth:`read` method which returns pixels in the
+        more stream-friendly boxed row flat pixel format.
+        """
+
+        x, y, pixel, meta = self.read()
+        arraycode = 'BH'[meta['bitdepth']>8]
+        pixel = array(arraycode, itertools.chain(*pixel))
+        return x, y, pixel, meta
+
+    def palette(self, alpha='natural'):
+        """Returns a palette that is a sequence of 3-tuples or 4-tuples,
+        synthesizing it from the ``PLTE`` and ``tRNS`` chunks.  These
+        chunks should have already been processed (for example, by
+        calling the :meth:`preamble` method).  All the tuples are the
+        same size, 3-tuples if there is no ``tRNS`` chunk, 4-tuples when
+        there is a ``tRNS`` chunk.  Assumes that the image is colour type
+        3 and therefore a ``PLTE`` chunk is required.
+
+        If the `alpha` argument is ``'force'`` then an alpha channel is
+        always added, forcing the result to be a sequence of 4-tuples.
+        """
+
+        if not self.plte:
+            raise FormatError(
+                "Required PLTE chunk is missing in colour type 3 image.")
+        plte = group(array('B', self.plte), 3)
+        if self.trns or alpha == 'force':
+            trns = array('B', self.trns or '')
+            trns.extend([255]*(len(plte)-len(trns)))
+            plte = map(operator.add, plte, group(trns, 1))
+        return plte
+
+    def asDirect(self):
+        """Returns the image data as a direct representation of an
+        ``x * y * planes`` array.  This method is intended to remove the
+        need for callers to deal with palettes and transparency
+        themselves.  Images with a palette (colour type 3)
+        are converted to RGB or RGBA; images with transparency (a
+        ``tRNS`` chunk) are converted to LA or RGBA as appropriate.
+        When returned in this format the pixel values represent the
+        colour value directly without needing to refer to palettes or
+        transparency information.
+
+        Like the :meth:`read` method this method returns a 4-tuple:
+
+        (*width*, *height*, *pixels*, *meta*)
+
+        This method normally returns pixel values with the bit depth
+        they have in the source image, but when the source PNG has an
+        ``sBIT`` chunk it is inspected and can reduce the bit depth of
+        the result pixels; pixel values will be reduced according to
+        the bit depth specified in the ``sBIT`` chunk (PNG nerds should
+        note a single result bit depth is used for all channels; the
+        maximum of the ones specified in the ``sBIT`` chunk.  An RGB565
+        image will be rescaled to 6-bit RGB666).
+
+        The *meta* dictionary that is returned reflects the `direct`
+        format and not the original source image.  For example, an RGB
+        source image with a ``tRNS`` chunk to represent a transparent
+        colour, will have ``planes=3`` and ``alpha=False`` for the
+        source image, but the *meta* dictionary returned by this method
+        will have ``planes=4`` and ``alpha=True`` because an alpha
+        channel is synthesized and added.
+
+        *pixels* is the pixel data in boxed row flat pixel format (just
+        like the :meth:`read` method).
+
+        All the other aspects of the image data are not changed.
+        """
+
+        self.preamble()
+
+        # Simple case, no conversion necessary.
+        if not self.colormap and not self.trns and not self.sbit:
+            return self.read()
+
+        x,y,pixels,meta = self.read()
+
+        if self.colormap:
+            meta['colormap'] = False
+            meta['alpha'] = bool(self.trns)
+            meta['bitdepth'] = 8
+            meta['planes'] = 3 + bool(self.trns)
+            plte = self.palette()
+            def iterpal(pixels):
+                for row in pixels:
+                    row = map(plte.__getitem__, row)
+                    yield array('B', itertools.chain(*row))
+            pixels = iterpal(pixels)
+        elif self.trns:
+            # It would be nice if there was some reasonable way of doing
+            # this without generating a whole load of intermediate tuples.
+            # But tuples do seem like the easiest way, with no other way
+            # clearly much simpler or much faster.  (Actually, the L to LA
+            # conversion could perhaps go faster (all those 1-tuples!), but
+            # I still wonder whether the code proliferation is worth it)
+            it = self.transparent
+            maxval = 2**meta['bitdepth']-1
+            planes = meta['planes']
+            meta['alpha'] = True
+            meta['planes'] += 1
+            typecode = 'BH'[meta['bitdepth']>8]
+            def itertrns(pixels):
+                for row in pixels:
+                    # For each row we group it into pixels, then form a
+                    # characterisation vector that says whether each pixel
+                    # is opaque or not.  Then we convert True/False to
+                    # 0/maxval (by multiplication), and add it as the extra
+                    # channel.
+                    row = group(row, planes)
+                    opa = map(it.__ne__, row)
+                    opa = map(maxval.__mul__, opa)
+                    opa = zip(opa) # convert to 1-tuples
+                    yield array(typecode,
+                      itertools.chain(*map(operator.add, row, opa)))
+            pixels = itertrns(pixels)
+        targetbitdepth = None
+        if self.sbit:
+            sbit = struct.unpack('%dB' % len(self.sbit), self.sbit)
+            targetbitdepth = max(sbit)
+            if targetbitdepth > meta['bitdepth']:
+                raise Error('sBIT chunk %r exceeds bitdepth %d' %
+                    (sbit,self.bitdepth))
+            if min(sbit) <= 0:
+                raise Error('sBIT chunk %r has a 0-entry' % sbit)
+            if targetbitdepth == meta['bitdepth']:
+                targetbitdepth = None
+        if targetbitdepth:
+            shift = meta['bitdepth'] - targetbitdepth
+            meta['bitdepth'] = targetbitdepth
+            def itershift(pixels):
+                for row in pixels:
+                    yield map(shift.__rrshift__, row)
+            pixels = itershift(pixels)
+        return x,y,pixels,meta
+
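+    # Illustrative use of asDirect() (a sketch, not prescriptive): for a
+    # palette image with transparency the metadata describes the
+    # synthesized direct image rather than the source, e.g.
+    #
+    #     w, h, pixels, meta = Reader(filename='pic.png').asDirect()
+    #     # meta['colormap'] is False; meta['alpha'] is True when the
+    #     # source carried a tRNS chunk; meta['bitdepth'] is 8.
+    #     for row in pixels:
+    #         handle_row(row)   # handle_row() is a stand-in for caller code
+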
+    def asFloat(self, maxval=1.0):
+        """Return image pixels as per :meth:`asDirect` method, but scale
+        all pixel values to be floating point values between 0.0 and
+        *maxval*.
+        """
+
+        x,y,pixels,info = self.asDirect()
+        sourcemaxval = 2**info['bitdepth']-1
+        del info['bitdepth']
+        info['maxval'] = float(maxval)
+        factor = float(maxval)/float(sourcemaxval)
+        def iterfloat():
+            for row in pixels:
+                yield map(factor.__mul__, row)
+        return x,y,iterfloat(),info
+
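+    # Illustrative arithmetic for asFloat(): with the default maxval of
+    # 1.0 an 8-bit sample of 128 becomes 128/255 ~= 0.502, and a 16-bit
+    # sample of 65535 becomes exactly 1.0.
+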
+    def _as_rescale(self, get, targetbitdepth):
+        """Helper used by :meth:`asRGB8` and :meth:`asRGBA8`."""
+
+        width,height,pixels,meta = get()
+        maxval = 2**meta['bitdepth'] - 1
+        targetmaxval = 2**targetbitdepth - 1
+        factor = float(targetmaxval) / float(maxval)
+        meta['bitdepth'] = targetbitdepth
+        def iterscale():
+            for row in pixels:
+                yield map(lambda x: int(round(x*factor)), row)
+        return width, height, iterscale(), meta
+
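+    # Illustrative arithmetic for _as_rescale(): rescaling 16-bit samples
+    # to 8 bits uses factor = 255/65535.0, so 65535 -> 255 and
+    # 32768 -> int(round(32768 * 255/65535.0)) == 128.
+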
+    def asRGB8(self):
+        """Return the image data as an RGB pixels with 8-bits per
+        sample.  This is like the :meth:`asRGB` method except that
+        this method additionally rescales the values so that they
+        are all between 0 and 255 (8-bit).  In the case where the
+        source image has a bit depth < 8 the transformation preserves
+        all the information; where the source image has bit depth
+        > 8, then rescaling to 8-bit values loses precision.  No
+        dithering is performed.  Like :meth:`asRGB`, an alpha channel
+        in the source image will raise an exception.
+
+        This function returns a 4-tuple:
+        (*width*, *height*, *pixels*, *metadata*).
+        *width*, *height*, *metadata* are as per the :meth:`read` method.
+
+        *pixels* is the pixel data in boxed row flat pixel format.
+        """
+
+        return self._as_rescale(self.asRGB, 8)
+
+    def asRGBA8(self):
+        """Return the image data as RGBA pixels with 8-bits per
+        sample.  This method is similar to :meth:`asRGB8` and
+        :meth:`asRGBA`:  The result pixels have an alpha channel, _and_
+        values are rescale to the range 0 to 255.  The alpha channel is
+        synthesized if necessary.
+        """
+
+        return self._as_rescale(self.asRGBA, 8)
+
+    def asRGB(self):
+        """Return image as RGB pixels.  Greyscales are expanded into RGB
+        triplets.  An alpha channel in the source image will raise an
+        exception.  The return values are as for the :meth:`read` method
+        except that the *metadata* reflect the returned pixels, not the
+        source image.  In particular, for this method
+        ``metadata['greyscale']`` will be ``False``.
+        """
+
+        width,height,pixels,meta = self.asDirect()
+        if meta['alpha']:
+            raise Error("will not convert image with alpha channel to RGB")
+        if not meta['greyscale']:
+            return width,height,pixels,meta
+        meta['greyscale'] = False
+        typecode = 'BH'[meta['bitdepth'] > 8]
+        def iterrgb():
+            for row in pixels:
+                a = array(typecode, [0]) * 3 * width
+                for i in range(3):
+                    a[i::3] = row
+                yield a
+        return width,height,iterrgb(),meta
+
+    def asRGBA(self):
+        """Return image as RGBA pixels.  Greyscales are expanded into
+        RGB triplets; an alpha channel is synthesized if necessary.
+        The return values are as for the :meth:`read` method
+        except that the *metadata* reflect the returned pixels, not the
+        source image.  In particular, for this method
+        ``metadata['greyscale']`` will be ``False``, and
+        ``metadata['alpha']`` will be ``True``.
+        """
+
+        width,height,pixels,meta = self.asDirect()
+        if meta['alpha'] and not meta['greyscale']:
+            return width,height,pixels,meta
+        typecode = 'BH'[meta['bitdepth'] > 8]
+        maxval = 2**meta['bitdepth'] - 1
+        def newarray():
+            return array(typecode, [0]) * 4 * width
+        if meta['alpha'] and meta['greyscale']:
+            # LA to RGBA
+            def convert():
+                for row in pixels:
+                    # Create a fresh target row, then copy L channel
+                    # into first three target channels, and A channel
+                    # into fourth channel.
+                    a = newarray()
+                    for i in range(3):
+                        a[i::4] = row[0::2]
+                    a[3::4] = row[1::2]
+                    yield a
+        elif meta['greyscale']:
+            # L to RGBA
+            def convert():
+                for row in pixels:
+                    a = newarray()
+                    for i in range(3):
+                        a[i::4] = row
+                    a[3::4] = array(typecode, [maxval]) * width
+                    yield a
+        else:
+            assert not meta['alpha'] and not meta['greyscale']
+            # RGB to RGBA
+            def convert():
+                for row in pixels:
+                    a = newarray()
+                    for i in range(3):
+                        a[i::4] = row[i::3]
+                    a[3::4] = array(typecode, [maxval]) * width
+                    yield a
+        meta['alpha'] = True
+        meta['greyscale'] = False
+        return width,height,convert(),meta
+
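+    # Illustrative use of asRGBA() (a sketch): whatever the source colour
+    # type, each returned row carries 4 values per pixel, with a fully
+    # opaque alpha synthesized where the source had none:
+    #
+    #     w, h, pixels, meta = Reader(bytes=png_data).asRGBA()
+    #     assert meta['alpha'] and not meta['greyscale']
+    #     first_row = list(pixels)[0]
+    #     assert len(first_row) == 4 * w
+    #
+    # (png_data stands for the byte string of any PNG file.)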
+
+# === Legacy Version Support ===
+
+# :pyver:old:  PyPNG works on Python versions 2.3 and 2.2, but not
+# without some awkward problems.  Really PyPNG works on Python 2.4 (and
+# above); it works on Pythons 2.3 and 2.2 by virtue of fixing up
+# problems here.  It's a bit ugly (which is why it's hidden down here).
+#
+# Generally the strategy is one of pretending that we're running on
+# Python 2.4 (or above), and patching up the library support on earlier
+# versions so that it looks enough like Python 2.4.  When it comes to
+# Python 2.2 there is one thing we cannot patch: extended slices
+# http://www.python.org/doc/2.3/whatsnew/section-slices.html.
+# Instead we simply declare that features that are implemented using
+# extended slices will not work on Python 2.2.
+#
+# In order to work on Python 2.3 we fix up a recurring annoyance involving
+# the array type.  In Python 2.3 an array cannot be initialised with an
+# array, and it cannot be extended with a list (or other sequence).
+# Both of those are repeated issues in the code.  Whilst I would not
+# normally tolerate this sort of behaviour, here we "shim" a replacement
+# for array into place (and hope no-one notices).  You never read this.
+#
+# In an amusing case of warty hacks on top of warty hacks... the array
+# shimming we try and do only works on Python 2.3 and above (you can't
+# subclass array.array in Python 2.2).  So to get it working on Python
+# 2.2 we go for something much simpler and (probably) way slower.
+try:
+    array('B').extend([])
+    array('B', array('B'))
+except:
+    # Expect to get here on Python 2.3
+    try:
+        class _array_shim(array):
+            true_array = array
+            def __new__(cls, typecode, init=None):
+                super_new = super(_array_shim, cls).__new__
+                it = super_new(cls, typecode)
+                if init is None:
+                    return it
+                it.extend(init)
+                return it
+            def extend(self, extension):
+                super_extend = super(_array_shim, self).extend
+                if isinstance(extension, self.true_array):
+                    return super_extend(extension)
+                if not isinstance(extension, (list, str)):
+                    # Convert to list.  Allows iterators to work.
+                    extension = list(extension)
+                return super_extend(self.true_array(self.typecode, extension))
+        array = _array_shim
+    except:
+        # Expect to get here on Python 2.2
+        def array(typecode, init=()):
+            if type(init) == str:
+                return map(ord, init)
+            return list(init)
+
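+# To illustrate what the shim above works around: on Python 2.3 the plain
+# array type rejects both of the following (which the rest of this module
+# relies on):
+#
+#     array('B', array('B', [1, 2]))     # initialise from another array
+#     a = array('B'); a.extend([3, 4])   # extend with a list
+#
+# _array_shim accepts both by converting the argument into a true array
+# built from a list before extending.
+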
+# Further hacks to get it limping along on Python 2.2
+try:
+    enumerate
+except:
+    def enumerate(seq):
+        i=0
+        for x in seq:
+            yield i,x
+            i += 1
+
+try:
+    reversed
+except:
+    def reversed(l):
+        l = list(l)
+        l.reverse()
+        for x in l:
+            yield x
+
+try:
+    itertools
+except:
+    class _dummy_itertools:
+        pass
+    itertools = _dummy_itertools()
+    def _itertools_imap(f, seq):
+        for x in seq:
+            yield f(x)
+    itertools.imap = _itertools_imap
+    def _itertools_chain(*iterables):
+        for it in iterables:
+            for element in it:
+                yield element
+    itertools.chain = _itertools_chain
+
+
+
+# === Internal Test Support ===
+
+# This section comprises the tests that are internally validated (as
+# opposed to tests which produce output files that are externally
+# validated).  Primarily they are unittests.
+
+# Note that it is difficult to internally validate the results of
+# writing a PNG file.  The only thing we can do is read it back in
+# again, which merely checks consistency, not that the PNG file we
+# produce is valid.
+
+# Run the tests from the command line:
+# python -c 'import png;png.test()'
+
+from StringIO import StringIO
+import tempfile
+# http://www.python.org/doc/2.4.4/lib/module-unittest.html
+import unittest
+
+
+def test():
+    unittest.main(__name__)
+
+def topngbytes(name, rows, x, y, **k):
+    """Convenience function for creating a PNG file "in memory" as a
+    string.  Creates a :class:`Writer` instance using the keyword arguments,
+    then passes `rows` to its :meth:`Writer.write` method.  The resulting
+    PNG file is returned as a string.  `name` is used to identify the file for
+    debugging.
+    """
+
+    import os
+
+    print name
+    f = StringIO()
+    w = Writer(x, y, **k)
+    w.write(f, rows)
+    if os.environ.get('PYPNG_TEST_TMP'):
+        w = open(name, 'wb')
+        w.write(f.getvalue())
+        w.close()
+    return f.getvalue()
+
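+# Illustrative use of topngbytes() (a sketch): a 2x2 greyscale image
+# serialised entirely in memory:
+#
+#     data = topngbytes('tiny.png', [[0, 255], [255, 0]], 2, 2,
+#                       greyscale=True, bitdepth=8)
+#     assert data[:8] == _signature
+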
+def testWithIO(inp, out, f):
+    """Calls the function `f` with ``sys.stdin`` changed to `inp`
+    and ``sys.stdout`` changed to `out`.  They are restored when `f`
+    returns.  This function returns whatever `f` returns.
+    """
+    try:
+        oldin,sys.stdin = sys.stdin,inp
+        oldout,sys.stdout = sys.stdout,out
+        x = f()
+    finally:
+        sys.stdin = oldin
+        sys.stdout = oldout
+    return x
+
+class Test(unittest.TestCase):
+    # This member is used by the superclass.  If we don't define a new
+    # class here then when we use self.assertRaises() and the PyPNG code
+    # raises an assertion, we get no proper traceback.  I can't work
+    # out why, but defining a new class here means we get a proper
+    # traceback.
+    class failureException(Exception):
+        pass
+
+    def helperLN(self, n):
+        mask = (1 << n) - 1
+        # Use small chunk_limit so that multiple chunk writing is
+        # tested.  This makes it a test for Issue 20.
+        w = Writer(15, 17, greyscale=True, bitdepth=n, chunk_limit=99)
+        f = StringIO()
+        w.write_array(f, array('B', map(mask.__and__, range(1, 256))))
+        r = Reader(bytes=f.getvalue())
+        x,y,pixels,meta = r.read()
+        self.assertEqual(x, 15)
+        self.assertEqual(y, 17)
+        self.assertEqual(list(itertools.chain(*pixels)),
+                         map(mask.__and__, range(1,256)))
+    def testL8(self):
+        return self.helperLN(8)
+    def testL4(self):
+        return self.helperLN(4)
+    def testL2(self):
+        "Also tests asRGB8."
+        w = Writer(1, 4, greyscale=True, bitdepth=2)
+        f = StringIO()
+        w.write_array(f, array('B', range(4)))
+        r = Reader(bytes=f.getvalue())
+        x,y,pixels,meta = r.asRGB8()
+        self.assertEqual(x, 1)
+        self.assertEqual(y, 4)
+        for i,row in enumerate(pixels):
+            self.assertEqual(len(row), 3)
+            self.assertEqual(list(row), [0x55*i]*3)
+    def testP2(self):
+        "2-bit palette."
+        a = (255,255,255)
+        b = (200,120,120)
+        c = (50,99,50)
+        w = Writer(1, 4, bitdepth=2, palette=[a,b,c])
+        f = StringIO()
+        w.write_array(f, array('B', (0,1,1,2)))
+        r = Reader(bytes=f.getvalue())
+        x,y,pixels,meta = r.asRGB8()
+        self.assertEqual(x, 1)
+        self.assertEqual(y, 4)
+        self.assertEqual(list(pixels), map(list, [a, b, b, c]))
+    def testPtrns(self):
+        "Test colour type 3 and tRNS chunk (and 4-bit palette)."
+        a = (50,99,50,50)
+        b = (200,120,120,80)
+        c = (255,255,255)
+        d = (200,120,120)
+        e = (50,99,50)
+        w = Writer(3, 3, bitdepth=4, palette=[a,b,c,d,e])
+        f = StringIO()
+        w.write_array(f, array('B', (4, 3, 2, 3, 2, 0, 2, 0, 1)))
+        r = Reader(bytes=f.getvalue())
+        x,y,pixels,meta = r.asRGBA8()
+        self.assertEqual(x, 3)
+        self.assertEqual(y, 3)
+        c = c+(255,)
+        d = d+(255,)
+        e = e+(255,)
+        boxed = [(e,d,c),(d,c,a),(c,a,b)]
+        flat = map(lambda row: itertools.chain(*row), boxed)
+        self.assertEqual(map(list, pixels), map(list, flat))
+    def testRGBtoRGBA(self):
+        "asRGBA8() on colour type 2 source."""
+        # Test for Issue 26
+        r = Reader(bytes=_pngsuite['basn2c08'])
+        x,y,pixels,meta = r.asRGBA8()
+        # Test the pixels at row 9 columns 0 and 1.
+        row9 = list(pixels)[9]
+        self.assertEqual(row9[0:8],
+                         [0xff, 0xdf, 0xff, 0xff, 0xff, 0xde, 0xff, 0xff])
+    def testCtrns(self):
+        "Test colour type 2 and tRNS chunk."
+        # Test for Issue 25
+        r = Reader(bytes=_pngsuite['tbrn2c08'])
+        x,y,pixels,meta = r.asRGBA8()
+        # I just happen to know that the first pixel is transparent.
+        # In particular it should be #7f7f7f00
+        row0 = list(pixels)[0]
+        self.assertEqual(tuple(row0[0:4]), (0x7f, 0x7f, 0x7f, 0x00))
+    def testAdam7read(self):
+        """Adam7 interlace reading.
+        Specifically, test that for images in the PngSuite that
+        have both an interlaced and straightlaced pair that both
+        images from the pair produce the same array of pixels."""
+        for candidate in _pngsuite:
+            if not candidate.startswith('basn'):
+                continue
+            candi = candidate.replace('n', 'i')
+            if candi not in _pngsuite:
+                continue
+            print 'adam7 read', candidate
+            straight = Reader(bytes=_pngsuite[candidate])
+            adam7 = Reader(bytes=_pngsuite[candi])
+            # Just compare the pixels.  Ignore x,y (because they're
+            # likely to be correct?); metadata is ignored because the
+            # "interlace" member differs.  Lame.
+            straight = straight.read()[2]
+            adam7 = adam7.read()[2]
+            self.assertEqual(map(list, straight), map(list, adam7))
+    def testAdam7write(self):
+        """Adam7 interlace writing.
+        For each test image in the PngSuite, write an interlaced
+        and a straightlaced version.  Decode both, and compare results.
+        """
+        # Not such a great test, because the only way we can check what
+        # we have written is to read it back again.
+
+        for name,bytes in _pngsuite.items():
+            # Only certain colour types supported for this test.
+            if name[3:5] not in ['n0', 'n2', 'n4', 'n6']:
+                continue
+            it = Reader(bytes=bytes)
+            x,y,pixels,meta = it.read()
+            pngi = topngbytes('adam7wn'+name+'.png', pixels,
+              x=x, y=y, bitdepth=it.bitdepth,
+              greyscale=it.greyscale, alpha=it.alpha,
+              transparent=it.transparent,
+              interlace=False)
+            x,y,ps,meta = Reader(bytes=pngi).read()
+            it = Reader(bytes=bytes)
+            x,y,pixels,meta = it.read()
+            pngs = topngbytes('adam7wi'+name+'.png', pixels,
+              x=x, y=y, bitdepth=it.bitdepth,
+              greyscale=it.greyscale, alpha=it.alpha,
+              transparent=it.transparent,
+              interlace=True)
+            x,y,pi,meta = Reader(bytes=pngs).read()
+            self.assertEqual(map(list, ps), map(list, pi))
+    def testPGMin(self):
+        """Test that the command line tool can read PGM files."""
+        def do():
+            return _main(['testPGMin'])
+        s = StringIO()
+        s.write('P5 2 2 3\n')
+        s.write('\x00\x01\x02\x03')
+        s.flush()
+        s.seek(0)
+        o = StringIO()
+        testWithIO(s, o, do)
+        r = Reader(bytes=o.getvalue())
+        x,y,pixels,meta = r.read()
+        self.assert_(r.greyscale)
+        self.assertEqual(r.bitdepth, 2)
+    def testPAMin(self):
+        """Test that the command line tool can read PAM file."""
+        def do():
+            return _main(['testPAMin'])
+        s = StringIO()
+        s.write('P7\nWIDTH 3\nHEIGHT 1\nDEPTH 4\nMAXVAL 255\n'
+                'TUPLTYPE RGB_ALPHA\nENDHDR\n')
+        # The pixels in flat row flat pixel format
+        flat =  [255,0,0,255, 0,255,0,120, 0,0,255,30]
+        s.write(''.join(map(chr, flat)))
+        s.flush()
+        s.seek(0)
+        o = StringIO()
+        testWithIO(s, o, do)
+        r = Reader(bytes=o.getvalue())
+        x,y,pixels,meta = r.read()
+        self.assert_(r.alpha)
+        self.assert_(not r.greyscale)
+        self.assertEqual(list(itertools.chain(*pixels)), flat)
+    def testLA4(self):
+        """Create an LA image with bitdepth 4."""
+        bytes = topngbytes('la4.png', [[5, 12]], 1, 1,
+          greyscale=True, alpha=True, bitdepth=4)
+        sbit = Reader(bytes=bytes).chunk('sBIT')[1]
+        self.assertEqual(sbit, '\x04\x04')
+    def testPNMsbit(self):
+        """Test that PNM files can generates sBIT chunk."""
+        def do():
+            return _main(['testPNMsbit'])
+        s = StringIO()
+        s.write('P6 8 1 1\n')
+        for pixel in range(8):
+            s.write(struct.pack('<I', (0x4081*pixel)&0x10101)[:3])
+        s.flush()
+        s.seek(0)
+        o = StringIO()
+        testWithIO(s, o, do)
+        r = Reader(bytes=o.getvalue())
+        sbit = r.chunk('sBIT')[1]
+        self.assertEqual(sbit, '\x01\x01\x01')
+    def testLtrns0(self):
+        """Create greyscale image with tRNS chunk."""
+        return self.helperLtrns(0)
+    def testLtrns1(self):
+        """Using 1-tuple for transparent arg."""
+        return self.helperLtrns((0,))
+    def helperLtrns(self, transparent):
+        """Helper used by :meth:`testLtrns*`."""
+        pixels = zip(map(ord, '00384c545c403800'.decode('hex')))
+        o = StringIO()
+        w = Writer(8, 8, greyscale=True, bitdepth=1, transparent=transparent)
+        w.write_packed(o, pixels)
+        r = Reader(bytes=o.getvalue())
+        x,y,pixels,meta = r.asDirect()
+        self.assert_(meta['alpha'])
+        self.assert_(meta['greyscale'])
+        self.assertEqual(meta['bitdepth'], 1)
+    def testWinfo(self):
+        """Test the dictionary returned by a `read` method can be used
+        as args for :meth:`Writer`.
+        """
+        r = Reader(bytes=_pngsuite['basn2c16'])
+        info = r.read()[3]
+        w = Writer(**info)
+    def testPackedIter(self):
+        """Test iterator for row when using write_packed.
+
+        Regression test for Issue 47.
+        """
+        w = Writer(16, 2, greyscale=True, alpha=False, bitdepth=1)
+        o = StringIO()
+        w.write_packed(o, [itertools.chain([0x0a], [0xaa]),
+                           itertools.chain([0x0f], [0xff])])
+        r = Reader(bytes=o.getvalue())
+        x,y,pixels,info = r.asDirect()
+        pixels = list(pixels)
+        self.assertEqual(len(pixels), 2)
+        self.assertEqual(len(pixels[0]), 16)
+    def testInterlacedArray(self):
+        """Test that reading an interlaced PNG yields each row as an
+        array."""
+        r = Reader(bytes=_pngsuite['basi0g08'])
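+        # Merely touching the .tostring attribute (without calling it)
+        # raises AttributeError if the row is not an array.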
+        list(r.read()[2])[0].tostring
+    def testTrnsArray(self):
+        """Test that reading a type 2 PNG with tRNS chunk yields each
+        row as an array (using asDirect)."""
+        r = Reader(bytes=_pngsuite['tbrn2c08'])
+        list(r.asDirect()[2])[0].tostring
+
+    # Invalid file format tests.  These construct various badly
+    # formatted PNG files, then feed them into a Reader.  When
+    # everything is working properly, we should get FormatError
+    # exceptions raised.
+    def testEmpty(self):
+        """Test empty file."""
+
+        r = Reader(bytes='')
+        self.assertRaises(FormatError, r.asDirect)
+    def testSigOnly(self):
+        """Test file containing just signature bytes."""
+
+        r = Reader(bytes=_signature)
+        self.assertRaises(FormatError, r.asDirect)
+    def testExtraPixels(self):
+        """Test file that contains too many pixels."""
+
+        def eachchunk(chunk):
+            if chunk[0] != 'IDAT':
+                return chunk
+            data = chunk[1].decode('zip')
+            data += '\x00garbage'
+            data = data.encode('zip')
+            chunk = (chunk[0], data)
+            return chunk
+        self.assertRaises(FormatError, self.helperFormat, eachchunk)
+    def testNotEnoughPixels(self):
+        def eachchunk(chunk):
+            if chunk[0] != 'IDAT':
+                return chunk
+            # Remove last byte.
+            data = chunk[1].decode('zip')
+            data = data[:-1]
+            data = data.encode('zip')
+            return (chunk[0], data)
+        self.assertRaises(FormatError, self.helperFormat, eachchunk)
+    def helperFormat(self, f):
+        r = Reader(bytes=_pngsuite['basn0g01'])
+        o = StringIO()
+        def newchunks():
+            for chunk in r.chunks():
+                yield f(chunk)
+        write_chunks(o, newchunks())
+        r = Reader(bytes=o.getvalue())
+        return list(r.asDirect()[2])
+    def testBadFilter(self):
+        def eachchunk(chunk):
+            if chunk[0] != 'IDAT':
+                return chunk
+            data = chunk[1].decode('zip')
+            # Corrupt the first filter byte
+            data = '\x99' + data[1:]
+            data = data.encode('zip')
+            return (chunk[0], data)
+        self.assertRaises(FormatError, self.helperFormat, eachchunk)
+    def testFlat(self):
+        """Test read_flat."""
+        import hashlib
+
+        r = Reader(bytes=_pngsuite['basn0g02'])
+        x,y,pixel,meta = r.read_flat()
+        d = hashlib.md5(''.join(map(chr, pixel))).digest()
+        self.assertEqual(d.encode('hex'), '255cd971ab8cd9e7275ff906e5041aa0')
+
+    # numpy dependent tests.  These are skipped (with a message to
+    # sys.stderr) if numpy cannot be imported.
+    def testNumpyuint16(self):
+        """numpy uint16."""
+
+        try:
+            import numpy
+        except ImportError:
+            print >>sys.stderr, "skipping numpy test"
+            return
+
+        rows = [map(numpy.uint16, range(0,0x10000,0x5555))]
+        b = topngbytes('numpyuint16.png', rows, 4, 1,
+            greyscale=True, alpha=False, bitdepth=16)
+    def testNumpyuint8(self):
+        """numpy uint8."""
+
+        try:
+            import numpy
+        except ImportError:
+            print >>sys.stderr, "skipping numpy test"
+            return
+
+        rows = [map(numpy.uint8, range(0,0x100,0x55))]
+        b = topngbytes('numpyuint8.png', rows, 4, 1,
+            greyscale=True, alpha=False, bitdepth=8)
+    def testNumpybool(self):
+        """numpy bool."""
+
+        try:
+            import numpy
+        except ImportError:
+            print >>sys.stderr, "skipping numpy test"
+            return
+
+        rows = [map(numpy.bool, [0,1])]
+        b = topngbytes('numpybool.png', rows, 2, 1,
+            greyscale=True, alpha=False, bitdepth=1)
+
+
+# === Command Line Support ===
+
+def _dehex(s):
+    """Liberally convert from hex string to binary string."""
+    import re
+
+    # Remove all non-hexadecimal digits
+    s = re.sub(r'[^a-fA-F\d]', '', s)
+    return s.decode('hex')
+
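+# Illustrative behaviour of _dehex(): non-hexadecimal characters such as
+# whitespace are stripped before decoding, so
+#
+#     _dehex("89 50 4e 47\n0d 0a 1a 0a") == '\x89PNG\r\n\x1a\n'
+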
+# Copies of PngSuite test files taken
+# from http://www.schaik.com/pngsuite/pngsuite_bas_png.html
+# on 2009-02-19 by drj and converted to hex.
+# Some of these are not actually in PngSuite (but maybe they should
+# be?), they use the same naming scheme, but start with a capital
+# letter.
+_pngsuite = {
+  'basi0g01': _dehex("""
+89504e470d0a1a0a0000000d49484452000000200000002001000000012c0677
+cf0000000467414d41000186a031e8965f0000009049444154789c2d8d310ec2
+300c45dfc682c415187a00a42e197ab81e83b127e00c5639001363a580d8582c
+65c910357c4b78b0bfbfdf4f70168c19e7acb970a3f2d1ded9695ce5bf5963df
+d92aaf4c9fd927ea449e6487df5b9c36e799b91bdf082b4d4bd4014fe4014b01
+ab7a17aee694d28d328a2d63837a70451e1648702d9a9ff4a11d2f7a51aa21e5
+a18c7ffd0094e3511d661822f20000000049454e44ae426082
+"""),
+  'basi0g02': _dehex("""
+89504e470d0a1a0a0000000d49484452000000200000002002000000016ba60d
+1f0000000467414d41000186a031e8965f0000005149444154789c635062e860
+00e17286bb609c93c370ec189494960631366e4467b3ae675dcf10f521ea0303
+90c1ca006444e11643482064114a4852c710baea3f18c31918020c30410403a6
+0ac1a09239009c52804d85b6d97d0000000049454e44ae426082
+"""),
+  'basi0g04': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000200400000001e4e6f8
+bf0000000467414d41000186a031e8965f000000ae49444154789c658e5111c2
+301044171c141c141c041c843a287510ea20d441c041c141c141c04191102454
+03994998cecd7edcecedbb9bdbc3b2c2b6457545fbc4bac1be437347f7c66a77
+3c23d60db15e88f5c5627338a5416c2e691a9b475a89cd27eda12895ae8dfdab
+43d61e590764f5c83a226b40d669bec307f93247701687723abf31ff83a2284b
+a5b4ae6b63ac6520ad730ca4ed7b06d20e030369bd6720ed383290360406d24e
+13811f2781eba9d34d07160000000049454e44ae426082
+"""),
+  'basi0g08': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000200800000001211615
+be0000000467414d41000186a031e8965f000000b549444154789cb5905d0ac2
+3010849dbac81c42c47bf843cf253e8878b0aa17110f214bdca6be240f5d21a5
+94ced3e49bcd322c1624115515154998aa424822a82a5624a1aa8a8b24c58f99
+999908130989a04a00d76c2c09e76cf21adcb209393a6553577da17140a2c59e
+70ecbfa388dff1f03b82fb82bd07f05f7cb13f80bb07ad2fd60c011c3c588eef
+f1f4e03bbec7ce832dca927aea005e431b625796345307b019c845e6bfc3bb98
+769d84f9efb02ea6c00f9bb9ff45e81f9f280000000049454e44ae426082
+"""),
+  'basi0g16': _dehex("""
+89504e470d0a1a0a0000000d49484452000000200000002010000000017186c9
+fd0000000467414d41000186a031e8965f000000e249444154789cb5913b0ec2
+301044c7490aa8f85d81c3e4301c8f53a4ca0da8902c8144b3920b4043111282
+23bc4956681a6bf5fc3c5a3ba0448912d91a4de2c38dd8e380231eede4c4f7a1
+4677700bec7bd9b1d344689315a3418d1a6efbe5b8305ba01f8ff4808c063e26
+c60d5c81edcf6c58c535e252839e93801b15c0a70d810ae0d306b205dc32b187
+272b64057e4720ff0502154034831520154034c3df81400510cdf0015c86e5cc
+5c79c639fddba9dcb5456b51d7980eb52d8e7d7fa620a75120d6064641a05120
+b606771a05626b401a05f1f589827cf0fe44c1f0bae0055698ee8914fffffe00
+00000049454e44ae426082
+"""),
+  'basi2c08': _dehex("""
+89504e470d0a1a0a0000000d49484452000000200000002008020000018b1fdd
+350000000467414d41000186a031e8965f000000f249444154789cd59341aa04
+210c44abc07b78133d59d37333bd89d76868b566d10cf4675af8596431a11662
+7c5688919280e312257dd6a0a4cf1a01008ee312a5f3c69c37e6fcc3f47e6776
+a07f8bdaf5b40feed2d33e025e2ff4fe2d4a63e1a16d91180b736d8bc45854c5
+6d951863f4a7e0b66dcf09a900f3ffa2948d4091e53ca86c048a64390f662b50
+4a999660ced906182b9a01a8be00a56404a6ede182b1223b4025e32c4de34304
+63457680c93aada6c99b73865aab2fc094920d901a203f5ddfe1970d28456783
+26cffbafeffcd30654f46d119be4793f827387fc0d189d5bc4d69a3c23d45a7f
+db803146578337df4d0a3121fc3d330000000049454e44ae426082
+"""),
+  'basi2c16': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000201002000001db8f01
+760000000467414d41000186a031e8965f0000020a49444154789cd5962173e3
+3010853fcf1838cc61a1818185a53e56787fa13fa130852e3b5878b4b0b03081
+b97f7030070b53e6b057a0a8912bbb9163b9f109ececbc59bd7dcf2b45492409
+d66f00eb1dd83cb5497d65456aeb8e1040913b3b2c04504c936dd5a9c7e2c6eb
+b1b8f17a58e8d043da56f06f0f9f62e5217b6ba3a1b76f6c9e99e8696a2a72e2
+c4fb1e4d452e92ec9652b807486d12b6669be00db38d9114b0c1961e375461a5
+5f76682a85c367ad6f682ff53a9c2a353191764b78bb07d8ddc3c97c1950f391
+6745c7b9852c73c2f212605a466a502705c8338069c8b9e84efab941eb393a97
+d4c9fd63148314209f1c1d3434e847ead6380de291d6f26a25c1ebb5047f5f24
+d85c49f0f22cc1d34282c72709cab90477bf25b89d49f0f351822297e0ea9704
+f34c82bc94002448ede51866e5656aef5d7c6a385cb4d80e6a538ceba04e6df2
+480e9aa84ddedb413bb5c97b3838456df2d4fec2c7a706983e7474d085fae820
+a841776a83073838973ac0413fea2f1dc4a06e71108fda73109bdae48954ad60
+bf867aac3ce44c7c1589a711cf8a81df9b219679d96d1cec3d8bbbeaa2012626
+df8c7802eda201b2d2e0239b409868171fc104ba8b76f10b4da09f6817ffc609
+c413ede267fd1fbab46880c90f80eccf0013185eb48b47ba03df2bdaadef3181
+cb8976f18e13188768170f98c0f844bb78cb04c62ddac59d09fc3fa25dfc1da4
+14deb3df1344f70000000049454e44ae426082
+"""),
+  'basi3p08': _dehex("""
+89504e470d0a1a0a0000000d494844520000002000000020080300000133a3ba
+500000000467414d41000186a031e8965f00000300504c5445224400f5ffed77
+ff77cbffff110a003a77002222ffff11ff110000222200ffac5566ff66ff6666
+ff01ff221200dcffffccff994444ff005555220000cbcbff44440055ff55cbcb
+00331a00ffecdcedffffe4ffcbffdcdc44ff446666ff330000442200ededff66
+6600ffa444ffffaaeded0000cbcbfefffffdfffeffff0133ff33552a000101ff
+8888ff00aaaa010100440000888800ffe4cbba5b0022ff22663200ffff99aaaa
+ff550000aaaa00cb630011ff11d4ffaa773a00ff4444dc6b0066000001ff0188
+4200ecffdc6bdc00ffdcba00333300ed00ed7300ffff88994a0011ffff770000
+ff8301ffbabafe7b00fffeff00cb00ff999922ffff880000ffff77008888ffdc
+ff1a33000000aa33ffff009900990000000001326600ffbaff44ffffffaaff00
+770000fefeaa00004a9900ffff66ff22220000998bff1155ffffff0101ff88ff
+005500001111fffffefffdfea4ff4466ffffff66ff003300ffff55ff77770000
+88ff44ff00110077ffff006666ffffed000100fff5ed1111ffffff44ff22ffff
+eded11110088ffff00007793ff2200dcdc3333fffe00febabaff99ffff333300
+63cb00baba00acff55ffffdcffff337bfe00ed00ed5555ffaaffffdcdcff5555
+00000066dcdc00dc00dc83ff017777fffefeffffffcbff5555777700fefe00cb
+00cb0000fe010200010000122200ffff220044449bff33ffd4aa0000559999ff
+999900ba00ba2a5500ffcbcbb4ff66ff9b33ffffbaaa00aa42880053aa00ffaa
+aa0000ed00babaffff1100fe00000044009999990099ffcc99ba000088008800
+dc00ff93220000dcfefffeaa5300770077020100cb0000000033ffedff00ba00
+ff3333edffedffc488bcff7700aa00660066002222dc0000ffcbffdcffdcff8b
+110000cb00010155005500880000002201ffffcbffcbed0000ff88884400445b
+ba00ffbc77ff99ff006600baffba00777773ed00fe00003300330000baff77ff
+004400aaffaafffefe000011220022c4ff8800eded99ff99ff55ff002200ffb4
+661100110a1100ff1111dcffbabaffff88ff88010001ff33ffb98ed362000002
+a249444154789c65d0695c0b001806f03711a9904a94d24dac63292949e5a810
+d244588a14ca5161d1a1323973252242d62157d12ae498c8124d25ca3a11398a
+16e55a3cdffab0ffe7f77d7fcff3528645349b584c3187824d9d19d4ec2e3523
+9eb0ae975cf8de02f2486d502191841b42967a1ad49e5ddc4265f69a899e26b5
+e9e468181baae3a71a41b95669da8df2ea3594c1b31046d7b17bfb86592e4cbe
+d89b23e8db0af6304d756e60a8f4ad378bdc2552ae5948df1d35b52143141533
+33bbbbababebeb3b3bc9c9c9c6c6c0c0d7b7b535323225a5aa8a02024a4bedec
+0a0a2a2bcdcd7d7cf2f3a9a9c9cdcdd8b8adcdd5b5ababa828298982824a4ab2
+b21212acadbdbc1414e2e24859b9a72730302f4f49292c4c57373c9c0a0b7372
+8c8c1c1c3a3a92936d6dfdfd293e3e26262a4a4eaea2424b4b5fbfbc9c323278
+3c0b0ba1303abaae8ecdeeed950d6669a9a7a7a141d4de9e9d5d5cdcd2229b94
+c572716132f97cb1d8db9bc3110864a39795d9db6b6a26267a7a9a98d4d6a6a7
+cb76090ef6f030354d4d75766e686030545464cb393a1a1ac6c68686eae8f8f9
+a9aa4644c8b66d6e1689dcdd2512a994cb35330b0991ad9f9b6b659596a6addd
+d8282fafae5e5323fb8f41d01f76c22fd8061be01bfc041a0323e1002c81cd30
+0b9ec027a0c930014ec035580fc3e112bc069a0b53e11c0c8095f00176c163a0
+e5301baec06a580677600ddc05ba0f13e120bc81a770133ec355a017300d4ec2
+0c7800bbe1219c02fa08f3e13c1c85dbb00a2ec05ea0dff00a6ec15a98027360
+070c047a06d7e1085c84f1b014f6c03fa0b33018b6c0211801ebe018fc00da0a
+6f61113c877eb01d4ec317a085700f26c130f80efbe132bc039a0733e106fc81
+f7f017f6c10aa0d1300a0ec374780943e1382c06fa0a9b60238c83473016cec0
+02f80f73fefe1072afc1e50000000049454e44ae426082
+"""),
+  'basi6a08': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000200806000001047d4a
+620000000467414d41000186a031e8965f0000012049444154789cc595414ec3
+3010459fa541b8bbb26641b8069b861e8b4d12c1c112c1452a710a2a65d840d5
+949041fc481ec98ae27c7f3f8d27e3e4648047600fec0d1f390fbbe2633a31e2
+9389e4e4ea7bfdbf3d9a6b800ab89f1bd6b553cfcbb0679e960563d72e0a9293
+b7337b9f988cc67f5f0e186d20e808042f1c97054e1309da40d02d7e27f92e03
+6cbfc64df0fc3117a6210a1b6ad1a00df21c1abcf2a01944c7101b0cb568a001
+909c9cf9e399cf3d8d9d4660a875405d9a60d000b05e2de55e25780b7a5268e0
+622118e2399aab063a815808462f1ab86890fc2e03e48bb109ded7d26ce4bf59
+0db91bac0050747fec5015ce80da0e5700281be533f0ce6d5900b59bcb00ea6d
+200314cf801faab200ea752803a8d7a90c503a039f824a53f4694e7342000000
+0049454e44ae426082
+"""),
+  'basn0g01': _dehex("""
+89504e470d0a1a0a0000000d49484452000000200000002001000000005b0147
+590000000467414d41000186a031e8965f0000005b49444154789c2dccb10903
+300c05d1ebd204b24a200b7a346f90153c82c18d0a61450751f1e08a2faaead2
+a4846ccea9255306e753345712e211b221bf4b263d1b427325255e8bdab29e6f
+6aca30692e9d29616ee96f3065f0bf1f1087492fd02f14c90000000049454e44
+ae426082
+"""),
+  'basn0g02': _dehex("""
+89504e470d0a1a0a0000000d49484452000000200000002002000000001ca13d
+890000000467414d41000186a031e8965f0000001f49444154789c6360085df5
+1f8cf1308850c20053868f0133091f6390b90700bd497f818b0989a900000000
+49454e44ae426082
+"""),
+  # A version of basn0g04 dithered down to 3 bits.
+  'Basn0g03': _dehex("""
+89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
+2900000001734249540371d88211000000fd49444154789c6d90d18906210c84
+c356f22356b2889588604301b112112b11d94a96bb495cf7fe87f32d996f2689
+44741cc658e39c0b118f883e1f63cc89dafbc04c0f619d7d898396c54b875517
+83f3a2e7ac09a2074430e7f497f00f1138a5444f82839c5206b1f51053cca968
+63258821e7f2b5438aac16fbecc052b646e709de45cf18996b29648508728612
+952ca606a73566d44612b876845e9a347084ea4868d2907ff06be4436c4b41a3
+a3e1774285614c5affb40dbd931a526619d9fa18e4c2be420858de1df0e69893
+a0e3e5523461be448561001042b7d4a15309ce2c57aef2ba89d1c13794a109d7
+b5880aa27744fc5c4aecb5e7bcef5fe528ec6293a930690000000049454e44ae
+426082
+"""),
+  'basn0g04': _dehex("""
+89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
+290000000467414d41000186a031e8965f0000004849444154789c6360601014
+545232367671090d4d4b2b2f6720430095dbd1418e002a77e64c720450b9ab56
+912380caddbd9b1c0154ee9933e408a072efde25470095fbee1d1902001f14ee
+01eaff41fa0000000049454e44ae426082
+"""),
+  'basn0g08': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000200800000000561125
+280000000467414d41000186a031e8965f0000004149444154789c6364602400
+1408c8b30c05058c0f0829f8f71f3f6079301c1430ca11906764a2795c0c0605
+8c8ff0cafeffcff887e67131181430cae0956564040050e5fe7135e2d8590000
+000049454e44ae426082
+"""),
+  'basn0g16': _dehex("""
+89504e470d0a1a0a0000000d49484452000000200000002010000000000681f9
+6b0000000467414d41000186a031e8965f0000005e49444154789cd5d2310ac0
+300c4351395bef7fc6dca093c0287b32d52a04a3d98f3f3880a7b857131363a0
+3a82601d089900dd82f640ca04e816dc06422640b7a03d903201ba05b7819009
+d02d680fa44c603f6f07ec4ff41938cf7f0016d84bd85fae2b9fd70000000049
+454e44ae426082
+"""),
+  'basn2c08': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000200802000000fc18ed
+a30000000467414d41000186a031e8965f0000004849444154789cedd5c10900
+300c024085ec91fdb772133b442bf4a1f8cee12bb40d043b800a14f81ca0ede4
+7d4c784081020f4a871fc284071428f0a0743823a94081bb7077a3c00182b1f9
+5e0f40cf4b0000000049454e44ae426082
+"""),
+  'basn2c16': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000201002000000ac8831
+e00000000467414d41000186a031e8965f000000e549444154789cd596c10a83
+301044a7e0417fcb7eb7fdadf6961e06039286266693cc7a188645e43dd6a08f
+1042003e2fe09aef6472737e183d27335fcee2f35a77b702ebce742870a23397
+f3edf2705dd10160f3b2815fe8ecf2027974a6b0c03f74a6e4192843e75c6c03
+35e8ec3202f5e84c0181bbe8cca967a00d9df3491bb040671f2e6087ce1c2860
+8d1e05f8c7ee0f1d00b667e70df44467ef26d01fbd9bc028f42860f71d188bce
+fb8d3630039dbd59601e7ab3c06cf428507f0634d039afdc80123a7bb1801e7a
+b1802a7a14c89f016d74ce331bf080ce9e08f8414f04bca133bfe642fe5e07bb
+c4ec0000000049454e44ae426082
+"""),
+  'basn6a08': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000200806000000737a7a
+f40000000467414d41000186a031e8965f0000006f49444154789cedd6310a80
+300c46e12764684fa1f73f55048f21c4ddc545781d52e85028fc1f4d28d98a01
+305e7b7e9cffba33831d75054703ca06a8f90d58a0074e351e227d805c8254e3
+1bb0420f5cdc2e0079208892ffe2a00136a07b4007943c1004d900195036407f
+011bf00052201a9c160fb84c0000000049454e44ae426082
+"""),
+  'cs3n3p08': _dehex("""
+89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
+c60000000467414d41000186a031e8965f0000000373424954030303a392a042
+00000054504c544592ff0000ff9200ffff00ff0000dbff00ff6dffb600006dff
+b6ff00ff9200dbff000049ffff2400ff000024ff0049ff0000ffdb00ff4900ff
+b6ffff0000ff2400b6ffffdb000092ffff6d000024ffff49006dff00df702b17
+0000004b49444154789c85cac70182000000b1b3625754b0edbfa72324ef7486
+184ed0177a437b680bcdd0031c0ed00ea21f74852ed00a1c9ed0086da0057487
+6ed0121cd6d004bda0013a421ff803224033e177f4ae260000000049454e44ae
+426082
+"""),
+  's09n3p02': _dehex("""
+89504e470d0a1a0a0000000d49484452000000090000000902030000009dffee
+830000000467414d41000186a031e8965f000000037342495404040477f8b5a3
+0000000c504c544500ff000077ffff00ffff7700ff5600640000001f49444154
+789c63600002fbff0c0c56ab19182ca381581a4283f82071200000696505c36a
+437f230000000049454e44ae426082
+"""),
+  'tbgn3p08': _dehex("""
+89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
+c60000000467414d41000186a031e8965f00000207504c54457f7f7fafafafab
+abab110000222200737300999999510d00444400959500959595e6e600919191
+8d8d8d620d00898989666600b7b700911600000000730d007373736f6f6faaaa
+006b6b6b676767c41a00cccc0000f30000ef00d51e0055555567670000dd0051
+515100d1004d4d4de61e0038380000b700160d0d00ab00560d00090900009500
+009100008d003333332f2f2f2f2b2f2b2b000077007c7c001a05002b27000073
+002b2b2b006f00bb1600272727780d002323230055004d4d00cc1e00004d00cc
+1a000d00003c09006f6f00002f003811271111110d0d0d55554d090909001100
+4d0900050505000d00e2e200000900000500626200a6a6a6a2a2a29e9e9e8484
+00fb00fbd5d500801100800d00ea00ea555500a6a600e600e6f7f700e200e233
+0500888888d900d9848484c01a007777003c3c05c8c8008080804409007c7c7c
+bb00bbaa00aaa600a61e09056262629e009e9a009af322005e5e5e05050000ee
+005a5a5adddd00a616008d008d00e20016050027270088110078780000c40078
+00787300736f006f44444400aa00c81e004040406600663c3c3c090000550055
+1a1a00343434d91e000084004d004d007c004500453c3c00ea1e00222222113c
+113300331e1e1efb22001a1a1a004400afaf00270027003c001616161e001e0d
+160d2f2f00808000001e00d1d1001100110d000db7b7b7090009050005b3b3b3
+6d34c4230000000174524e530040e6d86600000001624b474402660b7c640000
+01f249444154789c6360c0048c8c58049100575f215ee92e6161ef109cd2a15e
+4b9645ce5d2c8f433aa4c24f3cbd4c98833b2314ab74a186f094b9c2c27571d2
+6a2a58e4253c5cda8559057a392363854db4d9d0641973660b0b0bb76bb16656
+06970997256877a07a95c75a1804b2fbcd128c80b482a0b0300f8a824276a9a8
+ec6e61612b3e57ee06fbf0009619d5fac846ac5c60ed20e754921625a2daadc6
+1967e29e97d2239c8aec7e61fdeca9cecebef54eb36c848517164514af16169e
+866444b2b0b7b55534c815cc2ec22d89cd1353800a8473100a4485852d924a6a
+412adc74e7ad1016ceed043267238c901716f633a812022998a4072267c4af02
+92127005c0f811b62830054935ce017b38bf0948cc5c09955f030a24617d9d46
+63371fd940b0827931cbfdf4956076ac018b592f72d45594a9b1f307f3261b1a
+084bc2ad50018b1900719ba6ba4ca325d0427d3f6161449486f981144cf3100e
+2a5f2a1ce8683e4ddf1b64275240c8438d98af0c729bbe07982b8a1c94201dc2
+b3174c9820bcc06201585ad81b25b64a2146384e3798290c05ad280a18c0a62e
+e898260c07fca80a24c076cc864b777131a00190cdfa3069035eccbc038c30e1
+3e88b46d16b6acc5380d6ac202511c392f4b789aa7b0b08718765990111606c2
+9e854c38e5191878fbe471e749b0112bb18902008dc473b2b2e8e72700000000
+49454e44ae426082
+"""),
+  'Tp2n3p08': _dehex("""
+89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
+c60000000467414d41000186a031e8965f00000300504c544502ffff80ff05ff
+7f0703ff7f0180ff04ff00ffff06ff000880ff05ff7f07ffff06ff000804ff00
+0180ff02ffff03ff7f02ffff80ff0503ff7f0180ffff0008ff7f0704ff00ffff
+06ff000802ffffff7f0704ff0003ff7fffff0680ff050180ff04ff000180ffff
+0008ffff0603ff7f80ff05ff7f0702ffffff000880ff05ffff0603ff7f02ffff
+ff7f070180ff04ff00ffff06ff000880ff050180ffff7f0702ffff04ff0003ff
+7fff7f0704ff0003ff7f0180ffffff06ff000880ff0502ffffffff0603ff7fff
+7f0702ffff04ff000180ff80ff05ff0008ff7f07ffff0680ff0504ff00ff0008
+0180ff03ff7f02ffff02ffffffff0604ff0003ff7f0180ffff000880ff05ff7f
+0780ff05ff00080180ff02ffffff7f0703ff7fffff0604ff00ff7f07ff0008ff
+ff0680ff0504ff0002ffff0180ff03ff7fff0008ffff0680ff0504ff000180ff
+02ffff03ff7fff7f070180ff02ffff04ff00ffff06ff0008ff7f0780ff0503ff
+7fffff06ff0008ff7f0780ff0502ffff03ff7f0180ff04ff0002ffffff7f07ff
+ff0604ff0003ff7fff00080180ff80ff05ffff0603ff7f0180ffff000804ff00
+80ff0502ffffff7f0780ff05ffff0604ff000180ffff000802ffffff7f0703ff
+7fff0008ff7f070180ff03ff7f02ffff80ff05ffff0604ff00ff0008ffff0602
+ffff0180ff04ff0003ff7f80ff05ff7f070180ff04ff00ff7f0780ff0502ffff
+ff000803ff7fffff0602ffffff7f07ffff0680ff05ff000804ff0003ff7f0180
+ff02ffff0180ffff7f0703ff7fff000804ff0080ff05ffff0602ffff04ff00ff
+ff0603ff7fff7f070180ff80ff05ff000803ff7f0180ffff7f0702ffffff0008
+04ff00ffff0680ff0503ff7f0180ff04ff0080ff05ffff06ff000802ffffff7f
+0780ff05ff0008ff7f070180ff03ff7f04ff0002ffffffff0604ff00ff7f07ff
+000880ff05ffff060180ff02ffff03ff7f80ff05ffff0602ffff0180ff03ff7f
+04ff00ff7f07ff00080180ffff000880ff0502ffff04ff00ff7f0703ff7fffff
+06ff0008ffff0604ff00ff7f0780ff0502ffff03ff7f0180ffdeb83387000000
+f874524e53000000000000000008080808080808081010101010101010181818
+1818181818202020202020202029292929292929293131313131313131393939
+393939393941414141414141414a4a4a4a4a4a4a4a52525252525252525a5a5a
+5a5a5a5a5a62626262626262626a6a6a6a6a6a6a6a73737373737373737b7b7b
+7b7b7b7b7b83838383838383838b8b8b8b8b8b8b8b94949494949494949c9c9c
+9c9c9c9c9ca4a4a4a4a4a4a4a4acacacacacacacacb4b4b4b4b4b4b4b4bdbdbd
+bdbdbdbdbdc5c5c5c5c5c5c5c5cdcdcdcdcdcdcdcdd5d5d5d5d5d5d5d5dedede
+dededededee6e6e6e6e6e6e6e6eeeeeeeeeeeeeeeef6f6f6f6f6f6f6f6b98ac5
+ca0000012c49444154789c6360e7169150d230b475f7098d4ccc28a96ced9e32
+63c1da2d7b8e9fb97af3d1fb8f3f18e8a0808953544a4dd7c4c2c9233c2621bf
+b4aab17fdacce5ab36ee3a72eafaad87efbefea68702362e7159652d031b07cf
+c0b8a4cce28aa68e89f316aedfb4ffd0b92bf79fbcfcfe931e0a183904e55435
+8decdcbcc22292b3caaadb7b27cc5db67af3be63e72fdf78fce2d31f7a2860e5
+119356d037b374f10e8a4fc92eaa6fee99347fc9caad7b0f9ebd74f7c1db2fbf
+e8a180995f484645dbdccad12f38363dafbcb6a573faeca5ebb6ed3e7ce2c29d
+e76fbefda38702063e0149751d537b67ff80e8d4dcc29a86bea97316add9b0e3
+c0e96bf79ebdfafc971e0a587885e515f58cad5d7d43a2d2720aeadaba26cf5a
+bc62fbcea3272fde7efafac37f3a28000087c0fe101bc2f85f0000000049454e
+44ae426082
+"""),
+  'tbbn1g04': _dehex("""
+89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
+290000000467414d41000186a031e8965f0000000274524e530007e8f7589b00
+000002624b47440000aa8d23320000013e49444154789c55d1cd4b024118c7f1
+efbe6419045b6a48a72d352808b435284f9187ae9b098627a1573a19945beba5
+e8129e8222af11d81e3a4545742de8ef6af6d5762e0fbf0fc33c33f36085cb76
+bc4204778771b867260683ee57e13f0c922df5c719c2b3b6c6c25b2382cea4b9
+9f7d4f244370746ac71f4ca88e0f173a6496749af47de8e44ba8f3bf9bdfa98a
+0faf857a7dd95c7dc8d7c67c782c99727997f41eb2e3c1e554152465bb00fe8e
+b692d190b718d159f4c0a45c4435915a243c58a7a4312a7a57913f05747594c6
+46169866c57101e4d4ce4d511423119c419183a3530cc63db88559ae28e7342a
+1e9c8122b71139b8872d6e913153224bc1f35b60e4445bd4004e20ed6682c759
+1d9873b3da0fbf50137dc5c9bde84fdb2ec8bde1189e0448b63584735993c209
+7a601bd2710caceba6158797285b7f2084a2f82c57c01a0000000049454e44ae
+426082
+"""),
+  'tbrn2c08': _dehex("""
+89504e470d0a1a0a0000000d4948445200000020000000200802000000fc18ed
+a30000000467414d41000186a031e8965f0000000674524e53007f007f007f8a
+33334f00000006624b474400ff0000000033277cf3000004d649444154789cad
+965f68537714c73fd912d640235e692f34d0406fa0c1663481045ab060065514
+56660a295831607df0a1488715167060840a1614e6431e9cb34fd2c00a762c85
+f6a10f816650c13b0cf40612e1822ddc4863bd628a8924d23d6464f9d3665dd9
+f7e977ce3dbff3cd3939bfdfef6bb87dfb364782dbed065ebe7cd93acc78b4ec
+a228debd7bb7bfbfbfbbbbfb7f261045311a8d261209405194274f9ea4d3e916
+f15f1c3eb5dd6e4fa5fecce526239184a2b0b8486f6f617171b1f5ae4311381c
+8e57af5e5dbd7a351088150a78bd389d44222c2f93cdfe66b7db8f4ee07038b6
+b6b6bebf766d7e7e7e60a06432313b4ba984c3c1c4049a46b95c5a58583822c1
+dbb76f27272733d1b9df853c3030c0f232562b9108cf9eb1b888d7cbf030abab
+31abd5fa1f08dc6ef7e7cf9f1f3f7e1c8944745d4f1400c62c001313acad21cb
+b8dd2c2c603271eb1640341aad4c6d331aa7e8c48913a150a861307ecc11e964
+74899919bc5e14e56fffc404f1388502f178dceff7ef4bf0a5cfe7abb533998c
+e5f9ea2f1dd88c180d64cb94412df3dd57e83a6b3b3c7a84c98420100c72fd3a
+636348bae726379fe69e8e8d8dbd79f3a6558b0607079796965256479b918085
+7b02db12712b6181950233023f3f647494ee6e2e5ea45864cce5b8a7fe3acffc
+3aebb22c2bd5d20e22d0757d7b7bbbbdbd3d94a313bed1b0aa3cd069838b163a
+8d4c59585f677292d0b84d9a995bd337def3fe6bbe5e6001989b9b6bfe27ea08
+36373781542ab56573248b4c5bc843ac4048c7ab21aa24ca00534c25482828a3
+8c9ee67475bbaaaab22cb722c8e57240a150301a8d219de94e44534d7d90e885
+87acb0e2c4f9800731629b6c5ee14a35a6b9887d2a0032994cb9cf15dbe59650
+ff7b46a04c9a749e7cc5112214266cc65c31354d5b5d5d3d90209bcd5616a552
+a95c2e87f2a659bd9ee01c2cd73964e438f129a6aa9e582c363838b80f81d7eb
+5555b56a2a8ad2d9d7affd0409f8015c208013fea00177b873831b0282c964f2
+783c1e8fa7582cee5f81a669b5e6eeeeaee58e8559b0c233d8843c7c0b963a82
+34e94b5cb2396d7d7d7db22c8ba258fb0afd43f0e2c58b919191ba9de9b4d425
+118329b0c3323c8709d02041b52b4ea7f39de75d2a934a2693c0a953a76a93d4
+5d157ebf7f6565a5542a553df97c5e10045dd731c130b86113cc300cbd489224
+08422a952a140a95788fc763b1d41558d7a2d7af5f5fb870a1d6a3aaaacd6603
+18802da84c59015bd2e6897b745d9765b99a1df0f97c0daf74e36deaf7fbcd66
+73ad2797cb89a2c839880188a2e8743a8bc5a22ccbba5e376466b3b9bdbdbd21
+6123413a9d0e0402b51e4dd3bababa788eb022b85caeb6b6364551b6b7b76942
+43f7f727007a7a7a04a1ee8065b3595fde2768423299ac1ec6669c3973e65004
+c0f8f878ad69341a33994ced2969c0d0d0502412f9f8f163f3a7fd654b474787
+288ad53e74757535df6215b85cae60302849d2410aecc037f9f2e5cbd5b5c160
+680eb0dbede170381c0e7ff8f0a185be3b906068684892a4ca7a6f6faff69328
+8ad3d3d3f7efdfdfdbdbfb57e96868a14d0d0643381c96242997cbe5f3794010
+84603078fcf8f1d6496bd14a3aba5c2ea7d369341a5555b5582c8140e0fcf9f3
+1b1b1b87cf4eeb0a8063c78e45a3d19e9e1ebfdfdf5a831e844655d18093274f
+9e3d7bf6d3a74f3b3b3b47c80efc05ff7af28fefb70d9b0000000049454e44ae
+426082
+"""),
+  'basn6a16': _dehex("""
+89504e470d0a1a0a0000000d494844520000002000000020100600000023eaa6
+b70000000467414d41000186a031e8965f00000d2249444154789cdd995f6c1c
+d775c67ff38fb34b724d2ee55a8e4b04a0ac87049100cab4dbd8c6528902cb4d
+10881620592e52d4325ac0905bc98a94025e71fd622cb5065ac98a0c283050c0
+728a00b6e542a1d126885cd3298928891d9a0444037e904434951d4b90b84b2f
+c9dde1fcebc33977a95555348f411e16dfce9d3b77ee77eebde77ce78c95a669
+0ad07c17009a13edd898b87dfb1fcb7d2b4d1bff217f33df80deb1e6267df0ff
+c1e6e6dfafdf1f5a7fd30f9aef66b6d546dd355bf02c40662e3307f9725a96c6
+744c3031f83782f171c148dbc3bf1774f5dad1e79d6f095a3f54d4fbec5234ef
+d9a2f8d73afe4f14f57ef4f42def7b44f19060f06b45bddf1c5534d77fd922be
+2973a15a82e648661c6e3240aa3612ead952b604bde57458894f29deaf133bac
+13d2766f5227a4a3b8cf08da7adfd6fbd6bd8a4fe9dbb43d35e3dfa3f844fbf8
+9119bf4f7144094fb56333abf8a86063ca106f94b3a3b512343765e60082097f
+1bb86ba72439a653519b09f5cee1ce61c897d37eedf5553580ae60f4af8af33a
+b14fd400b6a0f34535c0434afc0b3a9f07147527a5fa7ca218ff56c74d74dc3f
+155cfd3325fc278acf2ae1cb4a539f5f9937c457263b0bd51234c732a300cdd1
+cc1840f0aaff54db0e4874ed5a9b5d6d27d4bb36746d80de72baa877ff4b275a
+d7895ed1897ea4139b5143fcbb1a62560da1ed9662aaed895ec78a91c18795b8
+5e07ab4af8ba128e95e682e0728bf8f2e5ae815a091a53d902ac1920d8e05f06
+589de8d8d66680789f4e454fb9d9ec66cd857af796ee2d902fa73fd5bba775a2
+153580ae44705ed0d37647d15697cb8f14bfa3e3e8fdf8031d47af571503357c
+f30d25acedcbbf135c9a35c49766ba07ab255859e8ec03684e66860182dff8f7
+0304bff6ff1c20fc81b7afdd00a71475539a536e36bb5973a19e3b923b02bde5
+e4efd4003ac170eb2d13fe274157afedbd82d6fb3a9a1e85e4551d47cf7078f8
+9671fe4289ebf5f2bf08d63f37c4eb4773c55a0996efeefa0ca011671d8060ca
+2f0004c7fcc300e166ef0240f825efe3361f106d57d423d0723f7acacd66376b
+2ed47b7a7a7a205f4ef4ac4691e0aad9aa0d41cf13741c3580a506487574ddca
+61a8c403c1863ebfbcac3475168b2de28b8b3d77544bb05ce92a02aceced3c0d
+d0cc65ea371b201cf1c601c24dde1c4078cedbdeb60322f50126a019bf6edc9b
+39e566b39b3517eaf97c3e0fbde5e4491d45bd74537145d155b476aa0176e868
+c6abebf30dbd5e525c54ac8e18e2d56abeb756827a3d970358a97416019a6f64
+f60004fdfe1580d5c98e618070cc1b05887eee7e0d209a70db7d8063029889b4
+c620ead78d7b33a7dc6c76b3e6427ddddbebde867c393aa7845e5403e8ca794a
+d0d6fb897af5f03525fe5782f5e7046bdaef468bf88d1debc6ab25583cd17310
+6079b9ab0ba059c914018245bf076075b5a303200c3c1f209a733701444fbbaf
+00c4134ebb016c5d0b23614c243701cdf875e3decce9349bddacb9505fbf7dfd
+76e82d87736a00f5d2b5ffd4b7dce2719a4d25ae717ee153c1abef18e257cfad
+7fa45682da48ef38c052b53b0fd06864b300c151ff08c0ea431de701a287dd5f
+004497dc7b01a253ee3e80b8c7f91c20f967fb6fdb7c80ada7d8683723614c24
+3701cdf875e3decc29379bddacb950ef3fd47f08f2e5a61ea4aa2a3eb757cd55
+13345efcfa59c12b2f19e2578ef77fb75a82854ffbee01a83f977b11a031931d
+040802df07082b5e11207cc17b1e209a770700e2df0a83e409fb7580f827c230
+99b06fd901fb058d6835dacd481813c94d40337eddb83773cacd66376b2ed437
+bebcf165e82d2f4e4beb7f3fa6e652c2d7ee10bc78c010bfb87fe3c95a09ae9f
+bd732740bd2fb700d0f865f64180e059ff044018ca0ca28a5b04883f701e0088
+bfec7c0c909cb71f0448c6ec518074b375012079d9dedf66004bcfbc51eb2dd1
+aadacd481813c94d40337eddb83773cacd66376b2ed487868686205fbe7c49ef
+5605a73f34c4a7a787eeab96e0da81bb4e022c15ba27019a5b339300e16bf286
+a8eae601e25866907cdf3e0890acb36f00245fb57f05904e59c300e92561946e
+b2e600d209ab7d07f04d458dfb46ad1bd16ab49b913026929b8066fcba716fe6
+949bcd6ed65ca8ef7e7cf7e3d05b7e7c8f217ee6cdddbb6a25a856f37980e0c7
+fe4e80a82623c48193014846ec7180f4acf518409aca0cd28a5504e03b32c374
+de1a00608a0240faaa327a4b19fe946fb6f90054dbb5f2333d022db56eb4966a
+3723614c243701cdf8f556bea8a7dc6c76b3e66bd46584ddbbcebc0990cf4b0f
+ff4070520c282338a7e26700ec725202b01e4bcf0258963c6f1d4d8f0030cb20
+805549c520930c03584fa522b676f11600ffc03fde3e1b3489a9c9054c9aa23b
+c08856a3dd8c843191dc0434e3d78d7b33a75c36fb993761f7ae5a69f72ef97f
+e6ad336fed7e1c60e8bee96980bbdebbb60da07b7069062033d9dc0ae03d296f
+70ab511ec071640676252902d833c916007b3e1900b0a6d2028035968e025861
+ea01581369fb11488c34d18cbc95989afccca42baad65ba2d5683723614c24d7
+8066fcbab8b7e96918baaf5aaa56219f975fb50a43f7c9bde90fa73f1c1a02d8
+78f2e27e803b77ca08b90519315b6fe400fc1392097a9eccc0ad444500e70199
+a1331f0f00d8934901c07e5d526ceb87c2d07e2579badd005a2b31a5089391b7
+1253358049535a6add8856dd0146c298482e01ede27ed878b256ba7600ee3a09
+c18fc1df09fe01084ec25defc1b56db0f1a4f4bd78e0e2818d2f0334e7330300
+7df7c888b917e50dd9c1c60c80efcb0cbc63e1f700bce7c31700dccbd1060027
+8add9b0de06c8e2f00d84962b7d7030e2a61538331b98051f92631bd253f336a
+dd8856a3dd44c25c390efddfad96ae9f853b77c25201ba27c533b8bdf28b6ad0
+3d084b33d2e7fa59099e9901b8f2d29597fa0f01848f78e70082117f1ca07b76
+6910209b9519f895a008d031bbba05c09d8f06005c5b18b8fba25300cea6780e
+c03e911c6ccf06d507b48a4fa606634a114609de929f9934c5a87511ad57cfc1
+fa476aa5854fa1ef1e3910b905686e85cc24c40138198915f133d2d6dc2a7dea
+7df2ccc2a752faf2cec1d577aebeb37e3b4034eeee0008dff3be0e6b923773b4
+7904c0ef9119767cb4fa1500ef1361e08e452500f71561e84cc4ed3e20fab6a2
+c905f40cb76a3026bf3319b91ac2e46792a6dcd801ebc6aba5da08f48ecb81c8
+bd088d5f42f6417191de93908c803d0e76199292b485af41b60e8d9c3c537f0e
+8211f0c7211a077707dc18b931b2ee6d80a4d7ae024491ebc24d4a708ff70680
+7f25e807e8785f1878e322d6ddaf453f0770ff2dfa769b01423dbbad72a391b6
+5a7c3235985629423372494cab55c8f7d64a8b27a0e7202c55a13b0f8d19c80e
+4ae9ca3f015115dc3ca467c17a4c7ee95970ab10e5a54ff0ac3cd39881ee5958
+1a84f03df0be0e492fd855a8d6aa35d10b4962dbb0a604a3d3ee5e80a8eee600
+a24977f8660378bf0bbf00e01d0a8fb7f980f04b8aa6ce6aca8d5a7533c52753
+839152c4e222f4dc512dd5eb90cbc981e8ea12cf90cd8a8bf47d89159e2741d3
+7124f65b96fcd254dae258fa84a13c13043246a32129574787e49eae2b49b86d
+c3e2e78b9ff7f4002415bb08907c66df0d103b4e0c104db90500ff70700c203a
+ee1e82dba4c3e16e256c0acca6ceaae9afd1f612d7eb472157ac95962bd05594
+7dd1598466053245088e827f44628657942a825b84e4fb601f84b4025611aca3
+901e01bb024911dc0a4445f08e41f83df02b10142173149ab71baf027611ea95
+7a257704201d14cd9af4d90b00f194530088cb4e09c0df1c5c0088f7393f6833
+c0aa3ac156655de3bca9b34ab9716906ba07aba5e5bba1eb3358d90b9da7c533
+64f6888bf47b60f521e8380fe10be03d2feac17900927560df40f4e48f805960
+50328d648bf4893f9067c217a0631656b7c898c122847bc07b03a2d3e0ee85e4
+33b0ef867450c4fad2ecd26cf7168074c0ba0c904cdac300c9cfec4701924df6
+1cdca61e10685c6f7d52d0caba1498972f43d740adb4b2009d7d7220b20e3473
+90a943d00ffe959bb6eac3e0fe42ea49ee00c45f06e76329b1dabf127d690d80
+5581b408f63c2403e0cc433c00ee658836803b0fd100747c04ab5f917704fd10
+d5c1cd41ec801343d207f602a403605d86e5f9e5f9ae0d00e994556833806685
+c931fb709b0f08b4e869bea5c827859549e82c544b8d29c816a0390999613920
+7e610d5727a16318c2003c1fa24be0de2b32caf92224e7c17e5004b6350c4c01
+05601218066b0ad28224e149019c086257ca315102de2712903bde97b8144d82
+3b2c6ac52d403c054e019249b087f53d0558995a99ea946c70cc927458b3c1ff
+550f30050df988d4284376b4566a8e416654cc921985e037e0df0fc131f00f4b
+acf0c6211c036f14a239703741740adc7da227edd7e56b833d0ae92549b4d357
+25dfb49ed2ff63908e6adf27d6d0dda7638d4154d2778daca17f58e61297c129
+41f233b01f5dc3740cac51688c35c6b22580f48224fee9b83502569a66b629f1
+09f3713473413e2666e7fe6f6c6efefdfafda1f56f6e06f93496d9d67cb7366a
+9964b6f92e64b689196ec6c604646fd3fe4771ff1bf03f65d8ecc3addbb5f300
+00000049454e44ae426082
+"""),
+}
+
+def test_suite(options, args):
+    """
+    Create a PNG test image and write the file to stdout.
+    """
+
+    # Below is a big stack of test image generators.
+    # They're all really tiny, so PEP 8 rules are suspended.
+
+    def test_gradient_horizontal_lr(x, y): return x
+    def test_gradient_horizontal_rl(x, y): return 1-x
+    def test_gradient_vertical_tb(x, y): return y
+    def test_gradient_vertical_bt(x, y): return 1-y
+    def test_radial_tl(x, y): return max(1-math.sqrt(x*x+y*y), 0.0)
+    def test_radial_center(x, y): return test_radial_tl(x-0.5, y-0.5)
+    def test_radial_tr(x, y): return test_radial_tl(1-x, y)
+    def test_radial_bl(x, y): return test_radial_tl(x, 1-y)
+    def test_radial_br(x, y): return test_radial_tl(1-x, 1-y)
+    def test_stripe(x, n): return float(int(x*n) & 1)
+    def test_stripe_h_2(x, y): return test_stripe(x, 2)
+    def test_stripe_h_4(x, y): return test_stripe(x, 4)
+    def test_stripe_h_10(x, y): return test_stripe(x, 10)
+    def test_stripe_v_2(x, y): return test_stripe(y, 2)
+    def test_stripe_v_4(x, y): return test_stripe(y, 4)
+    def test_stripe_v_10(x, y): return test_stripe(y, 10)
+    def test_stripe_lr_10(x, y): return test_stripe(x+y, 10)
+    def test_stripe_rl_10(x, y): return test_stripe(1+x-y, 10)
+    def test_checker(x, y, n): return float((int(x*n) & 1) ^ (int(y*n) & 1))
+    def test_checker_8(x, y): return test_checker(x, y, 8)
+    def test_checker_15(x, y): return test_checker(x, y, 15)
+    def test_zero(x, y): return 0
+    def test_one(x, y): return 1
+
+    test_patterns = {
+        'GLR': test_gradient_horizontal_lr,
+        'GRL': test_gradient_horizontal_rl,
+        'GTB': test_gradient_vertical_tb,
+        'GBT': test_gradient_vertical_bt,
+        'RTL': test_radial_tl,
+        'RTR': test_radial_tr,
+        'RBL': test_radial_bl,
+        'RBR': test_radial_br,
+        'RCTR': test_radial_center,
+        'HS2': test_stripe_h_2,
+        'HS4': test_stripe_h_4,
+        'HS10': test_stripe_h_10,
+        'VS2': test_stripe_v_2,
+        'VS4': test_stripe_v_4,
+        'VS10': test_stripe_v_10,
+        'LRS': test_stripe_lr_10,
+        'RLS': test_stripe_rl_10,
+        'CK8': test_checker_8,
+        'CK15': test_checker_15,
+        'ZERO': test_zero,
+        'ONE': test_one,
+        }
+
+    def test_pattern(width, height, bitdepth, pattern):
+        """Create a single plane (monochrome) test pattern.  Returns a
+        flat row flat pixel array.
+        """
+
+        maxval = 2**bitdepth-1
+        if maxval > 255:
+            a = array('H')
+        else:
+            a = array('B')
+        fw = float(width)
+        fh = float(height)
+        pfun = test_patterns[pattern]
+        for y in range(height):
+            fy = float(y)/fh
+            for x in range(width):
+                a.append(int(round(pfun(float(x)/fw, fy) * maxval)))
+        return a
+
+    def test_rgba(size=256, bitdepth=8,
+                    red="GTB", green="GLR", blue="RTL", alpha=None):
+        """
+        Create a test image.  Each channel is generated from the
+        specified pattern; any channel apart from red can be set to
+        None, which will cause it not to be in the image.  It
+        is possible to create all PNG channel types (L, RGB, LA, RGBA),
+        as well as non-PNG channel types (RGA, and so on).
+        """
+
+        i = test_pattern(size, size, bitdepth, red)
+        psize = 1
+        for channel in (green, blue, alpha):
+            if channel:
+                c = test_pattern(size, size, bitdepth, channel)
+                i = interleave_planes(i, c, psize, 1)
+                psize += 1
+        return i
+
+    def pngsuite_image(name):
+        """
+        Create a test image by reading an internal copy of the files
+        from the PngSuite.  Returned in flat row flat pixel format.
+        """
+
+        if name not in _pngsuite:
+            raise NotImplementedError("cannot find PngSuite file %s (use -L for a list)" % name)
+        r = Reader(bytes=_pngsuite[name])
+        w,h,pixels,meta = r.asDirect()
+        assert w == h
+        # LAn for n < 8 is a special case for which we need to rescale
+        # the data.
+        if meta['greyscale'] and meta['alpha'] and meta['bitdepth'] < 8:
+            factor = 255 // (2**meta['bitdepth']-1)
+            def rescale(data):
+                for row in data:
+                    yield map(factor.__mul__, row)
+            pixels = rescale(pixels)
+            meta['bitdepth'] = 8
+        arraycode = 'BH'[meta['bitdepth']>8]
+        return w, array(arraycode, itertools.chain(*pixels)), meta
+
+    # The body of test_suite()
+    size = 256
+    if options.test_size:
+        size = options.test_size
+    options.bitdepth = options.test_depth
+    options.greyscale=bool(options.test_black)
+
+    kwargs = {}
+    if options.test_red:
+        kwargs["red"] = options.test_red
+    if options.test_green:
+        kwargs["green"] = options.test_green
+    if options.test_blue:
+        kwargs["blue"] = options.test_blue
+    if options.test_alpha:
+        kwargs["alpha"] = options.test_alpha
+    if options.greyscale:
+        if options.test_red or options.test_green or options.test_blue:
+            raise ValueError("cannot specify colours (R, G, B) when greyscale image (black channel, K) is specified")
+        kwargs["red"] = options.test_black
+        kwargs["green"] = None
+        kwargs["blue"] = None
+    options.alpha = bool(options.test_alpha)
+    if not args:
+        pixels = test_rgba(size, options.bitdepth, **kwargs)
+    else:
+        size,pixels,meta = pngsuite_image(args[0])
+        for k in ['bitdepth', 'alpha', 'greyscale']:
+            setattr(options, k, meta[k])
+
+    writer = Writer(size, size,
+                    bitdepth=options.bitdepth,
+                    transparent=options.transparent,
+                    background=options.background,
+                    gamma=options.gamma,
+                    greyscale=options.greyscale,
+                    alpha=options.alpha,
+                    compression=options.compression,
+                    interlace=options.interlace)
+    writer.write_array(sys.stdout, pixels)
+
+def read_pam_header(infile):
+    """
+    Read (the rest of a) PAM header.  `infile` should be positioned
+    immediately after the initial 'P7' line (at the beginning of the
+    second line).  Returns are as for `read_pnm_header`.
+    """
+
+    # Unlike PBM, PGM, and PPM, we can read the header a line at a time.
+    header = dict()
+    while True:
+        l = infile.readline().strip()
+        if l == 'ENDHDR':
+            break
+        if l == '':
+            raise EOFError('PAM ended prematurely')
+        if l[0] == '#':
+            continue
+        l = l.split(None, 1)
+        if l[0] not in header:
+            header[l[0]] = l[1]
+        else:
+            header[l[0]] += ' ' + l[1]
+
+    if ('WIDTH' not in header or
+        'HEIGHT' not in header or
+        'DEPTH' not in header or
+        'MAXVAL' not in header):
+        raise Error('PAM file must specify WIDTH, HEIGHT, DEPTH, and MAXVAL')
+    width = int(header['WIDTH'])
+    height = int(header['HEIGHT'])
+    depth = int(header['DEPTH'])
+    maxval = int(header['MAXVAL'])
+    if (width <= 0 or
+        height <= 0 or
+        depth <= 0 or
+        maxval <= 0):
+        raise Error(
+          'WIDTH, HEIGHT, DEPTH, MAXVAL must all be positive integers')
+    return 'P7', width, height, depth, maxval
+
+def read_pnm_header(infile, supported=('P5','P6')):
+    """
+    Read a PNM header, returning (format,width,height,depth,maxval).
+    `width` and `height` are in pixels.  `depth` is the number of
+    channels in the image; for PBM and PGM it is synthesized as 1, for
+    PPM as 3; for PAM images it is read from the header.  `maxval` is
+    synthesized (as 1) for PBM images.
+    """
+
+    # Generally, see http://netpbm.sourceforge.net/doc/ppm.html
+    # and http://netpbm.sourceforge.net/doc/pam.html
+
+    # Technically 'P7' must be followed by a newline, so by using
+    # rstrip() we are being liberal in what we accept.  I think this
+    # is acceptable.
+    type = infile.read(3).rstrip()
+    if type not in supported:
+        raise NotImplementedError('file format %s not supported' % type)
+    if type == 'P7':
+        # PAM header parsing is completely different.
+        return read_pam_header(infile)
+    # Expected number of tokens in header (3 for P4, 4 for P6)
+    expected = 4
+    pbm = ('P1', 'P4')
+    if type in pbm:
+        expected = 3
+    header = [type]
+
+    # We have to read the rest of the header byte by byte because the
+    # final whitespace character (immediately following the MAXVAL in
+    # the case of P6) may not be a newline.  Of course all PNM files in
+    # the wild use a newline at this point, so it's tempting to use
+    # readline; but it would be wrong.
+    def getc():
+        c = infile.read(1)
+        if c == '':
+            raise Error('premature EOF reading PNM header')
+        return c
+
+    c = getc()
+    while True:
+        # Skip whitespace that precedes a token.
+        while c.isspace():
+            c = getc()
+        # Skip comments.
+        while c == '#':
+            while c not in '\n\r':
+                c = getc()
+        if not c.isdigit():
+            raise Error('unexpected character %s found in header' % c)
+        # According to the specification it is legal to have comments
+        # that appear in the middle of a token.
+        # This is bonkers; I've never seen it; and it's a bit awkward to
+        # code good lexers in Python (no goto).  So we break on such
+        # cases.
+        token = ''
+        while c.isdigit():
+            token += c
+            c = getc()
+        # Slight hack.  All "tokens" are decimal integers, so convert
+        # them here.
+        header.append(int(token))
+        if len(header) == expected:
+            break
+    # Skip comments (again)
+    while c == '#':
+        while c not in '\n\r':
+            c = getc()
+    if not c.isspace():
+        raise Error('expected header to end with whitespace, not %s' % c)
+
+    if type in pbm:
+        # synthesize a MAXVAL
+        header.append(1)
+    depth = (1,3)[type == 'P6']
+    return header[0], header[1], header[2], depth, header[3]
+
+def write_pnm(file, width, height, pixels, meta):
+    """Write a Netpbm PNM/PAM file."""
+
+    bitdepth = meta['bitdepth']
+    maxval = 2**bitdepth - 1
+    # Rudely, the number of image planes can be used to determine
+    # whether we are L (PGM), LA (PAM), RGB (PPM), or RGBA (PAM).
+    planes = meta['planes']
+    # Can be an assert as long as we assume that pixels and meta came
+    # from a PNG file.
+    assert planes in (1,2,3,4)
+    if planes in (1,3):
+        if 1 == planes:
+            # PGM
+            # Could generate PBM if maxval is 1, but we don't (for one
+            # thing, we'd have to convert the data, not just blat it
+            # out).
+            fmt = 'P5'
+        else:
+            # PPM
+            fmt = 'P6'
+        file.write('%s %d %d %d\n' % (fmt, width, height, maxval))
+    if planes in (2,4):
+        # PAM
+        # See http://netpbm.sourceforge.net/doc/pam.html
+        if 2 == planes:
+            tupltype = 'GRAYSCALE_ALPHA'
+        else:
+            tupltype = 'RGB_ALPHA'
+        file.write('P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\n'
+                   'TUPLTYPE %s\nENDHDR\n' %
+                   (width, height, planes, maxval, tupltype))
+    # Values per row
+    vpr = planes * width
+    # struct format
+    fmt = '>%d' % vpr
+    if maxval > 0xff:
+        fmt = fmt + 'H'
+    else:
+        fmt = fmt + 'B'
+    for row in pixels:
+        file.write(struct.pack(fmt, *row))
+    file.flush()
+
+def color_triple(color):
+    """
+    Convert a command line colour value to an RGB triple of integers.
+    FIXME: Somewhere we need support for greyscale backgrounds etc.
+    """
+    if color.startswith('#') and len(color) == 4:
+        return (int(color[1], 16),
+                int(color[2], 16),
+                int(color[3], 16))
+    if color.startswith('#') and len(color) == 7:
+        return (int(color[1:3], 16),
+                int(color[3:5], 16),
+                int(color[5:7], 16))
+    elif color.startswith('#') and len(color) == 13:
+        return (int(color[1:5], 16),
+                int(color[5:9], 16),
+                int(color[9:13], 16))
+
+
+def _main(argv):
+    """
+    Run the PNG encoder with options from the command line.
+    """
+
+    # Parse command line arguments
+    from optparse import OptionParser
+    import re
+    version = '%prog ' + re.sub(r'( ?\$|URL: |Rev:)', '', __version__)
+    parser = OptionParser(version=version)
+    parser.set_usage("%prog [options] [imagefile]")
+    parser.add_option('-r', '--read-png', default=False,
+                      action='store_true',
+                      help='Read PNG, write PNM')
+    parser.add_option("-i", "--interlace",
+                      default=False, action="store_true",
+                      help="create an interlaced PNG file (Adam7)")
+    parser.add_option("-t", "--transparent",
+                      action="store", type="string", metavar="color",
+                      help="mark the specified colour (#RRGGBB) as transparent")
+    parser.add_option("-b", "--background",
+                      action="store", type="string", metavar="color",
+                      help="save the specified background colour")
+    parser.add_option("-a", "--alpha",
+                      action="store", type="string", metavar="pgmfile",
+                      help="alpha channel transparency (RGBA)")
+    parser.add_option("-g", "--gamma",
+                      action="store", type="float", metavar="value",
+                      help="save the specified gamma value")
+    parser.add_option("-c", "--compression",
+                      action="store", type="int", metavar="level",
+                      help="zlib compression level (0-9)")
+    parser.add_option("-T", "--test",
+                      default=False, action="store_true",
+                      help="create a test image (a named PngSuite image if an argument is supplied)")
+    parser.add_option('-L', '--list',
+                      default=False, action='store_true',
+                      help="print list of named test images")
+    parser.add_option("-R", "--test-red",
+                      action="store", type="string", metavar="pattern",
+                      help="test pattern for the red image layer")
+    parser.add_option("-G", "--test-green",
+                      action="store", type="string", metavar="pattern",
+                      help="test pattern for the green image layer")
+    parser.add_option("-B", "--test-blue",
+                      action="store", type="string", metavar="pattern",
+                      help="test pattern for the blue image layer")
+    parser.add_option("-A", "--test-alpha",
+                      action="store", type="string", metavar="pattern",
+                      help="test pattern for the alpha image layer")
+    parser.add_option("-K", "--test-black",
+                      action="store", type="string", metavar="pattern",
+                      help="test pattern for greyscale image")
+    parser.add_option("-d", "--test-depth",
+                      default=8, action="store", type="int",
+                      metavar='NBITS',
+                      help="create test PNGs that are NBITS bits per channel")
+    parser.add_option("-S", "--test-size",
+                      action="store", type="int", metavar="size",
+                      help="width and height of the test image")
+    (options, args) = parser.parse_args(args=argv[1:])
+
+    # Convert options
+    if options.transparent is not None:
+        options.transparent = color_triple(options.transparent)
+    if options.background is not None:
+        options.background = color_triple(options.background)
+
+    if options.list:
+        names = list(_pngsuite)
+        names.sort()
+        for name in names:
+            print name
+        return
+
+    # Run regression tests
+    if options.test:
+        return test_suite(options, args)
+
+    # Prepare input and output files
+    if len(args) == 0:
+        infilename = '-'
+        infile = sys.stdin
+    elif len(args) == 1:
+        infilename = args[0]
+        infile = open(infilename, 'rb')
+    else:
+        parser.error("more than one input file")
+    outfile = sys.stdout
+
+    if options.read_png:
+        # Encode PNG to PPM
+        png = Reader(file=infile)
+        width,height,pixels,meta = png.asDirect()
+        write_pnm(outfile, width, height, pixels, meta)
+    else:
+        # Encode PNM to PNG
+        format, width, height, depth, maxval = \
+          read_pnm_header(infile, ('P5','P6','P7'))
+        # When it comes to the variety of input formats, we do something
+        # rather rude.  Observe that L, LA, RGB, RGBA are the 4 colour
+        # types supported by PNG and that they correspond to 1, 2, 3, 4
+        # channels respectively.  So we use the number of channels in
+        # the source image to determine which one we have.  We do not
+        # care about TUPLTYPE.
+        greyscale = depth <= 2
+        pamalpha = depth in (2,4)
+        supported = map(lambda x: 2**x-1, range(1,17))
+        try:
+            mi = supported.index(maxval)
+        except ValueError:
+            raise NotImplementedError(
+              'your maxval (%s) not in supported list %s' %
+              (maxval, str(supported)))
+        bitdepth = mi+1
+        writer = Writer(width, height,
+                        greyscale=greyscale,
+                        bitdepth=bitdepth,
+                        interlace=options.interlace,
+                        transparent=options.transparent,
+                        background=options.background,
+                        alpha=bool(pamalpha or options.alpha),
+                        gamma=options.gamma,
+                        compression=options.compression)
+        if options.alpha:
+            pgmfile = open(options.alpha, 'rb')
+            format, awidth, aheight, adepth, amaxval = \
+              read_pnm_header(pgmfile, 'P5')
+            if amaxval != 255:
+                raise NotImplementedError(
+                  'maxval %s not supported for alpha channel' % amaxval)
+            if (awidth, aheight) != (width, height):
+                raise ValueError("alpha channel image size mismatch"
+                                 " (%s has %sx%s but %s has %sx%s)"
+                                 % (infilename, width, height,
+                                    options.alpha, awidth, aheight))
+            writer.convert_ppm_and_pgm(infile, pgmfile, outfile)
+        else:
+            writer.convert_pnm(infile, outfile)
+
+
+if __name__ == '__main__':
+    try:
+        _main(sys.argv)
+    except Error, e:
+        print >>sys.stderr, e
diff --git a/doc/_extensions/slink.py b/doc/_extensions/slink.py
new file mode 100755
index 0000000..e8b309d
--- /dev/null
+++ b/doc/_extensions/slink.py
@@ -0,0 +1,62 @@
+# This program is in the public domain
+# Author: Paul Kienzle
+"""
+Substitution references in hyperlinks.
+
+In order to construct documents programmatically with references to
+version specific download files for example, you will need to be able
+to control the generation of the text from the configure script.
+
+For this purpose we provide the substitution link, or slink, role to
+sphinx.  Within conf.py you must define *slink_vars*, which is a
+dictionary of variables which can be used for substitution.  Within
+your RST documents, you can then use :slink:`pattern` with standard
+python 2.x string substitution template rules.  The pattern is usually
+"text <url>"; a bare "url" is treated as "url <url>" with proper html escapes.
+
+For example::
+
+    -- conf.py --
+    ...
+    extensions.append('slink')
+    slink_vars = dict(url="http://some.url.com",
+                      source="sputter-%s.zip"%version,
+                      )
+    ...
+
+    -- download.rst --
+    ...
+    Source: :slink:`latest sputter <%(url)s/downloads/%(source)s>`
+    ...
+"""
+import traceback
+from docutils import nodes, utils
+
+def setup(app):
+    def slink_role(role, rawtext, text, lineno, inliner, options={},
+                      content=[]):
+        def warn(err):
+            msg = "\n  error in %s\n  %s"%(rawtext,err)
+            inliner.reporter.warning(msg,line=lineno)
+
+        try:
+            text = text%app.config.slink_vars
+        except Exception as exc:
+            #err = traceback.format_exc(0).strip()
+            err = traceback.format_exception_only(exc.__class__, exc)[0]
+            warn(err.strip())
+        lidx,ridx = text.find('<'), text.find('>')
+        if lidx >= 0 and ridx > lidx and ridx == len(text)-1:
+            ref = text[lidx+1:ridx]
+            name = utils.unescape(text[:lidx].strip())
+        elif lidx > 0 or ridx > 0:
+            warn('Incorrect reference format in expanded link: '+text)
+            ref = ''
+            name = utils.unescape(text)
+        else:
+            ref = text
+            name = utils.unescape(ref)
+        node = nodes.reference(rawtext, name, refuri=ref, **options)
+        return [node], []
+    app.add_config_value('slink_vars', {}, False)
+    app.add_role('slink', slink_role)
diff --git a/doc/_extensions/wx_directive.py b/doc/_extensions/wx_directive.py
new file mode 100644
index 0000000..1da016a
--- /dev/null
+++ b/doc/_extensions/wx_directive.py
@@ -0,0 +1,424 @@
+"""A special directive for including wx panels.
+
+Given a path to a .py file, it includes the source code inline, and an
+image of the panel it produces.
+
+This directive supports all of the options of the `image` directive,
+except for `target` (since plot will add its own target).
+
+Additionally, if the :include-source: option is provided, the literal
+source will be included inline, as well as a link to the source.
+
+The set of file formats to generate can be specified with the
+plot_formats configuration variable.
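+
+A hypothetical usage from an RST document might look like this (the
+module path ``gui/fit_panel.py`` is only illustrative)::
+
+    .. wx:: gui/fit_panel.py
+       :include-source:
+       :width: 80%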
+"""
+
+# Note: adapted from matplotlib.sphinxext.plot_directive by Paul Kienzle
+
+from six.moves import StringIO
+
+import sys, os, glob, shutil, hashlib, imp, warnings
+import re
+try:
+    from hashlib import md5
+except ImportError:
+    from md5 import md5
+from docutils.parsers.rst import directives
+try:
+    # docutils 0.4
+    from docutils.parsers.rst.directives.images import align
+except ImportError:
+    # docutils 0.5
+    from docutils.parsers.rst.directives.images import Image
+    align = Image.align
+from docutils import nodes
+import sphinx
+
+import wx
+# Matplotlib helper utilities
+import matplotlib.cbook as cbook
+import numpy as np
+
+from . import png
+
+
+sphinx_version = sphinx.__version__.split(".")
+# The split is necessary for sphinx beta versions where the string is
+# '6b1'
+sphinx_version = tuple([int(re.split('[a-z]', x)[0])
+                        for x in sphinx_version[:2]])
+
+
+if hasattr(os.path, 'relpath'):
+    relpath = os.path.relpath
+else:
+    def relpath(target, base=os.curdir):
+        """
+        Return a relative path to the target from either the current dir or an optional base dir.
+        Base can be a directory specified either as absolute or relative to current dir.
+        """
+
+        if not os.path.exists(target):
+            raise OSError('Target does not exist: '+target)
+
+        if not os.path.isdir(base):
+            raise OSError('Base is not a directory or does not exist: '+base)
+
+        base_list = (os.path.abspath(base)).split(os.sep)
+        target_list = (os.path.abspath(target)).split(os.sep)
+
+        # On the windows platform the target may be on a completely different drive from the base.
+        if os.name in ('nt','dos','os2') and base_list[0] != target_list[0]:
+            raise OSError('Target is on a different drive to base. Target: '+target_list[0].upper()+', base: '+base_list[0].upper())
+
+        # Starting from the filepath root, work out how much of the filepath is
+        # shared by base and target.
+        for i in range(min(len(base_list), len(target_list))):
+            if base_list[i] != target_list[i]: break
+        else:
+            # If we broke out of the loop, i is pointing to the first differing path elements.
+            # If we didn't break out of the loop, i is pointing to identical path elements.
+            # Increment i so that in all cases it points to the first differing path elements.
+            i+=1
+
+        rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:]
+        return os.path.join(*rel_list)
+
+def write_char(s):
+    sys.stdout.write(s)
+    sys.stdout.flush()
+
+options = {'alt': directives.unchanged,
+           'height': directives.length_or_unitless,
+           'width': directives.length_or_percentage_or_unitless,
+           'scale': directives.nonnegative_int,
+           'align': align,
+           'class': directives.class_option,
+           'include-source': directives.flag }
+
+template = """
+.. image:: %(prefix)s%(tmpdir)s/%(outname)s
+   %(options)s
+"""
+
+exception_template = """
+.. htmlonly::
+
+   [`source code <%(linkdir)s/%(basename)s.py>`__]
+
+Exception occurred rendering plot.
+
+"""
+
+def out_of_date(original, derived):
+    """
+    Returns True if derivative is out-of-date wrt original,
+    both of which are full file paths.
+    """
+    return (not os.path.exists(derived))
+    # or os.stat(derived).st_mtime < os.stat(original).st_mtime)
+
+def runfile(fullpath):
+    """
+    Import a Python module from a path.
+    """
+    # Change the working directory to the directory of the example, so
+    # it can get at its data files, if any.
+    pwd = os.getcwd()
+    path, fname = os.path.split(fullpath)
+    sys.path.insert(0, os.path.abspath(path))
+    stdout = sys.stdout
+    sys.stdout = StringIO()
+    os.chdir(path)
+    try:
+        fd = open(fname)
+        module = imp.load_module("__main__", fd, fname, ('py', 'r', imp.PY_SOURCE))
+    finally:
+        del sys.path[0]
+        os.chdir(pwd)
+        sys.stdout = stdout
+    return module
+
+def capture_image(panel, labels):
+    # Need to be at a top level window in order to force a redraw
+    frame = panel
+    while not frame.IsTopLevel():
+        frame = frame.parent
+    frame.Show()
+    wx.Yield()
+
+    # Grab the bitmap; if it is the top level, then include WindowDC so we
+    # can grab the window decorations.  This only works on Windows!
+    if panel.IsTopLevel():
+        graphdc = wx.WindowDC(panel)
+    else:
+        graphdc = wx.ClientDC(panel)
+    w,h = graphdc.GetSize()
+    bmp = wx.EmptyBitmap(w,h)
+    memdc = wx.MemoryDC()
+    memdc.SelectObject(bmp)
+    memdc.Blit(0,0, w, h, graphdc, 0, 0)
+
+    # Add annotations using a GCDC so we get antialiased corners
+    gcdc = wx.GCDC(memdc)
+    for widget,label,position in labels:
+        annotate(gcdc, widget=widget, label=label, position=position,
+                 panelsize=(w,h))
+
+    # Release the bitmap from the DC
+    memdc.SelectObject(wx.NullBitmap)
+
+    # Copy bitmap to a numpy array
+    img = np.empty((w,h,3),'uint8')
+    bmp.CopyToBuffer(buffer(img), format=wx.BitmapBufferFormat_RGB)
+
+    # Destroy the frame
+    frame.Destroy()
+    wx.Yield()
+    return img
+
+def write_png(outpath,img):
+    w,h,p = img.shape
+    img = np.ascontiguousarray(img)
+    writer = png.Writer(size=(w,h), alpha=False, bitdepth=8, compression=9)
+    with open(outpath,'wb') as fid:
+        writer.write(fid, np.reshape(img,(h,w*p)))
+
+def annotate(dc, widget, label, position='c', panelsize=(0,0)):
+    """
+    Draws label relative to the widget on the panel.
+
+    *panel* is the panel to receive the annotation
+    *widget* is the widget or coordinates (x,y) in panel to be annotated
+    *label* is the annotation label
+    *position* is the location of the annotation, which is one of:
+        * t: above the widget
+        * b: below the widget
+        * l: left of the widget
+        * r: right of the widget
+        * c: center of the widget
+    """
+    padx, pady = 4,4        # Space around rectangle
+    bordersize = 2          # Size of border line
+    fontsize = 18           # Size of text
+    radius = (fontsize+pady+bordersize)//2   # Rounding radius on rectangle
+    marginx, marginy = 2,2  # Space between edge of rectangle and edge of widget
+    foreground = 'black'    # Font and outline colour
+    background = '#C1A004C0'  # Gold fill
+
+    pen = wx.Pen(colour=foreground, width=bordersize)
+    brush = wx.Brush(colour=background)
+    font = wx.Font(pointSize=fontsize,
+                   family=wx.FONTFAMILY_SWISS,
+                   style=wx.FONTSTYLE_NORMAL,
+                   weight=wx.FONTWEIGHT_NORMAL
+                   )
+    dc.SetPen(pen)
+    dc.SetBrush(brush)
+    dc.SetFont(font)
+
+    # Determine box dimensions
+    tw,th = dc.GetTextExtent(label)
+    rw,rh = tw+2*padx,th+2*pady
+
+    # If the box is tall and thin, force it to be a circle because it looks
+    # better.  Conveniently, numbers 1-9 as annotations should all be circles.
+    # TODO: maybe draw this as a circle rather than rounded rectangle?
+    if rw < rh:
+        padx += (rh-rw)//2
+        rw = rh
+
+    # Determine anchor position on the screen, which is either the
+    # rectangle containing a specific widget, or is a pair of coordinates (x,y)
+    try:     # Is it (x,y)?
+        bx,by = widget
+        bw,bh = 0,0
+    except:  # No.  Hope it is a widget
+        bx,by = widget.GetPositionTuple()
+        bw,bh = widget.GetSizeTuple()
+
+    # Position the label relative to the anchor
+    if position == 't':
+        rx = bx + (bw-rw)//2
+        ry = by - (marginy + rh)
+    elif position == 'b':
+        rx = bx + (bw-rw)//2
+        ry = by + bh + marginy
+    elif position == 'l':
+        rx = bx - (marginx + rw)
+        ry = by + (bh-rh)//2
+    elif position == 'r':
+        rx = bx + bw + marginx
+        ry = by + (bh-rh)//2
+    elif position == 'c':
+        rx = bx + (bw-rw)//2
+        ry = by + (bh-rh)//2
+    else:
+        raise ValueError('position should be t, l, b, r, or c')
+
+    # Make sure label box doesn't fall off the panel
+    #fw,fh = dc.GetSize()
+    fw,fh = panelsize # Grrr... antialiasing DC does not preserve size
+    #print "*** text",label,tw,th
+    #print " ** widget",bx,by,bw,bh
+    #print " ** rect",rx,ry,rw,rh
+    #print " ** panel",fw,fh
+    if rx+rw >= fw: rx = fw-(rw+bordersize//2 + 1)
+    if ry+rh >= fh: ry = fh-(rh+bordersize//2 + 1)
+    if rx < 0:   rx = bordersize//2
+    if ry < 0:   ry = bordersize//2
+
+    # Draw the box and the annotation label
+    dc.BeginDrawing()
+    dc.DrawRoundedRectangle(rx,ry,rw,rh,radius)
+    dc.DrawText(text=label,x=rx+padx,y=ry+pady)
+    dc.EndDrawing()
+
+def make_image(fullpath, code, outdir, context='', options={}):
+    """
+    Run a script that builds a wx panel and save a PNG image of it in *outdir*.
+    """
+
+    fullpath = str(fullpath)  # todo, why is unicode breaking this
+    basedir, fname = os.path.split(fullpath)
+    basename, ext = os.path.splitext(fname)
+
+    if str(basename) == "None":
+        import pdb
+        pdb.set_trace()
+
+    # Look for output file
+    outpath = os.path.join(outdir, basename+'.png')
+    if not out_of_date(fullpath, outpath):
+        write_char('.')
+        return 1
+
+    # We didn't find the files, so build them
+
+    if code is not None:
+        exec(code)
+    else:
+        try:
+            module = runfile(fullpath)
+            panel = module.panel
+        except:
+            warnings.warn("current path "+os.getcwd())
+            s = cbook.exception_to_str("Exception running wx %s %s" % (fullpath,context))
+            warnings.warn(s)
+            return False
+
+    try:    labels
+    except: labels = []
+    img = capture_image(panel,labels)
+    write_png(outpath, img)
+
+    return True
+
+def wx_directive(name, arguments, options, content, lineno,
+                   content_offset, block_text, state, state_machine):
+    """
+    Handle the wx directive.
+    """
+    # The user may provide a filename *or* Python code content, but not both
+    if len(arguments) == 1:
+        reference = directives.uri(arguments[0])
+        basedir, fname = os.path.split(reference)
+        basename, ext = os.path.splitext(fname)
+        basedir = relpath(basedir, setup.app.builder.srcdir)
+        if len(content):
+            raise ValueError("wx directive may not specify both a filename and inline content")
+        content = None
+    else:
+        basedir = "inline"
+        content = '\n'.join(content)
+        # Since we don't have a filename, use a hash based on the content
+        reference = basename = md5(content).hexdigest()[-10:]
+        fname = None
+
+    # Get the directory of the rst file, and determine the relative
+    # path from the resulting html file to the plot_directive links
+    # (linkdir).  This relative path is used for html links *only*,
+    # and not the embedded image.  That is given an absolute path to
+    # the temporary directory, and then sphinx moves the file to
+    # build/html/_images for us later.
+    rstdir, rstfile = os.path.split(state_machine.document.attributes['source'])
+    reldir = rstdir[len(setup.confdir)+1:]
+    relparts = [p for p in os.path.split(reldir) if p.strip()]
+    nparts = len(relparts)
+    outdir = os.path.join('wx_directive', basedir)
+    linkdir = ('../' * nparts) + outdir
+
+    context = "at %s:%d"%(rstfile,lineno)
+
+    # tmpdir is where we build all the output files.  This way the
+    # plots won't have to be redone when generating latex after html.
+
+    # Prior to Sphinx 0.6, absolute image paths were treated as
+    # relative to the root of the filesystem.  0.6 and after, they are
+    # treated as relative to the root of the documentation tree.  We need
+    # to support both methods here.
+    tmpdir = os.path.join('_build', outdir)
+    if sphinx_version < (0, 6):
+        tmpdir = os.path.abspath(tmpdir)
+        prefix = ''
+    else:
+        prefix = '/'
+    if not os.path.exists(tmpdir):
+        cbook.mkdirs(tmpdir)
+
+    # destdir is the directory within the output to store files
+    # that we'll be linking to -- not the embedded images.
+    destdir = os.path.abspath(os.path.join(setup.app.builder.outdir, outdir))
+    if not os.path.exists(destdir):
+        cbook.mkdirs(destdir)
+
+    # Generate the figures, and return the number of them
+    success = make_image(reference, content, tmpdir, context=context,
+                         options=options)
+
+    if 'include-source' in options:
+        if content is None:
+            content = open(reference, 'r').read()
+        lines = ['::', ''] + ['    %s'%row.rstrip() for row in content.split('\n')]
+        del options['include-source']
+    else:
+        lines = []
+
+    if success:
+        options = ['      :%s: %s' % (key, val) for key, val in options.items()]
+        options = "\n".join(options)
+        if fname is not None:
+            try:
+                shutil.copyfile(reference, os.path.join(destdir, fname))
+            except:
+                s = cbook.exception_to_str("Exception copying plot %s %s" % (reference,context))
+                warnings.warn(s)
+                return 0
+
+        outname = basename+'.png'
+
+        # Copy the linked-to files to the destination within the build tree,
+        # and add a link for them
+        shutil.copyfile(os.path.join(tmpdir, outname),
+                        os.path.join(destdir, outname))
+
+        # Output the resulting reST
+        lines.extend((template % locals()).split('\n'))
+    else:
+        lines.extend((exception_template % locals()).split('\n'))
+
+    if len(lines):
+        state_machine.insert_input(
+            lines, state_machine.input_lines.source(0))
+
+    return []
+
+def setup(app):
+    global _WXAPP
+    _WXAPP = wx.PySimpleApp()
+
+    setup.app = app
+    setup.config = app.config
+    setup.confdir = app.confdir
+
+    app.add_directive('wx', wx_directive, True, (0, 1, 0), **options)
diff --git a/doc/_static/haiku-site.css b/doc/_static/haiku-site.css
new file mode 100644
index 0000000..d4dda02
--- /dev/null
+++ b/doc/_static/haiku-site.css
@@ -0,0 +1,20 @@
+@import url("haiku.css");
+
+div.contents { 
+    float: right; 
+    background: Cornsilk;
+    margin: 0.5em 0.5em;
+}
+
+div.contents p.topic-title {
+    display: none;
+}
+
+div.contents ul {
+    padding: 0em;
+}
+
+div.contents ul > li {
+    background: none;
+    padding: 0 0 0 1em;
+}
diff --git a/doc/_static/logo.png b/doc/_static/logo.png
new file mode 100644
index 0000000..6f5ac77
Binary files /dev/null and b/doc/_static/logo.png differ
diff --git a/doc/conf.py b/doc/conf.py
new file mode 100644
index 0000000..6c4af58
--- /dev/null
+++ b/doc/conf.py
@@ -0,0 +1,285 @@
+# -*- coding: utf-8 -*-
+#
+# BUMPS documentation documentation build configuration file, created by
+# sphinx-quickstart on Wed Oct 13 15:11:19 2010.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+from __future__ import print_function, with_statement
+
+import sys, os
+sys.dont_write_bytecode = True
+print("python",sys.executable)
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+sys.path.insert(0, os.path.abspath('_extensions')) # for sphinx extensions
+sys.path.insert(0, os.path.abspath('.')) # for sitedoc
+
+# Add the build directory for the project; this does mean we need to build
+# before updating the documents each time, but this can be handled by the
+# makefile
+sys.path.insert(0, os.path.abspath('..'))
+print("== path ==")
+print("\n".join(sys.path))
+print("== end path ==")
+
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
+              'sphinx.ext.autosummary',
+              'sphinx.ext.coverage',
+              'sphinx.ext.viewcode',
+              #'sphinx.ext.pngmath',
+              #'sphinx.ext.jsmath',
+              'sphinx.ext.mathjax',
+              #'only_directives',
+              #'matplotlib.sphinxext.mathmpl',
+              'matplotlib.sphinxext.only_directives',
+              'matplotlib.sphinxext.plot_directive',
+              #'inheritance_diagram',
+              'dollarmath',
+              'slink',
+              #'wx_directive',
+              #'numpydoc.numpydoc',
+             ]
+#plot_formats = [('png', 120), ('pdf', 50)] # Only make 80 dpi plots
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = 'Bumps'
+copyright = '2006-2014, Public domain'
+#copyright = '2006-2011, University of Maryland'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The full version, including alpha/beta/rc tags.
+from bumps import __version__ as release
+# The short X.Y version.
+#version = ".".join(release.split(".")[:2])
+version = release
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_trees = ['_*','examples']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'haiku'
+#html_theme = 'default'
+html_style = 'haiku-site.css'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+html_logo = 'logo.png'
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+html_show_copyright = False
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'Bumps'
+
+
+program_title = 'Bumps: Curve Fitting and Uncertainty Analysis'
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+  ('index', 'Bumps.tex', program_title, 'Paul Kienzle', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+LATEX_PREAMBLE=r"""
+\usepackage[utf8]{inputenc}      % Allow unicode symbols in text
+\DeclareUnicodeCharacter {00B7} {\ensuremath{\cdot}}   % cdot
+\DeclareUnicodeCharacter {00B0} {\ensuremath{^\circ}}  % degrees
+\DeclareUnicodeCharacter {212B} {\AA}                  % Angstrom
+"""
+latex_elements = {'preamble' : LATEX_PREAMBLE}
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+if os.path.exists('rst_prolog'):
+    with open('rst_prolog') as fid:
+        rst_prolog = fid.read()
+
+htmlroot="http://www.reflectometry.org/danse"
+def download(name):
+    subs = dict(file=name%dict(version=version), path=htmlroot)
+    return "%(file)s <%(path)s/download.php?file=%(file)s>"%subs
+slink_vars=dict(version=release, htmlroot=htmlroot,
+                srczip=download("bumps-%(version)s.zip"),
+                winexe=download("bumps-%(version)s-win32.exe"),
+                macapp=download("Bumps %(version)s.dmg"),
+                vcredist=download("vcredist_x86.exe"),
+                wx4osx=download("osx64/wx-2.9.5.0-py27_0.tar.bz2"),
+                )
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    ('index', 'bumps', program_title, ['Paul Kienzle'], 1)
+]
+
+# Generate API docs
+import genmods
+genmods.make()
+
+# Generate tutorials
+import gentut
+gentut.make()
\ No newline at end of file
diff --git a/doc/examples/constraints/model.py b/doc/examples/constraints/model.py
new file mode 100644
index 0000000..176a726
--- /dev/null
+++ b/doc/examples/constraints/model.py
@@ -0,0 +1,18 @@
+from bumps.names import *
+
+def line(x, m, b):
+    return m*x + b
+
+x = [1,2,3,4,5,6]
+y = [2.1,4.0,6.3,8.03,9.6,11.9]
+dy = [0.05,0.05,0.2,0.05,0.2,0.2]
+
+M = Curve(line,x,y,dy,m=2,b=2)
+M.m.range(0,4)
+M.b.range(-5,5)
+
+B=2
+def constraints():
+    return 0 if M.b.value>B else 1000+(M.b.value-B)**6
+
+problem = FitProblem(M,constraints=constraints)
diff --git a/doc/examples/curvefit/curve.py b/doc/examples/curvefit/curve.py
new file mode 100644
index 0000000..67f164d
--- /dev/null
+++ b/doc/examples/curvefit/curve.py
@@ -0,0 +1,88 @@
+# Fitting a curve
+# ===============
+#
+# Fitting a curve to a data set and getting uncertainties on the
+# parameters was the main reason that bumps was created, so it
+# should be very easy to do.  Let's see if it is.
+#
+# First let's import the standard names:
+
+from bumps.names import *
+
+# Next we need some data.  The x values represent the independent variable,
+# and the y values represent the value measured for condition x.  In this
+# case x is 1-D, but it could be a sequence of tuples instead.  We also
+# need the uncertainty on each measurement if we want to get a meaningful
+# uncertainty on the fitted parameters.
+
+x = [1,2,3,4,5,6]
+y = [2.1,4.0,6.3,8.03,9.6,11.9]
+dy = [0.05,0.05,0.2,0.05,0.2,0.2]
+
+# Instead of using lists we could have loaded the data from a
+# three-column text file using:
+#
+# .. parsed-literal::
+#
+#    data = np.loadtxt("data.txt").T
+#    x,y,dy = data[0,:], data[1,:], data[2,:]
+#
+# The variations are endless --- cleaning the data so that it is
+# in a fit state to model is often the hardest part in the analysis.
+
+# We now define the function we want to fit.  The first argument
+# to the function names the independent variable, and the remaining
+# arguments are the fittable parameters.  The parameter arguments can
+# use a bare name, or they can use name=value to indicate the default
+# value for each parameter.  Our function defines a straight like of
+# slope $m$ with intercept $b$ defaulting to 0.
+
+def line(x, m, b=0):
+    return m*x + b
+
+# We can build a curve fitting object from our function and our data.
+# This assumes that the measurement uncertainty is normally
+# distributed, with a 1-\ $\sigma$ confidence interval *dy* for each point.
+# We specify initial values for $m$ and $b$ when we define the
+# model, and then constrain the fit to $m \in [0,4]$ and $b \in [-5,5]$
+# with the parameter :meth:`range <bumps.parameter.Parameter.range>` method.
+
+M = Curve(line,x,y,dy,m=2,b=2)
+M.m.range(0,4)
+M.b.range(-5,5)
+
+# Every model file ends with a problem definition including a
+# list of all models and datasets which are to be fitted.
+
+problem = FitProblem(M)
+
+# The complete model file :download:`curve.py <curve.py>` looks as follows:
+#
+# .. literalinclude:: curve.py
+#
+# We can now load and run the fit:
+#
+# .. parsed-literal::
+#
+#    $ bumps.py curve.py --fit=newton --steps=100 --store=T1
+#
+# The ``--fit=newton`` option says to use the quasi-newton optimizer for
+# not more than 100 steps.  The ``--store=T1`` option says to store the
+# initial model, the fit results and any monitoring information in the
+# directory T1.
+#
+# As the fit progresses, we are shown an iteration number and a cost
+# value.  The cost value is approximately the normalized $\chi^2_N$.
+# The value in parentheses is like the uncertainty in $\chi^2_N$, in
+# that a 1-\ $\sigma$ change in parameter values should increase
+# $\chi^2_N$ by that amount.
+#
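+# For reference (this is the usual definition, not a quote from the bumps
+# source), the normalized chi-square for $n$ points and $p$ fitted
+# parameters is roughly:
+#
+# .. math::
+#
+#    \chi^2_N \approx \frac{1}{n-p} \sum_{i=1}^{n}
+#        \left(\frac{y_i - f(x_i)}{\sigma_{y_i}}\right)^2
+#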
+# Here is the resulting fit:
+#
+# .. plot::
+#
+#    from sitedoc import fit_model
+#    fit_model('curve.py')
+#
+# All is well: Normalized $\chi^2_N$ is close to 1 and the line goes nicely
+# through the data.
diff --git a/doc/examples/curvefit/poisson.py b/doc/examples/curvefit/poisson.py
new file mode 100644
index 0000000..c6c6554
--- /dev/null
+++ b/doc/examples/curvefit/poisson.py
@@ -0,0 +1,198 @@
+# Fitting Poisson data
+# ====================
+#
+# Data from poisson processes, such as the number of counts per unit time
+# or counts per unit area, do not have the same pattern of uncertainties
+# as data from gaussian processes.  Poisson data consists of natural
+# numbers occurring at some underlying rate.  The fitting process checks
+# if the number of counts observed is consistent with the proposed rate
+# for each point in the dataset, much like the fitting process for gaussian
+# data checks if the observed value is consistent with the proposed value
+# within the measurement uncertainty.
+#
+# Using :class:`bumps.curve.PoissonCurve` instead of :class:`bumps.curve.Curve`,
+# we can fit a set of *counts* at conditions *x* using a function
+# *f(x, p1, p2, ...)* to propose rates for the various *x* values given the
+# parameters, yielding parameter values *p1, p2, ...* that are most consistent
+# with the *counts* at *x*. When measuring poisson processes, the underlying
+# rate is not known, so the measurement variance, which is a property of the
+# rate, is not associated with the data but instead associated with the
+# theory function which predicts the rates.  This is opposite from what we
+# have with gaussian data, in which the uncertainty is associated with the
+# measurement device, and explains why the call to PoissonCurve only accepts
+# *x* and *counts*, not *x*, *y*, and *dy*.
+#
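+# For a single point, the textbook Poisson probability of observing $k$
+# counts at a proposed rate $\lambda$ (quoted here for reference; not
+# necessarily the exact cost function used inside PoissonCurve) is:
+#
+# .. math::
+#
+#    P(k|\lambda) = \frac{\lambda^k e^{-\lambda}}{k!},
+#    \qquad
+#    -\log P(k|\lambda) = \lambda - k\log\lambda + \log k!
+#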
+# One property of the Poisson distribution is that it is well approximated
+# by a gaussian distribution for values above about 10.  It will never be
+# a perfect match since numbers from a poisson distribution can never be
+# negative, whereas gaussian numbers can be negative, albeit with
+# vanishingly small probability.  Below 10, there are
+# various ways you can approximate the poisson distribution with a gaussian.
+# This example explores some of the options.
+#
+# In particular, the handling of zero counts can be problematic when treating
+# the measurement as gaussian.  You cannot simply drop the points with zero
+# counts. Once you've done various reduction steps, the resulting non-zero
+# value for the uncertainty will carry meaning.  The longer you count,
+# the smaller the uncertainty should be, once you've normalized for counting
+# time or monitor.  Being off by a factor of 2 on the residuals is much
+# better than being off by a factor of infinity using uncertainty = zero,
+# and better than dropping the point altogether.
+#
+# There are a few things you can do with zero counts without being
+# completely arbitrary:
+#
+#   1) $\lambda = (k+1) \pm \sqrt{k+1}$ for all $k$
+#   2) $\lambda = (k+1/2) \pm \sqrt{k+1/4}$ for all $k$
+#   3) $\lambda = k \pm \sqrt{k+1}$ for all $k$
+#   4) $\lambda = k \pm \sqrt{k}$ for $k>0$, $1/2 \pm 1/2$ for $k = 0$
+#   5) $\lambda = k \pm \sqrt{k}$ for $k>0$, $0 \pm 1$ for $k = 0$
+#
+# See the notes from the CDF Statistics Committee for details at
+# `<http://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_.
+#
+# Of these, option 5 works slightly better for fitting, giving the best
+# estimate of the background.
+#
+# The ideal case is to have your model produce an expected number of counts
+# on the detector.  It is then trivial to compute the probability of
+# seeing the observed counts from the expected counts and fit the parameters
+# using PoissonCurve.  Unfortunately, this means incorporating all
+# instrumental effects when modelling the measurement rather than correcting
+# for instrumental effects in a data reduction program, and using a common
+# sample model independent of instrument.
+#
+# Setting $\lambda = k$ is good since that is the maximum likelihood value
+# for $\lambda$ given observed $k$, but this breaks down at $k=0$, giving zero
+# uncertainty regardless of how long we measured.
+#
+# Since the Poisson distribution is slightly skew, a good estimate is
+# $\lambda = k+1$ (option 1 above).  This follows from the formula for the
+# expected value of a distribution:
+#
+# .. math::
+#
+#    E[x] = \int_{-\infty}^\infty x P(x)\,dx
+#
+# For the poisson distribution, this is:
+#
+# .. math::
+#
+#    E[\lambda] = \int_0^\infty \lambda \frac{\lambda^k e^{-\lambda}}{k!} d\lambda
+#
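+# Evaluating this integral with the gamma-function identity
+# $\int_0^\infty \lambda^n e^{-\lambda}\,d\lambda = n!$ shows where the
+# $k+1$ estimate comes from:
+#
+# .. math::
+#
+#    E[\lambda] = \frac{1}{k!} \int_0^\infty \lambda^{k+1} e^{-\lambda}\,d\lambda
+#               = \frac{(k+1)!}{k!} = k + 1
+#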
+# Running some simulations, we can see that $\hat\lambda=(k+1)\pm\sqrt{k+1}$
+# (see `sim.py <sim.html>`_).  This is the mean and rms width of the distribution
+# of possible $\lambda$ values that could give rise to the observed $k$.
+#
+# Convincing the world to accept $\lambda = k+1$ would be challenging since
+# the expected value is not the most likely value.  As a compromise, one can
+# use $0 \pm 1$ for zero counts, and $k \pm \sqrt{k}$ for other values.  A
+# minor problem is that this permits negative count rates for zero without
+# significant penalty.
+#
+# Note that from the simulation, the variance on $\lambda$ given $\lambda=k$
+# is also $k+1$.
+#
+# Another suggestion is to choose the center and bounds so that the
+# uncertainty covers $1-\sigma$ from the distribution (68%).  A simple
+# approximation which does this is $(k+1/2) \pm \sqrt{k+1/4}$.
+#
+# Again, hard to convince the world to do, so one could compromise and
+# choose $1/2 \pm 1/2$ for $k=0$, and the usual $k \pm \sqrt{k}$ otherwise.
+#
+# What follows is a model which allows us to fit a simulated peak using
+# these various definitions of $\lambda$ and see which version best recovers
+# the true parameters which generated the peak.
+
+from bumps.names import *
+
+# Define the peak shape.  We are using a simple gaussian with center, width,
+# scale and background.
+
+def peak(x, scale, center, width, background):
+    return scale*np.exp(-0.5*(x-center)**2/width**2) + background
+
+# Generate simulated peak data with poisson noise.  When running the fit,
+# you can choose various values for the peak intensity.  We are using a
+# large number of points so that the peak is highly constrained by the
+# data, and the returned parameters are consistent from run to run.  Real
+# data is likely not so heavily sampled.
+
+x = np.linspace(5,20,345)
+#y = np.random.poisson(peak(x, 1000, 12, 1.0, 1))
+#y = np.random.poisson(peak(x, 300, 12, 1.5, 1))
+y = np.random.poisson(peak(x, 3, 12, 1.5, 1))
+
+# Define the various conditions.  These can be selected on the command
+# line by listing the condition name after the model file.  Note that
+# bumps will make any option not preceded by "-" available to the model
+# file as elements of *sys.argv*.  *sys.argv[0]* is the model file itself.
+#
+# The options correspond to the five options listed above, with an additional
+# option "poisson" which is used to select PoissonCurve rather than Curve
+# in the fit.
+
+cond = sys.argv[1] if len(sys.argv) > 1 else "pearson"
+if cond=="poisson": # option 0: use PoissonCurve rather than Curve to fit
+    pass
+elif cond=="expected": # option 1: L = (y+1) +/- sqrt(y+1)
+    y += 1
+    dy = np.sqrt(y)
+elif cond=="pearson": # option 2: L = (y + 0.5)  +/- sqrt(y + 1/4)
+    dy = np.sqrt(y+0.25)
+    y = y + 0.5
+elif cond=="expected_mle": # option 3: L = y +/- sqrt(y+1)
+    dy = np.sqrt(y+1)
+elif cond=="pearson_zero": # option 4: L = y +/- sqrt(y); L[0] = 0.5 +/- 0.5
+    dy = np.sqrt(y)
+    y = np.asarray(y, 'd')
+    y[y==0] = 0.5
+    dy[y==0] = 0.5
+elif cond=="expected_zero": # option 5: L = y +/- sqrt(y);  L[0] = 0 +/- 1
+    dy = np.sqrt(y)
+    dy[y==0] = 1.0
+else:
+    raise RuntimeError("Need to select uncertainty: pearson, pearson_zero, expected, expected_zero, expected_mle, poisson")
+
+# Build the fitter, and set the range on the fit parameters.
+
+if cond == "poisson":
+    M = PoissonCurve(peak,x,y,scale=1,center=2,width=2,background=0)
+else:
+    M = Curve(peak,x,y,dy,scale=1,center=2,width=2,background=0)
+dx = max(x)-min(x)
+M.scale.range(0,max(y)*1.5)
+M.center.range(min(x)-0.2*dx,max(x)+0.2*dx)
+M.width.range(0,0.7*dx)
+M.background.range(0,max(y))
+
+# Set the fit problem as usual.
+
+problem = FitProblem(M)
+
+# We can now load and run the fit, replacing COND with one of the condition
+# names defined above (for example, *expected*):
+#
+# .. parsed-literal::
+#
+#    $ bumps.py poisson.py --fit=dream --burn=600 --store=/tmp/T1 COND
+#
+# Comparing the results for the various conditions, we can see that all methods
+# yield a good fit to the underlying center, scale and width.  It is only the
+# background that causes problems.  Using poisson statistics for the fit gives
+# the proper background estimate, and of the gaussian approximations, the
+# traditional method of $\lambda = k \pm \sqrt{k}$ for $k>0$ and $0 \pm 1$
+# for $k=0$ gives the best result.
+#
+# .. table:: Fit results
+#
+#     = ================= ==========
+#     # method            background
+#     = ================= ==========
+#     0 poisson           1.0
+#     1 expected          1.55
+#     2 pearson           0.16
+#     3 expected_mle      0.55
+#     4 pearson_zero      0.34
+#     5 expected_zero     0.75
+#     = ================= ==========
diff --git a/doc/examples/curvefit/readme.rst b/doc/examples/curvefit/readme.rst
new file mode 100644
index 0000000..1449a14
--- /dev/null
+++ b/doc/examples/curvefit/readme.rst
@@ -0,0 +1,22 @@
+.. _poisson-fit:
+
+****************
+Simple functions
+****************
+
+.. contents:: :local:
+
+Bumps allows fits with varying levels of complexity.  Simple fits accept
+a function $f(x;p)$ and data $x,y,\sigma_y$, where vector $y$ is the value
+measured in conditions $x$, and $\sigma_y$ is the $1-\sigma$ uncertainty in
+the measurement.  Bumps also provides a simple wrapper for poisson data
+from counting statistics, with function $f(x;p)$ and data $x,y$, where $y$
+is the number of counts observed in conditions $x$.  The sim.py example
+simulates data from a poisson process, showing the maximum likelihood,
+expected value and variance of the rate estimate.
+
+.. toctree::
+
+    curve.rst
+    poisson.rst
+    sim.rst
+
diff --git a/doc/examples/curvefit/sim.png b/doc/examples/curvefit/sim.png
new file mode 100644
index 0000000..46d44b2
Binary files /dev/null and b/doc/examples/curvefit/sim.png differ
diff --git a/doc/examples/curvefit/sim.py b/doc/examples/curvefit/sim.py
new file mode 100644
index 0000000..a2a1bb0
--- /dev/null
+++ b/doc/examples/curvefit/sim.py
@@ -0,0 +1,93 @@
+# Poisson simulation
+# ==================
+#
+# For the poisson background estimation problem, `poisson.py <poisson.html>`_,
+# we explore different options for estimating the rate parameter
+# $\lambda$ from an observed number of counts.  This program uses a Monte
+# Carlo method to generate the probability distribution $P(\lambda|k)$ of
+# the underlying rate $\lambda$ given an observed number of counts $k$.
+# We do this by running a Poisson generator to draw thousands of samples
+# of $k$ from each of a range of values $\lambda$.  By counting the number
+# of times $k$ occurs in each $\lambda$ bin, and normalizing by the bin size
+# and by the total number of times that $k$ occurs across all bins, the
+# resulting vector is a histogram of the $\lambda$ probability distribution.
+#
+# With this histogram we can compute the expected value as:
+#
+# .. math::
+#
+#    \hat\lambda = \int_0^\infty \lambda P(\lambda|k) d\lambda
+#
+# and the variance as:
+#
+# .. math::
+#
+#    d\hat\lambda^2 = \int_0^\infty (\lambda - \hat\lambda)^2 P(\lambda|k) d\lambda
+#
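+# As a check on this construction, note that binning the samples this way is
+# effectively assuming a flat prior on $\lambda$, so the posterior
+# $P(\lambda|k) \propto \lambda^k e^{-\lambda}$ is a Gamma distribution with
+# shape $k+1$ and scale 1, whose mean and variance are both $k+1$.  The
+# simulated values below should reproduce this.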
+
+from __future__ import division, print_function
+import numpy as np
+from pylab import *
+
+# Generate a bunch of samples from different underlying rate
+# parameters L in the range 0 to 20
+
+P = np.random.poisson
+L = linspace(0,20,1000)
+X = P(L, size=(10000,len(L)))
+
+# Generate the distributions
+
+P = dict((k, sum(X==k,axis=0)/sum(X==k)) for k in range(4))
+
+# Show the expected value of L for each observed value k
+
+print("Expected value of L for a given observed k")
+for k,Pi in sorted(P.items()):
+    print(k, sum(L*Pi))
+
+# Show the variance.  Note that we are using $\hat\lambda = k+1$ as observed
+# from the expected value table.  This is not strictly correct since we have
+# lost a degree of freedom by using $\hat\lambda$ estimated from the data,
+# but good enough for an approximate value of the variance.
+
+print("Variance of L for a given observed k")
+for k, Pi in sorted(P.items()):
+    print(k, sum((L-(k+1))**2*Pi))
+
+# Plot the distributions of $\lambda$ that give rise to each observed value $k$.
+
+for k,Pi in sorted(P.items()):
+    plot(L, Pi/(L[1]-L[0]), label="k=%d"%k, hold=True)
+xlabel(r'$\lambda$')
+ylabel(r'$P(\lambda|k)$')
+xticks([0,1,2,3,4,5,6,7,8,9,10])
+axis([0, 10, 0, 0.5])
+title(r'Probability of underlying rate $\lambda$ for different observed $k$')
+legend()
+grid(True)
+show()
+
+
+# Output:
+#
+# .. parsed-literal::
+#
+#    Expected value of L for a given observed k
+#    0 0.989473184121
+#    1 2.00279003084
+#    2 2.99802515025
+#    3 3.9990621889
+#    Variance of L for a given observed k
+#    0 0.998074244206
+#    1 2.00796671097
+#    2 2.99095589399
+#    3 3.99952301552
+#
+# .. figure:: sim.png
+#     :alt: Probability of underlying rate lambda for different observed k
+#
+#     The figure clearly shows that the maximum likelihood value for $\lambda$
+#     is equal to the observed counts $k$.  Because the histogram is skewed
+#     to the right, the expected value is a little larger, with an estimated
+#     value of $k+1$, as seen from the output.
diff --git a/doc/examples/entropy/check_entropy.py b/doc/examples/entropy/check_entropy.py
new file mode 100644
index 0000000..66ab775
--- /dev/null
+++ b/doc/examples/entropy/check_entropy.py
@@ -0,0 +1,71 @@
+# Check the entropy calculator
+# ============================
+#
+# Many of the probability distributions in scipy.stats include a method
+# to compute the entropy of the distribution.  We can use these to test
+# the values from bumps against known good values.
+
+from math import log
+from scipy.stats import distributions
+from bumps.names import *
+
+# Create the distribution using the name and parameters from the command line.
+# Provide some handy help if no distribution is given.
+
+USAGE = """
+Usage: bumps check_entropy.py dist p1 p2 ...
+
+where dist is one of the distributions in scipy.stats.distributions and
+p1, p2, ... are the arguments for the distribution in the order that they
+appear. For example, for the normal distribution, x ~ N(3, 0.2), use:
+
+    bumps --fit=dream --entropy  --store=/tmp/T1 check_entropy.py norm 3 0.2
+"""
+if len(sys.argv) > 1:
+    D_class = getattr(distributions, sys.argv[1])
+    args = [float(v) for v in sys.argv[2:]]
+    D = D_class(*args)
+else:
+    print(USAGE)
+    sys.exit()
+
+# Set the fitting problem using the direct PDF method.  In this case, bumps
+# is not being used to fit data, but instead to explore the probability
+# distribution directly through the negative log likelihood function.  The
+# only argument to this function is the parameter value x, which becomes the
+# fitting parameter.  This model file will not work for multivariate
+# distributions.
+
+def D_nllf(x):
+    return -D.logpdf(x)
+M = PDF(D_nllf, x=0.9)
+M.x.range(-inf, inf)
+
+problem = FitProblem(M)
+
+# Before fitting, print the expected entropy of the distribution.  The
+# scipy entropy is in nats, so divide by log(2) to report it in bits.
+
+print("*** Expected entropy: %.4f"%(D.entropy()/log(2)))
+
+# To exercise the entropy calculator, try fitting some non-normal
+# distributions:
+#
+# .. parsed-literal::
+#
+#       t 84            # close to normal
+#       t 4             # high kurtosis
+#       uniform -5 100  # high entropy
+#       cauchy 0 1      # undefined variance
+#       expon 0.1 0.2   # asymmetric, narrow
+#       beta 0.5 0.5    # 'antimodal' u-shaped pdf
+#       beta 2 5        # skewed
+#
+# Ideally, the entropy estimated by bumps will match the predicted entropy
+# when using *--fit=dream*.  This is not the case for *beta 0.5 0.5*.  For
+# the other distributions, the estimated entropy is within uncertainty of
+# the actual value, but the uncertainty is a bit high.
+#
+# The other fitters, which use the curvature at the peak to estimate
+# the entropy, do not work reliably when the fit is not normal.  Try
+# the same distributions with *--fit=amoeba* to see this.
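+#
+# (For reference, the curvature approximation is exact for a normal
+# distribution: if $C$ is the covariance implied by the curvature at the
+# peak, the entropy is $\frac{1}{2}\log_2\left((2\pi e)^d |C|\right)$ bits
+# for $d$ parameters, which is presumably why the estimate degrades when
+# the posterior is far from gaussian.)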
+#
diff --git a/doc/examples/entropy/peak.py b/doc/examples/entropy/peak.py
new file mode 100644
index 0000000..4729654
--- /dev/null
+++ b/doc/examples/entropy/peak.py
@@ -0,0 +1,50 @@
+# Bayesian Experimental Design
+# ============================
+#
+# Perform a tradeoff comparison between point density and counting time when
+# measuring a peak in a poisson process.
+#
+# Usage:
+#
+# .. parsed-literal::
+#
+#    bumps peak.py N --entropy --store=/tmp/T1 --fit=dream
+#
+# The parameter N is the number of data points to use within the range.
+#
+
+from bumps.names import *
+from numpy import exp, sqrt, pi, inf
+
+# Define the peak shape as a gaussian plus background
+def peak(x, scale, center, width, background):
+    return scale*exp(-0.5*(x-center)**2/width**2)/sqrt(2*pi*width**2) + background
+
+# Get the number of points from the command line
+if len(sys.argv) == 2:
+    npoints = int(sys.argv[1])
+else:
+    raise ValueError("Expected number of points n in the fit")
+
+# set a constant number of counts, equally divided between points
+x = np.linspace(5, 20, npoints)
+scale = 10000/npoints
+
+# Build the model, along with the valid fitting range.  There is no data yet,
+# so y is None.
+M = PoissonCurve(peak, x, y=None, scale=scale, center=15, width=1.5, background=1)
+M.scale.range(0, inf)
+dx = max(x)-min(x)
+M.center.range(min(x) - 0.2*dx, max(x) + 0.2*dx)
+M.width.range(0, 0.7*dx)
+M.background.range(0, inf)
+
+# Make a fake dataset from the given x spacing
+M.simulate_data()
+
+problem = FitProblem(M)
+
+
+# Running this problem for a few values of the number of points shows that
+# adding points, even with the counting time per point reduced to keep the
+# total counts fixed, does a better job of recovering the peak parameters.
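+#
+# For example, the reported entropy can be compared across a few choices of
+# N (the store directories are arbitrary):
+#
+# .. parsed-literal::
+#
+#    bumps peak.py 10 --entropy --store=/tmp/N10 --fit=dream
+#    bumps peak.py 30 --entropy --store=/tmp/N30 --fit=dream
+#    bumps peak.py 100 --entropy --store=/tmp/N100 --fit=dream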
diff --git a/doc/examples/index.rst b/doc/examples/index.rst
new file mode 100644
index 0000000..877a71d
--- /dev/null
+++ b/doc/examples/index.rst
@@ -0,0 +1,17 @@
+.. _tutorial-index:
+
+########
+Tutorial
+########
+
+This tutorial walks through the steps of setting up a model with Python
+scripting.  Scripting allows the user to create complex models with many
+constraints relatively easily.
+
+.. toctree::
+   :maxdepth: 2
+
+   curvefit/readme.rst
+   peaks/readme.rst
+   test_functions/readme.rst
+   entropy/check_entropy.rst
diff --git a/doc/examples/peaks/XY_mesh2.txt b/doc/examples/peaks/XY_mesh2.txt
new file mode 100644
index 0000000..7ac2944
--- /dev/null
+++ b/doc/examples/peaks/XY_mesh2.txt
@@ -0,0 +1,23 @@
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.518 -0.518 -0.518 -0.518 -0.518 -0.518 -0.518 -0.518 -0.518 -0.518 -0.518 -0.518 -0.518 24.750 22.000 29.500 22.250 26.750 20.000 24.250 20.750 25.250 30.000 30.750 21.500 30.250  4.131  3.841  4.912  3.733  4.465  3.708  3.865  3.363  4.161  4.743  4.684  4.272  4.657 
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.516 -0.516 -0.516 -0.516 -0.516 -0.516 -0.516 -0.516 -0.516 -0.516 -0.516 -0.516 -0.516 34.000 23.250 26.500 22.250 29.750 22.750 29.750 24.750 23.250 32.500 25.500 29.000 28.250  5.136  4.265  4.345  4.206  4.828  4.008  4.630  4.131  4.039  4.873  4.062  4.690  4.750
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.514 -0.514 -0.514 -0.514 -0.514 -0.514 -0.514 -0.514 -0.514 -0.514 -0.514 -0.514 -0.514 30.500 26.750 17.250 24.000 35.000 23.750 29.500 27.750 25.500 31.500 26.750 26.250 26.250  4.770  4.465  3.649  4.198  5.184  3.832  4.717  4.724  4.500  5.196  4.465  4.437  4.437
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.512 -0.512 -0.512 -0.512 -0.512 -0.512 -0.512 -0.512 -0.512 -0.512 -0.512 -0.512 -0.512 31.750 20.500 23.500 33.500 38.500 26.000 31.750 23.250 35.000 33.750 28.500 24.250 30.000  4.737  3.984  4.387  5.292  5.523  4.730  5.117  4.265  5.362  5.031  4.458  3.865  5.123
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.510 -0.510 -0.510 -0.510 -0.510 -0.510 -0.510 -0.510 -0.510 -0.510 -0.510 -0.510 -0.510 26.750 23.000 19.500 21.250 23.250 24.000 28.000 21.000 23.750 31.750 21.000 25.000 30.250  4.670  3.905  3.674  3.913  4.265  4.198  4.637  4.016  4.070  4.931  3.775  4.031  5.226
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.508 -0.508 -0.508 -0.508 -0.508 -0.508 -0.508 -0.508 -0.508 -0.508 -0.508 -0.508 -0.508 26.380 25.630 24.380 24.000 30.380 31.130 27.750 37.000 29.880 31.380 30.750 25.630 28.750  3.180  3.150  2.945  2.806  3.403  3.291  3.122  3.733  3.170  3.300  3.312  2.918  3.162
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.506 -0.506 -0.506 -0.506 -0.506 -0.506 -0.506 -0.506 -0.506 -0.506 -0.506 -0.506 -0.506 27.500 21.000 28.250 27.000 17.500 28.250 37.500 39.250 38.500 27.250 29.500 34.000 30.000  4.610  3.775  4.548  4.783  3.260  4.750  5.477  5.804  5.523  4.493  4.514  5.136  4.743
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.504 -0.504 -0.504 -0.504 -0.504 -0.504 -0.504 -0.504 -0.504 -0.504 -0.504 -0.504 -0.504 22.250 34.250 25.750 37.750 37.000 36.000 30.250 29.000 41.250 32.250 32.250 24.000 29.250  4.206  5.056  4.409  5.573  5.454  5.408  4.854  4.486  5.890  4.763  5.321  4.416  4.395
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.502 -0.502 -0.502 -0.502 -0.502 -0.502 -0.502 -0.502 -0.502 -0.502 -0.502 -0.502 -0.502 26.000 27.750 36.000 27.750 28.000 34.250 49.500 39.000 32.750 29.250 30.250 32.500 24.750  4.316  4.308  5.408  4.724  4.637  5.056  6.452  5.196  4.981  4.802  4.451  5.062  4.352
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.500 -0.500 -0.500 -0.500 -0.500 -0.500 -0.500 -0.500 -0.500 -0.500 -0.500 -0.500 -0.500 35.290 28.760 31.760 22.760 36.530 37.590 45.000 55.240 51.000 27.000 31.760 22.240 21.710  4.139  3.534  4.101  3.362  4.355  4.180  4.686  5.413  5.184  3.445  3.985  3.264  3.162
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.498 -0.498 -0.498 -0.498 -0.498 -0.498 -0.498 -0.498 -0.498 -0.498 -0.498 -0.498 -0.498 28.250 22.500 36.750 33.000 42.250 54.750 71.500 76.500 47.250 42.000 20.500 29.500 29.250  4.750  4.108  5.528  4.704  5.772  6.581  7.382  7.794  5.826  5.339  3.742  4.717  4.802
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.496 -0.496 -0.496 -0.496 -0.496 -0.496 -0.496 -0.496 -0.496 -0.496 -0.496 -0.496 -0.496 29.290 27.530 30.530 28.760 51.710 53.470 78.180 71.820 52.590 32.470 36.880 34.410 31.240  3.993  3.668  3.870  3.664  5.118  5.178  6.267  5.930  5.102  4.016  4.369  4.043  3.781
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.492 -0.492 -0.492 -0.492 -0.492 -0.492 -0.492 -0.492 -0.492 -0.492 -0.492 -0.492 -0.492 24.530 24.880 28.410 59.000 82.000 95.750 80.250 62.750 62.750 52.250 37.750 33.500 25.750  3.454  3.472  3.647  6.810  8.085  8.664  7.487  6.878  6.878  6.486  5.403  4.924  4.191
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.490 -0.490 -0.490 -0.490 -0.490 -0.490 -0.490 -0.490 -0.490 -0.490 -0.490 -0.490 -0.490 25.750 39.250 41.500 54.250 81.750 98.500 70.750 51.250 54.750 71.000 42.000 22.500 36.000  4.191  5.804  5.657  6.418  8.363  8.246  7.420  6.149  6.290  7.237  5.679  4.108  5.408
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.488 -0.488 -0.488 -0.488 -0.488 -0.488 -0.488 -0.488 -0.488 -0.488 -0.488 -0.488 -0.488 28.500 37.750 45.000 59.250 70.500 65.250 56.750 75.250 92.250 64.500 42.500 34.750 36.250  4.861  5.403  5.646  6.887  7.220  6.694  6.369  7.446  8.671  6.874  5.184  4.893  4.969
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.486 -0.486 -0.486 -0.486 -0.486 -0.486 -0.486 -0.486 -0.486 -0.486 -0.486 -0.486 -0.486 32.000 33.750 37.000 59.000 56.250 63.250 68.750 87.000 93.250 67.000 37.500 29.250 36.500  4.650  4.841  5.099  6.671  6.047  7.163  6.960  8.239  8.482  7.228  5.303  4.802  5.256
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.484 -0.484 -0.484 -0.484 -0.484 -0.484 -0.484 -0.484 -0.484 -0.484 -0.484 -0.484 -0.484 24.500 41.750 37.750 51.750 48.250 55.500 68.000 103.500 75.500 42.000 34.500 37.250 25.750 4.000  5.750  5.739  6.016  5.540  6.393  7.133  8.617  7.263  5.339  5.339  5.202  4.191
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.482 -0.482 -0.482 -0.482 -0.482 -0.482 -0.482 -0.482 -0.482 -0.482 -0.482 -0.482 -0.482 35.000 27.500 43.750 48.000 46.250 47.750 70.750 72.500 66.750 49.500 31.000 28.500 22.750  5.362  4.402  5.836  6.093  5.783  6.005  7.420  7.159  6.887  6.000  4.596  4.458  4.008
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.480 -0.480 -0.480 -0.480 -0.480 -0.480 -0.480 -0.480 -0.480 -0.480 -0.480 -0.480 -0.480 29.250 28.500 31.000 42.000 50.250 60.000 71.000 61.750 50.250 31.000 27.250 27.500 23.500  4.994  4.458  4.796  5.842  6.260  6.708  7.616  6.562  5.953  4.596  4.493  4.183  4.168
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.478 -0.478 -0.478 -0.478 -0.478 -0.478 -0.478 -0.478 -0.478 -0.478 -0.478 -0.478 -0.478 20.250 40.750 43.250 34.000 53.500 47.000 68.250 53.000 41.000 25.500 24.500 32.250 28.750  3.326  5.190  5.815  4.950  6.605  5.895  7.336  6.295  5.635  4.287  4.228  5.142  4.576
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.476 -0.476 -0.476 -0.476 -0.476 -0.476 -0.476 -0.476 -0.476 -0.476 -0.476 -0.476 -0.476 28.500 29.250 25.500 27.500 47.500 43.000 38.500 35.250 30.250 27.250 26.250 32.250 34.000  4.664  4.603  4.500  4.402  6.374  5.208  5.350  4.918  4.854  4.493  4.437  5.142  5.489
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.474 -0.474 -0.474 -0.474 -0.474 -0.474 -0.474 -0.474 -0.474 -0.474 -0.474 -0.474 -0.474 29.000 39.500 29.000 27.000 28.500 34.750 42.750 30.500 31.250 25.750 30.750 24.750 20.250  5.074  5.895  4.690  4.583  4.458  5.081  5.629  4.770  5.093  4.409  4.880  4.131  3.597
+0.484 0.486 0.488 0.49 0.492 0.494 0.496 0.498 0.5 0.502 0.504 0.506 0.508 -0.472 -0.472 -0.472 -0.472 -0.472 -0.472 -0.472 -0.472 -0.472 -0.472 -0.472 -0.472 -0.472 19.500 35.000 33.250 32.000 26.750 27.000 29.500 26.000 27.500 31.250 29.750 23.000 32.500  3.674  5.362  5.190  4.848  4.465  4.783  4.717  4.093  4.610  4.710  4.630  4.138  5.244
diff --git a/doc/examples/peaks/model.py b/doc/examples/peaks/model.py
new file mode 100644
index 0000000..52b0b66
--- /dev/null
+++ b/doc/examples/peaks/model.py
@@ -0,0 +1,113 @@
+from __future__ import print_function
+
+# Look for the peak fitter (peaks.py) in the same directory as this model file
+import os, sys
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+
+import numpy as np
+from peaks import Peaks, Gaussian, Background
+from bumps.names import Parameter, pmath, FitProblem
+
+
+
+def read_data():
+#    data= Z1.T
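+    # Each row of XY_mesh2.txt holds 13 X values, 13 Y values, 13 measured
+    # counts and 13 count uncertainties (52 columns).  With unpack=True,
+    # genfromtxt returns the transpose, so A[26:39] selects the count
+    # columns for each of the 23 scan lines.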
+    X = np.linspace(0.4840, 0.5080,13)
+    Y = np.linspace(-0.5180,-0.4720,23)
+    X,Y = np.meshgrid(X, Y)
+    A = np.genfromtxt('XY_mesh2.txt',unpack=True)
+    Z1 = A[26:39]
+    data= Z1.T
+    err=np.sqrt(data)
+    #err= A[39:54]
+    return X, Y, data, err
+
+def build_problem():
+
+    M = Peaks([Gaussian(name="G1-"),
+               Gaussian(name="G2-"),
+               #Gaussian(name="G3-"),
+               #Gaussian(name="G4-"),
+               Background()],
+               *read_data())
+    background = np.min(M.data)
+    background += np.sqrt(background)
+    signal = np.sum(M.data) - M.data.size*background
+    M.parts[-1].C.value = background
+    peak1 = M.parts[0]
+
+    peak1.xc.range(0.45,0.55)
+    peak1.yc.range(-0.55,-0.4)
+    peak1.xc.value = 0.500
+    peak1.yc.value = -0.485
+
+    if 0:
+        # Peak centers are independent
+        for peak in M.parts[1:-1]:
+            peak.xc.range(0.45,0.55)
+            peak.yc.range(-0.55,-0.4)
+        M.parts[1].xc.value = 0.495
+        M.parts[1].yc.value = -0.495
+    else:
+        # Peak centers lie on a line
+        theta=Parameter(45, name="theta")
+        theta.range(0,90)
+        for i,peak in enumerate(M.parts[1:-1]):
+            delta=Parameter(.0045, name="delta-%d"%(i+1))
+            delta.range(0.0,0.015)
+            peak.xc = peak1.xc + delta*pmath.cosd(theta)
+            peak.yc = peak1.yc + delta*pmath.sind(theta)
+
+        # Initial values
+        cx, cy = 0.4996-0.4957, -0.4849+0.4917
+        theta.value = np.degrees(np.arctan2(cy,cx))
+        delta.value = np.sqrt(cx**2+cy**2)
+
+    # Initial values
+    for peak in M.parts[:-1]:
+        peak.A.value = signal/(len(M.parts)-1)  # Equal size peaks
+    dx, dy = 0.4997-0.4903, -0.4969+0.4851
+    dxm, dym = 0.4951-0.4960, -0.4941+0.4879
+    peak1.s1.value = np.sqrt(dx**2+dy**2)/2.35/2
+    peak1.s2.value = np.sqrt(dxm**2+dym**2)/2.35/2
+    peak1.theta.value = np.degrees(np.arctan2(dy,dx))
+
+
+    # Peak intensity varies
+    for peak in M.parts[:-1]:
+        peak.A.range(0.1*signal,1.1*signal)
+
+    peak1.s1.range(0.002,0.02)
+    peak1.s2.range(0.001,0.02)
+    peak1.theta.range(-90, -0)
+
+    if 1:
+        # Peak shape is the same across all peaks
+        for peak in M.parts[1:-1]:
+            peak.s1 = peak1.s1
+            peak.s2 = peak1.s2
+            peak.theta = peak1.theta
+    else:
+        for peak in M.parts[1:-1]:
+            peak.s1.range(*peak1.s1.bounds.limits)
+            peak.s2.range(*peak1.s2.bounds.limits)
+            peak.theta.range(*peak1.theta.bounds.limits)
+
+    if 1:
+        M.parts[-1].C.pmp(100.0)
+
+    if 1:
+        for peak in M.parts[:-1]:
+            peak.s1.value = 0.006
+            peak.s2.value = 0.002
+            peak.theta.value = -60.0
+            peak.A.value = signal/2
+
+    if 0:
+        print("shape",peak1.s1.value,peak1.s2.value,peak1.theta.value)
+        print("centers theta,delta",theta.value,delta.value)
+        print("centers",(peak1.xc.value,peak1.yc.value),
+              (M.parts[1].xc.value,M.parts[1].yc.value))
+    return FitProblem(M)
+
+problem = build_problem()
diff --git a/doc/examples/peaks/peaks.py b/doc/examples/peaks/peaks.py
new file mode 100644
index 0000000..0a0d618
--- /dev/null
+++ b/doc/examples/peaks/peaks.py
@@ -0,0 +1,111 @@
+from __future__ import division, print_function
+
+from math import radians, sin, cos
+
+import numpy as np
+
+from bumps.parameter import Parameter, varying
+
+def plot(X,Y,theory,data,err):
+    import pylab
+
+    #print "theory",theory[1:6,1:6]
+    #print "data",data[1:6,1:6]
+    #print "delta",(data-theory)[1:6,1:6]
+    pylab.subplot(131)
+    pylab.pcolormesh(X,Y, data)
+    pylab.subplot(132)
+    pylab.pcolormesh(X,Y, theory)
+    pylab.subplot(133)
+    pylab.pcolormesh(X,Y, (data-theory)/(err+1))
+
+class Gaussian(object):
+    def __init__(self, A=1, xc=0, yc=0, s1=1, s2=1, theta=0, name=""):
+        self.A = Parameter(A,name=name+"A")
+        self.xc = Parameter(xc,name=name+"xc")
+        self.yc = Parameter(yc,name=name+"yc")
+        self.s1 = Parameter(s1,name=name+"s1")
+        self.s2 = Parameter(s2,name=name+"s2")
+        self.theta = Parameter(theta,name=name+"theta")
+
+    def parameters(self):
+        return dict(A=self.A,
+                    xc=self.xc, yc=self.yc,
+                    s1=self.s1, s2=self.s2,
+                    theta=self.theta)
+
+    def __call__(self, x, y):
+        height = self.A.value
+        s1 = self.s1.value
+        s2 = self.s2.value
+        t  = -radians(self.theta.value)
+        xc = self.xc.value
+        yc = self.yc.value
+        if s1==0 or s2==0: return np.zeros_like(x)
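+        # Quadratic form of a 2D gaussian with principal widths s1, s2
+        # rotated by theta (b already carries the factor of 2 on the cross
+        # term through sin(2t)).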
+        a =  cos(t)**2/s1**2 + sin(t)**2/s2**2
+        b = sin(2*t)*(-1/s1**2 + 1/s2**2)
+        c =  sin(t)**2/s1**2 + cos(t)**2/s2**2
+        xbar,ybar = x-xc,y-yc
+        Zf = np.exp( -0.5*(a*xbar**2 + b*xbar*ybar + c*ybar**2) )
+        #normalization=1.0/(2*np.pi*s1*s2)
+        #print "norm",np.sum(Zf)*normalization
+        total = np.sum(Zf)
+        if False and (np.isnan(total) or total==0):
+            print("G(A,s1,s2,t,xc,yc) ->",total,(height,s1,s2,t,xc,yc))
+            print("a,b,c",a,b,c)
+        return Zf/total*abs(height) if total>0 else np.zeros_like(x)
+
+class Background(object):
+    def __init__(self, C=0, name=""):
+        self.C = Parameter(C,name=name+"background")
+    def parameters(self):
+        return dict(C=self.C)
+    def __call__(self, x, y):
+        return self.C.value
+
+class Peaks(object):
+    def __init__(self, parts, X, Y, data, err):
+        self.X,self.Y,self.data,self.err = X, Y, data, err
+        self.parts = parts
+
+    def numpoints(self):
+        return np.prod(self.data.shape)
+
+    def parameters(self):
+        return [p.parameters() for p in self.parts]
+
+    def theory(self):
+        #return self.parts[0](self.X,self.Y)
+        #parts = [M(self.X,self.Y) for M in self.parts]
+        #for i,p in enumerate(parts):
+        #    if np.any(np.isnan(p)): print "NaN in part",i
+        return sum(M(self.X,self.Y) for M in self.parts)
+
+    def residuals(self):
+        #if np.any(self.err ==0): print "zeros in err"
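+        # err+1 keeps the denominator finite for bins with zero counts,
+        # where the counting uncertainty sqrt(counts) would be zero.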
+        return (self.theory()-self.data)/(self.err+1)
+
+    def nllf(self):
+        R = self.residuals()
+        #if np.any(np.isnan(R)): print "NaN in residuals"
+        return 0.5*np.sum(R**2)
+
+    def __call__(self):
+        return 2*self.nllf()/self.dof
+
+    def plot(self, view='linear'):
+        plot(self.X, self.Y, self.theory(), self.data, self.err)
+
+    def save(self, basename):
+        import json
+        pars = [(p.name,p.value) for p in varying(self.parameters())]
+        out = json.dumps(dict(theory=self.theory().tolist(),
+                              data=self.data.tolist(),
+                              err=self.err.tolist(),
+                              X = self.X.tolist(),
+                              Y = self.Y.tolist(),
+                              pars = pars))
+        open(basename+".json","w").write(out)
+
+    def update(self):
+        pass
diff --git a/doc/examples/peaks/plot.py b/doc/examples/peaks/plot.py
new file mode 100755
index 0000000..f754861
--- /dev/null
+++ b/doc/examples/peaks/plot.py
@@ -0,0 +1,33 @@
+import sys
+import json
+
+import numpy as np
+import pylab
+
+def plot(X,Y,theory,data,err):
+    #print "theory",theory[1:6,1:6]
+    #print "data",data[1:6,1:6]
+    #print "delta",(data-theory)[1:6,1:6]
+    pylab.subplot(3,1,1)
+    pylab.pcolormesh(X,Y, data)
+    pylab.subplot(3,1,2)
+    pylab.pcolormesh(X,Y, theory)
+    pylab.subplot(3,1,3)
+    pylab.pcolormesh(X,Y, (data-theory)/(err+1))
+
+def load_results(filename):
+    """
+    Reload results from the json file created by Peaks.save
+    """
+    data = json.load(open(filename))
+    # Convert array info back into numpy arrays
+    data.update( (k,np.array(data[k]))
+                 for k in ('X', 'Y', 'data', 'err', 'theory') )
+    return data
+
+def main():
+    data = load_results(sys.argv[1])
+    plot(data['X'],data['Y'],data['theory'],data['data'],data['err'])
+    pylab.show()
+
+if __name__ == "__main__": main()
diff --git a/doc/examples/peaks/readme.rst b/doc/examples/peaks/readme.rst
new file mode 100644
index 0000000..653865d
--- /dev/null
+++ b/doc/examples/peaks/readme.rst
@@ -0,0 +1,8 @@
+.. _peaks-example:
+
+Peak Fitting
+************
+
+This example shows how to develop multipart models using bumps parameters.
+The data format is 2D, so the usual 1D x-y plots are not sufficient, and
+a special plot method is needed to display the data.
diff --git a/doc/examples/pymc/disaster_model.py b/doc/examples/pymc/disaster_model.py
new file mode 100644
index 0000000..713d259
--- /dev/null
+++ b/doc/examples/pymc/disaster_model.py
@@ -0,0 +1,3 @@
+from bumps.pymcfit import PyMCProblem
+from pymc.examples import disaster_model as model
+problem = PyMCProblem(model)
diff --git a/doc/examples/pymc/model.py b/doc/examples/pymc/model.py
new file mode 100644
index 0000000..1543e0f
--- /dev/null
+++ b/doc/examples/pymc/model.py
@@ -0,0 +1,11 @@
+import sys
+from importlib import import_module
+from bumps.pymcfit import PyMCProblem
+
+if len(sys.argv) != 2:
+    raise ValueError("Expected name of pymc file containing a model")
+
+module = sys.argv[1]
+__name__ = module.split('.')[-1]
+model = import_module(module)
+problem = PyMCProblem(model)
diff --git a/doc/examples/test_functions/anticor.py b/doc/examples/test_functions/anticor.py
new file mode 100755
index 0000000..0a99450
--- /dev/null
+++ b/doc/examples/test_functions/anticor.py
@@ -0,0 +1,40 @@
+# Anticorrelation demo
+# ====================
+#
+# Model with strong correlations between the fitted parameters.
+#
+# We fit the model y = a*x to data with N(0,1) noise, made complicated by
+# defining a = p1+p2.
+#
+# The expected distribution for p1 and p2 will be uniform, with p2 = a-p1 in
+# each sample.  Because this distribution is inherently unbounded, artificial
+# bounds are required on at least one of the parameters for finite duration
+# simulations.
+#
+# The expected distribution for p1+p2 can be determined from the linear model
+# y = a*x.  This is reported along with the values estimated from MCMC.
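+#
+# (For this model the expected width is available in closed form: with
+# independent gaussian errors of size $\sigma$, the least squares estimate
+# of the slope $a = p1+p2$ has variance $\sigma^2/\sum_i x_i^2$, which can
+# be compared against the width of the p1+p2 distribution found by MCMC.)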
+
+from bumps.names import *
+
+# Anticorrelated function
+
+def fn(x, a, b): return (a+b)*x
+
+# Fake data
+
+sigma = 1
+x = np.linspace(-1., 1, 40)
+dy = sigma*np.ones_like(x)
+y = fn(x,5,5) + np.random.randn(*x.shape)*dy
+
+# Wrap it in a curve fitter
+
+M = Curve(fn, x, y, dy, a=(-20,20), b=(-20,20))
+
+# Alternative representation, fitting a and S=a+b, and setting b=S-a.
+#
+# ::
+#
+#     S = Parameter((-20,20), name="sum")
+#     M.b = S-M.a
+
+problem = FitProblem(M)
diff --git a/doc/examples/test_functions/bounded.py b/doc/examples/test_functions/bounded.py
new file mode 100644
index 0000000..0e2663b
--- /dev/null
+++ b/doc/examples/test_functions/bounded.py
@@ -0,0 +1,37 @@
+# Boundary check
+# ==============
+#
+# Check probability at boundaries.
+#
+# In this case we define the probability density function (PDF) directly
+# in an n-dimensional uniform box.
+#
+# Ideally, the correlation plots and variable distributions will be uniform.
+
+from bumps.names import *
+
+# Adjust scale from 1e-150 to 1e+150 and you will see that DREAM is equally
+# adept at filling the box.
+
+scale = 1
+
+# Uniform cost function.
+
+def box(x):
+    return 0 if np.all(np.abs(x)<=scale) else np.inf
+
+def diamond(x):
+    return 0 if np.sum(np.abs(x))<=scale else np.inf
+
+# Wrap it in a PDF object which turns an arbitrary probability density into
+# a fitting function.  Give it a valid initial value, and set the fit range
+# to a box twice the size of the support.
+
+M = PDF(lambda a,b: box([a,b]))
+#M = PDF(lambda a,b: diamond([a,b]))
+M.a.range(-2*scale,2*scale)
+M.b.range(-2*scale,2*scale)
+
+# Make the PDF a fit problem that bumps can process.
+
+problem = FitProblem(M)
\ No newline at end of file
diff --git a/doc/examples/test_functions/cross.py b/doc/examples/test_functions/cross.py
new file mode 100755
index 0000000..8278859
--- /dev/null
+++ b/doc/examples/test_functions/cross.py
@@ -0,0 +1,46 @@
+# Cross-shaped anti-correlation
+# =============================
+#
+# Example model with strong correlations between the fitted parameters.
+#
+# In this case we define the probability density function (PDF) directly
+# as an 'X' pattern, with width sigma.
+#
+# Ideally, the a-b correlation plot will show the 'X' completely filled
+# within the bounds.
+
+from bumps.names import *
+
+# Adjust scale from 1e-150 to 1e+150 and you will see that DREAM is equally
+# adept at filling the cross. However, if sigma gets too small relative to
+# scale the fit will get stuck on one of the arms, and if sigma gets too
+# large, then the whole space will be filled and the 'X' will not form.
+
+scale = 10
+sigma = 0.1*scale
+#sigma = 0.001*scale  # Too small
+#sigma = 10*scale   # Too large
+
+# Simple gaussian cost function based on the distance to the closest ridge
+# *x=y* or *x=-y*.
+
+def fn(a, b):
+    return 0.5*min(abs(a+b),abs(a-b))**2/sigma**2 + 1
+
+# Wrap it in a PDF object which turns an arbitrary probability density into
+# a fitting function.  Give it an initial value away from the cross.
+
+M = PDF(fn, a=3*scale, b=1.2*scale)
+
+# Set the range of values to include the cross.  You can skip the center of
+# the cross by setting b.range to (1,3), and for reasonable values of sigma
+# both arms will still be covered.  Extend the range too far (e.g.,
+# a.range(-3000,3000), b.range(-1000,3000)) and, as with a value of sigma
+# that is too small, only one arm of the cross will be filled.
+
+M.a.range(-3*scale,3*scale)
+M.b.range(-1*scale,3*scale)
+
+# Make the PDF a fit problem that bumps can process.
+
+problem = FitProblem(M)
diff --git a/doc/examples/test_functions/mixture.py b/doc/examples/test_functions/mixture.py
new file mode 100755
index 0000000..983f02a
--- /dev/null
+++ b/doc/examples/test_functions/mixture.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+
+"""
+Multimodal demonstration using gaussian mixture model.
+
+The model is a mixture model representing the probability density as a
+weighted sum of gaussians.
+
+This example shows the performance of the algorithm on multimodal densities,
+with an adjustable number of densities and degree of separation.
+
+The peaks are distributed about the x-y plane so that the marginal densities
+in x and y are spaced every 2 units using Latin hypercube sampling.  For small
+peak widths, this means that the densities will not overlap, and the marginal
+maximum likelihood for a given x or y value should match the estimated density.
+With overlap, the marginal density will overestimate the marginal maximum
+likelihood.
+
+Adjust the width of the peaks, *S*, to see the effect of relative diameter of
+the modes on sampling.  Adjust the height of the peaks, *I*, to see the
+effects of the relative height of the modes.  Adjust the count *n* to see
+the effects of the number of modes.
+
+Note that dream.diffev.de_step adds jitter to the parameters at the 1e-6 level,
+so *S* < 1e-4 cannot be modeled reliably.
+
+*draws* is set to 1000 samples per mode.  *burn* is set to 100 samples per mode.
+Population size *h* is set to 20 per mode.  A good choice for number of
+sequences *k* is not yet determined.
+"""
+import numpy as np
+from bumps.dream.model import MVNormal, Mixture
+from bumps.names import *
+
+if 1: # Fixed layout of 5 minima
+    n = 5
+    S = [0.1]*5
+    x = [-4, -2, 0, 2, 4]
+    y = [2, -2, -4, 0, 4]
+    I = [5, 2.5, 1, 4, 1]
+else: # Semirandom layout of n minima
+    n = 40
+    S = [0.1]*n
+    x = np.linspace(-n+1,n-1,n)
+    y = np.random.permutation(x)
+    I = 2*np.linspace(-1,1,n)**2 + 1
+
+dims = 2
+centers = [x, y] + [np.random.permutation(x) for _ in range(2, dims)]
+centers = np.asarray(centers).T
+args = [] # Sequence of density, weight, density, weight, ...
+for mu_i,Si,Ii in zip(centers,S,I):
+    args.extend( (MVNormal(mu_i,Si*np.eye(dims)), Ii) )
+model = Mixture(*args)
+
+def plot2d(fn, args=None, range=(-10,10)):
+    """
+    Return a mesh plotter for the given function.
+
+    *args* are the function arguments that are to be meshed (usually the
+    first two arguments to the function).  *range* is the bounding box
+    for the 2D mesh.
+
+    All arguments except the meshed arguments are held fixed.
+    """
+    if args is None:
+        args = [0, 1]
+    def plotter(p, view=None):
+        import pylab
+        if len(p) == 1:
+            x = p[0]
+            r = np.linspace(range[0], range[1], 400)
+            pylab.plot(x+r, [fn(v) for v in x+r])
+            pylab.xlabel(args[0])
+            pylab.ylabel("-log P(%s)"%args[0])
+        else:
+            r = np.linspace(range[0], range[1], 20)
+            x, y = p[args[0]], p[args[1]]
+            data = np.empty((len(r),len(r)),'d')
+            for j, xj in enumerate(x+r):
+                for k, yk in enumerate(y+r):
+                    p[args[0]], p[args[1]] = xj, yk
+                    data[j, k] = fn(p)
+            pylab.pcolormesh(x+r, y+r, data)
+            pylab.plot(x, y, 'o', hold=True, markersize=6,
+                       markerfacecolor='red', markeredgecolor='black',
+                       markeredgewidth=1, alpha=0.7)
+            pylab.xlabel(args[0])
+            pylab.ylabel(args[1])
+    return plotter
+
+
+M = VectorPDF(model.nllf, p=[0.]*dims, plot=plot2d(model.nllf))
+for _, p in M.parameters().items():
+    p.range(-10, 10)
+problem = FitProblem(M)
diff --git a/doc/examples/test_functions/model.py b/doc/examples/test_functions/model.py
new file mode 100644
index 0000000..17a1efc
--- /dev/null
+++ b/doc/examples/test_functions/model.py
@@ -0,0 +1,255 @@
+"""
+
+Surjanovic, S. & Bingham, D. (2013).
+Virtual Library of Simulation Experiments: Test Functions and Datasets.
+Retrieved April 18, 2016, from http://www.sfu.ca/~ssurjano.
+"""
+from __future__ import print_function
+
+from functools import reduce, wraps
+import inspect
+
+from numpy import sin, cos, linspace, meshgrid, e, pi, sqrt, exp
+from bumps.names import *
+
+class ModelFunction(object):
+    _registered = {}  # class mutable containing list of registered functions
+    def __init__(self, f, xmin, fmin, bounds, dim):
+        self.name = f.__name__
+        self.xmin = xmin
+        self.fmin = fmin
+        self.bounds = bounds
+        self.dim = dim
+        self.f = f
+
+        # register the function in the list of available functions
+        ModelFunction._registered[self.name] = self
+
+    def __call__(self, x):
+        return self.f(x)
+
+    def fk(self, k, **kw):
+        """
+        Return a function of *k* scalar arguments which evaluates *f* on
+        the corresponding *k*-vector.
+        """
+        wrapper = wraps(self.f)
+        if k == 1:
+            calculator = wrapper(lambda x: self.f((x,), **kw))
+        elif k == 2:
+            calculator = wrapper(lambda x, y: self.f((x, y), **kw))
+        else:
+            args = ",".join("x%d"%j for j in range(1,k+1))
+            context = {'self': self, 'kw': kw}
+            calculator = wrapper(eval("lambda %s: self.f((%s), **kw)"
+                                      %(args, args), context))
+        return calculator
+
+    def fv(self, k, **kw):
+        """
+        Return a function with arguments f(v) for vector v.
+        """
+        wrapper = wraps(self.f)
+        calculator = wrapper(lambda x: self.f(x, **kw))
+        return calculator
+
+    @staticmethod
+    def lookup(name):
+        return ModelFunction._registered.get(name, None)
+
+    @staticmethod
+    def available():
+        return list(sorted(ModelFunction._registered.keys()))
+
+def select_function(argv, vector=True):
+    if len(argv) == 0:
+        raise ValueError("no function provided")
+
+    model = ModelFunction.lookup(argv[0])
+    if model is None:
+        raise ValueError("unknown model %s"%argv[0])
+
+    dim = int(argv[1]) if len(argv) > 1 else 2
+
+    return model.fv(dim) if vector else model.fk(dim)
+
+
+def fit_function(xmin=None, fmin=None, bounds=(-inf, inf), dim=None):
+    return lambda f: ModelFunction(f, xmin, fmin, bounds, dim)
+
+
+# ================ Model functions ====================
+
+def prod(L):
+    return reduce(lambda x,y: x*y, L, 1)
+
+@fit_function(fmin=0.0, xmin=3.0)
+def gauss(x):
+    """
+    Multivariate gaussian distribution
+    """
+    return sum(0.5*(xi - 3.)**2 for xi in x)
+
+@fit_function(fmin=0., xmin=3.)
+def laplace(x):
+    """
+    Product of Laplace distributions, mu=3, b=0.1.
+    """
+    return sum(abs(xi-3.)/0.1 for xi in x)
+
+@fit_function(fmin=0., xmin=3.)
+def sin_plus_quadratic(x, c=3., d=2., m=5., h=2.):
+    """
+    Sin + quadratic.  Multimodal with global minimum.
+
+    *c* is the center point where the function is minimized.
+
+    *d* is the distance between modes, one per dimension.
+
+    *h* is the sine wave amplitude, one per dimension, which controls
+    the height of the barrier between modes.
+
+    *m* is the curvature of the quadratic, one per dimension.
+    """
+    n = len(x)
+    if np.isscalar(c): c = [c]*n
+    if np.isscalar(d): d = [d]*n
+    if np.isscalar(h): h = [h]*n
+    if np.isscalar(m): m = [m]*n
+    return (sum(hi*(sin((2*pi/di)*xi-ci)+1.) for xi,ci,di,hi in zip(x, c, d, h))
+            + sum(((xi-ci)/float(mi))**2 for xi,ci,mi in zip(x, c, m)))
+
+@fit_function(fmin=0.0, xmin=0.0)
+def stepped_well(x):
+    return sum(np.floor(abs(xi)) for xi in x)
+
+@fit_function(xmin=0.0, fmin=0.0, bounds=(-32.768, 32.768))
+def ackley(x, a=20., b=0.2, c=2*pi):
+    """
+    Multimodal with deep global minimum.
+    """
+    n = len(x)
+    return (-a*exp(-b*sqrt(sum(xi**2 for xi in x)/n))
+            - exp(sum(cos(c*xi) for xi in x)/n) + a + e)
+
+@fit_function(dim=2, xmin=(3, 0.5), fmin=0.0, bounds=(-4.5, 4.5))
+def beale(xy):
+    x, y = xy
+    return (1.5 - x + x*y)**2 + (2.25 - x + x*y**2)**2 + (2.625 - x + x*y**3)**2
+
+_TRAY=1.34941
+@fit_function(dim=2, fmin=-2.06261, bounds=(-10, 10),
+              xmin=((_TRAY, _TRAY), (_TRAY, -_TRAY),
+                      (-_TRAY, _TRAY), (-_TRAY, -_TRAY)))
+def cross_in_tray(xy):
+    x, y = xy
+    return -0.0001*(abs(sin(x)*sin(y)*exp(abs(100-sqrt(x**2+y**2)/pi)))+1)**0.1
+
+@fit_function(bounds=(-600, 600), xmin=0.0, fmin=0.0)
+def griewank(x):
+    return (1 + sum(xi**2 for xi in x)/4000
+            - prod(cos(xi/sqrt(i+1)) for i,xi in enumerate(x)))
+
+
+@fit_function(bounds=(-5.12, 5.12), xmin=0.0, fmin=0.0)
+def rastrigin(x, A=10.):
+    """
+    Multimodal with global minimum near local minima.
+    """
+    n = len(x)
+    return A*n + sum(xi**2 - A*cos(2*pi*xi) for xi in x)
+
+
+# could also use bounds=(-2.048, 2.048)
+@fit_function(bounds=(-5, 10), xmin=1., fmin=0.)
+def rosenbrock(x):
+    """
+    Unimodal with narrow parabolic valley.
+    """
+    return sum(100*(xn-xp**2)**2 + (xp-1)**2 for xp, xn in zip(x[:-1], x[1:]))
+
+
+# ========================== wrapper ==================
+
+def plot2d(fn, args=None, range=(-10,10)):
+    """
+    Return a mesh plotter for the given function.
+
+    *args* are the function arguments that are to be meshed (usually the
+    first two arguments to the function).  *range* is the bounding box
+    for the 2D mesh.
+
+    All arguments except the meshed arguments are held fixed.
+    """
+    fnargs, _, _, _ = inspect.getargspec(fn)
+    if len(fnargs) < 2:
+        args = fnargs[:1]
+        def plot1d(view=None, **kw):
+            import pylab
+            x = kw[args[0]]
+            r = linspace(range[0], range[1], 500)
+            kw[args[0]] = x+r
+            pylab.plot(x+r, fn(**kw))
+            pylab.xlabel(args[0])
+            pylab.ylabel("-log P(%s)"%args[0])
+        return plot1d
+
+    if args is None:
+        args = fnargs[:2]
+    if not all(k in fnargs for k in args):
+        raise ValueError("args %s not part of function"%str(args))
+
+    def plotter(view=None, **kw):
+        import pylab
+        x, y = kw[args[0]], kw[args[1]]
+        r = linspace(range[0], range[1], 200)
+        X, Y = meshgrid(x+r, y+r)
+        kw[args[0]], kw[args[1]] = X, Y
+        pylab.pcolormesh(x+r, y+r, fn(**kw))
+        pylab.plot(x, y, 'o', hold=True, markersize=6,
+                   markerfacecolor='red', markeredgecolor='black',
+                   markeredgewidth=1, alpha=0.7)
+        pylab.xlabel(args[0])
+        pylab.ylabel(args[1])
+    return plotter
+
+
+def columnize(L, indent="", width=79):
+    # type: (List[str], str, int) -> str
+    """
+    Format a list of strings into columns.
+
+    Returns a string with carriage returns ready for printing.
+    """
+    column_width = max(len(w) for w in L) + 1
+    num_columns = (width - len(indent)) // column_width
+    num_rows = (len(L) + num_columns - 1) // num_columns
+    L = L + [""] * (num_rows*num_columns - len(L))
+    columns = [L[k*num_rows:(k+1)*num_rows] for k in range(num_columns)]
+    lines = [" ".join("%-*s"%(column_width, entry) for entry in row)
+             for row in zip(*columns)]
+    output = indent + ("\n"+indent).join(lines)
+    return output
+
+USAGE = """\
+Give the name of the test function followed by the dimension (the
+dimension defaults to 2).  Available models are:
+
+""" + columnize(ModelFunction.available(), indent="    ")
+
+try:
+    nllf = select_function(sys.argv[1:], vector=False)
+except:
+    print(USAGE, file=sys.stderr)
+    sys.exit(1)
+
+plot=plot2d(nllf, range=(-10,10))
+
+M = PDF(nllf, plot=plot)
+
+for p in M.parameters().values():
+    # TODO: really should pull value and range out of the bounds for the
+    # function, if any are provided.
+    p.value = 400*(np.random.rand()-0.5)
+    p.range(-200,200)
+
+problem = FitProblem(M)
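+
+# Example invocation (the function name, dimension and store directory are
+# arbitrary choices; any name listed in USAGE will work):
+#
+#    bumps model.py rosenbrock 4 --fit=dream --store=/tmp/T1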
diff --git a/doc/examples/test_functions/readme.rst b/doc/examples/test_functions/readme.rst
new file mode 100644
index 0000000..e74c85b
--- /dev/null
+++ b/doc/examples/test_functions/readme.rst
@@ -0,0 +1,15 @@
+**************
+Test functions
+**************
+
+.. contents:: :local:
+
+Test a variety of more difficult problems to see how well DREAM can recover
+the correct probability distribution.
+
+
+.. toctree::
+
+    anticor.rst
+    bounded.rst
+    cross.rst
diff --git a/doc/genmods.py b/doc/genmods.py
new file mode 100644
index 0000000..490403d
--- /dev/null
+++ b/doc/genmods.py
@@ -0,0 +1,225 @@
+"""
+Generate api docs for all modules in a package.
+
+Drop this file in your sphinx doc directory, and change the constants at
+the head of this file as appropriate.  Make sure this file is on the python
+path and add the following to the end of conf.py::
+
+    import genmods
+    genmods.make()
+
+OPTIONS are the options for gen_api_files().
+
+PACKAGE is the dotted import name for the package.
+
+MODULES is the list of modules to include, in table of contents order.
+
+PACKAGE_TEMPLATE is the template for the api index file.
+
+MODULE_TEMPLATE is the template for each api module.
+"""
+
+from __future__ import with_statement
+
+PACKAGE_TEMPLATE=""".. Autogenerated by genmods.py -- DO NOT EDIT --
+
+.. _%(package)s-index:
+
+##############################################################################
+Reference: %(package)s
+##############################################################################
+
+.. only:: html
+
+   :Release: |version|
+   :Date: |today|
+
+.. toctree::
+   :hidden:
+
+   %(rsts)s
+
+.. currentmodule:: %(package)s
+
+.. autosummary::
+
+   %(mods)s
+
+"""
+
+MODULE_TEMPLATE=""".. Autogenerated by genmods.py -- DO NOT EDIT --
+
+******************************************************************************
+%(prefix)s%(module)s - %(title)s
+******************************************************************************
+
+.. currentmodule:: %(package)s.%(module)s
+
+.. autosummary::
+   :nosignatures:
+
+   %(members)s
+
+.. automodule:: %(package)s.%(module)s
+   :members:
+   :undoc-members:
+   :inherited-members:
+   :show-inheritance:
+
+"""
+
+# ===================== Documentation generator =====================
+
+from os import makedirs
+from os.path import exists, dirname, getmtime, join as joinpath, abspath
+import inspect
+import sys
+
+def newer(file1, file2):
+    return not exists(file1) or (getmtime(file1) < getmtime(file2))
+
+def get_members(package, module):
+    name = package+"."+module
+    __import__(name)
+    M = sys.modules[name]
+    try:
+        L = M.__all__
+    except:
+        L = [s for s in sorted(dir(M))
+             if inspect.getmodule(getattr(M,s)) == M and not s.startswith('_')]
+    return L
+
+def gen_api_docs(package, modules, dir='api', absolute=True, root=None):
+    """
+    Generate .rst files in *dir* from *modules* in *package*.
+
+    *dir* defaults to 'api'
+
+    *absolute* is True if modules are listed as package.module in the table
+    of contents.  Default is True.
+
+    *root* is the path to the package source.  This may be different from
+    the location of the package in the python path if the documentation is
+    extracted from the build directory rather than the source directory.
+    The source is used to check if the module definition has changed since
+    the rst file was built.
+    """
+
+    # Get path to package source
+    if root is None:
+        __import__(package)
+        M = sys.modules[package]
+        root = abspath(dirname(M.__file__))
+
+    # Build path to documentation tree
+    prefix = package+"." if absolute else ""
+    if not exists(dir):
+        makedirs(dir)
+
+    # Update any modules that are out of date.  Compiled modules
+    # will always be updated since we only check for .py files.
+    for (module, title) in modules:
+        modfile = joinpath(root, module+'.py')
+        rstfile = joinpath(dir, module+'.rst')
+        if newer(rstfile, modfile):
+            members = "\n    ".join(get_members(package, module))
+            #print("writing %s"%rstfile)
+            with open(rstfile, 'w') as f:
+                f.write(MODULE_TEMPLATE%locals())
+
+    # Update the table of contents, but only if the configuration
+    # file containing the module list has changed.  For now, that
+    # is the current file.
+    api_index = joinpath(dir, 'index.rst')
+    if newer(api_index, __file__):
+        rsts = "\n   ".join(module+'.rst' for module,_ in modules)
+        mods = "\n   ".join(prefix+module for module,_ in modules)
+        #print("writing %s"%api_index)
+        with open(api_index,'w') as f:
+            f.write(PACKAGE_TEMPLATE%locals())
+
+
+# bumps api
+
+BUMPS_OPTIONS = {
+    'absolute': False, # True if package.module in table of contents
+    'dir': 'api', # Destination directory for the api docs
+    'root': None, # Source directory for the package, or None for default
+}
+
+BUMPS_PACKAGE = 'bumps'
+
+BUMPS_MODULES = [
+    #('__init__', 'Top level namespace'),
+    ('bounds', 'Parameter constraints'),
+    ('bspline', 'B-Spline interpolation library'),
+    #('_reduction','Low level calculations'),
+    ('cheby', 'Freeform - Chebyshev'),
+    ('cli', 'Command line interface'),
+    ('curve', 'Model a fit function'),
+    ('data', 'Data handling utilities'),
+    ('errplot','Plot sample profile uncertainty'),
+    ('fitproblem', 'Interface between models and fitters'),
+    ('fitservice', 'Remote job plugin for fit jobs'),
+    ('fitters', 'Wrappers for various optimization algorithms'),
+    ('formatnum', 'Format numbers and uncertainties'),
+    ('history', 'Optimizer evaluation trace'),
+    ('initpop', 'Population initialization strategies'),
+    ('lsqerror', 'Least squares error analysis'),
+    ('mapper', 'Parallel processing implementations'),
+    ('monitor', 'Monitor fit progress'),
+    ('mono', 'Freeform - Monotonic Spline'),
+    ('names', 'External interface'),
+    ('parameter', 'Optimization parameter definition'),
+    ('partemp', 'Parallel tempering optimizer'),
+    ('pdfwrapper', 'Model a probability density function'),
+    ('plotutil', 'Plotting utilities'),
+    ('plugin', 'Domain branding'),
+    ('pmath', 'Parametric versions of standard functions'),
+    #('pytwalk', 'MCMC error analysis using T-Walk steps'),
+    ('quasinewton', 'BFGS quasi-newton optimizer'),
+    ('random_lines', 'Random lines and particle swarm optimizers'),
+    ('simplex','Nelder-Mead simplex optimizer (amoeba)'),
+    ('util','Miscellaneous functions'),
+    ('wsolve','Weighted linear and polynomial solver with uncertainty'),
+    ]
+
+DREAM_OPTIONS = {
+    'absolute': False, # True if package.module in table of contents
+    'dir': 'dream', # Destination directory for the api docs
+    'root': None, # Source directory for the package, or None for default
+}
+
+DREAM_PACKAGE = 'bumps.dream'
+
+DREAM_MODULES = [
+    #('__init__', 'Top level namespace'),
+    ('acr', 'A C Rencher normal outlier test'),
+    ('bounds', 'Bounds handling'),
+    ('core', 'DREAM core'),
+    ('corrplot', 'Correlation plots'),
+    ('crossover', 'Adaptive crossover support'),
+    ('diffev', 'Differential evolution MCMC stepper'),
+    ('entropy', 'Entropy calculation'),
+    ('exppow', 'Exponential power density parameter calculator'),
+    ('formatnum', 'Format values and uncertainties nicely for printing'),
+    ('gelman', 'R-statistic convergence test'),
+    ('geweke', 'Geweke convergence test'),
+    ('initpop', 'Population initialization routines'),
+    ('ksmirnov', 'Kolmogorov-Smirnov test for MCMC convergence'),
+    ('mahal', 'Mahalanobis distance calculator'),
+    #('matlab', 'Environment for running matlab DREAM models in python'),
+    ('metropolis', 'MCMC step acceptance test'),
+    ('model', 'MCMC model types'),
+    ('outliers', 'Chain outlier tests'),
+    ('state', 'Sampling history for MCMC'),
+    ('stats', 'Statistics helper functions'),
+    ('tile', 'Split a rectangle into n panes'),
+    ('util', 'Miscellaneous utilities'),
+    ('views', 'MCMC plotting methods'),
+    #('walk', 'Demo of different kinds of random walk'),
+    ]
+
+def make():
+    gen_api_docs(BUMPS_PACKAGE, BUMPS_MODULES, **BUMPS_OPTIONS)
+    gen_api_docs(DREAM_PACKAGE, DREAM_MODULES, **DREAM_OPTIONS)
diff --git a/doc/gentut.py b/doc/gentut.py
new file mode 100644
index 0000000..ce6bbbd
--- /dev/null
+++ b/doc/gentut.py
@@ -0,0 +1,101 @@
+"""
+Generate tutorial docs from the pylit examples directory.
+
+Drop this file in your sphinx doc directory, and change the constants at
+the head of this file as appropriate.  Add pylit.py as well. Make sure this
+file is on the python path and add the following to the end of conf.py::
+
+    import gentut
+    gentut.make()
+
+You may want to change SOURCE_PATH and TARGET_PATH.  Be sure to exclude
+the source path using exclude_trees directive in conf.py.
+"""
+
+SOURCE_PATH = "examples"
+TARGET_PATH = "tutorial"
+
+# =======================================================================
+from os.path import join as joinpath, isdir, basename, getmtime, exists
+from os import makedirs
+from glob import glob
+from shutil import copyfile
+import pylit
+
+# CRUFT: python 2.x needs to convert unicode to bytes when writing to file
+try:
+    # python 2.x
+    unicode
+    def write(fid, s):
+        fid.write(s)
+except NameError:
+    # python 3.x
+    def write(fid, s):
+        fid.write(s.encode('utf-8') if isinstance(s, str) else s)
+
+def make():
+    if not exists(TARGET_PATH):
+        makedirs(TARGET_PATH)
+
+    # table of contents
+    index_source = joinpath(SOURCE_PATH, "index.rst")
+    index_target = joinpath(TARGET_PATH, "index.rst")
+    if newer(index_target, index_source):
+        copyfile(index_source, index_target)
+
+    # examples
+    examples = (f for f in glob(joinpath(SOURCE_PATH,'*')) if isdir(f))
+    for f in examples:
+        #print "weaving directory",f
+        weave(f, joinpath(TARGET_PATH, basename(f)))
+
+def newer(file1, file2):
+    return not exists(file1) or (getmtime(file1) < getmtime(file2))
+
+def weave(source, target):
+    if not exists(target):
+        makedirs(target)
+    for f in glob(joinpath(source,'*')):
+        if f.endswith('__pycache__') or f.endswith('.pyc'):
+            # skip python runtime droppings
+            continue
+        #print "processing",f
+        if f.endswith(".py") and ispylit(f):
+            rstfile = joinpath(target, basename(f).replace('.py','.rst'))
+            pyclean = joinpath(target, basename(f))
+            if newer(rstfile, f):
+                # Convert executable literate file to rst file with embedded code
+                pylit.main(["--codeindent=4", f, rstfile])
+                attach_download(rstfile, basename(f))
+                # Convert rst file with embedded code to clean code file
+                pylit.main([rstfile, pyclean, "-s", "-t"])
+        else:
+            dest = joinpath(target, basename(f))
+            if newer(dest, f):
+                if f.endswith(".py"):
+                    print("Warning: file %r is not a pylit file"%(f,))
+                #print "copying",f,dest
+                copyfile(f, dest)
+
+def attach_download(rstfile, target):
+    with open(rstfile, "ab") as fid:
+        write(fid, "\n\n.. only:: html\n\n   Download: :download:`%s <%s>`.\n"%(target, target))
+
+def ispylit(f):
+    """
+    Assume it is a pylit file if it starts with a comment and not a docstring.
+    """
+    with open(f) as fid:
+        line = fid.readline()
+        # skip shebang if present
+        if line.startswith("#!"):
+            line = fid.readline()
+        # skip blank lines
+        while line != "" and line.strip() == "":
+            line = fid.readline()
+
+    return line.startswith("#")
+
+
+if __name__ == "__main__":
+    make()
diff --git a/doc/getting_started/contributing.rst b/doc/getting_started/contributing.rst
new file mode 100755
index 0000000..956418f
--- /dev/null
+++ b/doc/getting_started/contributing.rst
@@ -0,0 +1,53 @@
+.. _contributing:
+
+********************
+Contributing Changes
+********************
+
+.. contents:: :local:
+
+The best way to contribute to the Bumps package is to work from a copy of 
+the source tree in the revision control system.
+
+The bumps project is hosted on github at:
+
+    http://github.com/bumps
+
+You can obtain a copy via git using::
+
+    git clone https://github.com/bumps/bumps.git
+    cd bumps
+    python setup.py develop
+
+By using the *develop* keyword on setup.py, changes to the files in the
+package are immediately available without the need to run setup each time
+you change the code.
+
+Track updates to the original package using::
+
+    git pull
+
+If you find you need to modify the package, please update the documentation 
+and add tests for your changes.  We use doctests on all of our examples to 
+help keep the documentation synchronized with the code.  More thorough tests 
+are found in the test directory.  Using the nose test package, you can
+run both sets of tests::
+
+    pip install nose
+    python2.5 tests.py
+    python2.6 tests.py
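+
+For reference, a doctest is just an interactive example embedded in a
+docstring; the test runner compares the example against its recorded output.
+The function below is purely illustrative::
+
+    def line(x, m, b):
+        """
+        Return the straight line y = m*x + b.
+
+        >>> line(2.0, m=1.5, b=0.5)
+        3.5
+        """
+        return m*x + b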
+
+When all the tests run, generate a patch and send it to pkienzle at nist.gov::
+
+    git diff > patch
+
+Windows users can use the `TortoiseGit <http://code.google.com/p/tortoisegit/>`_
+package, which provides similar operations.
+
+Instead of sending patches, you can set up a github account and create
+your own bumps fork.  This allows you to develop code at your leisure with
+the safety of source control, and issue pull requests when your code is ready
+to merge with the main repository.
+
+Please make sure that the documentation is up to date, and can be properly
+processed by the sphinx documentation system.  See :ref:`docbuild` for details.
diff --git a/doc/getting_started/index.rst b/doc/getting_started/index.rst
new file mode 100644
index 0000000..b17f916
--- /dev/null
+++ b/doc/getting_started/index.rst
@@ -0,0 +1,33 @@
+.. _getting-started-index:
+
+###############
+Getting Started
+###############
+
+Bumps is a set of routines for curve fitting and uncertainty analysis from
+a Bayesian perspective.  In addition to traditional optimizers which search
+for the best minimum they can find in the search space, bumps provides
+uncertainty analysis which explores all viable minima and finds confidence
+intervals on the parameters based on uncertainty in the measured values.
+Bumps has been used for systems of up to 100 parameters with tight
+constraints on the parameters.  Full uncertainty analysis requires hundreds
+of thousands of function evaluations, which is only feasible for cheap
+functions, systems with many processors, or lots of patience.
+
+Bumps includes several traditional local optimizers such as Nelder-Mead
+simplex, BFGS and differential evolution. Bumps uncertainty analysis uses
+Markov chain Monte Carlo to explore the parameter space. Although
+it was created for curve fitting problems, Bumps can explore any probability 
+density function, such as those defined by PyMC.  In particular, the
+bumps uncertainty analysis works well with correlated parameters.
+
+Bumps can be used as a library within your own applications, or as a framework
+for fitting, complete with a graphical user interface to manage your models.
+
+.. toctree::
+   :maxdepth: 2
+
+   install.rst
+   server.rst
+   contributing.rst
+   license.rst
diff --git a/doc/getting_started/install.rst b/doc/getting_started/install.rst
new file mode 100755
index 0000000..396fa96
--- /dev/null
+++ b/doc/getting_started/install.rst
@@ -0,0 +1,254 @@
+.. _installing:
+
+**************************
+Installing the application
+**************************
+
+.. contents:: :local:
+
+Bumps |version| is provided as a Windows installer or as source:
+
+    - Windows installer: :slink:`%(winexe)s`
+    - Apple installer: :slink:`%(macapp)s`
+    - Source: :slink:`%(srczip)s`
+
+The Windows installer walks through the steps of setting the program up
+to run on your machine and provides the sample data to be used in the
+tutorial.
+
+Building from source
+====================
+
+Before building bumps, you will need to set up your python environment.
+We depend on many external packages.  The versions listed below are a
+snapshot of a configuration that we are using.  The program may work with
+older versions of the package, and we will try to keep it compatible with
+the latest versions.
+
+Our base scientific python environment contains:
+
+    - python 2.7 (also tested on 2.6 and 3.5)
+    - matplotlib 1.4.3
+    - numpy 1.9.2
+    - scipy 0.14.0
+    - wxPython 3.0.0.0
+    - setuptools 20.1.1
+
+To run tests we use:
+
+    - nose 1.3.0
+
+To build the HTML documentation we use:
+
+    - sphinx 1.3.1
+    - docutils 0.12
+    - jinja2 2.8
+
+The PDF documentation requires a working LaTeX installation.
+
+You can install directly from PyPI using pip::
+
+    pip install bumps
+
+If this fails, then follow the instructions to install from the source
+archive directly. Platform specific details for setting up your environment
+are given below.
+
+Windows
+-------
+
+There are a number of python environments for windows, including:
+
+* `Anaconda <https://store.continuum.io/cshop/anaconda/>`_
+* `Canopy <https://www.enthought.com/products/canopy/>`_
+* `Python(X,Y) <http://code.google.com/p/pythonxy/>`_
+* `WinPython <http://winpython.sourceforge.net/>`_
+
+You can also build your environment from the individually distributed
+python packages.
+
+You may want a C compiler to speed up parts of bumps. Microsoft Visual C++
+for Python 2.7 is one option.  Once it is installed, you will need to
+enable the compiler by running its *vcvarsall* batch script with the 64-bit
+architecture option.
+
+Alternatively, your python environment may supply the MinGW C/C++ compiler,
+but fail to set it as the default compiler.  To make it the default you will
+need to create a distutils configuration file in the python lib directory
+(usually *C:\\Python27\\Lib\\distutils\\distutils.cfg*) with the following
+content::
+
+    [build]
+    compiler=mingw32
+
+Next start a Windows command prompt in the directory containing the source.
+This will be a command like the following::
+
+    cd "C:\Documents and Settings\<username>\My Documents\bumps-src"
+
+Now type the command to build and install::
+
+    python setup.py install
+    python test.py
+
+Now change to your data directory::
+
+    cd "C:\Documents and Settings\<username>\My Documents\data"
+
+To run the program use::
+
+    python -m bumps.cli -h
+
+
+Linux
+-----
+
+Many linux distributions will provide the base required packages.  You
+will need to refer to your distribution documentation for details.
+
+On Ubuntu you can use::
+
+    sudo apt-get install python-matplotlib python-scipy python-nose python-sphinx
+    sudo apt-get install python-wxgtk3.0
+
+From a terminal, change to the directory containing the bumps source and type::
+
+    python setup.py build
+    python test.py
+    sudo python setup.py install
+
+This should install the application somewhere on your path.
+
+To run the program use::
+
+    bumps -h
+
+OS/X
+----
+
+Building a useful python environment on OS/X is somewhat involved and
+frequently evolving, so this document will likely be out of date.
+We've had success using the `Anaconda <https://store.continuum.io/cshop/anaconda/>`_
+64-bit python 2.7 environment from Continuum Analytics, which provides
+the required packages, but other distributions should work as well.
+
+You will need to install XCode from the app store, and set the preferences
+to install the command line tools so that a C compiler is available (look
+in the Downloads tab of the preferences window).  If any of your models
+require fortran, you can download
+`gfortran binaries <http://r.research.att.com/tools/>`_ from
+r.research.att.com/tools (scroll down to the  Apple Xcode gcc-42 add-ons).
+This sets up the basic development environment.
+
+From a terminal, change to the directory containing the source and type::
+
+    conda create -n bumps numpy scipy matplotlib nose sphinx wxpython
+    source activate bumps
+    python setup.py install
+    python test.py
+    cd ..
+
+    # Optional: allow bumps to run from outside the bumps environment
+    mkdir ~/bin  # create user terminal app directory if it doesn't already exist
+    ln -s `python -c "import sys;print(sys.prefix)"`/bin/bumps ~/bin
+
+
+To run the program, start a new Terminal shell and type::
+
+    bumps -h
+
+
+.. _docbuild:
+
+Building Documentation
+======================
+
+Building the package documentation requires a working Sphinx installation and
+a working LaTex installation.  Your latex distribution should include the
+following packages:
+
+    multirow, titlesec, framed, threeparttable, wrapfig,
+    collection-fontsrecommended
+
+You can then build the documentation as follows::
+
+    (cd doc && make clean html pdf)
+
+Windows users please note that this only works with a unix-like environment
+such as *gitbash*, *msys* or *cygwin*.  There is a skeleton *make.bat* in
+the directory that will work using the *cmd* console, but it doesn't yet
+build PDF files.
+
+You can see the result of the doc build by pointing your browser to::
+
+    bumps/doc/_build/html/index.html
+    bumps/doc/_build/latex/Bumps.pdf
+
+reStructuredText does not have a nice syntax for superscripts and
+subscripts.  Units such as |g/cm^3| are entered using macros such as
+\|g/cm^3| to hide the details.  The complete list of macros is available in::
+
+    doc/sphinx/rst_prolog
+
+In addition to macros for units, we also define cdot, angstrom and degrees
+unicode characters here.  The corresponding latex symbols are defined in
+doc/sphinx/conf.py.
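+
+For illustration, a unit macro in *rst_prolog* is an ordinary reStructuredText
+substitution definition, roughly of the following form (the exact definitions
+live in the file itself)::
+
+    .. |g/cm^3| replace:: g·cm\ :sup:`-3`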
+
+There is a bug in older sphinx versions (1.0.7 as of this writing) in which
+latex tables cannot be created.  You can fix this by changing::
+
+    self.body.append(self.table.colspec)
+
+to::
+
+    self.body.append(self.table.colspec.lower())
+
+in site-packages/sphinx/writers/latex.py.  This may have been fixed in
+newer versions.
+
+Windows Installer
+=================
+
+To build a windows standalone executable with py2exe you may first need
+to create an empty file named
+*C:\\Python27\\Lib\\numpy\\distutils\\tests\\__init__.py*.
+Without this file, py2exe raises an error when it is searching for
+the parts of the numpy package.  This may be fixed on recent versions
+of numpy. Next, update the __version__ tag in bumps/__init__.py to mark
+it as your own.
+
+Now you can build the standalone executable using::
+
+    python setup_py2exe.py
+
+This creates a dist subdirectory in the source tree containing
+everything needed to run the application including python and
+all required packages.
+
+To build the Windows installer, you will need two more downloads:
+
+    - Visual C++ 2008 Redistributable Package (x86) 11/29/2007
+    - `Inno Setup <http://www.jrsoftware.org/isdl.php>`_ 5.3.10 QuickStart Pack
+
+The C++ redistributable package is needed for programs compiled with the
+Microsoft Visual C++ compiler, including the standard build of the Python
+interpreter for Windows.  It is available as vcredist_x86.exe from the
+`Microsoft Download Center <http://www.microsoft.com/downloads/>`_.
+Be careful to select the version that corresponds to the one used
+to build the Python interpreter --- different versions can have the
+same name.  For the Python 2.7 standard build, the file is 1.7 Mb
+and is dated 11/29/2007.  We have a copy (:slink:`%(vcredist)s`) on
+our website for your convenience.  Save it to the *C:\\Python27*
+directory so the installer script can find it.
+
+Inno Setup creates the installer executable.  When installing Inno Setup,
+be sure to choose the 'Install Inno Setup Preprocessor' option.
+
+With all the pieces in place, you can run through all steps of the
+build and install by changing to the top level python directory and
+typing::
+
+    python master_builder.py
+
+This creates the redistributable installer bumps-<version>-win32.exe for
+Windows one level up in the directory tree.  In addition, source archives
+in zip and tar.gz format are produced as well as text files listing the
+contents of the installer and the archives.
diff --git a/doc/getting_started/license.rst b/doc/getting_started/license.rst
new file mode 100644
index 0000000..dd8eccd
--- /dev/null
+++ b/doc/getting_started/license.rst
@@ -0,0 +1,122 @@
+.. _license:
+
+*******
+License
+*******
+
+Bumps is in the public domain.
+
+Code in individual files has copyright and license set by the authors.  Only
+free and open source software is used in this package.
+
+
+Bumps GUI
+---------
+
+Copyright (C) 2006-2011, University of Maryland
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+
+DREAM
+-----
+
+Copyright (c) 2008, Los Alamos National Security, LLC
+All rights reserved.
+
+Copyright 2008. Los Alamos National Security, LLC. This software was produced under U.S.
+Government contract DE-AC52-06NA25396 for Los Alamos National Laboratory (LANL), which is
+operated by Los Alamos National Security, LLC for the U.S. Department of Energy. The U.S.
+Government has rights to use, reproduce, and distribute this software.
+
+NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR
+IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE.  If software is modified to
+produce derivative works, such modified software should be clearly marked, so as not to
+confuse it with the version available from LANL.
+
+Additionally, redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this list of
+  conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice, this list of
+  conditions and the following disclaimer in the documentation and/or other materials
+  provided with the distribution.
+* Neither the name of Los Alamos National Security, LLC, Los Alamos National Laboratory, LANL
+  the U.S. Government, nor the names of its contributors may be used to endorse or promote
+  products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY LOS ALAMOS NATIONAL SECURITY, LLC AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LOS
+ALAMOS NATIONAL SECURITY, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+numdifftools
+------------
+
+Copyright (c) 2014, Per A. Brodtkorb, John D'Errico
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of the {organization} nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+*******
+Credits
+*******
+
+The Bumps package was developed under the DANSE project and is maintained by
+its user community.
+
+Please cite:
+
+        Kienzle, P.A., Krycka, J., Patel, N., & Sahin, I. (2011).
+        Bumps (Version |release|) [Computer Software].
+        College Park, MD: University of Maryland.  Retrieved |today|.
+
+We are grateful for the existence of many fine open source packages such
+as `NumPy <http://numpy.scipy.org/>`_ and `Python <http://www.python.org/>`_
+without which this package would be much more difficult to write.
diff --git a/doc/getting_started/server.rst b/doc/getting_started/server.rst
new file mode 100644
index 0000000..028fdc0
--- /dev/null
+++ b/doc/getting_started/server.rst
@@ -0,0 +1,177 @@
+.. _server-installation:
+
+*******************
+Server installation
+*******************
+
+.. warning::
+
+    The remote fitting feature is not actively maintained and will likely
+    not work.
+
+.. contents:: :local:
+
+Bumps jobs can be submitted to a remote batch queue for processing.  This
+allows users to share large clusters for faster processing of the data.  The
+queue consists of several components.
+
+* job controller
+
+   http service layer which allows users to submit jobs and view results
+
+* queue
+
+   cluster management layer which distributes jobs to the working nodes
+
+* worker
+
+   process monitor which runs a job on the working nodes
+
+* mapper
+
+   mechanism for evaluating R(x_i) for different x_i on separate CPUs
+
+If you are setting up a local cluster for performing Bumps analysis then you 
+will need to read this section, otherwise you can continue to the next section.
+
+Assuming that the bumps server is installed as user 'bumps' in a virtualenv 
+of ~/bumpserve, MPLCONFIGDIR is set to ~/bumpserve/.matplotlib,
+and bumpworkd has been configured, you can start with the following profile::
+
+    TODO: fill in some details on bumps server
+
+Job Controller
+==============
+
+:mod:`jobqueue` is an independent package within bumps.  It implements
+an http API for interacting with jobs.
+
+It is implemented as a WSGI python application using
+`Flask <http://flask.pocoo.org>`_
+
+Here is our WSGI setup for apache for our reflectometry modeling service::
+
+    <VirtualHost *:80>
+        ServerAdmin pkienzle at nist.gov
+        ServerName www.reflectometry.org
+        ServerAlias reflectometry.org
+        ErrorLog logs/bumps-error_log
+        CustomLog logs/bumps-access_log common
+
+        WSGIDaemonProcess bumps_serve user=pkienzle group=refl threads=3
+        WSGIScriptAlias /queue /home/pkienzle/bumps/www/jobqueue.wsgi
+
+        <Directory "/home/pkienzle/bumps/www">
+                WSGIProcessGroup bumps_serve
+                WSGIApplicationGroup %{GLOBAL}
+                Order deny,allow
+                Allow from all
+        </Directory>
+
+        DocumentRoot /var/www/bumps
+        <Directory "/var/www/bumps/">
+                AllowOverride All
+        </Directory>
+
+    </VirtualHost>
+
+
+There is a choice of two different queuing systems to configure.  If your
+environment supports a traditional batch queue you can use it to
+manage cluster resources.  New jobs are added to the queue, and
+when they are complete, they leave their results in the job results
+directory.  Currently only slurm is supported, but supporting torque
+as well would only require a few changes.
+
+You can also set up a central dispatcher.  In that case, you will have
+remote clusters pull jobs from the server when they are available, and post
+the results to the job results directory when they are complete. The remote
+cluster may be set up with its own queuing system such as slurm, only
+taking a few jobs at a time from the dispatcher so that other clusters
+can share the load.
+
+
+Cluster
+=======
+
+If you are using the dispatcher queuing system, you will need to set up
+a work daemon on your cluster to pull jobs from the queue.  This requires
+adding bumpworkd to your OS initialization scripts.
+
+Security
+========
+
+Because the jobqueue can run without authentication we need to be
+especially concerned about the security of our system.  Techniques
+such as AppArmor or virtual machines with memory mapped file systems
+provide a relatively safe environment to support anonymous computing.
+
+To successfully set up AppArmor, there are a few operations you need.
+
+Each protected application needs a profile, usually stored in
+/etc/apparmor.d/path.to.application.  With the *bumpsenv* virtual
+environment in the *bumps* user account, the following profile
+would be appropriate for the worker daemon::
+
+    -- /etc/apparmor.d/home.bumps.bumpsenv.bin.bumpworkd
+    #include <tunables/global>
+
+    /home/bumps/bumpsenv/bin/bumpworkd {
+     #include <abstractions/base>
+     #include <abstractions/python>
+
+     /bin/dash cx,
+     /home/bumps/bumpsenv/bin/python cx,
+     /home/bumps/bumpsenv/** r,
+     /home/bumps/bumpsenv/**.{so,pyd} mr,
+     /home/bumps/.bumpserve/.matplotlib/* rw,
+     /home/bumps/.bumpserve/worker/** rw,
+    }
+
+This gives read/execute access to python and its C extensions,
+and read access to everything else in the bumps virtual environment.
+
+The rw access to .bumpserve is potentially problematic.  Hostile
+models can interfere with each other if they are running at the same time.
+In particular, they could inject html into the returned data set which can
+effectively steal authentication credentials from other users through
+cross site scripting attacks, and so would not be appropriate on an 
+authenticated service.  Restricting individual models to their own job
+directory at .bumpserve/worker/jobid/** would reduce this risk, but this 
+author does not know how to do so without elevating bumpworkd privileges to root.
+
+Once the profile is in place, restart the apparmor.d daemon to enable it::
+
+    sudo service apparmor restart
+
+You can debug the profile by running a trace while the program runs
+unrestricted.  To start the trace, use::
+
+   sudo genprof /path/to/application
+
+Switch to another window then run::
+
+   /path/to/app
+
+When your application is complete, return to the genprof window
+and hit 'S' to scan /var/log/syslog for file and network access.
+Follow the prompts to update the profile.  The documentation on
+`AppArmor on Ubuntu <https://help.ubuntu.com/community/AppArmor>`_
+and
+`AppArmor on SUSE <http://doc.opensuse.org/products/opensuse/openSUSE/opensuse-security/cha.apparmor.profiles.html>`_
+is very helpful here.
+
+To reload a profile after running the trace, use::
+
+     sudo apparmor_parser -r /etc/apparmor.d/path.to.application
+
+To delete a profile that you no longer need::
+
+     sudo rm /etc/apparmor.d/path.to.application
+     sudo service apparmor restart
+
+Similar profiles could be created for the job server, and indeed, any web
+service you have on your machine, to reduce the risk that bugs in your code
+can be used to compromise your security.  This is less critical for those
+services, however, since they are not, in general, running arbitrary user
+defined functions.
+
diff --git a/doc/guide/corr.png b/doc/guide/corr.png
new file mode 100644
index 0000000..2d94701
Binary files /dev/null and b/doc/guide/corr.png differ
diff --git a/doc/guide/data.rst b/doc/guide/data.rst
new file mode 100644
index 0000000..75e27d8
--- /dev/null
+++ b/doc/guide/data.rst
@@ -0,0 +1,10 @@
+.. _data-guide:
+
+*******************
+Data Representation
+*******************
+
+.. contents:: :local:
+
+Data is x,y,dy.  Anything more complicated you will need to define yourself.
+
diff --git a/doc/guide/dream-complete.png b/doc/guide/dream-complete.png
new file mode 100644
index 0000000..64e0c98
Binary files /dev/null and b/doc/guide/dream-complete.png differ
diff --git a/doc/guide/dream-incomplete.png b/doc/guide/dream-incomplete.png
new file mode 100644
index 0000000..87c3d76
Binary files /dev/null and b/doc/guide/dream-incomplete.png differ
diff --git a/doc/guide/entropy-continuous.png b/doc/guide/entropy-continuous.png
new file mode 100644
index 0000000..dcfa5ba
Binary files /dev/null and b/doc/guide/entropy-continuous.png differ
diff --git a/doc/guide/entropy-discrete.png b/doc/guide/entropy-discrete.png
new file mode 100644
index 0000000..c9192b8
Binary files /dev/null and b/doc/guide/entropy-discrete.png differ
diff --git a/doc/guide/entropy.rst b/doc/guide/entropy.rst
new file mode 100644
index 0000000..9c6b4ba
--- /dev/null
+++ b/doc/guide/entropy.rst
@@ -0,0 +1,100 @@
+.. _entropy-guide:
+
+*******************
+Calculating Entropy
+*******************
+
+.. contents:: :local:
+
+Entropy is a measure of how much uncertainty is in the parameters.   We can
+start with the simple case of a discrete parameter which can take on a limited
+set of values. Using the formula for discrete entropy:
+
+.. math::
+
+    H(x) = - \sum_x p(x) \log_2 p(x)
+
+where the sum runs over the possible states $x$ of the parameter, we can examine a
+simple system with four states of equal probability:
+
+.. image:: entropy-discrete.png
+
+Before the experiment, the entropy is $-4 (1/4) \log_2(1/4) = 2$ bits.  After
+the experiment, which eliminates the states on the right, only two states are
+remaining with an entropy of 1 bit.  The difference in entropy before and
+after the experiment is the information gain, which is 1 bit in this case.
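+
+As a quick numerical check of this arithmetic (a throwaway numpy sketch, not
+part of the bumps API)::
+
+    import numpy as np
+
+    def discrete_entropy(p):
+        """Entropy in bits of a discrete distribution p."""
+        p = np.asarray(p, dtype=float)
+        p = p[p > 0]                    # treat 0 log 0 as 0
+        return -np.sum(p * np.log2(p))
+
+    print(discrete_entropy([0.25, 0.25, 0.25, 0.25]))   # 2.0 bits before
+    print(discrete_entropy([0.5, 0.5]))                  # 1.0 bit after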
+
+Extending this concept to continuous parameters, we use:
+
+.. math::
+
+    h(x) = - \int_{x \in X} p(x) \log_2 p(x) \,dx
+
+For a parameter which is normally distributed, $x \sim N(\mu, \sigma)$, the
+entropy is:
+
+.. math::
+
+    h(x) = \tfrac12 \log_2 (2 \pi e \sigma^2)
+
+Consider an experiment in which the parameter uncertainty $\sigma$ is reduced
+from $\sigma=1$ before the experiment to $\sigma=\tfrac12$ after the
+experiment:
+
+.. image:: entropy-continuous.png
+
+This experiment reduces the entropy from 2.05 bits to 1.05 bits, for an
+information gain of 1 bit.
+
+For a multivariate normal $N(\bar\mu, \Sigma)$, the entropy is
+
+.. math::
+
+    h(N) = \tfrac{n}{2} \log_2 (2 \pi e) + \tfrac12 \log_2 \lvert \Sigma \rvert
+
+where $n$ is the number of fitting parameters and $\Sigma$ is the
+covariance matrix relating the parameters.  For an uncorrelated system this
+reduces to $\tfrac{n}{2} \log_2 (2 \pi e) + \sum_{i=1}^n \log_2 \sigma_i$,
+where the $\sigma_i$ are the individual parameter uncertainties. In effect,
+the entropy is a measure of the overall uncertainty remaining after the fit.
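+
+As an illustration (a numpy sketch, not part of the bumps API), the formula
+can be evaluated directly from a covariance matrix; the one dimensional case
+with $\sigma=1$ reproduces the 2.05 bits quoted earlier::
+
+    import numpy as np
+
+    def mvn_entropy_bits(cov):
+        """Entropy in bits of a multivariate normal with covariance cov."""
+        cov = np.atleast_2d(np.asarray(cov, dtype=float))
+        n = cov.shape[0]
+        return 0.5*n*np.log2(2*np.pi*np.e) + 0.5*np.log2(np.linalg.det(cov))
+
+    print(mvn_entropy_bits([[1.0]]))               # => about 2.05 bits
+    print(mvn_entropy_bits(np.diag([1.0, 0.25])))  # two uncorrelated parameters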
+
+Within bumps, most models start with a uniform prior distribution for the
+parameters set using the *x.range(low,high)* or *x.pm(delta)* for some
+parameter *x*.  Some models set the prior probability to a normal distribution
+using *x.dev(sigma)*.  Arbitrary prior probability distributions can be
+set using *x.bounds = Distribution(D)* where *D* is a distribution following
+the *scipy.stats* interface.  The uncertainty on the data points does not
+directly enter into the entropy calculation.  Instead, it has a direct
+influence on the calculation of the probability of seeing the data given
+the parameter, and so it influences the probability of the parameters after
+the fit.  Increasing the error bars will increase the variance in the
+parameter estimation which will increase the entropy.
+
+There are three ways that bumps can evaluate entropy.  For the fitters
+which return a sample from the posterior distribution, such as DREAM,
+BUMPS can estimate the entropy directly from the sample.  If the distribution
+is approximately normal, we can compute the covariance matrix from the sample
+and use the formula above for the multivariate normal.   For the remaining
+fitters, we can use an estimate of the covariance matrix that results from
+the fit (Levenberg-Marquardt, BFGS), or we can compute the Hessian at the
+minimum (differential evolution, Nelder-Mead simplex).  Again, this can be
+used in the formula above to give an estimate of the entropy.
+
+We can use the difference in entropy between fits for experimental design.
+After setting up the model system, we can simulate a dataset using the
+expected statistics from the experiment, then fit the simulated data.  This
+will give us the expected uncertainty on our individual parameters, and
+the overall entropy.  We can then play with different experimental parameters
+such as instrument configurations, sample variants and measurement time and
+select a combination which provides the most information about the parameters
+of interest.  This can be done from the command line using
+:ref:`option-simulate`, :ref:`option-noise` and :ref:`option-entropy`.
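+
+For example, a command along the following lines (see the options guide for
+the exact option behaviour) simulates a data set from the model, adds
+measurement noise, runs a DREAM fit, and reports the entropy::
+
+    bumps model.py --simulate --noise=5 --fit=dream --entropy --store=T3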
+
+The information gain from the fit is not entirely meaningful on its own.  We can calculate
+the prior entropy by looking at the fitting range of the parameters, and the
+particular choice of fitting ranges can alter the output of the fit.  So for
+example, if we set the fitting range to eliminate solutions, we will have
+reduced the prior entropy as well as the posterior entropy, and likely
+decreased the number of bits of information gain.  Conversely, if the fit
+converges to the same distribution regardless of the parameter range, we can
+drive the information gain to infinity by setting an unbounded input range.
diff --git a/doc/guide/error.png b/doc/guide/error.png
new file mode 100644
index 0000000..36e4321
Binary files /dev/null and b/doc/guide/error.png differ
diff --git a/doc/guide/experiment.rst b/doc/guide/experiment.rst
new file mode 100644
index 0000000..c2a021d
--- /dev/null
+++ b/doc/guide/experiment.rst
@@ -0,0 +1,256 @@
+.. _experiment-guide:
+
+**********
+Experiment
+**********
+
+.. contents:: :local:
+
+It is the responsibility of the user to define their own experiment
+structure.  The usual definition will describe the sample of interest,
+the instrument configuration, and the measured data, and will provide
+a theory function which computes the expected data given the sample
+and instrument parameters.  The theory function frequently has a
+physics component for computing the ideal data given the sample and
+an instrument effects component which computes the expected data from
+the ideal data.  Together, sample, instrument, and theory function
+define the fitting model which needs to match the data.
+
+The curve fitting problem can be expressed as:
+
+.. math::
+
+    P(\text{model}\ |\ \text{data}) =
+        {P(\text{data}\ |\ \text{model})P(\text{model}) \over P(\text{data})}
+
+That is, the probability of seeing a particular set of model parameter values
+given the observed data depends on the probability of seeing the measured
+data given a proposed set of parameter values scaled by the probability of
+those parameter values and the probability of that data being measured.
+The experiment definition must return the negative log likelihood as
+computed using the expression on the right.  Bumps will explore the
+space of the sample and instrument parameters in the model, returning the
+maximum likelihood and confidence intervals on the parameters.
+
+There is a strong relationship between the usual $\chi^2$ optimization
+problem and the maximum likelihood problem. Given Gaussian uncertainty
+for data measurements, we find that data $y_i$ measured with
+uncertainty $\sigma_i$ will be observed for sample parameters $p$
+when the instrument is at position $x_i$ with probability
+
+.. math::
+
+    P(y_i\ |\ f(x_i;p)) = \frac{1}{\sqrt{2\pi\sigma_i^2}}
+        \exp\left(-\frac{(y_i-f(x_i;p))^2}{2\sigma_i^2}\right)
+
+The negative log likelihood of observing all points in the data set for
+the given set of sample parameters is
+
+.. math::
+
+   -\log \prod_i{P(y_i\ |\ f(x_i;p))} =
+       \tfrac12 \sum_i{\frac{(y_i-f(x_i;p))^2}{\sigma_i^2}}
+       + \tfrac12 \sum_i{\log 2 \pi \sigma_i^2}
+       = \tfrac12 \chi^2 + C
+
+Note that this is the unnormalized $\chi^2$, whose expected value is the 
+number of degrees of freedom in the model, not the reduced $\chi^2_R$ whose
+expected value is $1$.  The Bumps fitting process is not sensitive to the
+constant $C$ and it can be safely ignored.
+
+Casting the problem as a log likelihood problem rather than $\chi^2$
+provides several advantages.  We can support a richer set of measurement
+techniques whose uncertainties do not follow a Gaussian distribution.
+For example, if we have a Poisson process with a low count rate, the
+likelihood function will be asymmetric, and a gaussian fit will tend
+to overestimate the rate.  Furthermore, we can properly handle
+background rates since we can easily compute the probability of seeing
+the observed number of counts given the proposed signal plus background
+rate.  Gaussian modeling can lead to negative rates for signal or
+background, which is fundamentally wrong. See :ref:`poisson-fit` for
+a demonstration of this effect.
+
+We can systematically incorporate prior information into our models, such
+as uncertainty in instrument configuration.  For example,  if our sample
+angle control motor position follows a Gaussian distribution with a target
+position of 3\ |deg| and an uncertainty of 0.2\ |deg|, we can set
+
+.. math::
+
+   -\log P(\text{model}) = \frac{1}{2} \frac{(\theta-3)^2}{0.2^2}
+
+ignoring the scaling constant as before, and add this to $\tfrac12\chi^2$
+to get the negative log of the joint probability.  Similarly, if we
+know that our sample should have a thickness of 100 |pm| 3.5 |Ang|
+based on how we constructed the sample, we can incorporate this
+information into our model in the same way.
+
+Simple experiments
+====================
+
+The simplest experiment is defined by a python function whose first argument
+is the list of instrument configuration values (the *x* values at which the
+data were measured) and whose remaining arguments are the fittable
+parameters.  For example, to fit a line you would need::
+
+    def line(x, m, b):
+        return m*x + b
+
+Assuming the data was in a 3 column ascii file with x, y and
+uncertainty, you would turn this into a bumps model file using::
+
+    import numpy
+    from bumps.curve import Curve
+
+    # 3 column data file with x, y and uncertainty
+    x, y, dy = numpy.loadtxt('line.txt').T
+    M = Curve(line, x, y, dy)
+
+Using the magic of python introspection, 
+:class:`Curve <bumps.curve.Curve>` is able to determine
+the names of the fittable parameters from the arguments to the
+function.  These are converted to 
+:class:`Parameter <bumps.parameter.Parameter>` objects, the 
+basis of the Bumps modeling system.  For each parameter, we can set
+bounds or values::
+
+    M.m.range(0,1)  # limit slope between 0 and 45 degrees
+    M.b.value = 1   # the intercept is set to 1.
+
+We could even set a parameter to a probability distribution, using
+:meth:`Parameter.dev <bumps.parameter.Parameter.dev>` for Gaussian
+distributions or setting parameter.bounds to
+:class:`Distribution <bumps.bounds.Distribution>` for other distributions.
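+
+For example, instead of fixing the intercept we could give it a gaussian
+prior, or give the slope an arbitrary prior (a sketch; the particular
+distributions are chosen only for illustration)::
+
+    from scipy import stats
+    from bumps.bounds import Distribution
+
+    M.b.dev(0.5)     # gaussian prior on the intercept with sigma = 0.5
+    M.m.bounds = Distribution(stats.beta(2, 5))  # arbitrary scipy.stats prior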
+
+Bumps includes code for polynomial interpolation including
+:func:`B-splines <bumps.bspline>`,
+:func:`monotonic splines <bumps.mono>`,
+and :func:`chebyshev polynomials <bumps.cheby>`.
+
+For counts data, :class:`PoissonCurve <bumps.curve.PoissonCurve>` is also
+available.
+
+Likelihood functions
+====================
+
+If you already have the negative log likelihood function and you don't
+need to manage data, you can use it with :class:`PDF <bumps.pdfwrapper.PDF>`::
+
+    import numpy
+    from bumps.pdfwrapper import PDF
+
+    x, y, dy = numpy.loadtxt('line.txt').T
+    def nllf(m, b):
+        # negative log likelihood is chisq/2 for gaussian uncertainties
+        return 0.5*numpy.sum(((y - (m*x + b))/dy)**2)
+    M = PDF(nllf)
+
+You can use *M.m* and *M.b* to set the parameter ranges as usual, then return
+the model as a fitting problem::
+
+    from numpy import inf
+    from bumps.fitproblem import FitProblem
+
+    M.m.range(-inf, inf)
+    M.b.range(-inf, inf)
+    problem = FitProblem(M)
+
+.. _fitness:
+
+Complex models
+==============
+
+More sophisticated models, with routines for data handling and specialized
+plotting should define the :class:`Fitness <bumps.fitproblem.Fitness>`
+interface.  The :ref:`peaks-example` example sets up a problem for fitting
+multiple peaks plus a background against a 2-D data set.
+
+Models are parameterized using :class:`Parameter <bumps.parameter.Parameter>`
+objects, which identify the fitted parameters in the model, and the bounds over
+which they may vary.  The fitness object must provide a set of fitting
+parameters to the fit problem using the
+:meth:`parameters <bumps.fitproblem.Fitness.parameters>`  method.
+Usually this returns a dictionary, with the key corresponding to the
+attribute name for the parameter and the value corresponding to a
+parameter object.  This allows the user of the model to guess that
+parameter "p1" for example can be referenced using *model.p1*.  If the
+model consists of parts, the parameters for each part must be returned.
+The usual approach is to define a *parameters* method for each part
+and build up the dictionary when needed (the *parameters* function is
+only called at the start of the fit, so it does not need to be efficient).
+This allows the user to guess that parameter "p1" of part "a" can be
+referenced using *model.a.p1*.  A set of related parameters, p1, p2, ...
+can be placed in a list and referenced using, e.g., *model.a.p[i]*.
+
+The fitness constructor should accept keyword arguments for each
+parameter giving reasonable defaults for the initial value.  The
+parameter attribute should be created using
+:meth:`Parameter.default <bumps.parameter.Parameter.default>`.
+This method allows the user to set an initial parameter value when the
+model is defined, or set the value to be another parameter in the fitting
+problem, or to a parameter expression. The name given to the *default*
+method should include the name of the model.  That way when the same
+type of model is used for different data sets, the two sets of parameters
+can be distinguished.  Ideally the model name would be based on the
+data set name so that you can more easily figure out which parameter
+goes with which data.
+
+During an analysis, the optimizer will ask to evaluate a series of
+points in parameter space.  Once the parameters have been set, the
+:meth:`update <bumps.fitproblem.Fitness.update>` method will be called,
+if there is one.  This method should clear any cached results from the
+last fit point.  Next the :meth:`nllf <bumps.fitproblem.Fitness.nllf>`
+method will be called to compute the negative log likelihood of observing
+the data given the current values of the parameters.   This is usually
+just $\sum{(y_i - f(x_i))^2 / (2 \sigma_i^2)}$ for data measured with
+Gaussian uncertainty, but any probability  distribution can be used.
+
+For the Levenberg-Marquardt optimizer, the
+:meth:`residuals <bumps.fitproblem.Fitness.residuals>` method will be
+called instead of *nllf*.  If residuals are unavailable, then the L-M
+method cannot be used.
+
+The :meth:`numpoints <bumps.fitproblem.Fitness.numpoints>` method is used
+to report fitting progress.  With Gaussian measurement uncertainty, the
+*nllf* return value is $\chi^2/2$, where $\chi^2$ has an expected value equal
+to the number of degrees of freedom in the fit.  Since this is an awkward
+number, the normalized chi-square,
+$\chi^2_N = \chi^2/\text{DoF} = -2 \ln (P)/(n-p)$, is shown
+instead, where $-\ln P$ is the *nllf* value, $n$ is the number of points
+and $p$ is the number of fitted parameters.  $\chi^2_N$ has a value near 1
+for a good fit.  The same calculation is used for non-gaussian
+distributions even though *nllf* is not returning sum squared residuals.
+
+The :meth:`save <bumps.fitproblem.Fitness.save>` and
+:meth:`plot <bumps.fitproblem.Fitness.plot>` methods will  be called at
+the end of the fit.  The *save* method should save the model for the
+current point.  This may include things such as the calculated scattering
+curve and the real space model for scattering inverse problems, or it
+may be a save of the model parameters in a format that can be loaded by
+other programs.  The *plot* method should use the current matplotlib
+figure to draw the model, data, theory and residuals.
+
+The :meth:`resynth_data <bumps.fitproblem.Fitness.resynth_data>` method
+is used for an alternative monte carlo error analysis where random
+data sets are generated from the measured value and the uncertainty
+then fitted.  The resulting fitted parameters can be processed much
+like the MCMC datasets, yielding a different estimate on the uncertainties
+in the parameters.  The
+:meth:`restore_data <bumps.fitproblem.Fitness.restore_data>` method
+restores the data to the originally measured values.  These methods
+are optional, and only used if the alternative error analysis is
+requested.
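+
+The following skeleton shows how these methods fit together.  It is only a
+sketch, with a trivial straight-line theory standing in for real data
+handling, and the optional *save*, *resynth_data* and *restore_data* methods
+omitted::
+
+    import numpy as np
+    from bumps.parameter import Parameter
+
+    class LineFitness(object):
+        def __init__(self, x, y, dy, m=0., b=0., name="line"):
+            self.x, self.y, self.dy = x, y, dy
+            # Parameter.default lets the caller pass an initial value, another
+            # parameter, or a parameter expression for each argument.
+            self.m = Parameter.default(m, name=name + " slope")
+            self.b = Parameter.default(b, name=name + " intercept")
+
+        def parameters(self):
+            return {'m': self.m, 'b': self.b}
+
+        def numpoints(self):
+            return len(self.y)
+
+        def update(self):
+            pass                        # clear any cached theory values here
+
+        def theory(self):
+            return self.m.value*self.x + self.b.value
+
+        def residuals(self):
+            return (self.theory() - self.y)/self.dy
+
+        def nllf(self):
+            return 0.5*np.sum(self.residuals()**2)
+
+        def plot(self, view=None):
+            import matplotlib.pyplot as plt
+            plt.errorbar(self.x, self.y, yerr=self.dy, fmt='.', label="data")
+            plt.plot(self.x, self.theory(), '-', label="theory")
+            plt.legend()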
+
+Linear models
+=============
+
+Linear problems with normally distributed measurement error can be
+solved directly.  Bumps provides :func:`bumps.wsolve.wsolve`, which weights
+values according to the uncertainty.  The corresponding
+:func:`bumps.wsolve.wpolyfit` function fits polynomials with measurement
+uncertainty.
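+
+For example (a sketch; the keyword names should be checked against the
+:mod:`bumps.wsolve` documentation)::
+
+    import numpy as np
+    from bumps.wsolve import wpolyfit
+
+    x = np.array([1., 2., 3., 4.])
+    y = np.array([1.1, 2.1, 2.9, 4.2])
+    dy = np.array([0.1, 0.1, 0.1, 0.2])
+    poly = wpolyfit(x, y, dy=dy, degree=1)   # weighted straight-line fit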
+
+
+Foreign models
+==============
+
+If your modeling environment already contains a sophisticated parameter
+handling system (e.g. sympy or PyMC) you may want to tie into the Bumps
+system at a higher level.  In this case you will need to define a
+class which implements the :class:`FitProblem <bumps.fitproblem.FitProblem>`
+interface.  This has been done already for 
+:class:`PyMCProblem <bumps.pymcfit.PyMCProblem>`
+and interested parties are directed therein for a working example.
+
+
+External constraints
+====================
diff --git a/doc/guide/fit-amoeba.png b/doc/guide/fit-amoeba.png
new file mode 100644
index 0000000..910c62e
Binary files /dev/null and b/doc/guide/fit-amoeba.png differ
diff --git a/doc/guide/fit-de.png b/doc/guide/fit-de.png
new file mode 100644
index 0000000..a9858cf
Binary files /dev/null and b/doc/guide/fit-de.png differ
diff --git a/doc/guide/fit-dream.png b/doc/guide/fit-dream.png
new file mode 100644
index 0000000..013083d
Binary files /dev/null and b/doc/guide/fit-dream.png differ
diff --git a/doc/guide/fit-lm.png b/doc/guide/fit-lm.png
new file mode 100644
index 0000000..5b981ee
Binary files /dev/null and b/doc/guide/fit-lm.png differ
diff --git a/doc/guide/fit-newton.png b/doc/guide/fit-newton.png
new file mode 100644
index 0000000..6e2b422
Binary files /dev/null and b/doc/guide/fit-newton.png differ
diff --git a/doc/guide/fitting.rst b/doc/guide/fitting.rst
new file mode 100644
index 0000000..51c38fb
--- /dev/null
+++ b/doc/guide/fitting.rst
@@ -0,0 +1,491 @@
+.. _fitting-guide:
+
+*******
+Fitting
+*******
+
+.. contents:: :local:
+
+
+Obtaining a good fit depends foremost on having the correct model to fit.
+
+For example, if you are modeling a curve with a spline, you will overfit
+the data if you have too many spline points, or underfit it if you do not
+have enough.  If the underlying data is ultimately an exponential, then
+the spline order required to model it will require many more parameters
+than the corresponding exponential.
+
+Even with the correct model, there are systematic errors to address
+(see :ref:`data-guide`).  A distorted sample can lead to broader resolution
+than expected for the measurement technique, and you will need to adjust your
+resolution function.  Imprecise instrument control will lead to uncertainty
+in the position of the sample, and corresponding changes to the measured
+values.  For high precision experiments, your models will need to incorporate
+these instrument effects so that the uncertainty in instrument configuration
+can be properly accounted for in the uncertainty in the fitted parameter
+values.
+
+
+Quick Fit
+=========
+
+While generating an appropriate model, you will want to perform a number
+of quick fits.  The :ref:`fit-amoeba` works well for this.  You will want
+to run enough iterations ``--steps=1000`` so the algorithm has a
+chance to  converge.  Restarting a number of times ``--starts=10`` gives
+a reasonably thorough search of the fit space.  Once the fit converges,
+additional starts are very quick.  From the graphical user interface, using
+``--starts=1`` and clicking the fit button to improve the fit as needed works
+pretty well. From the command line interface, the command line will be
+something like::
+
+    bumps --fit=amoeba --steps=1000 --starts=20 --parallel model.py --store=T1
+
+Here, the results are kept in a directory ``--store=T1`` relative to the current
+directory, with files containing the current model in *model.py*, the fit
+result in *model.par* and plots in *model-\*.png*.  The parallel option
+indicates that multiple cores should be used on the cpu when running the fit.
+
+The fit may be able to be improved by using the current best fit value as
+the starting point for a new fit::
+
+    bumps --fit=amoeba --steps=1000 --starts=20 --parallel model.py --store=T1 --pars=T1/model.par
+
+If the fit is well behaved, and a numerical derivative exists, then
+switching to :ref:`fit-newton` is useful, in that it will very rapidly
+converge to a nearby local minimum.
+
+::
+
+    bumps --fit=newton model.py --pars=T1/model.par --store=T1
+
+:ref:`fit-de` is an alternative to :ref:`fit-amoeba`, perhaps a little
+more likely to find the global minimum but somewhat slower.  This is a
+population based algorithm in which several points from the current
+population are selected, and based on the position and value, a new point
+is generated.  The population is specified as a multiplier on the number
+of parameters in the model, so for example an 8 parameter model with
+DE's default population ``--pop=10`` would create 80 points each generation.
+This algorithm can be called from the command line as follows::
+
+    bumps --fit=de --steps=3000 --parallel model.py --store=T1
+
+Some fitters save the complete state of the fitter on termination so that
+the fit can be resumed.  Use ``--resume=path/to/previous/store`` to resume.
+The resumed fit also needs a ``--store=path/to/store``, which could be the
+same as the resume path if you want to update it, or it could be a completely
+new path.
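+
+For example, assuming an earlier fit with a resumable fitter such as DREAM
+was stored in *T1*::
+
+    bumps --fit=dream --resume=T1 --store=T1 --parallel model.py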
+
+
+See :ref:`optimizer-guide` for a description of the available optimizers, and
+:ref:`option-guide` for a description of all the bumps options.
+
+Uncertainty Analysis
+====================
+
+More important than the optimal value of the parameters is an estimate
+of the uncertainty in those values.  By casting our problem as the
+likelihood of seeing the data given the model, we not only give
+ourselves the ability to incorporate prior information into the fit
+systematically, but we also give ourselves a strong foundation for
+assessing the uncertainty of the parameters.
+
+Uncertainty analysis is performed using :ref:`fit-dream`.  This is a
+Markov chain Monte Carlo (MCMC) method with a differential evolution
+step generator.  Like simulated annealing, the MCMC explores the space
+using a random walk, always accepting a better point, but sometimes
+accepting a worse point depending on how much worse it is.
+
+DREAM can be started with a variety of initial populations.  The
+random population ``--init=random`` distributes the initial points using
+a uniform distribution across the space of the parameters.  Latin
+hypercube sampling ``--init=lhs`` improves on random by making sure that
+there is one value in each subrange of every variable. The covariance
+population ``--init=cov`` selects points from the uncertainty ellipse
+computed from the derivative at the initial point.  This method
+will fail if the fitting parameters are highly correlated and the
+covariance matrix is singular.  The $\epsilon$-ball population ``--init=eps``
+starts DREAM from a tiny region near the initial point and lets it
+expand from there.  It can be useful to start with an epsilon ball
+from the previous best point when DREAM fails to converge using
+a more diverse initial population.
+
+The Markov chain will take time to converge on a stable population.
+This burn in time needs to be specified at the start of the analysis.
+After burn, DREAM will collect all points visited for N iterations
+of the algorithm.  If the burn time was long enough, the resulting
+points can be used to estimate uncertainty on parameters.
+
+A common command line for running DREAM is::
+
+   bumps --fit=dream --burn=1000 --samples=1e5 --init=cov --parallel --pars=T1/model.par model.py --store=T2
+
+
+Bayesian uncertainty analysis is described in the GUM Supplement 1 [8],
+and is a valid technique for reporting parameter uncertainties in NIST
+publications.   Given sufficient burn time, points in the search space
+will be visited with probability proportional to the goodness of fit.
+The file T2/model.err contains a table showing for each
+parameter the mean(std), median and best values, and the 68% and 95%
+credible intervals.  The mean and standard deviation are computed from
+all the samples in the returned distribution.  These statistics are not
+robust: if the Markov process has not yet converged, then outliers will
+significantly distort the reported values.  Standard deviation is
+reported in compact notation, with the two digits in parentheses
+representing uncertainty in the last two digits of the mean.  Thus, for
+example, $24.9(28)$ is $24.9 \pm 2.8$.  Median is the middle value in the
+distribution.  Best is the best value ever seen.  The 68% and 95%
+intervals are the shortest intervals that contain 68% and 95% of
+the points respectively.  In order to report 2 digits of precision on
+the 95% interval, approximately 1000000 samples drawn from the distribution
+are required, or steps = 1000000/(#parameters × #pop).  The 68% interval
+will require fewer draws, though how many has not yet been determined.
+
+.. image:: var.png
+    :scale: 50
+
+Histogramming the set of points visited will give a picture of the
+probability density function for each parameter.  This histogram is
+generated automatically and saved in T2/model-var.png.  The histogram
+range represents the 95% credible interval, and the shaded region
+represents the 68% credible interval.  The green line shows the highest
+probability observed given that the parameter value is restricted to
+that bin of the histogram.  With enough samples, this will correspond
+to the maximum likelihood value of the function given that one parameter
+is restricted to that bin.  In practice, the analysis has converged
+when the green line follows the general shape of the histogram.
+
+.. image:: corr.png
+    :scale: 50
+
+The correlation plots show that the parameters are not uniquely
+determined from the data.  For example, the thicknesses of
+lamellae 3 and 4 are strongly anti-correlated, yielding a 95% CI of
+about 1 nm for each compared to the bulk nafion thickness CI of 0.2 nm.
+Summing lamellae thickness in the sampled points, we see the overall
+lamellae thickness has a CI of about 0.3 nm.  The correlation
+plot is saved in T2/model-corr.png.
+
+
+.. image:: error.png
+    :scale: 50
+
+To assure ourselves that the uncertainties produced by DREAM do
+indeed correspond to the underlying uncertainty in the model, we perform
+a Monte Carlo forward uncertainty analysis by selecting 50 samples from
+the computed posterior distribution, computing the corresponding
+theory function and calculating the normalized residuals.  Assuming that
+our measurement uncertainties are approximately normally distributed,
+approximately 68% of the normalized residuals should be within +/- 1 of
+the residual for the best model, and 95% should be within +/- 2. Note
+that our best fit does not capture all the details of the data, and the
+underlying systematic bias is not included in the uncertainty estimates.
+
+Plotting the profiles generated from the above sampling method, aligning
+them such that the cross correlation with the best profile is maximized,
+we see that the precise details of the lamellae are uncertain but the
+total thickness of the lamellae structure is well determined.  Bayesian
+analysis can also be used to determine relative likelihood of different
+number of layers, but we have not yet performed this analysis.  This plot
+is stored in *T2/model-errors.png*.
+
+The trace plot, *T2/model-trace.png*, shows the mixing properties of the
+first fitting parameter.  If the Markov process is well behaved, the
+trace plot will show a lot of mixing.  If it is ill behaved, and each
+chain is stuck in its own separate local minimum, then distinct lines
+will be visible in this plot.
+
+The convergence plot, *T2/model-logp.png*, shows the log likelihood
+values for each member of the population.  When the Markov process
+has converged, this plot will be flat with no distinct lines visible.
+If it shows a general upward sweep, then the burn time was not
+sufficient, and the analysis should be restarted.  The ability to
+continue to burn from the current population is not yet implemented.
+
+Just because all the plots are well behaved does not mean that the
+Markov process has converged on the best result.  It is practically
+impossible to rule out a deep minimum with a narrow acceptance
+region in an otherwise unpromising part of the search space.
+
+In order to assess the DREAM algorithm for suitability for our
+problem space we did a number of tests.  Given that our fit surface is
+multimodal, we need to know that the uncertainty analysis can return
+multiple modes.  Because the fit problems may also be ill-conditioned,
+with strong correlations or anti-correlations between some parameters,
+the uncertainty analysis needs to be able to correctly indicate that
+the correlations exist. Simple Metropolis-Hastings sampling does not
+work well in these conditions, but we found that DREAM is able to 
+handle them.  We are still affected by the curse of dimensionality.
+For correlated parameters in high dimensional spaces, even DREAM has
+difficulty taking steps which lead to improved likelihood.  For
+example, we can recover an eight point spline with generous ranges
+on its 14 free parameters close to 100% of the time, but a 10 point
+spline is rarely recovered.
+
+
+
+Using the posterior distribution
+================================
+
+You can load the DREAM output population and perform uncertainty analysis
+operations after the fact.  To run an interactive bumps session
+use the following::
+
+    bumps -i
+
+First you need to import some functions::
+
+    import os
+    import matplotlib.pyplot as plt
+
+    from bumps.dream.state import load_state
+    from bumps.dream.views import plot_vars, plot_corrmatrix
+    from bumps.dream.stats import var_stats, format_vars
+
+
+Then you need to reload the MCMC chains::
+
+    store = "/tmp/t1"   # path to the --store=/tmp/t1 directory
+    modelname = "model"  # model file name without .py extension
+
+    # Reload the MCMC data
+    basename = os.path.join(store, modelname)
+    state = load_state(basename)
+    state.mark_outliers() # ignore outlier chains
+
+    # Attach the labels from the .par file:
+    with open(basename+".par") as fid:
+        state.labels = [" ".join(line.strip().split()[:-1]) for line in fid]
+
+Now you can plot the data::
+
+    state.show()  # Create the standard plots
+
+You can choose to plot only some of the variables::
+
+    # Select the data to plot (the 3rd and the last two in this case):
+    draw = state.draw(vars=[2, -2, -1])
+
+    # Histograms
+    stats = var_stats(draw)  # Compute statistics such as the 90% interval
+    print(format_vars(stats))
+    plt.figure()
+    plot_vars(draw, stats)
+
+    # Correlation plots
+    plt.figure()
+    plot_corrmatrix(draw)
+
+
+You can restrict those variables to a certain range. For example, to
+restrict the third parameter to $[0.8,1.0]$ and the last to $[0.2,0.4]$::
+
+    from bumps.dream import views
+    selection={2: (0.8,1.0), -1:(0.2,0.4),...}
+    draw = state.draw(vars=[2, -2, -1], selection=selection)
+    ...
+
+
+You can create derived variables using a function to generate the new
+variable from some combination of existing variables.  For example, to add
+the first two variables together to create the derived variable "x+y" use::
+
+    state.derive_vars(lambda p: p[0]+p[1], labels=["x+y"])
+
+You can generate multiple derived parameters at a time with a function
+that returns a sequence::
+
+
+    state.derive_vars(lambda p: (p[0]*p[1],p[0]-p[1]), labels=["x*y","x-y"])
+
+These new parameters will show up in the plots::
+
+    state.show()
+
+The plotting code is somewhat complicated, and matplotlib doesn't have a
+good way of changing plots interactively.  If you are running directly
+from the source tree, you can modify the dream plotting libraries as you
+need for a one-off plot, then replot the graph::
+
+
+    import importlib
+    from bumps.dream import views, corrplot
+    # ... change the plotting code in views.py or corrplot.py ...
+    importlib.reload(views)
+    importlib.reload(corrplot)
+    state.show()
+
+Be sure to restore the original versions when you are done.  If the change
+is so good that everyone should use it, be sure to feed it back to the
+community via the bumps source control system at
+`github <https://github.com/bumps>`_.
+
+Publication Graphics
+====================
+
+The matplotlib package is capable of producing publication quality
+graphics for your models and fit results, but it requires you to write
+scripts to get the control that you need.  These scripts can be run
+from the Bumps application by first loading the model and the fit
+results then accessing their data directly to produce the plots that
+you need.
+
+The model file (call it *plot.py*) will start with the following::
+
+    import os
+    import sys
+    from bumps.cli import load_problem, load_best
+
+    model, store = sys.argv[1:3]
+
+    problem = load_problem([model])
+    load_best(problem, os.path.join(store, model[:-3]+".par"))
+    chisq = problem.chisq
+
+    print("chisq", chisq)
+
+Assuming your model script is in model.py and you have run a fit with
+``--store=X5``, you can run this file using::
+
+    $ bumps plot.py model.py X5
+
+Now *model.py* is loaded and the best fit parameters are set.
+
+To produce plots, you will need access to the data and the theory.  This
+can be complex depending on how many models you are fitting and how many
+datasets there are per model.  For single experiment models defined
+by :func:`FitProblem <bumps.fitproblem.FitProblem>`, your original
+experiment object  is referenced by *problem.fitness*.  For simultaneous
+refinement defined by *FitProblem* with multiple *Fitness* objects,
+use ``problem.models[k].fitness`` to access the experiment for
+model *k*.  Your experiment object should provide methods for retrieving
+the data and plotting data vs. theory.
+
+How does this work in practice?  Consider the reflectivity modeling
+problem where we have a simple model such as nickel film on a silicon
+substrate.  We measure the specular reflectivity as various angles and
+try to recover the film thickness.  We want to make sure that our
+model fits the data within the uncertainty of our measurements, and
+we want some graphical representation of the uncertainty in our film
+of interest.  The refl1d package provides tools for generating the
+sample profile uncertainty plots.  We access the experiment information
+as follows::
+
+    experiment = problem.fitness
+    z,rho,irho = experiment.smooth_profile(dz=0.2)
+    # ... insert profile plotting code here ...
+    QR = experiment.reflectivity()
+    for p,th in self.parts(QR):
+        Q,dQ,R,dR,theory = p.Q, p.dQ, p.R, p.dR, th[1]
+        # ... insert reflectivity plotting code here ...
+
+Next we can reload the error sample data from the DREAM MCMC sequence::
+
+    from bumps.dream.state import load_state
+    from bumps.errplot import calc_errors_from_state, align_profiles
+
+    state = load_state(os.path.join(store, model[:-3]))
+    state.mark_outliers()
+    # ... insert correlation plots, etc. here ...
+    profiles,slabs,Q,residuals = calc_errors_from_state(problem, state)
+    aligned_profiles = align_profiles(profiles, slabs, 2.5)
+    # ... insert profile and residuals uncertainty plots here ...
+
+The function :func:`bumps.errplot.calc_errors_from_state` calls the
+calc_errors function defined by the reflectivity model.  The return value is
+arbitrary, but should be suitable for the show_errors function defined
+by the reflectivity model.
+
+Putting the pieces together, here is a skeleton for a specialized
+plotting script::
+
+    import os
+    import sys
+    import pylab
+    from bumps.dream.state import load_state
+    from bumps.cli import load_problem, load_best
+    from bumps.errplot import calc_errors_from_state
+    from refl1d.align import align_profiles
+
+    model, store = sys.argv[1:3]
+
+    problem = load_problem([model])
+    load_best(problem, os.path.join(store, model[:-3]+".par"))
+
+    chisq = problem.chisq
+    experiment = problem.fitness
+    z,rho,irho = experiment.smooth_profile(dz=0.2)
+    # ... insert profile plotting code here ...
+    QR = experiment.reflectivity()
+    for p,th in self.parts(QR):
+        Q,dQ,R,dR,theory = p.Q, p.dQ, p.R, p.dR, th[1]
+        # ... insert reflectivity plotting code here ...
+
+    if 1:  # Loading errors is expensive; may not want to do so all the time.
+        state = load_state(os.path.join(store, model[:-3]))
+        state.mark_outliers()
+        # ... insert correlation plots, etc. here ...
+        profiles,slabs,Q,residuals = calc_errors_from_state(problem, state)
+        aligned_profiles = align_profiles(profiles, slabs, 2.5)
+        # ... insert profile and residuals uncertainty plots here ...
+
+    pylab.show()
+    raise Exception()  # We are just plotting; don't run the model
+
+Tough Problems
+==============
+
+.. note::
+
+   DREAM is currently our most robust fitting algorithm.  We are
+   exploring other algorithms such as parallel tempering, but they
+   are not currently competitive with DREAM.
+
+With the toughest fits, for example freeform models with arbitrary 
+control points, DREAM only succeeds if the model is small or the 
+control points are constrained.  We have developed a parallel 
+tempering (``--fit=pt``) extension to DREAM.  Whereas DREAM runs with a
+constant temperature, $T=1$, parallel tempering runs with multiple
+temperatures concurrently.   The high temperature points are able to 
+walk up steep hills in the search space, possibly crossing over into a
+neighbouring valley.  The low temperature points aggressively seek the
+nearest local minimum, rejecting any proposed point that is worse than
+the current.  Differential evolution helps adapt the steps to the shape
+of the search space, increasing the chances that the random step will be
+a step in the right direction.  The current implementation uses a fixed
+set of temperatures defaulting to ``--Tmin=0.1`` through ``--Tmax=10`` in
+``--nT=25`` steps; future versions should adapt the temperature based
+on the fitting problem.
+
+Parallel tempering is run like dream, but with optional temperature
+controls::
+
+   bumps --fit=pt --burn=1000 --samples=1e5 --init=cov --parallel --pars=T1/model.par model.py --store=T2
+
+Parallel tempering does not yet generate the uncertainty plots provided
+by DREAM.  The state is retained along the temperature for each point,
+but the code to generate histograms from points weighted by inverse
+temperature has not yet been written.
+
+Parallel tempering performance has been disappointing.  In theory it
+should be more robust than DREAM, but in practice, we are using a
+restricted version of differential evolution with the population
+defined by the current chain rather than a set of chains running in
+parallel.  When the Markov chain has converged these populations
+should be equivalent, but apparently this optimization interferes
+with convergence.  Time permitting, we will improve this algorithm
+and look for other ways to improve upon the robustness of DREAM.
+
+
+Command Line
+============
+
+The GUI version of Bumps is slower because it frequently updates the graphs
+showing the best current fit.
+
+To run multiple models overnight, starting each as the previous one
+completes, create a batch file (e.g., run.bat) with one line per model.
+Append the ``--batch`` option to the end of each command line so the program
+doesn't stop to show interactive graphs::
+
+    bumps model.py ... --parallel --batch
+
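+For example, *run.bat* might contain one fully specified fit per line, each
+writing to its own store directory (a sketch, assuming the model scripts
+exist)::
+
+    bumps model1.py --fit=dream --parallel --batch --store=T1
+    bumps model2.py --fit=dream --parallel --batch --store=T2
+    bumps model3.py --fit=dream --parallel --batch --store=T3
+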
+You can view the fitted results in the GUI the next morning using::
+
+    bumps --edit model.py --pars=T1/model.par
diff --git a/doc/guide/index.rst b/doc/guide/index.rst
new file mode 100644
index 0000000..0412a8e
--- /dev/null
+++ b/doc/guide/index.rst
@@ -0,0 +1,97 @@
+.. _users-guide-index:
+
+############
+User's Guide
+############
+
+Bumps is designed to determine the ideal model parameters for a given
+set of measurements, and provide uncertainty on the parameter values.
+This is an inverse problem, where measured data can be predicted from
+theory, but theory cannot be directly inferred from measured data.  This
+means that bumps must search through parameter space, calling the theory
+function many times to find the parameter values that are most consistent
+with the data.
+
+Unlike traditional Levenberg-Marquardt fitting programs, Bumps does not
+require normally distributed measurement uncertainty.  If a measurement
+comes from counting statistics, for example, you can define your model with
+poisson probability rather than gaussian probability.  Parameter values
+can have constraints.  For example, if the size of a sample is known to
+within 5%, the size parameter in the model can be set to a gaussian distribution
+with a standard deviation of 5%.  Simple bounds are also supported.  Parameter
+expressions allow you to set the value of a parameter based on other
+parameters, which allows simultaneous fitting of multiple datasets to
+different models without having to define a specialized fit function.
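+
+For example, a hypothetical model *M* with a *size* parameter known to be
+near 100 to within 5% might be constrained as follows (a sketch; *range*
+sets simple bounds and *dev* is assumed to set a gaussian prior)::
+
+    M.size.range(80, 120)   # simple bounds
+    M.size.dev(5)           # gaussian prior with standard deviation 5 (assumed helper)
+    M2.size = M.size        # parameter expression tying a second model to the same size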
+
+Bumps includes Markov chain Monte Carlo (MCMC) methods to compute the
+joint distribution of parameter probabilities.  These methods require
+hundreds of thousands of function calls to explore the search space, so
+for moderately complex problems you need to run in parallel.  Bumps
+can fully utilize multiple cores on one computer, or run on
+supercomputing clusters through MPI.
+
+..
+
+    # Data handling has been removed so that we can ship a pure python package.
+    In addition to inverse problem solving, bumps has acquired code for
+    theory building and data handling.  For example, many problems have
+    measurements in which the instrument resolution plays a role, and
+    the theory function must be convolved with a data dependent resolution
+    function.
+
+:ref:`intro-guide`
+
+     Model scripts associate a sample description with data and fitting
+     options to define the system you wish to refine.
+
+:ref:`data-guide`
+
+     Data is loaded from instrument specific file
+     formats into a generic :class:`Probe <bumps.data.Probe>`.  The
+     probe object manages the data view and by extension, the view of
+     the theory.  The probe object also knows the measurement resolution,
+     and controls the set of theory points that must be evaluated
+     in order to compute the expected value at each point.
+
+:ref:`experiment-guide`
+
+     Sample descriptions and data sets are combined into an
+     :class:`Experiment <bumps.experiment.Experiment>` object,
+     allowing the program to compute the expected reflectivity
+     from the sample and the probability that reflectivity measured
+     could have come from that sample.  For complex cases, where the
+     sample varies on a length scale larger than the coherence length
+     of the probe, you may need to model your measurement with a
+     :class:`CompositeExperiment <bumps.experiment.CompositeExperiment>`.
+
+:ref:`parameter-guide`
+
+     The adjustable values in each component of the system are defined
+     by :class:`Parameter <bumps.parameter>` objects.  When you
+     set the range on a parameter, the system will be able to automatically
+     adjust the value in order to find the best match between theory
+     and data.
+
+:ref:`fitting-guide`
+
+     One or more experiments can be combined into a
+     :class:`FitProblem <bumps.fitproblem.FitProblem>`.  This is then
+     given to one of the many fitters, such as
+     :class:`DEFit <refl1d.fitter.PTFit>`, which adjust the fitting
+     parameters, trying to find the best fit.  See :ref:`optimizer-guide`
+     for a description of available optimizers and :ref:`option-guide` for
+     a description of the bumps options.  Entropy can be calculated when
+     the fit is complete.  See :ref:`entropy-guide`.
+
+
+.. toctree::
+    :hidden:
+
+    intro.rst
+    data.rst
+    experiment.rst
+    parameter.rst
+    fitting.rst
+    optimizer.rst
+    options.rst
+    entropy.rst
diff --git a/doc/guide/intro.rst b/doc/guide/intro.rst
new file mode 100644
index 0000000..8697e63
--- /dev/null
+++ b/doc/guide/intro.rst
@@ -0,0 +1,70 @@
+.. _intro-guide:
+
+***********
+Using Bumps
+***********
+
+.. contents:: :local:
+
+The first step in using Bumps is to define a fit file.  This is python
+code defining the function, the fitting parameters and any data that is
+being fitted.
+
+A fit file usually starts with an import statement::
+
+    from bumps.names import *
+
+This imports names from :mod:`bumps.names` and makes them available to the
+model definition.
+
+Next the fit file should load the data with something like *np.loadtxt*
+which loads columnar ASCII data into an array.  This data feeds into a
+:class:`Fitness <bumps.fitproblem.Fitness>` function for a particular
+model that gives the  probability of seeing the data for a given set of
+model parameters.  These model functions can be quite complex, involving
+not only the calculation of the theory function, but also simulating
+instrumental resolution and background signal.
+
+The fitness function will have :class:`Parameter <bumps.parameter.Parameter>`
+objects defining the fittable parameters.  Usually the model is initialized
+without any fitted parameters, allowing the user to set a
+:meth:`range <bumps.parameter.Parameter.range>` on each parameter that
+needs to be fitted.  Although it is a little tedious to set up, keeping the
+fitted ranges separate from the model definition works better in the fitting
+process, which usually involves multiple iterations with different
+configurations. It is convenient to be able to turn on and off fitting for
+individual parameters with a simple comment character ('#') at the start of
+the line.
+
+Every fit file ends with a :func:`FitProblem <bumps.fitproblem.FitProblem>`
+definition::
+
+    problem = FitProblem(model)
+
+In fact, this is the only requirement of the fit file.  The Bumps engine
+loads the fit file, retrieves the *problem* symbol and feeds it to the selected
+:mod:`fitter <bumps.fitter>`.  Some fit files do not even use *FitProblem* to
+define *problem*, or use *Parameter* objects for the fitted parameters, so
+long as *problem* implements the
+:class:`BaseFitProblem <bumps.fitproblem.BaseFitProblem>` interface, which
+provides *getp* to get the existing parameter vector, *setp* to set a new
+parameter vector, *bounds* to return the parameter bounds, and *nllf* to
+compute the negative log likelihood function.  The remaining methods are
+optional.
+
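+Putting these pieces together, a minimal fit file for a straight-line fit
+might look like the following (a sketch, assuming a three-column text file
+*data.txt* containing x, y and measurement uncertainty, and the
+:class:`Curve <bumps.curve.Curve>` wrapper exported by :mod:`bumps.names`)::
+
+    import numpy as np
+    from bumps.names import Curve, FitProblem
+
+    # Load columnar ASCII data: x, y, dy.
+    x, y, dy = np.loadtxt("data.txt").T
+
+    def line(x, m, b):
+        return m*x + b
+
+    # Wrap the theory function and data in a Fitness object with
+    # initial values for the fitted parameters.
+    M = Curve(line, x, y, dy, m=1, b=1)
+    M.m.range(0, 5)      # fit the slope
+    M.b.range(-5, 5)     # comment out this line to hold the intercept fixed
+
+    problem = FitProblem(M)
+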
+Note that the pattern of importing all names from a file using
+*from bumps.names import \**, while convenient for simple scripts, can
+make the code more difficult to understand later, and can lead to
+unexpected results when moving code around to other files.  The alternative
+pattern to use is::
+
+    import bumps.names as bmp
+    ...
+    problem = bmp.FitProblem(model)
+
+This documents to the reader unfamiliar with your code (such as you, dear
+reader, when looking at your model files two years from now) exactly where
+the name comes from.
+
+The :ref:`tutorial-index` walks through the process for several different
+data sets.
\ No newline at end of file
diff --git a/doc/guide/optimizer.rst b/doc/guide/optimizer.rst
new file mode 100644
index 0000000..1ee9512
--- /dev/null
+++ b/doc/guide/optimizer.rst
@@ -0,0 +1,767 @@
+.. _optimizer-guide:
+
+*******************
+Optimizer Selection
+*******************
+
+Bumps has a number of different optimizers available, each with its own
+control parameters:
+
+* :ref:`fit-lm`
+* :ref:`fit-amoeba`
+* :ref:`fit-dream`
+* :ref:`fit-de`
+* :ref:`fit-newton`
+* :ref:`fit-rl` [experimental]
+* :ref:`fit-ps` [experimental]
+* :ref:`fit-pt` [experimental]
+
+In general there is a trade-off between convergence
+rate and robustness, with the fastest algorithms most likely to find a
+local minimum rather than a global minimum.   The gradient descent algorithms
+(:ref:`fit-lm`, :ref:`fit-newton`) tend to be fast but they will find local
+minima only, while the population algorithms (:ref:`fit-dream`, :ref:`fit-de`)
+are more robust and likely slower.   :ref:`fit-amoeba` is somewhere between,
+with a small population keeping the search local but more robust than the
+gradient descent algorithms.
+
+Each algorithm has its own set of control parameters for adjusting the
+search process and the stopping conditions.  The same option may mean
+slightly different things to different optimizers.  The bumps package
+provides a dialog box for selecting the optimizer and its options
+when running the wx GUI application.  This only includes the common options
+for the most useful optimizers.  For full control, the fit will need to
+be run from the command line interface or through a python script.
+
+For parameter uncertainty, most algorithms use the covariance matrix at
+the optimum to estimate an uncertainty ellipse.  This is okay for a
+preliminary analysis, but only works reliably for weakly correlated parameters.
+For full uncertainty analysis, :ref:`fit-dream` uses a random walk to explore
+the parameter space near the minimum, showing pair-wise correlations
+amongst the parameter values.  In order for :ref:`fit-dream` to return the
+correct uncertainty, the function to be optimized should be a conditional
+probability density, with *nllf* as the negative log likelihood function
+of seeing point $x$ in the parameter space.  Other functions
+can be fitted, but uncertainty estimates will be meaningless.
+
+Most algorithms have been adapted to run in parallel at least to some degree.
+The  implementation is not heavily tuned, either in terms of minimizing the
+overhead per function evaluation or for distributing the problem across
+multiple processors.   If the theory function is implemented in parallel,
+then the optimizer should be run in serial.  Mixed mode is also possible
+when running on a cluster with a multi-threaded theory function.  In this
+case, only one theory function will be evaluated on each cluster node, but
+the optimizer will distribute the parameter values to the cluster nodes
+in parallel.  Do not run serial algorithms (:ref:`fit-lm`, :ref:`fit-newton`) on
+a cluster.
+
+We have included a number of optimizers in Bumps that did not perform
+particularly well on our problem sets.  However, they may be perfect
+for your problem, so we have left them in the package for you to explore.
+They are not available in the GUI selection.
+
+.. _fit-lm:
+
+Levenberg-Marquardt
+===================
+
+.. image:: fit-lm.png
+    :alt: Levenberg-Marquardt option screen.
+    :align: left
+
+The Levenberg-Marquardt algorithm has been
+the standard method for non-linear data fitting.  As a gradient descent
+trust region method, it starts at the initial value of the function and
+steps in the direction of the derivative until it reaches the minimum.
+Set up as an explicit minimization of the sum of square differences between
+theory and model, it uses a numerical approximation of the Jacobian matrix
+to set the step direction and an adaptive algorithm to set the size of
+the trust region.
+
+When to use
+-----------
+
+Use this method when you have a reasonable fit near the minimum, and
+you want to get the best possible value.  This can then be used as the starting
+point for uncertainty analysis using :ref:`fit-dream`.  This method requires
+that the problem definition includes a *residuals* method, but this should
+always be true when fitting data.
+
+When modeling the results of an experiment, the best fit value is an
+accident of the measurement.  Redo the same measurement, and the slightly
+different values you measure will lead to a different best fit.  The
+important quantity to report is the credible interval covering
+68%  (1-\ $\sigma$) or 95% (2-\ $\sigma$\ ) of the range of
+parameter values that are somewhat consistent with the data.
+
+This method uses *lmfit* from *scipy*, and does not run in parallel.
+
+Options
+-------
+
+*Steps* is the number of gradient steps to take.  Each step requires
+a calculation of the Jacobian matrix to determine the direction.  This
+needs $2 m n$ function evaluations, where $n$ is the number of parameters and
+each function evaluation involves $m$ data points (assuming a center point
+formula for the finite difference estimate of the derivative).  The resulting
+linear equation is then solved, but for small $n$ and expensive function
+evaluation this overhead can be ignored.  Use ``--steps=n`` from
+the command line.
+
+*f(x) tolerance* and *x tolerance* are used to determine when
+the fit has reached the point where no significant improvement is expected.
+If the function value does not improve significantly within the step, or
+the step is too short, then the fit will terminate.  Use ``--ftol=v`` and
+``--xtol=v`` from the command line.
+
+From the command line, ``--starts=n`` will automatically restart the algorithm
+after it has converged so that a slightly better value can be found. If
+``--keep_best`` is included then restart will use a value near the minimum,
+otherwise it will restart the fit from a random point in the parameter space.
+
+Use ``--fit=lm`` to select the Levenberg-Marquardt fitter from the command line.
+
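+A typical command line might combine these options as follows (a sketch,
+assuming the model script *model.py*)::
+
+    bumps model.py --fit=lm --steps=200 --ftol=1e-8 --xtol=1e-8 --store=T1
+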
+References
+----------
+
+.. [Levenberg1944]
+    Levenberg, K.
+    *Quarterly Journal of Applied Mathematics*
+    1944, II (2), 164–168.
+
+.. [Marquardt1963]
+    Marquardt, D. W.
+    *Journal of the Society for Industrial and Applied Mathematics*
+    1963, 11 (2), 431–441.
+    DOI: `10.1137/0111030 <http://dx.doi.org/10.1137/0111030>`_
+
+.. _fit-amoeba:
+
+Nelder-Mead Simplex
+===================
+
+.. image:: fit-amoeba.png
+    :alt: Nelder-Mead Simplex option screen.
+    :align: left
+
+The Nelder-Mead downhill simplex algorithm is a robust optimizer which
+does not require the function to be continuous or differentiable.
+It uses the relative values of the function at the corners of a
+simplex (an n-dimensional triangle) to decide which points of the simplex
+to update.  It will take the worst value and try moving it inward or
+outward, or reflect it through the centroid of the remaining values,
+stopping when it finds a better value.  If none of these values are
+better, then it will shrink the simplex and start again.  The name
+amoeba comes from the book *Numerical Recipes* [Press1992]_ wherein they
+describe the search as acting like an amoeba, squeezing through narrow valleys
+as it makes its way down to the minimum.
+
+When to use
+-----------
+
+Use this method as a first fit to your model.  If your fitting function
+is well behaved with few local minima this will give a quick estimate of
+the model, and help you decide if the model needs to be refined.  If your
+function is poorly behaved, you will need to select a good initial value
+before fitting, or use a more robust method such
+as :ref:`fit-de` or :ref:`fit-dream`.
+
+The uncertainty reported comes from a numerical derivative estimate at the
+minimum.
+
+This method requires a series of function updates, and does not benefit
+much from running in parallel.
+
+Options
+-------
+
+*Steps* is the number of simplex update iterations to perform.  Most updates
+require one or two function evaluations, but shrinking the simplex evaluates
+every value in the simplex. Use ``--steps=n`` from the command line.
+
+*Starts* tells the optimizer to restart a given number of times.
+Each time it restarts it uses a random starting point.   Use
+``--starts=n`` from the command line.
+
+*Simplex radius* is the initial size of the simplex, as a portion of
+the bounds defining the parameter space.  If a parameter is unbounded, then
+the radius will be treated as a portion of the parameter value. Use
+``--radius=n`` from the command line.
+
+*x tolerance* and *f(x) tolerance* are used to determine when the
+fit has reached the point where no significant improvement is expected.
+If the simplex is tiny (that is, the corners are close to each other) and
+flat (that is, the values at the corners are close to each other),
+then the fit will terminate.  Use ``--xtol=v`` and ``--ftol=v`` from
+the command line.
+
+From the command line, use ``--keep_best`` so that restarts are centered on a
+value near the minimum rather than restarting from a random point within the
+parameter bounds.
+
+Use ``--fit=amoeba`` to select the Nelder-Mead simplex fitter from the
+command line.
+
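+For example, a multi-start simplex fit that restarts near the best value
+found so far might be run as (a sketch)::
+
+    bumps model.py --fit=amoeba --steps=1000 --starts=10 --keep_best --store=T1
+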
+References
+----------
+
+.. [Nelder1965]
+    Nelder, J. A.; Mead, R.
+    *The Computer Journal*
+    1965, 7 (4), 308–313.
+    DOI: `10.1093/comjnl/7.4.308 <http://dx.doi.org/10.1093/comjnl/7.4.308>`_
+
+.. [Press1992]
+   Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; Vetterling, W. T.
+   In *Numerical Recipes in C: The Art of Scientific Computing, Second Edition*;
+   Cambridge University Press: Cambridge; New York, 1992; pp 408–412.
+
+
+.. _fit-newton:
+
+Quasi-Newton BFGS
+=================
+
+.. image:: fit-newton.png
+    :alt: Quasi-Newton BFGS option screen.
+    :align: left
+
+Broyden-Fletcher-Goldfarb-Shanno is a gradient descent method which uses the
+gradient to determine the step direction and an approximation of the Hessian
+matrix to estimate the curvature and guess a step size.  The step is further
+refined with a one-dimensional search in the direction of the gradient.
+
+When to use
+-----------
+
+Like :ref:`fit-lm`, this method converges quickly to the minimum.  It does
+not assume that the problem is in the form of a sum of squares and does not
+require a *residuals* method.
+
+The $n$ partial derivatives are computed in parallel.
+
+Options
+-------
+
+*Steps* is the number of gradient steps to take.  Each step requires
+a calculation of the Jacobian matrix to determine the direction.  This
+needs $2 m n$ function evaluations, where $n$ is the number of parameters and
+each function evaluation involves $m$ data points (assuming a center point
+formula for the finite difference estimate of the derivative).  The resulting
+linear equation is then solved, but for small $n$ and expensive function
+evaluation this overhead can be ignored.
+Use ``--steps=n`` from the command line.
+
+*Starts* tells the optimizer to restart a given number of times.
+Each time it restarts it uses a random starting point.
+Use ``--starts=n`` from the command line.
+
+*f(x) tolerance* and *x tolerance* are used to determine when
+the fit has reached the point where no significant improvement is expected.
+If the improvement in the function value is small, or the step is too
+short, then the fit will terminate.  Use ``--ftol=v`` and ``--xtol=v``
+from the command line.
+
+From the command line, ``--keep_best`` uses a value near the previous minimum
+when restarting instead of using a random value within the parameter bounds.
+
+Use ``--fit=newton`` to select BFGS from the commandline.
+
+References
+----------
+
+.. [Dennis1987]
+    Dennis, J. E.; Schnabel, R. B.
+    *Numerical Methods for Unconstrained Optimization and Nonlinear Equations*;
+    Society for Industrial and Applied Mathematics: Philadelphia, 1987.
+
+
+.. _fit-de:
+
+Differential Evolution
+======================
+
+.. image:: fit-de.png
+    :alt: Differential Evolution option screen.
+    :align: left
+
+Differential evolution is a population based algorithm which uses differences
+between points as a guide to selecting new points.  For each member of the
+population a pair of points is chosen at random, and a difference vector is
+computed.  This vector is scaled, and a random subset of its components are
+added to the current point based on crossover ratio. This new point is
+evaluated, and if its value is lower than the current point, it replaces
+it in the population.   There are many variations available within DE that
+have not been exposed in Bumps.  Interested users can modify
+:class:`bumps.fitters.DEFit` and experiment with different crossover and
+mutation algorithms, and perhaps add them as command line options.
+
+Differential evolution is a robust directed search strategy.  Early in the
+search, when the population is disperse, the difference vectors are large
+and the search remains broad.  As the search progresses, more of the
+population goes into the valleys and eventually all the points end up in
+local minima.  Now the differences between random pairs will often be small
+and the search will become more localized.
+
+The population is initialized according to the prior probability distribution
+for each parameter.  That is, if the parameter is bounded, it will use
+a uniform random number generated within the bounds.  If it is unbounded, it
+will use a uniform value in [0,1].  If the parameter corresponds to the result
+of a previous measurement with mean $\mu$ and standard deviation $\sigma$,
+then the initial values will be pulled from a gaussian random number generator.
+
+When to use
+-----------
+
+Convergence with differential evolution will be slower, but more robust.
+
+Each update will evaluate $k$ points in parallel, where $k$ is the size
+of the population.
+
+Options
+-------
+
+*Steps* is the number of iterations.  Each step updates each member
+of the population.  The population size scales with the number of fitted
+parameters. Use ``--steps=n`` from the command line.
+
+*Population* determines the size of the population.  The number of
+individuals, $k$, is equal to the number of fitted parameters times the
+population scale factor.  Use ``--pop=k`` from the command line.
+
+*Crossover ratio* determines what proportion of the dimensions to update
+at each step.  Smaller values will likely lead to slower convergence, but
+more robust results.  Values must be between 0 and 1.  Use ``--CR=v`` from
+the command line.
+
+*Scale* determines how much to scale each difference vector before adding
+it to the candidate point.  The selected mutation algorithm chooses a scale
+factor uniformly in $[0,F]$.  Use ``--F=v`` from the command line.
+
+*f(x) tolerance* and *x tolerance* are used to determine when the
+fit has reached the point where no significant improvement is expected.
+If the population is flat (that is, the minimum and maximum values are
+within tolerance) and tiny (that is, all the points are close to each
+other) then the fit will terminate.  Use ``--ftol=v`` and ``--xtol=v`` from the
+command line.
+
+Use ``--fit=de`` to select differential evolution from the commandline.
+
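+A typical differential evolution run might look like (a sketch)::
+
+    bumps model.py --fit=de --steps=1000 --pop=10 --CR=0.9 --parallel --store=T1
+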
+References
+----------
+
+.. [Storn1997]
+    Storn, R.; Price, K.
+    *Journal of Global Optimization*
+    1997, 11 (4), 341–359.
+    DOI: `10.1023/A:1008202821328 <http://dx.doi.org/10.1023/A:1008202821328>`_
+
+
+
+.. _fit-dream:
+
+DREAM
+=====
+
+.. image:: fit-dream.png
+    :alt: DREAM option screen.
+    :align: left
+
+DREAM is a population based algorithm like differential evolution, but
+instead of only keeping individuals which improve each generation, it
+will sometimes keep individuals which get worse.  Although it is not
+fast and does not give the very best value for the function, we have
+found it to be a robust fitting engine which will give a good value given
+enough time.
+
+The progress of each individual in the population from generation to
+generation can be considered a Markov chain, whose transition probability
+is equal to the probability of taking the step times the probability
+that it keeps the step based on the difference in value between the points.
+By including a purely random stepper with some probability, the detailed
+balance condition is preserved, and the Markov chain converges onto
+the underlying equilibrium distribution.  If the theory function represents
+the conditional probability of selecting each point in the parameter
+space, then the resulting chain is a random draw from the posterior
+distribution.
+
+This means that the DREAM algorithm can be used to determine the parameter
+uncertainties.  Unlike the hessian estimate at the minimum that is
+used to report uncertainties from the other fitters, the resulting
+uncertainty need not be gaussian.  Indeed, the resulting distribution can
+even be multi-modal.  Fits to measured data using theory functions that
+have symmetric solutions have shown all equivalent solutions with approximately
+equal probability.
+
+When to use
+-----------
+
+Use DREAM when you need a robust fitting algorithm.  It takes longer but
+it does an excellent job of exploring different minima and getting close
+to the global optimum.
+
+Use DREAM when you want a detailed analysis of the parameter uncertainty.
+
+Like differential evolution, DREAM will evaluate $k$ points in parallel,
+where $k$ is the size of the population.
+
+Options
+-------
+
+*Samples* is the number of points to be drawn from the Markov chain.
+To estimate the 68% interval to two digits of precision, at least
+1e5 (or 100,000) samples are needed.  For the 95% interval, 1e6
+(or 1,000,000) samples are needed.  The default 1e4 samples
+gives a rough approximation of the uncertainty relatively quickly.
+Use ``--samples=n`` from the command line.
+
+*Burn-in Steps* is the number of iterations required for the Markov
+chain to converge to the equilibrium distribution.  If the fit ends
+early, the tail of the burn will be saved to the start of the steps.
+Use ``--burn=n`` from the command line.
+
+*Population* determines the size of the population.  The number of
+individuals, $k$, is equal to the number of fitted parameters times the
+population scale factor.  Use ``--pop=k`` from the command line.
+
+*Initializer* determines how the population will be initialized.
+The options are as follows:
+
+     *eps* (epsilon ball), in which the entire initial population is chosen
+     at random from within a tiny hypersphere centered about the initial point
+
+     *lhs* (latin hypercube), which chops the bounds within each dimension
+     into $k$ equal sized chunks where $k$ is the size of the population and
+     makes sure that each parameter has at least one value within each chunk
+     across the population.
+
+     *cov* (covariance matrix), in which the uncertainty is estimated using
+     the covariance matrix at the initial point, and points are selected
+     at random from the corresponding gaussian ellipsoid
+
+     *random* (uniform random), in which the points are selected at random
+     within the bounds of the parameters
+
+Use ``--init=type`` from the command line.
+
+
+*Thinning* is the amount of thinning to use when collecting the
+population.  If the fit is somewhat stuck, with most steps not improving
+the fit, then you will need to thin the population to get proper
+statistics.  Use ``--thin=k`` from the command line.
+
+*Calculate entropy*, if true, computes the entropy for the fit.  This is
+an estimate of the amount of information in the data.  Use ``--entropy``
+from the command line.
+
+*Steps*, if not zero, determines the number of iterations to use for
+drawing samples after burn in. Each iteration updates the full population,
+which is (population x number of fitted parameters) points. This option
+is available for compatibility; it is more useful to set the number of
+samples directly.  Use ``--steps=n`` from the command line.
+
+Use ``--fit=dream`` to select DREAM from the commandline.
+
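+For example, a full uncertainty analysis with an entropy estimate might be
+run as (a sketch)::
+
+    bumps model.py --fit=dream --burn=1000 --samples=1e5 --init=lhs --entropy --parallel --store=T1
+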
+Output
+------
+
+DREAM produces a number of different outputs, and there are a number of
+things to check before using its reported uncertainty values.  The main
+goal of selecting ``--burn=n`` is to wait long enough to reach the
+equilibrium distribution.
+
+.. figure:: dream-incomplete.png
+    :alt: example of incomplete fit
+
+    This DREAM fit is incomplete, as can be seen on all four plots.  The
+    *Convergence* plot is still decreasing, the *Parameter Trace* plot does not
+    show random mixing of Markov chain values, the *Correlations* plots are
+    fuzzy and mostly empty, the *Uncertainty* plot shows black histograms
+    (indicating that there are a few stray values far away from the best) and
+    green maximum likelihood spikes not matching the histogram (indicating
+    that the region around the best value has not been adequately explored).
+
+.. figure:: dream-complete.png
+    :alt: example of a completed fit
+
+    This DREAM fit completed successfully.  The *Convergence* plot is flat,
+    the *Parameter Trace* plot is flat and messy, the *Correlations* plots
+    show nice blobs (and a bit of correlation between the *M1.radius* parameter
+    and the *M1.radius.width* parameter), and the uncertainty plots show
+    a narrow range of -log(P) values in the mostly brown histograms and
+    a good match to the green constrained maximum likelihood line.
+
+For each parameter in the fit, DREAM finds the mean, median and best value,
+as well as the 68% and 95% credible intervals.  The mean value is
+defined as $\int x P(x) dx$, which is just the expected value of the
+probability distribution for the parameter.  The median value is the 50%
+point in the probability distribution, and the best value is the maximum
+likelihood value seen in the random walk.  The credible intervals are the
+central intervals which capture 68% and 95% of the parameter values
+respectively.  You need approximately 100,000 samples to get two digits of
+precision on the 68% interval, and 1,000,000 samples for the 95% interval.
+
+.. table:: Example fit output
+
+    = =============== ============ ======== ======== ================= =================
+    #  Parameter         mean       median    best   [   68% interval] [   95% interval]
+    = =============== ============ ======== ======== ================= =================
+    1   M1.background 0.059925(41) 0.059924 0.059922 [0.05988 0.05997] [0.05985 0.06000]
+    2       M1.radius   2345.3(15) 2345.234 2345.174 [2343.83 2346.74] [2342.36 2348.29]
+    3 M1.radius.width  0.00775(41)  0.00774  0.00777 [ 0.0074  0.0081] [ 0.0070  0.0086]
+    4        M1.scale  0.21722(20) 0.217218 0.217244 [0.21702 0.21743] [0.21681 0.21761]
+    = =============== ============ ======== ======== ================= =================
+
+The *Convergence* plot shows the range of $\chi^2$ values in the population
+for each iteration.  The band shows the 68% of values around the median, and
+the solid line shows the minimum value.  If the distribution has reached
+equilibrium, then the convergence graph should be roughly flat, with little
+change in the minimum value throughout the graph.  If there is no convergence,
+then the remaining plots don't mean much.
+
+The *Correlations* plot shows cross correlation between each pair of
+parameters.  If the parameters are completely uncorrelated then the boxes
+should contain circles.  Diagonals indicate strong correlation.  Square
+blocks indicate that the fit is not sensitive to one of the parameters.
+The range plotted on the correlation plot is determined by the 95% interval
+of the data.  The individual correlation plots are too small to show the
+range of values for the parameters.  These can instead be read from the
+*Uncertainty* plot for each parameter, which covers the same range of values
+and indicates 68% and 95% intervals.  If there are some chains that are
+wandering around away from the minimum, then the plot will look fuzzy, and
+not have a nice blob in the center.  If a correlation plot has multiple blobs,
+then there are multiple minima in your problem space, usually because there
+are symmetries in the problem definition.  For example, a model fitting
+$x + a^2$ will have identical solutions for $\pm\,a$.
+
+The *Uncertainty* plot shows histograms for each fitted parameter generated
+from the values for that parameter across all chains.  Within each histogram
+bar the values are sorted and displayed as a gradient from black to copper,
+with black values having the lowest $\chi^2$ and copper values having the
+highest.  The resulting histogram should be dark brown, with a black hump
+in the center and light brown tips.  If there are large lumps of light brown,
+or excessive black, then it's likely that the optimizer did not converge.  The
+green line over the histogram shows the best value seen within each
+histogram bin (the maximum likelihood given $p_k = x$).
+With enough samples and proper convergence, it should roughly follow the
+outline of the histogram.  The yellow band in the center of the plot
+represents the 68% interval for the data.  The histogram cuts off at 95%.
+These values along with the median are shown as labels along the x axis.
+The green asterisk represents the best value, the green *E* the mean value
+and the vertical green line the median value.  If the fit is not sensitive
+to a parameter, or if two parameters are strongly correlated, the parameter
+histogram will show a box rather than a hump.  Spiky shapes (either in the
+histogram or the maximum likelihood line) indicate lack of convergence or
+maybe not enough steps.  A chopped histogram indicates that the range for
+that parameter is too small.
+
+The *Parameter Trace* plot is diagnostic for models which have poor mixing.
+In such cases, no matter how the parameter values are changing, they are
+landing on much worse values of $\chi^2$.  This can happen if the
+problem is highly constrained with many tight and twisty valleys.
+
+The *Data and Theory* plot should show theory and data lining up pretty well,
+with the theory overlaying about 2/3 of the error bars on the data
+(1-\ $\sigma$ = 68%).  The *Residuals* plot shows the difference between
+theory and data divided by uncertainty.  The residuals should be 2/3 within
+[-1, 1].  They should not show any structure, such as humps where the theory
+misses the data for long stretches.  This indicates some feature missing
+from the model, or a lack of convergence to the best model.
+
+If entropy is requested, then bumps will show the total number of bits of
+information in the fit.  This derives from the entropy term:
+
+.. math::
+
+    S = -\int_\Theta p(\Theta) \log p(\Theta)\, d\Theta
+
+Using entropy and simulation we hope to be able to make experiment
+planning decisions in a way that maximizes information, by estimating
+whether it is better to measure more precisely or to measure different
+but related values and fit them with shared parameters.
+
+
+References
+----------
+
+.. [Vrugt2009]
+    Vrugt, J. A.; Ter Braak, C. J. F.; Diks, C. G. H.; Robinson, B. A.;
+    Hyman, J. M.; Higdon, D.
+    *International Journal of Nonlinear Sciences and Numerical Simulation*
+    2009, 10 (3), 273–290.
+    DOI: `10.1515/IJNSNS.2009.10.3.273 <http://dx.doi.org/10.1515/IJNSNS.2009.10.3.273>`_
+
+.. [Kramer2010]
+    Kramer, A.; Hasenauer, J.; Allgower, F.; Radde, N.
+    *In 2010 IEEE International Conference on Control Applications (CCA)*
+    2010; pp 493–498.
+    DOI: `10.1109/CCA.2010.5611198 <http://dx.doi.org/10.1109/CCA.2010.5611198>`_
+
+.. [JCGM2008]
+    JCGM.
+    *Evaluation of measurement data — Supplement 1 to the “Guide to the
+    expression of uncertainty in measurement” — Propagation of distributions
+    using a Monte Carlo method*; Joint Committee for Guides in Metrology,
+    JCGM 101:2008; Geneva, Switzerland, 2008; p 90.
+    `<http://www.bipm.org/utils/common/documents/jcgm/JCGM_101_2008_E.pdf>`_
+
+
+
+.. _fit-ps:
+
+Particle Swarm
+==============
+
+Inspired by bird flocking behaviour, the particle swarm algorithm is a
+population-based method which updates an individual according to its
+momentum and a force toward the current best fit parameter values.  We
+did not explore variations of this algorithm in any detail.
+
+When to use
+-----------
+
+Particle swarm performed well enough in our low dimensional test problems,
+but made little progress when more fit parameters were added.
+
+The population updates can run in parallel, but the tiny population size
+limits the amount of parallelism.
+
+Options
+-------
+
+``--steps=n`` is the number of iterations.  Each step updates each member
+of the population.  The population size scales with the number of fitted
+parameters.
+
+``--pop=k`` determines the size of the population.  The number of
+individuals, $k$, is equal to the number of fitted parameters times the
+population scale factor.  The default scale factor is 1.
+
+Use ``--fit=ps`` to select particle swarm from the commandline.
+
+
+References
+----------
+
+.. [Kennedy1995]
+    Kennedy, J.; Eberhart, R.
+    Particle Swarm Optimization
+    *Proceedings of IEEE International Conference on Neural Networks. IV.*
+    1995; pp 1942–1948.
+    DOI: `10.1109/ICNN.1995.488968 <http://dx.doi.org/10.1109/ICNN.1995.488968>`_
+
+
+.. _fit-rl:
+
+Random Lines
+============
+
+Most of the population based algorithms ignore the value of the function
+when choosing the points in the next iteration.  Random lines is a new
+style of algorithm which fits a quadratic model to a selection from the
+population, and uses that model to propose a new point in the next
+generation of the population.  The hope is that the method will inherit
+the robustness of the population based algorithms as well as the rapid
+convergence of the newton descent algorithms.
+
+When to use
+-----------
+
+Random lines works very well for some of our test problems, showing
+rapid convergence to the optimum, but on other problems it makes
+very little progress.
+
+The population updates can run in parallel.
+
+Options
+-------
+
+``--steps=n`` is the number of iterations.  Each step updates each member
+of the population.  The population size scales with the number of fitted
+parameters.
+
+``--pop=k`` determines the size of the population.  The number of
+individuals, $k$, is equal to the number of fitted parameters times the
+population scale factor.  The default scale factor is 0.5.
+
+``--CR=v`` is the crossover ratio, determining what proportion of the
+dimensions to update at each step.  Values must be between 0 and 1.
+
+``--starts=n`` tells the optimizer to restart a given number of times.
+Each time it restarts it uses a random starting point.
+
+``--keep_best`` uses a value near the previous minimum when restarting
+instead of using a random value within the parameter bounds.  This option is
+not available in the options dialog.
+
+Use ``--fit=rl`` to select random lines from the commandline.
+
+References
+----------
+
+.. [Sahin2013]
+
+    Sahin, I.
+    *An International Journal of Optimization and Control:  Theories & Applications (IJOCTA)*
+    2013, 3 (2), 111–119.
+
+
+
+.. _fit-pt:
+
+Parallel Tempering
+==================
+
+Parallel tempering is an MCMC algorithm for uncertainty analysis.  This
+version runs at multiple temperatures simultaneously, with chains at high
+temperature able to more easily jump between minima and chains at low
+temperature to fully explore the minima.  Like :ref:`fit-dream` it has a
+differential evolution stepper, but this version uses the chain history
+as the population rather than maintaining a population at each temperature.
+
+This is an experimental algorithm which does not yet perform well.
+
+When to use
+-----------
+
+When complete, parallel tempering should be used for problems with widely
+spaced local minima which dream cannot fit.
+
+Options
+-------
+
+``--steps=n`` is the number of iterations to include in the Markov
+chain.  Each iteration updates the full population.  The population size
+scales with the number of fitted parameters.
+
+``--burn=n`` is the number of iterations required for the Markov
+chain to converge to the equilibrium distribution.  If the fit ends
+early, the tail of the burn will be saved to the start of the steps.
+
+``--CR=v`` is the differential evolution crossover ratio to use when
+computing step size and direction.  Use a small value to step through the
+dimensions one at a time, or a large value to step through all at once.
+
+``--nT=k``, ``--Tmin=v`` and ``--Tmax=v`` specify a log-spaced initial
+distribution of temperatures.  The default is 25 points between
+0.1 and 10.  :ref:`fit-dream` runs at a fixed temperature of 1.0.
+
+Use ``--fit=pt`` to select parallel tempering from the commandline.
+
+References
+----------
+
+.. [Swendsen1986]
+    Swendsen, R. H.; Wang J. S.
+    Replica Monte Carlo simulation of spin glasses
+    *Physical Review Letters*
+    1986, 57, 2607-2609
+
+
+..
+    SNOBFIT (fit=snobfit) attempts to construct a locally quadratic model of
+    the entire search space.  While promising because it can begin to offer
+    some guarantees that the search is complete given reasonable assumptions
+    about the fitting surface, initial trials did not perform well and the
+    algorithm has not yet been tuned to our problems.
+
diff --git a/doc/guide/options.rst b/doc/guide/options.rst
new file mode 100644
index 0000000..e233fbd
--- /dev/null
+++ b/doc/guide/options.rst
@@ -0,0 +1,535 @@
+.. :
+
+    Fit option names are defined in bumps/fitters.py.  Make sure any changes
+    are made both here and there.
+
+.. _option-guide:
+
+~~~~~~~~~~~~~
+Bumps Options
+~~~~~~~~~~~~~
+
+*Bumps* has a number of options available to control the fits and the
+output.  On the command line, each option is either *--option* if it
+is True/False or *--option=value* if the option takes a value.  The
+fit control form is used by graphical users interfaces to set the optimizer
+and its controls and stopping conditions.  The long form name of the the
+option will be used on the form.  Not all controls will appear on the form,
+and will be set from the command line.
+
+**Need to describe the array of output files produced by optimizers,
+particularly dream.  Some of them (convergence plot, model plot, par file,
+model file) are common to all.  Others (mcmc points) are specific to one
+optimizer**
+
+
+Bumps Command Line
+==================
+
+Usage::
+
+    bumps [options] modelfile [modelargs]
+
+The modelfile is a Python script (i.e., a series of Python commands)
+which sets up the data, the models, and the fittable parameters.
+The model arguments are available in the modelfile as sys.argv[1:].
+Model arguments may not start with '-'.  The options all start with
+'-' and can appear in any order anywhere on the command line.
+
+
+
+
+
+
+Problem Setup
+=============
+
+.. _option-pars:
+
+``--pars``
+----------
+
+Set initial parameter values from a previous fit.  The par file is a list
+of lines with parameter name followed by parameter value on each line.
+The parameters must appear with the same name and in the same order as
+the fitted parameters in the model.  ``--preview`` will show the
+model parameters.
+
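+For example, a *model.par* file for the fit shown in the optimizer guide
+might contain (values are illustrative)::
+
+    M1.background 0.059925
+    M1.radius 2345.3
+    M1.radius.width 0.00775
+    M1.scale 0.21722
+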
+.. _option-shake:
+
+``--shake``
+-----------
+
+Set random initial values for the parameters in the model.  Note that
+shake happens after ``--simulate`` so that you can simulate a random
+model, shake it, then try to recover its initial values.
+
+.. _option-simulate:
+
+``--simulate``
+--------------
+
+Simulate a dataset using the initial problem parameters.  This is useful
+when setting up a model before an experiment to see what data it might
+produce, and for seeing how well the fitting program might recover the
+parameters of interest.
+
+.. _option-simrandom:
+
+``--simrandom``
+---------------
+
+Simulate a dataset using random initial parameters.  Because ``--shake``
+is applied after ``--simulate``, we need a separate way to shake the
+parameters before simulating the model.
+
+.. _option-noise:
+
+``--noise``
+-----------
+
+Set the noise percentage on the simulated data.  The default is 5 for 5%
+normally distributed uncertainty in the measured values.  Use ``--noise=data``
+to use the uncertainty on a dataset in the simulation.
+
+.. _option-seed:
+
+``--seed``
+----------
+
+Set a specific seed to the random number generator.  This happens before
+shaking and simulating so that fitting tests, and particularly failures,
+can be reliably reproduced.  The numpy random number generator is used
+for all values, so any consistency guarantees between versions of bumps
+over time and across platforms depends on the consistency of the numpy
+generators.
+
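+These setup options are often combined to test whether a fit can recover
+known parameter values, for example (a sketch)::
+
+    bumps model.py --simulate --shake --noise=5 --seed=42 --store=sim1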
+
+
+
+
+Stopping Conditions
+===================
+
+.. _option-steps:
+
+``--steps``
+-----------
+
+*Steps* is the number of iterations that the algorithm will perform.  The
+meaning of iterations will differ from optimizer to optimizer.  In the case
+of population based optimizers such as :ref:`fit-de`, each step is an update to
+every member of the population.  For local descent optimizers such as
+:ref:`fit-amoeba` each step is an iteration of the algorithm.
+:ref:`fit-dream` uses steps plus ``--burn`` for the total number
+of iterations.
+
+
+.. _option-ftol:
+
+``--ftol``
+----------
+
+*f(x) tolerance* uses differences in the function value to decide when the
+fit is complete.  The different fitters will interpret this in different
+ways.  The Newton descent algorithms (:ref:`fit-newton`, :ref:`fit-lm`) will use
+this as the minimum improvement of the function value with each step.  The
+population-based algorithms (:ref:`fit-de`, :ref:`fit-amoeba`) will use the
+maximum difference between highest and lowest value in the population.
+:ref:`fit-dream` does not use this stopping condition.
+
+
+.. _option-xtol:
+
+``--xtol``
+----------
+
+*x tolerance* uses differences in the parameter value to decide when the
+fit is complete.  The different fitters will interpret this in different
+ways.  The Newton descent algorithms (:ref:`fit-newton`, :ref:`fit-lm`) will use
+this as the minimum change in the parameter values with each step.   The
+population-based algorithms (:ref:`fit-de`, :ref:`fit-amoeba`) will use the
+maximum difference between highest and lowest parameter in the population.
+:ref:`fit-dream` does not use this stopping condition.
+
+
+.. _option-time:
+
+``--time``
+----------
+
+*Max time* is the maximum running time of the optimizer.  This forces
+the optimizer to stop even if tolerance or steps conditions are not met.
+It is particularly useful for batch jobs run in an environment where the
+queuing system stops the job unceremoniously when the time allocation is
+complete.  Time is checked between iterations, so be sure to set it well
+below the queue allocation so that it does not stop in the middle of an
+iteration, and so that it has time to save its state.
+
+
+
+
+
+
+Optimizer Controls
+==================
+
+
+.. _option-fit:
+
+``--fit``
+---------
+
+*Fit Algorithm* selects the optimizer.  The available optimizers are:
+
+  ====== ================
+  amoeba :ref:`fit-amoeba`
+  de     :ref:`fit-de`
+  dream  :ref:`fit-dream`
+  lm     :ref:`fit-lm`
+  newton :ref:`fit-newton`
+  pt     :ref:`fit-pt`
+  ps     :ref:`fit-ps`
+  rl     :ref:`fit-rl`
+  ====== ================
+
+The default fit method is ``--fit=amoeba``.
+
+
+.. _option-pop:
+
+``--pop``
+---------
+
+*Population* determines the size of the population.  For :ref:`fit-de` and
+:ref:`fit-dream` it is a scale factor, where the number of individuals, $k$, is
+equal to the number of fitted parameters times pop.  For :ref:`fit-amoeba`
+the number of individuals is one plus the number of fitted parameters, as
+determined by the size of the simplex.
+
+
+.. _option-init:
+
+``--init``
+----------
+
+*Initializer*  is used by population-based algorithms (:ref:`fit-dream`)
+to set the initial population.  The options are as follows:
+
+     *lhs* (latin hypercube), which chops the bounds within each dimension
+     into $k$ equal-sized chunks, where $k$ is the size of the population, and
+     makes sure that each parameter has at least one value within each chunk
+     across the population (see the sketch below).
+
+     *eps* (epsilon ball), in which the entire initial population is chosen
+     at random from within a tiny hypersphere centered about the initial point
+
+     *cov* (covariance matrix), in which the uncertainty is estimated using
+     the covariance matrix at the initial point, and points are selected
+     at random from the corresponding gaussian ellipsoid
+
+     *rand* (uniform random), in which the points are selected at random
+     within the bounds of the parameters
+
+:ref:`fit-amoeba` uses ``--radius`` to initialize its simplex.
+:ref:`fit-de` uses a random number from the prior distribution for the
+parameter, if any.
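+
+For illustration only, here is a minimal numpy sketch of the latin hypercube
+idea described above.  It is not the bumps implementation, and the function
+name and example bounds are made up::
+
+    import numpy as np
+
+    def lhs_init(pop_size, bounds, rng=None):
+        """One point per stratum in each dimension (illustrative sketch)."""
+        rng = np.random.default_rng() if rng is None else rng
+        pop = np.empty((pop_size, len(bounds)))
+        for j, (lo, hi) in enumerate(bounds):
+            # chop [lo, hi] into pop_size equal chunks, draw one value per chunk
+            edges = np.linspace(lo, hi, pop_size + 1)
+            samples = rng.uniform(edges[:-1], edges[1:])
+            # shuffle so the chunks are not aligned across dimensions
+            pop[:, j] = rng.permutation(samples)
+        return pop
+
+    # e.g. 10 individuals for parameters bounded on [0, 1] and [2, 5]
+    print(lhs_init(10, [(0, 1), (2, 5)]))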
+
+
+
+.. _option-burn:
+
+``--burn``
+----------
+
+*Burn-in Steps* is the number of iterations required for the Markov
+chain to converge to the equilibrium distribution.  If the fit ends
+early, the tail of the burn will be saved to the start of the steps.
+:ref:`fit-dream` uses burn plus steps as the total number of iterations to run.
+
+
+
+.. _option-thin:
+
+``--thin``
+----------
+
+*Thinning* is used by the Markov chain analysis to give samples time to
+wander to different points in parameter space.  In an ideal chain, there
+would be no correlation between points in the chain other than that which
+is dictated by the equilibrium distribution.  However, if the space has
+complicated boundaries and taking a step can easily lead to a highly
+improbable point, then the chain may be stuck at the same value for
+long periods of time.  If this is observed, then thinning can be used to
+only keep every $n^\text{th}$ step, giving the saved chain a better opportunity
+for good mixing.
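+
+For example (the numbers are illustrative), a DREAM run that discards the
+first 1000 generations as burn-in and keeps every tenth sample afterwards::
+
+    bumps model.py --fit=dream --burn=1000 --steps=500 --thin=10 --store=T1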
+
+
+.. _option-CR:
+
+``--CR``
+--------
+
+*Crossover ratio* indicates the proportion of mixing which occurs with
+each iteration.  This is a value in [0,1] giving the probability that
+each individual dimension will be selected for update in the next generation.
+
+.. _option-F:
+
+``--F``
+-------
+
+*Scale* is a factor applied to the difference vector before adding it to
+the parent in differential evolution.
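+
+To make the roles of ``--F`` and ``--CR`` concrete, here is a rough numpy
+sketch of one differential-evolution style update.  It follows the textbook
+DE/rand/1/bin scheme; it is not the bumps implementation and the function
+name is made up::
+
+    import numpy as np
+
+    def de_step(pop, F=0.5, CR=0.9, rng=None):
+        """Propose one trial population from *pop* (illustrative sketch)."""
+        rng = np.random.default_rng() if rng is None else rng
+        npop, ndim = pop.shape
+        trial = pop.copy()
+        for i in range(npop):
+            # pick three partners (for brevity they may include i itself)
+            a, b, c = rng.choice(npop, size=3, replace=False)
+            mutant = pop[a] + F * (pop[b] - pop[c])  # scaled difference vector
+            mask = rng.random(ndim) < CR             # each dimension mixes with prob. CR
+            mask[rng.integers(ndim)] = True          # at least one dimension changes
+            trial[i] = np.where(mask, mutant, pop[i])
+        return trial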
+
+
+.. _option-radius:
+
+``--radius``
+------------
+
+*Simplex radius* is the radius of the initial simplex in :ref:`fit-amoeba`.
+
+
+.. _option-nT:
+
+``--nT``
+--------
+
+*# Temperatures*  is the number of temperature chains to run using parallel
+tempering.  Default is 25.
+
+.. _option-Tmin:
+
+``--Tmin``
+----------
+
+*Min temperature* is the minimum temperature in the log-spaced series of
+temperatures to run using parallel tempering.  Default is 0.1.
+
+.. _option-Tmax:
+
+``--Tmax``
+----------
+
+*Max temperature* is the maximum temperature in the log-spaced series of
+temperatures to run using parallel tempering.  Default is 10.
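+
+For example (file names are illustrative), a parallel tempering run with the
+defaults written out explicitly::
+
+    bumps model.py --fit=pt --nT=25 --Tmin=0.1 --Tmax=10 --store=T1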
+
+.. _option-starts:
+
+``--starts``
+------------
+
+*Starts* is the number of times to run the fit from random starting points.
+
+.. _option-keep-best:
+
+``--keep_best``
+---------------
+
+If *Keep best* is set, then each subsequent restart of the multi-start
+fitter keeps the best value from the previous fit(s).
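+
+For example (values and file names are illustrative), ten amoeba fits where
+each restart begins from the best point found so far::
+
+    bumps model.py --fit=amoeba --starts=10 --keep_best --store=T1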
+
+
+
+Execution Controls
+==================
+
+.. _option-store:
+
+``--store``
+-----------
+
+Directory in which to store the results of the fit.  Fits produce multiple
+files and plots.  Rather than cluttering up the current directory, all the
+outputs are written to the store directory along with a copy of the model
+file.
+
+
+
+.. _option-overwrite:
+
+``--overwrite``
+---------------
+
+If the store directory already exists then you need to include ``--overwrite``
+on the command line to reuse it.  While inconvenient, this prevents accidental
+overwriting of fits that may have taken hours to generate.
+
+
+.. _option-resume:
+
+``--resume``
+------------
+
+Continue fit from a previous store directory.
+
+.. _option-parallel:
+
+``--parallel``
+--------------
+
+Run fit using multiprocessing for parallelism.
+
+.. _option-mpi:
+
+``--mpi``
+---------
+
+Run the fit using MPI for parallelism (launch with ``mpirun -n <cpus> ...``).
+
+.. _option-batch:
+
+``--batch``
+-----------
+
+Run fit in batch mode.  Progress updates are sent to *STORE/MODEL.mon*, and
+can be monitored using *tail -f* (unix, mac).  When the fit is complete, the
+plot png files are created as usual, but the interactive plots are not shown.
+This allows you to set up a sequence of runs in a shell script where the
+first run completes before the next run starts.  Batch is also useful for
+cluster computing where the cluster nodes do not have access to the outside
+network and can't display an interactive window.  Batch is automatic
+when running with ``--mpi``.
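+
+For example, a cluster job script might contain something like the following
+(the process count and file names are illustrative; batch mode is implied by
+``--mpi``)::
+
+    mpirun -n 8 bumps model.py --fit=dream --mpi --store=T1 --overwrite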
+
+.. _option-stepmon:
+
+``--stepmon``
+-------------
+
+Create a log file tracking each point examined during the fit.  This does
+not provide any real utility except for generating plots of the population
+over time, which can be useful for understanding the different fitting
+methods.
+
+
+
+
+Output Controls
+===============
+
+
+.. _option-cov:
+
+``--cov``
+---------
+
+Compute the covariance matrix for the model at the minimum.
+
+
+.. _option-entropy:
+
+``--entropy``
+-------------
+
+*Calculate entropy* is a flag which indicates whether entropy should be
+computed for the final fit.  Entropy is an estimate of the number of bits of
+information available from the fit.
+
+
+.. _option-plot:
+
+``--plot``
+----------
+
+For problems that have different view options for plotting, select the default
+option to display.  For example, when fitting a power law to a dataset, you
+may want to choose *log* or *linear* as the output plot type.
+
+
+
+Bumps Controls
+==============
+
+.. _option-preview:
+
+``--preview``
+-------------
+
+If the command contains *preview* then display the model but do not perform
+a fitting operation.  Use this to see the initial model before running a fit.
+It will also show the fit range.
+
+.. _option-chisq:
+
+``--chisq``
+-----------
+
+If the command contains *chisq* then show $\chi^2$ and exit.  Use this to
+check that the model does not have any syntax errors.
+
+.. _option-edit:
+
+``--edit``
+----------
+
+If the command contains *edit* then start the Bumps user interface so that
+you can interact with the model, adjusting fitted parameters with a slider
+and seeing how they impact the result.
+
+.. _option-resynth:
+
+``--resynth``
+-------------
+
+Run a resynth uncertainty analysis on the model.  After finding a good
+minimum, you can rerun bumps with::
+
+     bumps --store=T1 --pars=T1/model.par --fit=amoeba --resynth=20 model.py
+
+This will generate 20 simulated datasets using the initial data
+values as the mean and the data uncertainty as the standard deviation.
+Each of these datasets will be fit with the specified optimizer, and the
+resulting parameters saved in *T1/model.rsy*.  On completion, the parameter
+values can be loaded into python and averaged or histogrammed.
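+
+For example, assuming *T1/model.rsy* is a plain whitespace-delimited table
+with one row per resynth fit and one column per parameter (this layout is an
+assumption; check the file before relying on it), the results could be
+summarized with::
+
+    import numpy as np
+    pars = np.loadtxt("T1/model.rsy")  # assumes purely numeric columns
+    print(pars.mean(axis=0))           # average of each parameter
+    print(pars.std(axis=0))            # spread of each parameter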
+
+.. _option-help:
+
+``--help``
+----------
+
+Use *?*, *h* or *help* to show a brief description of each command line option.
+
+
+.. _option-python:
+
+``--python``
+------------
+
+The bumps program can be used as a python interpreter with numpy, scipy,
+matplotlib and bumps included.  This is useful if you do not have python
+set up on your system, and you are using a bundled executable like Bumps
+or Refl1D on windows.  Even if you have python, you may want to run the
+bumps post-analysis scripts through the bumps command, which already has
+the path to bumps set up on your system.
+
+The options are:
+
+    *i* run an interactive interpreter
+
+    *m* run a module as main, much like *python -m module.path*
+
+    *c* run a python command and quit
+
+    *p* run a python script
+
+
+.. _option-timer:
+
+``--timer``
+-----------
+
+Run the model ``--steps`` times and find the average run time per step.
+If ``--parallel`` is used, then the models will be run in parallel.
+
+
+.. _option-profiler:
+
+``--profiler``
+--------------
+
+Run the model ``--steps`` times using the python profiler.  This can
+be useful for identifying slow parts of your model definition, or
+alternatively, finding out that the model runtime is smaller than the
+Bumps overhead.  Use a larger value of steps for better statistics.
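+
+For example (the step count is illustrative), to time and then profile a
+model over 100 evaluations::
+
+    bumps model.py --timer --steps=100
+    bumps model.py --profiler --steps=100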
+
diff --git a/doc/guide/parameter.rst b/doc/guide/parameter.rst
new file mode 100644
index 0000000..2142a05
--- /dev/null
+++ b/doc/guide/parameter.rst
@@ -0,0 +1,180 @@
+.. py:currentmodule:: bumps.parameter
+
+.. _parameter-guide:
+
+**********
+Parameters
+**********
+
+.. contents:: :local:
+
+Bumps fitting is centered on :class:`Parameter` objects.  Parameters define
+the search space, the uncertainty analysis and even the user interface.
+Constraints within and between models are implemented through parameters.
+Prior probabilities are defined for parameters.
+
+Model classes for Bumps should make it easy to define the initial
+value of fitting parameters and tie parameters together.  When creating
+a model, you should be able to specify *parameter=value* for each of the
+model parameters.  Later, you should be able to reference the parameter
+within the model using *M.parameter*.  Parameters can also be tied together
+by assigning the same *Parameter* object to two different parameters.
+For example, a hollow cylinder can be created using::
+
+    solvent = Parameter("solvent", value=1.2)
+    shell = Parameter("shell", value=4.5)
+    M = CoreShellCylinder(core=solvent, shell=shell, solvent=solvent,
+                          radius=95, thickness=10, length=100)
+
+The model parameter can also be a derived value that is the result of
+a parameter expression.  For example, the following creates a cylinder
+whose length is twice the radius::
+
+     radius = Parameter("radius", value=3)
+     M = Cylinder(radius=radius, length=2*radius)
+
+Any time you ask for *M.length.value* it will compute the result as
+*2\*radius.value* and return that.
+
+You can also tie parameters together after the fact.  For example, you
+can create the constrained cylinder using::
+
+    M = Cylinder(radius=3, length=6)
+    M.length = 2*M.radius
+
+The advantage of this method is that you can easily comment out the
+constraint when exploring the model space, and fit *length* and *radius*
+freely.
+
+Once you have defined your models and constraints you can set up
+your fitting parameters.  There are several parameter methods which
+are helpful:
+
+- :meth:`range <Parameter.range>` forces the parameter to lie within
+  a fixed range.  The parameter value can take on any value within
+  the range with equal probability, and has zero probability outside
+  the range.
+- :meth:`pm <Parameter.pm>` is a convenient way to set up a range
+  based on the initial value of the parameter.  For example, *M.thickness.pm(10)*
+  will allow the thickness parameter to vary by plus or minus 10.  You
+  can do asymmetric ranges by calling *pm* with plus and minus values,
+  such as *M.thickness.pm(-3,2)*.  The actual range gets set to a
+  :func:`nice_range <bumps.bounds.nice_range>` that includes the bounds.
+- :meth:`pmp <Parameter.pmp>` is like *pm* but the range is specified as
+  a percent.  For example, to let thickness vary by 10%, use
+  *M.thickness.pmp(10)*.  Again, a *nice_range* is used.
+- :meth:`dev <Parameter.dev>` sets up a parameter whose prior probability
+  is not equal across its range, but instead follows a normal distribution.
+  If, for example, you have measured the thickness to be $32.1 \pm 0.6$
+  by some other technique, you can use this information to constrain your
+  model by initializing *thickness* to 32.1 and setting
+  *M.thickness.dev(0.6)* as a fitting constraint.  The *dev* method also
+  accepts absolute limits, creating a truncated normal distribution.  You
+  can set the central value *mu* as well, but you probably want to do this
+  in the model initialization so that you are free to turn fitting of the
+  parameter on and off by commenting out the *dev* line.
+- :meth:`soft_range <Parameter.soft_range>` is a combination of *range*
+  and *dev* in that the parameter has equal probability within [*low*,*high*]
+  but Gaussian probability of width *std* as it strays outside of the range.
+- :meth:`pdf <Parameter.pdf>` is like *dev* but works with any continuous
+  `scipy.stats distribution <http://docs.scipy.org/doc/scipy/reference/stats.html>`_.
+
+All these methods set the *bounds* attribute on the parameter in one way
+or another.  See :mod:`bumps.bounds` for details.  Technically, setting
+the parameter to *dev*, *soft_range* or *pdf* is equivalent to creating
+a probability distribution model with a single data point and
+:meth:`Fitness.nllf <bumps.fitproblem.Fitness.nllf>` equal to the negative
+log likelihood of seeing the parameter value in the distribution.  This
+*PDF* model would be fit simultaneously with your target model with the
+parameter shared between them.  The result is statistically sound (it is
+just more prior information), and conveniently, it does not affect the
+number of degrees of freedom in the fit.
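+
+As a short sketch of how these methods are typically combined (the model *M*,
+its parameter names and the numbers are all placeholders)::
+
+    M.thickness.range(0, 200)  # uniform prior on [0, 200]
+    M.radius.pm(5)             # initial value plus or minus 5, widened to a nice range
+    M.length.pmp(10)           # initial value plus or minus 10%
+    M.rho.dev(0.05)            # gaussian prior of width 0.05 about the initial value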
+
+When defining new model classes, use the static method
+:meth:`Parameter.default` to initialize the parameter.  This will
+accept the input argument passed in by the user and depending on its
+type, either create a new parameter slot and set its initial value,
+or link the slot to another parameter.
+
+
+.. _freevariables:
+
+Free Variables
+==============
+
+When fitting multiple datasets, you will undoubtedly have models with
+many shared parameters, and some parameters that differ between the models.
+Common patterns include:
+
+- different measurements may use the same material but different contrast agents,
+- they may use the same contrast agent but different materials,
+- the same material and contrast, but different sizes, or
+- a cross product with several materials and several sizes.
+
+Often with complex models the parameter of interest is buried within the
+model structure.  One approach is to clone the models using a deep copy of
+the entire structure, then tie together parameters for the bits
+that are changing.  This proves to be confusing and difficult for new python
+programmers, so instead :func:`FitProblem <bumps.fitproblem.FitProblem>` was
+extended to support :class:`FreeVariables`.  The FreeVariables class allows
+you to use the same model structure with different data sets, but have
+some parameters that vary between the models.  Each varying parameter
+is a slot, and FreeVariables keeps an array of parameters
+(actually a :class:`ParameterSet`) to fill that slot, one for each model.
+
+
+To define the free variables, you need the names of the different
+models, a parameter slot to hold the values, and a list of the
+different parameter values for each model.  You then define the
+free variables as follows::
+
+    free = FreeVariables(names=["model1", "model2", ...],
+                     p1=model.p1, p2=model.p2, ...)
+    ...
+    problem = FitProblem(experiments, freevars=free)
+
+The slots can be referenced by name, and the parameters within a slot
+can be referenced by model index.  In the above, *free.p1[1]* refers to
+the parameter *p1* when fitting the second model.  You can also refer to the
+parameters by model name, such as *free.p1["model2"]*.  The parameters in the
+slots have the usual properties of parameters, such as values and
+fit ranges.  Setting the fit range makes the parameter a fitted parameter,
+and the fit will give the uncertainty on each parameter independently.
+Parameters can be copied, so that a pair of models can share the same value.
+
+The following example shows a neutron scattering problem with two datasets,
+one measured with light water and the other with heavy water.  The two models
+can share the same material object, but use the light water scattering
+factors in the first and the heavy water scattering factors in the
+second.  The problem would be composed as follows::
+
+    material = SLD('silicon', rho=2.07)
+    solvent = SLD('solvent') # unspecified rho
+    model = Sphere(radius=10, material=material, solvent=solvent)
+    M1 = ScatteringFitness(model, hydrogenated_data)
+    M2 = ScatteringFitness(model, deuterated_data)
+    free = FreeVariables(names=['hydrogenated', 'deuterated'],
+                         solvent=solvent.sld)
+    free.solvent.values = [-0.561, 6.402]
+    model.radius.range(1,35)
+    problem = FitProblem([M1, M2], freevars=free)
+
+In this particular example, the solvent is fixed for each measurement, and
+the sphere radius is allowed to vary between 1 and 35.  Since the radius
+is not a free variable, the fitted radius will be chosen such that it minimizes
+the combined fitness of both models.  In a more complicated situation, we may
+not know either the sphere radius or the solvent densities, but the radius
+would still be shared between the two models.  In this case we could set::
+
+    free.solvent.range(-1,7)
+
+and the SLD of the solvent would be fitted independently in the two data sets.
+Notice that we did not refer to the individual model index when setting the
+range.  This is a convenience---range, pm and pmp can be set on the entire
+set as above, or individually using, e.g.,
+
+::
+
+    fv.solvent[0].range(-1,0)
+    fv.solvent[1].range(6,7)
+
diff --git a/doc/guide/var.png b/doc/guide/var.png
new file mode 100644
index 0000000..07544ff
Binary files /dev/null and b/doc/guide/var.png differ
diff --git a/doc/index.rst b/doc/index.rst
new file mode 100644
index 0000000..0e6a226
--- /dev/null
+++ b/doc/index.rst
@@ -0,0 +1,19 @@
+.. htmlonly::
+
+   :Release: |version|
+   :Date:    |today|
+
+   :ref:`Index <genindex>` :ref:`Search <search>`
+
+Contents
+========
+
+.. toctree::
+   :maxdepth: 2
+   :includehidden:
+
+   getting_started/index.rst
+   tutorial/index.rst
+   guide/index.rst
+   api/index.rst
+   dream/index.rst
diff --git a/doc/make.bat b/doc/make.bat
new file mode 100644
index 0000000..19bba9e
--- /dev/null
+++ b/doc/make.bat
@@ -0,0 +1,155 @@
+ at ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+	:help
+	echo.Please use `make ^<target^>` where ^<target^> is one of
+	echo.  html       to make standalone HTML files
+	echo.  dirhtml    to make HTML files named index.html in directories
+	echo.  singlehtml to make a single large HTML file
+	echo.  pickle     to make pickle files
+	echo.  json       to make JSON files
+	echo.  htmlhelp   to make HTML files and a HTML help project
+	echo.  qthelp     to make HTML files and a qthelp project
+	echo.  devhelp    to make HTML files and a Devhelp project
+	echo.  epub       to make an epub
+	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+	echo.  text       to make text files
+	echo.  man        to make manual pages
+	echo.  changes    to make an overview over all changed/added/deprecated items
+	echo.  linkcheck  to check all external links for integrity
+	echo.  doctest    to run all doctests embedded in the documentation if enabled
+	goto end
+)
+
+if "%1" == "clean" (
+	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+	del /q /s %BUILDDIR%\*
+	goto end
+)
+
+if "%1" == "html" (
+	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+	goto end
+)
+
+if "%1" == "dirhtml" (
+	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+	goto end
+)
+
+if "%1" == "singlehtml" (
+	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+	goto end
+)
+
+if "%1" == "pickle" (
+	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+	echo.
+	echo.Build finished; now you can process the pickle files.
+	goto end
+)
+
+if "%1" == "json" (
+	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+	echo.
+	echo.Build finished; now you can process the JSON files.
+	goto end
+)
+
+if "%1" == "htmlhelp" (
+	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+	echo.
+	echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+	goto end
+)
+
+if "%1" == "qthelp" (
+	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+	echo.
+	echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\bumpsdocumentation.qhcp
+	echo.To view the help file:
+	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\bumpsdocumentation.qhc
+	goto end
+)
+
+if "%1" == "devhelp" (
+	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+	echo.
+	echo.Build finished.
+	goto end
+)
+
+if "%1" == "epub" (
+	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+	echo.
+	echo.Build finished. The epub file is in %BUILDDIR%/epub.
+	goto end
+)
+
+if "%1" == "latex" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	echo.
+	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "text" (
+	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+	echo.
+	echo.Build finished. The text files are in %BUILDDIR%/text.
+	goto end
+)
+
+if "%1" == "man" (
+	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+	echo.
+	echo.Build finished. The manual pages are in %BUILDDIR%/man.
+	goto end
+)
+
+if "%1" == "changes" (
+	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+	echo.
+	echo.The overview file is in %BUILDDIR%/changes.
+	goto end
+)
+
+if "%1" == "linkcheck" (
+	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+	echo.
+	echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+	goto end
+)
+
+if "%1" == "doctest" (
+	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+	echo.
+	echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+	goto end
+)
+
+:end
diff --git a/doc/pylit.py b/doc/pylit.py
new file mode 100755
index 0000000..573f595
--- /dev/null
+++ b/doc/pylit.py
@@ -0,0 +1,1869 @@
+#!/usr/bin/env python
+# -*- coding: iso-8859-1 -*-
+
+# pylit.py
+# ********
+# Literate programming with reStructuredText
+# ++++++++++++++++++++++++++++++++++++++++++
+#
+# :Date:      $Date$
+# :Revision:  $Revision$
+# :URL:       $URL$
+# :Copyright: © 2005, 2007 Günter Milde.
+#             Released without warranty under the terms of the
+#             GNU General Public License (v. 2 or later)
+#
+# ::
+
+"""pylit: bidirectional text <-> code converter
+
+Convert between a *text source* with embedded computer code and a *code source*
+with embedded documentation.
+"""
+
+# .. contents::
+#
+# Frontmatter
+# ===========
+#
+# Changelog
+# ---------
+#
+# .. class:: borderless
+#
+# ======  ==========  ===========================================================
+# 0.1     2005-06-29  Initial version.
+# 0.1.1   2005-06-30  First literate version.
+# 0.1.2   2005-07-01  Object orientated script using generators.
+# 0.1.3   2005-07-10  Two state machine (later added 'header' state).
+# 0.2b    2006-12-04  Start of work on version 0.2 (code restructuring).
+# 0.2     2007-01-23  Published at http://pylit.berlios.de.
+# 0.2.1   2007-01-25  Outsourced non-core documentation to the PyLit pages.
+# 0.2.2   2007-01-26  New behaviour of `diff` function.
+# 0.2.3   2007-01-29  New `header` methods after suggestion by Riccardo Murri.
+# 0.2.4   2007-01-31  Raise Error if code indent is too small.
+# 0.2.5   2007-02-05  New command line option --comment-string.
+# 0.2.6   2007-02-09  Add section with open questions,
+#                     Code2Text: let only blank lines (no comment str)
+#                     separate text and code,
+#                     fix `Code2Text.header`.
+# 0.2.7   2007-02-19  Simplify `Code2Text.header`,
+#                     new `iter_strip` method replacing a lot of ``if``-s.
+# 0.2.8   2007-02-22  Set `mtime` of outfile to the one of infile.
+# 0.3     2007-02-27  New `Code2Text` converter after an idea by Riccardo Murri,
+#                     explicit `option_defaults` dict for easier customisation.
+# 0.3.1   2007-03-02  Expand hard-tabs to prevent errors in indentation,
+#                     `Text2Code` now also works on blocks,
+#                     removed dependency on SimpleStates module.
+# 0.3.2   2007-03-06  Bug fix: do not set `language` in `option_defaults`
+#                     renamed `code_languages` to `languages`.
+# 0.3.3   2007-03-16  New language css,
+#                     option_defaults -> defaults = optparse.Values(),
+#                     simpler PylitOptions: don't store parsed values,
+#                     don't parse at initialisation,
+#                     OptionValues: return `None` for non-existing attributes,
+#                     removed -infile and -outfile, use positional arguments.
+# 0.3.4   2007-03-19  Documentation update,
+#                     separate `execute` function.
+#         2007-03-21  Code cleanup in `Text2Code.__iter__`.
+# 0.3.5   2007-03-23  Removed "css" from known languages after learning that
+#                     there is no C++ style "// " comment string in CSS2.
+# 0.3.6   2007-04-24  Documentation update.
+# 0.4     2007-05-18  Implement Converter.__iter__ as stack of iterator
+#                     generators. Iterating over a converter instance now
+#                     yields lines instead of blocks.
+#                     Provide "hooks" for pre- and postprocessing filters.
+#                     Rename states to reduce confusion with formats:
+#                     "text" -> "documentation", "code" -> "code_block".
+# 0.4.1   2007-05-22  Converter.__iter__: cleanup and reorganisation,
+#                     rename parent class Converter -> TextCodeConverter.
+# 0.4.2   2007-05-23  Merged Text2Code.converter and Code2Text.converter into
+#                     TextCodeConverter.converter.
+# 0.4.3   2007-05-30  Replaced use of defaults.code_extensions with
+#                     values.languages.keys().
+#                     Removed spurious `print` statement in code_block_handler.
+#                     Added basic support for 'c' and 'css' languages
+#                     with `dumb_c_preprocessor`_ and `dumb_c_postprocessor`_.
+# 0.5     2007-06-06  Moved `collect_blocks`_ out of `TextCodeConverter`_,
+#                     bug fix: collect all trailing blank lines into a block.
+#                     Expand tabs with `expandtabs_filter`_.
+# 0.6     2007-06-20  Configurable code-block marker (default ``::``)
+# 0.6.1   2007-06-28  Bug fix: reset self.code_block_marker_missing.
+# 0.7     2007-12-12  prepending an empty string to sys.path in run_doctest()
+#                     to allow imports from the current working dir.
+# 0.7.1   2008-01-07  If outfile does not exist, do a round-trip conversion
+#                     and report differences (as with outfile=='-').
+# 0.7.2   2008-01-28  Do not add missing code-block separators with
+#                     `doctest_run` on the code source. Keeps lines consistent.
+# 0.7.3   2008-04-07  Use value of code_block_marker for insertion of missing
+#                     transition marker in Code2Text.code_block_handler
+#                     Add "shell" to defaults.languages
+# 0.7.4   2008-06-23  Add "latex" to defaults.languages
+# 0.7.5   2009-05-14  Bugfix: ignore blank lines in test for end of code block
+# 0.7.6   2009-12-15  language-dependent code-block markers (after a
+#                     `feature request and patch by jrioux`_),
+#                     use DefaultDict for language-dependent defaults,
+#                     new defaults setting `add_missing_marker`_.
+# 0.7.7   2010-06-23  New command line option --codeindent.
+# 0.7.8   2011-03-30  bugfix: do not overwrite custom `add_missing_marker` value,
+#                     allow directive options following the 'code' directive.
+# 0.7.9   2011-04-05  Decode doctest string if 'magic comment' gives encoding.
+# pak     2013-12-18  2to3
+# ======  ==========  ===========================================================
+#
+# ::
+
+from __future__ import print_function
+
+_version = "0.7.9"
+
+__docformat__ = 'restructuredtext'
+
+
+# Introduction
+# ------------
+#
+# PyLit is a bidirectional converter between two formats of a computer
+# program source:
+#
+# * a (reStructured) text document with program code embedded in
+#   *code blocks*, and
+# * a compilable (or executable) code source with *documentation*
+#   embedded in comment blocks
+#
+#
+# Requirements
+# ------------
+#
+# ::
+
+import os, sys
+import re, optparse
+
+
+# DefaultDict
+# ~~~~~~~~~~~
+# As `collections.defaultdict` was only introduced in Python 2.5, we
+# define a simplified version of the dictionary with a default value from
+# http://code.activestate.com/recipes/389639/
+# ::
+
+class DefaultDict(dict):
+    """Minimalistic Dictionary with default value."""
+    def __init__(self, default=None, *args, **kwargs):
+        self.update(dict(*args, **kwargs))
+        self.default = default
+
+    def __getitem__(self, key):
+        return self.get(key, self.default)
+
+
+# Defaults
+# ========
+#
+# The `defaults` object provides a central repository for default
+# values and their customisation. ::
+
+defaults = optparse.Values()
+
+# It is used for
+#
+# * the initialisation of data arguments in TextCodeConverter_ and
+#   PylitOptions_
+#
+# * completion of command line options in `PylitOptions.complete_values`_.
+#
+# This allows the easy creation of back-ends that customise the
+# defaults and then call `main`_ e.g.:
+#
+# >>> import pylit
+# >>> pylit.defaults.comment_string = "## "
+# >>> pylit.defaults.codeindent = 4
+# >>> pylit.main()
+#
+# The following default values are defined in pylit.py:
+#
+# languages
+# ---------
+#
+# Mapping of code file extensions to code language::
+
+defaults.languages  = DefaultDict("python", # fallback language
+                                  {".c":   "c",
+                                   ".cc":  "c++",
+                                   ".css": "css",
+                                   ".py":  "python",
+                                   ".sh":  "shell",
+                                   ".sl":  "slang",
+                                   ".sty": "latex",
+                                   ".tex": "latex"
+                                  })
+
+# Will be overridden by the ``--language`` command line option.
+#
+# The first argument is the fallback language, used if there is no
+# matching extension (e.g. if pylit is used as filter) and no
+# ``--language`` is specified. It can be changed programmatically by
+# assignment to the ``.default`` attribute, e.g.
+#
+# >>> defaults.languages.default='c++'
+#
+#
+# .. _text_extension:
+#
+# text_extensions
+# ---------------
+#
+# List of known extensions of (reStructured) text files. The first
+# extension in this list is used by the `_get_outfile_name`_ method to
+# generate a text output filename::
+
+defaults.text_extensions = [".txt", ".rst"]
+
+
+# comment_strings
+# ---------------
+#
+# Comment strings for known languages. Used in Code2Text_ to recognise
+# text blocks and in Text2Code_ to format text blocks as comments.
+# Defaults to ``'# '``.
+#
+# **Comment strings include trailing whitespace.** ::
+
+defaults.comment_strings = DefaultDict('# ',
+                                       {"css":    '// ',
+                                        "c":      '// ',
+                                        "c++":    '// ',
+                                        "latex":  '% ',
+                                        "python": '# ',
+                                        "shell":  '# ',
+                                        "slang":  '% '
+                                       })
+
+
+# header_string
+# -------------
+#
+# Marker string for a header code block in the text source. No trailing
+# whitespace needed as indented code follows.
+# Must be a valid rst directive that accepts code on the same line, e.g.
+# ``'..admonition::'``.
+#
+# Default is a comment marker::
+
+defaults.header_string = '..'
+
+
+# .. _code_block_marker:
+#
+# code_block_markers
+# ------------------
+#
+# Markup at the end of a documentation block.
+# Default is Docutils' marker for a `literal block`_::
+
+defaults.code_block_markers = DefaultDict('::')
+
+# The `code_block_marker` string is `inserted into a regular expression`_.
+# Language-specific markers can be defined programmatically, e.g. in a
+# wrapper script.
+#
+# In a document where code examples are only one of several uses of
+# literal blocks, it is more appropriate to single out the source code,
+# e.g. with the double colon on a separate line ("expanded form")
+#
+#   ``defaults.code_block_marker.default = ':: *'``
+#
+# or a dedicated ``.. code-block::`` directive [#]_
+#
+#   ``defaults.code_block_marker['c++'] = '.. code-block:: *c++'``
+#
+# The latter form also allows code in different languages kept together
+# in one literate source file.
+#
+# .. [#] The ``.. code-block::`` directive is not (yet) supported by
+#    standard Docutils.  It is provided by several add-ons, including
+#    the `code-block directive`_ project in the Docutils Sandbox and
+#    Sphinx_.
+#
+#
+# strip
+# -----
+#
+# Export to the output format stripping documentation or code blocks::
+
+defaults.strip = False
+
+# strip_marker
+# ------------
+#
+# Strip literal marker from the end of documentation blocks when
+# converting to code format. Makes the code more concise but loses the
+# synchronisation of line numbers in text and code formats. Can also be used
+# (together with the auto-completion of the code-text conversion) to change
+# the `code_block_marker`::
+
+defaults.strip_marker = False
+
+# add_missing_marker
+# ------------------
+#
+# When converting from code format to text format, add a `code_block_marker`
+# at the end of documentation blocks if it is missing::
+
+defaults.add_missing_marker = True
+
+# Keep this at ``True``, if you want to re-convert to code format later!
+#
+#
+# .. _defaults.preprocessors:
+#
+# preprocessors
+# -------------
+#
+# Preprocess the data with language-specific filters_
+# Set below in Filters_::
+
+defaults.preprocessors = {}
+
+# .. _defaults.postprocessors:
+#
+# postprocessors
+# --------------
+#
+# Postprocess the data with language-specific filters_::
+
+defaults.postprocessors = {}
+
+# .. _defaults.codeindent:
+#
+# codeindent
+# ----------
+#
+# Number of spaces to indent code blocks in `Code2Text.code_block_handler`_::
+
+defaults.codeindent =  2
+
+# In `Text2Code.code_block_handler`_, the codeindent is determined by the
+# first recognised code line (header or first indented literal block
+# of the text source).
+#
+# overwrite
+# ---------
+#
+# What to do if the outfile already exists? (ignored if `outfile` == '-')::
+
+defaults.overwrite = 'update'
+
+# Recognised values:
+#
+#  :'yes':    overwrite an already existing `outfile`,
+#  :'update': fail if the `outfile` is newer than `infile`,
+#  :'no':     fail if `outfile` exists.
+#
+#
+# Extensions
+# ==========
+#
+# Try to import optional extensions::
+
+try:
+    import pylit_elisp
+except ImportError:
+    pass
+
+
+# Converter Classes
+# =================
+#
+# The converter classes implement a simple state machine to separate and
+# transform documentation and code blocks. For this task, only a very limited
+# parsing is needed. PyLit's parser assumes:
+#
+# * `indented literal blocks`_ in a text source are code blocks.
+#
+# * comment blocks in a code source where every line starts with a matching
+#   comment string are documentation blocks.
+#
+# TextCodeConverter
+# -----------------
+# ::
+
+class TextCodeConverter(object):
+    """Parent class for the converters `Text2Code` and `Code2Text`.
+    """
+
+# The parent class defines data attributes and functions used in both
+# `Text2Code`_ converting a text source to executable code source, and
+# `Code2Text`_ converting commented code to a text source.
+#
+# Data attributes
+# ~~~~~~~~~~~~~~~
+#
+# Class default values are fetched from the `defaults`_ object and can be
+# overridden by matching keyword arguments during class instantiation. This
+# also works with keyword arguments to `get_converter`_ and `main`_, as these
+# functions pass on unused keyword args to the instantiation of a converter
+# class. ::
+
+    language = defaults.languages.default
+    comment_strings = defaults.comment_strings
+    comment_string = "" # set in __init__ (if empty)
+    codeindent =  defaults.codeindent
+    header_string = defaults.header_string
+    code_block_markers = defaults.code_block_markers
+    code_block_marker = "" # set in __init__ (if empty)
+    strip = defaults.strip
+    strip_marker = defaults.strip_marker
+    add_missing_marker = defaults.add_missing_marker
+    directive_option_regexp = re.compile(r' +:(\w|[-._+:])+:( |$)')
+    state = "" # type of current block, see `TextCodeConverter.convert`_
+
+# Interface methods
+# ~~~~~~~~~~~~~~~~~
+#
+# .. _TextCodeConverter.__init__:
+#
+# __init__
+# """"""""
+#
+# Initialising sets the `data` attribute, an iterable object yielding lines of
+# the source to convert. [#]_
+#
+# .. [#] The most common choice of data is a `file` object with the text
+#        or code source.
+#
+#        To convert a string into a suitable object, use its splitlines method
+#        like ``"2 lines\nof source".splitlines(True)``.
+#
+#
+# Additional keyword arguments are stored as instance variables,
+# overwriting the class defaults::
+
+    def __init__(self, data, **keyw):
+        """data   --  iterable data object
+                      (list, file, generator, string, ...)
+           **keyw --  remaining keyword arguments are
+                      stored as data-attributes
+        """
+        self.data = data
+        self.__dict__.update(keyw)
+
+# If empty, `code_block_marker` and `comment_string` are set according
+# to the `language`::
+
+        if not self.code_block_marker:
+            self.code_block_marker = self.code_block_markers[self.language]
+        if not self.comment_string:
+            self.comment_string = self.comment_strings[self.language]
+        self.stripped_comment_string = self.comment_string.rstrip()
+
+# Pre- and postprocessing filters are set (with
+# `TextCodeConverter.get_filter`_)::
+
+        self.preprocessor = self.get_filter("preprocessors", self.language)
+        self.postprocessor = self.get_filter("postprocessors", self.language)
+
+# .. _inserted into a regular expression:
+#
+# Finally, a regular_expression for the `code_block_marker` is compiled
+# to find valid cases of `code_block_marker` in a given line and return
+# the groups: ``\1 prefix, \2 code_block_marker, \3 remainder`` ::
+
+        marker = self.code_block_marker
+        if marker == '::':
+            # the default marker may occur at the end of a text line
+            self.marker_regexp = re.compile(r'^( *(?!\.\.).*)(::)([ \n]*)$')
+        else:
+            # marker must be on a separate line
+            self.marker_regexp = re.compile(r'^( *)(%s)(.*\n?)$' % marker)
+
+# .. _TextCodeConverter.__iter__:
+#
+# __iter__
+# """"""""
+#
+# Return an iterator for the instance. Iteration yields lines of converted
+# data.
+#
+# The iterator is a chain of iterators acting on `self.data` that does
+#
+# * preprocessing
+# * text<->code format conversion
+# * postprocessing
+#
+# Pre- and postprocessing are only performed, if filters for the current
+# language are registered in `defaults.preprocessors`_ and|or
+# `defaults.postprocessors`_. The filters must accept an iterable as first
+# argument and yield the processed input data line-wise.
+# ::
+
+    def __iter__(self):
+        """Iterate over input data source and yield converted lines
+        """
+        return self.postprocessor(self.convert(self.preprocessor(self.data)))
+
+
+# .. _TextCodeConverter.__call__:
+#
+# __call__
+# """"""""
+# The special `__call__` method allows the use of class instances as callable
+# objects. It returns the converted data as list of lines::
+
+    def __call__(self):
+        """Iterate over state-machine and return results as list of lines"""
+        return [line for line in self]
+
+
+# .. _TextCodeConverter.__str__:
+#
+# __str__
+# """""""
+# Return converted data as string::
+
+    def __str__(self):
+        return "".join(self())
+
+
+# Helpers and convenience methods
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# .. _TextCodeConverter.convert:
+#
+# convert
+# """""""
+#
+# The `convert` method generates an iterator that does the actual  code <-->
+# text format conversion. The converted data is yielded line-wise and the
+# instance's `status` argument indicates whether the current line is "header",
+# "documentation", or "code_block"::
+
+    def convert(self, lines):
+        """Iterate over lines of a program document and convert
+        between "text" and "code" format
+        """
+
+# Initialise internal data arguments. (Done here, so that every new iteration
+# re-initialises them.)
+#
+# `state`
+#   the "type" of the currently processed block of lines. One of
+#
+#   :"":              initial state: check for header,
+#   :"header":        leading code block: strip `header_string`,
+#   :"documentation": documentation part: comment out,
+#   :"code_block":    literal blocks containing source code: unindent.
+#
+# ::
+
+        self.state = ""
+
+# `_codeindent`
+#   * Do not confuse the internal attribute `_codeindent` with the configurable
+#     `codeindent` (without the leading underscore).
+#   * `_codeindent` is set in `Text2Code.code_block_handler`_ to the indent of
+#     first non-blank "code_block" line and stripped from all "code_block" lines
+#     in the text-to-code conversion,
+#   * `codeindent` is set in `__init__` to `defaults.codeindent`_ and added to
+#     "code_block" lines in the code-to-text conversion.
+#
+# ::
+
+        self._codeindent = 0
+
+# `_textindent`
+#   * set by `Text2Code.documentation_handler`_ to the minimal indent of a
+#     documentation block,
+#   * used in `Text2Code.set_state`_ to find the end of a code block.
+#
+# ::
+
+        self._textindent = 0
+
+# `_add_code_block_marker`
+#   If the last paragraph of a documentation block does not end with a
+#   code_block_marker_, it should be added (otherwise, the back-conversion
+#   fails.).
+#
+#   `_add_code_block_marker` is set by `Code2Text.documentation_handler`_
+#   and evaluated by `Code2Text.code_block_handler`_, because the
+#   documentation_handler does not know whether the next block will be
+#   documentation (with no need for a code_block_marker) or a code block.
+#
+# ::
+
+        self._add_code_block_marker = False
+
+
+
+# Determine the state of the block and convert with the matching "handler"::
+
+        for block in collect_blocks(expandtabs_filter(lines)):
+            self.set_state(block)
+            for line in getattr(self, self.state+"_handler")(block):
+                yield line
+
+
+# .. _TextCodeConverter.get_filter:
+#
+# get_filter
+# """"""""""
+# ::
+
+    def get_filter(self, filter_set, language):
+        """Return language specific filter"""
+        if self.__class__ == Text2Code:
+            key = "text2"+language
+        elif self.__class__ == Code2Text:
+            key = language+"2text"
+        else:
+            key = ""
+        try:
+            return getattr(defaults, filter_set)[key]
+        except (AttributeError, KeyError):
+            # print "there is no %r filter in %r"%(key, filter_set)
+            pass
+        return identity_filter
+
+
+# get_indent
+# """"""""""
+# Return the number of leading spaces in `line`::
+
+    def get_indent(self, line):
+        """Return the indentation of `string`.
+        """
+        return len(line) - len(line.lstrip())
+
+
+# Text2Code
+# ---------
+#
+# The `Text2Code` converter separates *code-blocks* [#]_ from *documentation*.
+# Code blocks are unindented, documentation is commented (or filtered, if the
+# ``strip`` option is True).
+#
+# .. [#] Only `indented literal blocks`_ are considered code-blocks. `quoted
+#        literal blocks`_, `parsed-literal blocks`_, and `doctest blocks`_ are
+#        treated as part of the documentation. This allows the inclusion of
+#        examples:
+#
+#           >>> 23 + 3
+#           26
+#
+#        Mark that there is no double colon before the doctest block in the
+#        text source.
+#
+# The class inherits the interface and helper functions from
+# TextCodeConverter_ and adds functions specific to the text-to-code format
+# conversion::
+
+class Text2Code(TextCodeConverter):
+    """Convert a (reStructured) text source to code source
+    """
+
+# .. _Text2Code.set_state:
+#
+# set_state
+# ~~~~~~~~~
+# ::
+
+    def set_state(self, block):
+        """Determine state of `block`. Set `self.state`
+        """
+
+# `set_state` is used inside an iteration. Hence, if we are out of data, a
+# StopIteration exception should be raised::
+
+        if not block:
+            raise StopIteration
+
+# The new state depends on the active state (from the last block) and
+# features of the current block. It is either "header", "documentation", or
+# "code_block".
+#
+# If the current state is "" (first block), check for
+# the  `header_string` indicating a leading code block::
+
+        if self.state == "":
+            # print "set state for %r"%block
+            if block[0].startswith(self.header_string):
+                self.state = "header"
+            else:
+                self.state = "documentation"
+
+# If the current state is "documentation", the next block is also
+# documentation. The end of a documentation part is detected in the
+# `Text2Code.documentation_handler`_::
+
+        # elif self.state == "documentation":
+        #    self.state = "documentation"
+
+# A "code_block" ends with the first less indented, non-blank line.
+# `_textindent` is set by the documentation handler to the indent of the
+# preceding documentation block::
+
+        elif self.state in ["code_block", "header"]:
+            indents = [self.get_indent(line) for line in block
+                       if line.rstrip()]
+            # print "set_state:", indents, self._textindent
+            if indents and min(indents) <= self._textindent:
+                self.state = 'documentation'
+            else:
+                self.state = 'code_block'
+
+# TODO: (or not to do?) insert blank line before the first line with too-small
+# codeindent using self.ensure_trailing_blank_line(lines, line) (would need
+# split and push-back of the documentation part)?
+#
+# .. _Text2Code.header_handler:
+#
+# header_handler
+# ~~~~~~~~~~~~~~
+#
+# Sometimes code needs to remain on the first line(s) of the document to be
+# valid. The most common example is the "shebang" line that tells a POSIX
+# shell how to process an executable file::
+
+#!/usr/bin/env python
+
+# In Python, the special comment to indicate the encoding, e.g.
+# ``# -*- coding: iso-8859-1 -*-``, must occur before any other comment
+# or code too.
+#
+# If we want to keep the line numbers in sync for text and code source, the
+# reStructured Text markup for these header lines must start at the same line
+# as the first header line. Therefore, header lines could not be marked as
+# literal block (this would require the ``::`` and an empty line above the
+# code_block).
+#
+# OTOH, a comment may start at the same line as the comment marker and it
+# includes subsequent indented lines. Comments are visible in the reStructured
+# Text source but hidden in the pretty-printed output.
+#
+# With a header converted to comment in the text source, everything before
+# the first documentation block (i.e. before the first paragraph using the
+# matching comment string) will be hidden away (in HTML or PDF output).
+#
+# This seems a good compromise, the advantages
+#
+# * line numbers are kept
+# * the "normal" code_block conversion rules (indent/unindent by `codeindent` apply
+# * greater flexibility: you can hide a repeating header in a project
+#   consisting of many source files.
+#
+# set off the disadvantages
+#
+# - it may come as surprise if a part of the file is not "printed",
+# - one more syntax element to learn for rst newbies to start with pylit,
+#   (however, starting from the code source, this will be auto-generated)
+#
+# In the case that there is no matching comment at all, the complete code
+# source will become a comment -- however, in this case it is not very likely
+# the source is a literate document anyway.
+#
+# If needed for the documentation, it is possible to quote the header in (or
+# after) the first documentation block, e.g. as `parsed literal`.
+# ::
+
+    def header_handler(self, lines):
+        """Format leading code block"""
+        # strip header string from first line
+        lines[0] = lines[0].replace(self.header_string, "", 1)
+        # yield remaining lines formatted as code-block
+        for line in self.code_block_handler(lines):
+            yield line
+
+
+# .. _Text2Code.documentation_handler:
+#
+# documentation_handler
+# ~~~~~~~~~~~~~~~~~~~~~
+#
+# The 'documentation' handler processes everything that is not recognised as
+# "code_block". Documentation is quoted with `self.comment_string`
+# (or filtered with `--strip=True`).
+#
+# If end-of-documentation marker is detected,
+#
+# * set state to 'code_block'
+# * set `self._textindent` (needed by `Text2Code.set_state`_ to find the
+#   next "documentation" block)
+#
+# ::
+
+    def documentation_handler(self, lines):
+        """Convert documentation blocks from text to code format
+        """
+        for line in lines:
+            # test lines following the code-block marker for false positives
+            if (self.state == "code_block" and line.rstrip()
+                and not self.directive_option_regexp.search(line)):
+                self.state = "documentation"
+            # test for end of documentation block
+            if self.marker_regexp.search(line):
+                self.state = "code_block"
+                self._textindent = self.get_indent(line)
+            # yield lines
+            if self.strip:
+                continue
+            # do not comment blank lines preceding a code block
+            if self.state == "code_block" and not line.rstrip():
+                yield line
+            else:
+                yield self.comment_string + line
+
+
+
+
+# .. _Text2Code.code_block_handler:
+#
+# code_block_handler
+# ~~~~~~~~~~~~~~~~~~
+#
+# The "code_block" handler is called with an indented literal block. It
+# removes leading whitespace up to the indentation of the first code line in
+# the file (this deviation from Docutils behaviour allows indented blocks of
+# Python code). ::
+
+    def code_block_handler(self, block):
+        """Convert indented literal blocks to source code format
+        """
+
+# If still unset, determine the indentation of code blocks from first non-blank
+# code line::
+
+        if self._codeindent == 0:
+            self._codeindent = self.get_indent(block[0])
+
+# Yield unindented lines after checking whether we can safely unindent. If a
+# line is less indented than `_codeindent`, something went wrong. ::
+
+        for line in block:
+            if line.lstrip() and self.get_indent(line) < self._codeindent:
+                raise ValueError("code block contains line less indented " \
+                      "than %d spaces \n%r"%(self._codeindent, block))
+            yield line.replace(" "*self._codeindent, "", 1)
+
+
+# Code2Text
+# ---------
+#
+# The `Code2Text` converter does the opposite of `Text2Code`_ -- it processes
+# a source in "code format" (i.e. in a programming language), extracts
+# documentation from comment blocks, and puts program code in literal blocks.
+#
+# The class inherits the interface and helper functions from
+# TextCodeConverter_ and adds functions specific to the text-to-code  format
+# conversion::
+
+class Code2Text(TextCodeConverter):
+    """Convert code source to text source
+    """
+
+# set_state
+# ~~~~~~~~~
+#
+# Check if block is "header", "documentation", or "code_block":
+#
+# A paragraph is "documentation", if every non-blank line starts with a
+# matching comment string (including whitespace except for commented blank
+# lines) ::
+
+    def set_state(self, block):
+        """Determine state of `block`."""
+        for line in block:
+            # skip documentation lines (commented, blank or blank comment)
+            if (line.startswith(self.comment_string)
+                or not line.rstrip()
+                or line.rstrip() == self.comment_string.rstrip()
+               ):
+                continue
+            # non-commented line found:
+            if self.state == "":
+                self.state = "header"
+            else:
+                self.state = "code_block"
+            break
+        else:
+            # no code line found
+            # keep state if the block is just a blank line
+            # if len(block) == 1 and self._is_blank_codeline(line):
+            #     return
+            self.state = "documentation"
+
+
+# header_handler
+# ~~~~~~~~~~~~~~
+#
+# Handle a leading code block. (See `Text2Code.header_handler`_ for a
+# discussion of the "header" state.) ::
+
+    def header_handler(self, lines):
+        """Format leading code block"""
+        if self.strip == True:
+            return
+        # get iterator over the lines that formats them as code-block
+        lines = iter(self.code_block_handler(lines))
+        # prepend header string to first line
+        yield self.header_string + next(lines)
+        # yield remaining lines
+        for line in lines:
+            yield line
+
+# .. _Code2Text.documentation_handler:
+#
+# documentation_handler
+# ~~~~~~~~~~~~~~~~~~~~~
+#
+# The *documentation state* handler converts a comment to a documentation
+# block by stripping the leading `comment string` from every line::
+
+    def documentation_handler(self, block):
+        """Uncomment documentation blocks in source code
+        """
+
+# Strip comment strings::
+
+        lines = [self.uncomment_line(line) for line in block]
+
+# If the code block is stripped, the literal marker would lead to an
+# error when the text is converted with Docutils. Strip it as well. ::
+
+        if self.strip or self.strip_marker:
+            self.strip_code_block_marker(lines)
+
+# Otherwise, check for the `code_block_marker`_ at the end of the
+# documentation block (skipping directive options that might follow it)::
+
+        elif self.add_missing_marker:
+            for line in lines[::-1]:
+                if self.marker_regexp.search(line):
+                    self._add_code_block_marker = False
+                    break
+                if (line.rstrip() and
+                    not self.directive_option_regexp.search(line)):
+                    self._add_code_block_marker = True
+                    break
+            else:
+                self._add_code_block_marker = True
+
+# Yield lines::
+
+        for line in lines:
+            yield line
+
+# uncomment_line
+# ~~~~~~~~~~~~~~
+#
+# Return documentation line after stripping comment string. Consider the
+# case that a blank line has a comment string without trailing whitespace::
+
+    def uncomment_line(self, line):
+        """Return uncommented documentation line"""
+        line = line.replace(self.comment_string, "", 1)
+        if line.rstrip() == self.stripped_comment_string:
+            line = line.replace(self.stripped_comment_string, "", 1)
+        return line
+
+# .. _Code2Text.code_block_handler:
+#
+# code_block_handler
+# ~~~~~~~~~~~~~~~~~~
+#
+# The `code_block` handler returns the code block as indented literal
+# block (or filters it, if ``self.strip == True``). The amount of the code
+# indentation is controlled by `self.codeindent` (default 2).  ::
+
+    def code_block_handler(self, lines):
+        """Covert code blocks to text format (indent or strip)
+        """
+        if self.strip == True:
+            return
+        # insert the transition marker if needed
+        if self._add_code_block_marker:
+            self.state = "documentation"
+            yield self.code_block_marker + "\n"
+            yield "\n"
+            self._add_code_block_marker = False
+            self.state = "code_block"
+        for line in lines:
+            yield " "*self.codeindent + line
+
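+# For example, with the default ``codeindent = 2`` the code line ``"def f():\n"``
+# is yielded as ``"  def f():\n"``; with ``--strip`` the block is dropped
+# entirely.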
+
+
+# strip_code_block_marker
+# ~~~~~~~~~~~~~~~~~~~~~~~
+#
+# Replace the literal marker with the equivalent of Docutils replace rules
+#
+# * strip ``::``-line (and preceding blank line) if on a line on its own
+# * strip ``::`` if it is preceded by whitespace.
+# * convert ``::`` to a single colon if preceded by text
+#
+# `lines` is a list of documentation lines (with a trailing blank line).
+# It is modified in-place::
+
+    def strip_code_block_marker(self, lines):
+        try:
+            line = lines[-2]
+        except IndexError:
+            return # just one line (no trailing blank line)
+
+        # match with regexp: `match` is None or has groups
+        # \1 leading text, \2 code_block_marker, \3 remainder
+        match = self.marker_regexp.search(line)
+
+        if not match:                 # no code_block_marker present
+            return
+        if not match.group(1):        # `code_block_marker` on an extra line
+            del(lines[-2])
+            # delete preceding line if it is blank
+            if len(lines) >= 2 and not lines[-2].lstrip():
+                del(lines[-2])
+        elif match.group(1).rstrip() < match.group(1):
+            # '::' follows whitespace
+            lines[-2] = match.group(1).rstrip() + match.group(3)
+        else:                         # '::' follows text
+            lines[-2] = match.group(1).rstrip() + ':' + match.group(3)
+
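+# For example (illustrative input), a trailing ``"Example code::"`` line
+# becomes ``"Example code:"``, ``"text ::"`` becomes ``"text"``, and a lone
+# ``"::"`` line is removed together with a preceding blank line.
+#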
+# Filters
+# =======
+#
+# Filters allow pre- and post-processing of the data to bring it in a format
+# suitable for the "normal" text<->code conversion. An example is conversion
+# of `C` ``/*`` ``*/`` comments into C++ ``//`` comments (and back).
+#
+# Filters are generator functions that return an iterator acting on a
+# `data` iterable and yielding processed `data` lines.
+#
+# identity_filter
+# ---------------
+#
+# The most basic filter is the identity filter, that returns its argument as
+# iterator::
+
+def identity_filter(data):
+    """Return data iterator without any processing"""
+    return iter(data)
+
+# expandtabs_filter
+# -----------------
+#
+# Expand hard-tabs in every line of `data` (cf. `str.expandtabs`).
+#
+# This filter is applied to the input data by `TextCodeConverter.convert`_ as
+# hard tabs can lead to errors when the indentation is changed. ::
+
+def expandtabs_filter(data):
+    """Yield data tokens with hard-tabs expanded"""
+    for line in data:
+        yield line.expandtabs()
+
+
+# collect_blocks
+# --------------
+#
+# A filter to aggregate "paragraphs" (blocks separated by blank
+# lines). Yields lists of lines::
+
+def collect_blocks(lines):
+    """collect lines in a list
+
+    yield list for each paragraph, i.e. block of lines separated by a
+    blank line (whitespace only).
+
+    Trailing blank lines are collected as well.
+    """
+    blank_line_reached = False
+    block = []
+    for line in lines:
+        if blank_line_reached and line.rstrip():
+            yield block
+            blank_line_reached = False
+            block = [line]
+            continue
+        if not line.rstrip():
+            blank_line_reached = True
+        block.append(line)
+    yield block
+
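+# For illustration (made-up input), feeding the lines
+# ``["a\n", "\n", "b\n", "c\n"]`` to `collect_blocks` yields the block
+# ``["a\n", "\n"]`` followed by the block ``["b\n", "c\n"]``.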
+
+
+# dumb_c_preprocessor
+# -------------------
+#
+# This is a basic filter to convert `C` to `C++` comments. Works line-wise and
+# only converts lines that
+#
+# * start with "/\* " and end with " \*/" (followed by whitespace only)
+#
+# A more sophisticated version would also
+#
+# * convert multi-line comments
+#
+#   + Keep indentation or strip 3 leading spaces?
+#
+# * account for nested comments
+#
+# * only convert comments that are separated from code by a blank line
+#
+# ::
+
+def dumb_c_preprocessor(data):
+    """change `C` ``/* `` `` */`` comments into C++ ``// `` comments"""
+    comment_string = defaults.comment_strings["c++"]
+    boc_string = "/* "
+    eoc_string = " */"
+    for line in data:
+        if (line.startswith(boc_string)
+            and line.rstrip().endswith(eoc_string)
+           ):
+            line = line.replace(boc_string, comment_string, 1)
+            line = "".join(line.rsplit(eoc_string, 1))
+        yield line
+
+# Unfortunately, the `replace` method of strings does not support negative
+# numbers for the `count` argument:
+#
+#   >>> "foo */ baz */ bar".replace(" */", "", -1) == "foo */ baz bar"
+#   False
+#
+# However, there is the `rsplit` method, that can be used together with `join`:
+#
+#   >>> "".join("foo */ baz */ bar".rsplit(" */", 1)) == "foo */ baz bar"
+#   True
+#
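+# As a concrete example (illustrative line, assuming the C++ comment string is
+# ``"// "``), the filter turns ``"/* set up the parser */\n"`` into
+# ``"// set up the parser\n"`` while leaving all other lines untouched.
+#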
+# dumb_c_postprocessor
+# --------------------
+#
+# Undo the preparations by the dumb_c_preprocessor and re-insert valid comment
+# delimiters ::
+
+def dumb_c_postprocessor(data):
+    """change C++ ``// `` comments into `C` ``/* `` `` */`` comments"""
+    comment_string = defaults.comment_strings["c++"]
+    boc_string = "/* "
+    eoc_string = " */"
+    for line in data:
+        if line.rstrip() == comment_string.rstrip():
+            line = line.replace(comment_string, "", 1)
+        elif line.startswith(comment_string):
+            line = line.replace(comment_string, boc_string, 1)
+            line = line.rstrip() + eoc_string + "\n"
+        yield line
+
+
+# register filters
+# ----------------
+#
+# ::
+
+defaults.preprocessors['c2text'] = dumb_c_preprocessor
+defaults.preprocessors['css2text'] = dumb_c_preprocessor
+defaults.postprocessors['text2c'] = dumb_c_postprocessor
+defaults.postprocessors['text2css'] = dumb_c_postprocessor
+
+
+# Command line use
+# ================
+#
+# Using this script from the command line will convert a file according to its
+# extension. This default can be overridden by a couple of options.
+#
+# Dual source handling
+# --------------------
+#
+# How to determine which source is up-to-date?
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# - set modification date of `outfile` to the one of `infile`
+#
+#   Points out that the source files are 'synchronised'.
+#
+#   * Are there problems to expect from "backdating" a file? Which?
+#
+#     Looking at http://www.unix.com/showthread.php?t=20526, it seems
+#     perfectly legal to set `mtime` (while leaving `ctime`) as `mtime` is a
+#     description of the "actuality" of the data in the file.
+#
+#   * Should this become a default or an option?
+#
+# - alternatively move input file to a backup copy (with option: `--replace`)
+#
+# - check modification date before overwriting
+#   (with option: `--overwrite=update`)
+#
+# - check modification date before editing (implemented as `Jed editor`_
+#   function `pylit_check()` in `pylit.sl`_)
+#
+# .. _Jed editor: http://www.jedsoft.org/jed/
+# .. _pylit.sl: http://jedmodes.sourceforge.net/mode/pylit/
+#
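+# As a minimal sketch (not part of the option handling; the names `infile` and
+# `outfile` are illustrative), the mtime synchronisation could be done with
+# `os.utime`, as `main`_ does below::
+#
+#     mtime = os.path.getmtime(infile)    # modification time of the source
+#     atime = os.path.getatime(outfile)   # keep the access time of the output
+#     os.utime(outfile, (atime, mtime))   # "backdate" outfile to match infile
+#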
+# Recognised Filename Extensions
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# Instead of defining a new extension for "pylit" literate programs,
+# by default ``.txt`` will be appended for the text source and stripped by
+# the conversion to the code source. I.e. for a Python program foo:
+#
+# * the code source is called ``foo.py``
+# * the text source is called ``foo.py.txt``
+# * the html rendering is called ``foo.py.html``
+#
+#
+# OptionValues
+# ------------
+#
+# The following class adds `as_dict`_, `complete`_ and `__getattr__`_
+# methods to `optparse.Values`::
+
+class OptionValues(optparse.Values):
+
+# .. _OptionValues.as_dict:
+#
+# as_dict
+# ~~~~~~~
+#
+# For use as keyword arguments, it is handy to have the options in a
+# dictionary. `as_dict` returns a copy of the instance's object dictionary::
+
+    def as_dict(self):
+        """Return options as dictionary object"""
+        return self.__dict__.copy()
+
+# .. _OptionValues.complete:
+#
+# complete
+# ~~~~~~~~
+#
+# ::
+
+    def complete(self, **keyw):
+        """
+        Complete the option values with keyword arguments.
+
+        Do not overwrite existing values. Only use keyword arguments that do
+        not have a corresponding attribute in `self`.
+        """
+        for key in keyw:
+            if key not in self.__dict__:
+                setattr(self, key, keyw[key])
+
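+# For instance (hypothetical values), if ``values.txt2code`` is already set,
+# ``values.complete(txt2code=False, codeindent=4)`` leaves ``txt2code``
+# untouched and only adds the missing ``codeindent`` attribute.
+#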
+# .. _OptionValues.__getattr__:
+#
+# __getattr__
+# ~~~~~~~~~~~
+#
+# To replace calls using ``options.ensure_value("OPTION", None)`` with the
+# more concise ``options.OPTION``, we define `__getattr__` [#]_ ::
+
+    def __getattr__(self, name):
+        """Return default value for non-existent options"""
+        return None
+
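+# For example, ``options.no_such_option`` simply evaluates to ``None`` rather
+# than raising ``AttributeError`` (the option name is made up).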
+
+# .. [#] The special method `__getattr__` is only called when an attribute
+#        look-up has not found the attribute in the usual places (i.e. it is
+#        not an instance attribute nor is it found in the class tree for
+#        self).
+#
+#
+# PylitOptions
+# ------------
+#
+# The `PylitOptions` class comprises an option parser and methods for parsing
+# and completion of command line options::
+
+class PylitOptions(object):
+    """Storage and handling of command line options for pylit"""
+
+# Instantiation
+# ~~~~~~~~~~~~~
+#
+# ::
+
+    def __init__(self):
+        """Set up an `OptionParser` instance for pylit command line options
+
+        """
+        p = optparse.OptionParser(usage=main.__doc__, version=_version)
+
+        # Conversion settings
+
+        p.add_option("-c", "--code2txt", dest="txt2code", action="store_false",
+                     help="convert code source to text source")
+        p.add_option("-t", "--txt2code", action="store_true",
+                     help="convert text source to code source")
+        p.add_option("--language",
+                     choices = list(defaults.languages.values()),
+                     help="use LANGUAGE native comment style")
+        p.add_option("--comment-string", dest="comment_string",
+                     help="documentation block marker in code source "
+                     "(including trailing whitespace, "
+                     "default: language dependent)")
+        p.add_option("-m", "--code-block-marker", dest="code_block_marker",
+                     help="syntax token starting a code block. (default '::')")
+        p.add_option("--codeindent", type="int",
+                     help="Number of spaces to indent code blocks with "
+                     "text2code (default %d)" % defaults.codeindent)
+
+        # Output file handling
+
+        p.add_option("--overwrite", action="store",
+                     choices = ["yes", "update", "no"],
+                     help="overwrite output file (default 'update')")
+        p.add_option("--replace", action="store_true",
+                     help="move infile to a backup copy (appending '~')")
+        p.add_option("-s", "--strip", action="store_true",
+                     help='"export" by stripping documentation or code')
+
+        # Special actions
+
+        p.add_option("-d", "--diff", action="store_true",
+                     help="test for differences to existing file")
+        p.add_option("--doctest", action="store_true",
+                     help="run doctest.testfile() on the text version")
+        p.add_option("-e", "--execute", action="store_true",
+                     help="execute code (Python only)")
+
+        self.parser = p
+
+# .. _PylitOptions.parse_args:
+#
+# parse_args
+# ~~~~~~~~~~
+#
+# The `parse_args` method calls the `optparse.OptionParser` on the command
+# line or on provided args and returns the result as an `OptionValues`
+# instance. Defaults can be provided as keyword arguments::
+
+    def parse_args(self, args=sys.argv[1:], **keyw):
+        """parse command line arguments using `optparse.OptionParser`
+
+           parse_args(args, **keyw) -> OptionValues instance
+
+            args --  list of command line arguments.
+            keyw --  keyword arguments or dictionary of option defaults
+        """
+        # parse arguments
+        (values, args) = self.parser.parse_args(args, OptionValues(keyw))
+        # Convert FILE and OUTFILE positional args to option values
+        # (other positional arguments are ignored)
+        try:
+            values.infile = args[0]
+            values.outfile = args[1]
+        except IndexError:
+            pass
+
+        return values
+
+# .. _PylitOptions.complete_values:
+#
+# complete_values
+# ~~~~~~~~~~~~~~~
+#
+# Complete an OptionValues instance `values`.  Use module-level defaults and
+# context information to set missing option values to sensible defaults (if
+# possible) ::
+
+    def complete_values(self, values):
+        """complete option values with module-level and context-sensitive defaults
+
+        x.complete_values(values) -> values
+        values -- OptionValues instance
+        """
+
+# Complete with module-level defaults_::
+
+        values.complete(**defaults.__dict__)
+
+# Ensure infile is a string::
+
+        values.ensure_value("infile", "")
+
+# Guess conversion direction from `infile` filename::
+
+        if values.txt2code is None:
+            in_extension = os.path.splitext(values.infile)[1]
+            if in_extension in values.text_extensions:
+                values.txt2code = True
+            elif in_extension in list(values.languages.keys()):
+                values.txt2code = False
+
+# Auto-determine the output file name::
+
+        values.ensure_value("outfile", self._get_outfile_name(values))
+
+# Second try: Guess conversion direction from outfile filename::
+
+        if values.txt2code is None:
+            out_extension = os.path.splitext(values.outfile)[1]
+            values.txt2code = not (out_extension in values.text_extensions)
+
+# Set the language of the code::
+
+        if values.txt2code is True:
+            code_extension = os.path.splitext(values.outfile)[1]
+        elif values.txt2code is False:
+            code_extension = os.path.splitext(values.infile)[1]
+        values.ensure_value("language", values.languages[code_extension])
+
+        return values
+
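+# For instance, an `infile` named ``foo.py.txt`` implies ``txt2code=True``
+# with ``foo.py`` as the default output, while ``foo.py`` implies
+# ``txt2code=False`` and ``foo.py.txt`` (cf. "Recognised Filename Extensions"
+# above).
+#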
+# _get_outfile_name
+# ~~~~~~~~~~~~~~~~~
+#
+# Construct a matching filename for the output file. The output filename is
+# constructed from `infile` by the following rules:
+#
+# * '-' (stdin) results in '-' (stdout)
+# * strip the `text_extension`_ (txt2code) or
+# * add the `text_extension`_ (code2txt)
+# * fallback: if no guess can be made, add ".out"
+#
+#   .. TODO: use values.outfile_extension if it exists?
+#
+# ::
+
+    def _get_outfile_name(self, values):
+        """Return a matching output filename for `infile`
+        """
+        # if input is stdin, default output is stdout
+        if values.infile == '-':
+            return '-'
+
+        # Derive from `infile` name: strip or add text extension
+        (base, ext) = os.path.splitext(values.infile)
+        if ext in values.text_extensions:
+            return base # strip
+        if ext in list(values.languages.keys()) or values.txt2code == False:
+            return values.infile + values.text_extensions[0] # add
+        # give up
+        return values.infile + ".out"
+
+# .. _PylitOptions.__call__:
+#
+# __call__
+# ~~~~~~~~
+#
+# The special `__call__` method allows PylitOptions instances to be used as
+# *callables*: calling an instance parses the argument list to extract option
+# values and completes them based on "context-sensitive defaults".  Keyword
+# arguments are passed to `PylitOptions.parse_args`_ as default values. ::
+
+    def __call__(self, args=sys.argv[1:], **keyw):
+        """parse and complete command line args return option values
+        """
+        values = self.parse_args(args, **keyw)
+        return self.complete_values(values)
+
+
+
+# Helper functions
+# ----------------
+#
+# open_streams
+# ~~~~~~~~~~~~
+#
+# Return file objects for input and output. If the input path is missing,
+# write a usage message and abort. (An alternative would be to use stdin as
+# the default; however, this leaves the uninitiated user with a non-responding
+# application if they just try the script without any arguments.) ::
+
+def open_streams(infile = '-', outfile = '-', overwrite='update', **keyw):
+    """Open and return the input and output stream
+
+    open_streams(infile, outfile) -> (in_stream, out_stream)
+
+    in_stream   --  open(infile) or sys.stdin
+    out_stream  --  open(outfile) or sys.stdout
+    overwrite   --  'yes': overwrite `outfile` if it already exists,
+                    'update': fail if the `outfile` is newer than `infile`,
+                    'no': fail if `outfile` exists.
+
+                    Irrelevant if `outfile` == '-'.
+    """
+    if not infile:
+        strerror = "Missing input file name ('-' for stdin; -h for help)"
+        raise IOError(2, strerror, infile)
+    if infile == '-':
+        in_stream = sys.stdin
+    else:
+        in_stream = open(infile, 'r')
+    if outfile == '-':
+        out_stream = sys.stdout
+    elif overwrite == 'no' and os.path.exists(outfile):
+        raise IOError(1, "Output file exists!", outfile)
+    elif overwrite == 'update' and is_newer(outfile, infile):
+        raise IOError(1, "Output file is newer than input file!", outfile)
+    else:
+        out_stream = open(outfile, 'w')
+    return (in_stream, out_stream)
+
+# is_newer
+# ~~~~~~~~
+#
+# ::
+
+def is_newer(path1, path2):
+    """Check if `path1` is newer than `path2` (using mtime)
+
+    Compare modification time of files at path1 and path2.
+
+    Non-existing files are considered oldest: Return False if path1 does not
+    exist and True if path2 does not exist.
+
+    Return None for equal modification time. (This evaluates to False in a
+    Boolean context but allows a test for equality.)
+
+    """
+    try:
+        mtime1 = os.path.getmtime(path1)
+    except OSError:
+        mtime1 = -1
+    try:
+        mtime2 = os.path.getmtime(path2)
+    except OSError:
+        mtime2 = -1
+    # print "mtime1", mtime1, path1, "\n", "mtime2", mtime2, path2
+
+    if mtime1 == mtime2:
+        return None
+    return mtime1 > mtime2
+
+
+# get_converter
+# ~~~~~~~~~~~~~
+#
+# Get an instance of the converter state machine::
+
+def get_converter(data, txt2code=True, **keyw):
+    if txt2code:
+        return Text2Code(data, **keyw)
+    else:
+        return Code2Text(data, **keyw)
+
+
+# Use cases
+# ---------
+#
+# run_doctest
+# ~~~~~~~~~~~
+# ::
+
+def run_doctest(infile="-", txt2code=True,
+                globs={}, verbose=False, optionflags=0, **keyw):
+    """run doctest on the text source
+    """
+
+# Allow imports from the current working dir by prepending an empty string to
+# sys.path (see the documentation of `sys.path`)::
+
+    sys.path.insert(0, '')
+
+# Import classes from the doctest module::
+
+    from doctest import DocTestParser, DocTestRunner
+
+# Read in source. Make sure it is in text format, as tests in comments are not
+# found by doctest::
+
+    (data, out_stream) = open_streams(infile, "-")
+    if txt2code is False:
+        keyw.update({'add_missing_marker': False})
+        converter = Code2Text(data, **keyw)
+        docstring = str(converter)
+    else:
+        docstring = data.read()
+
+# decode doc string if there is a "magic comment" in the first or second line
+# (http://docs.python.org/reference/lexical_analysis.html#encoding-declarations)
+# ::
+
+    firstlines = ' '.join(docstring.splitlines()[:2])
+    match = re.search(r'coding[=:]\s*([-\w.]+)', firstlines)
+    if match:
+        docencoding = match.group(1)
+        if hasattr(docstring, 'decode'):   # bytes input (Python 2) only
+            docstring = docstring.decode(docencoding)
+
+# Use the doctest Advanced API to run all doctests in the source text::
+
+    test = DocTestParser().get_doctest(docstring, globs, name="",
+                                       filename=infile, lineno=0)
+    runner = DocTestRunner(verbose, optionflags)
+    runner.run(test)
+    runner.summarize()
+    # give feedback also if no failures occurred
+    if not runner.failures:
+        print("%d failures in %d tests"%(runner.failures, runner.tries))
+    return runner.failures, runner.tries
+
+
+# diff
+# ~~~~
+#
+# ::
+
+def diff(infile='-', outfile='-', txt2code=True, **keyw):
+    """Report differences between converted infile and existing outfile
+
+    If outfile does not exist or is '-', do a round-trip conversion and
+    report differences.
+    """
+
+    import difflib
+
+    instream = open(infile)
+    # for diffing, we need a copy of the data as list::
+    data = instream.readlines()
+    # convert
+    converter = get_converter(data, txt2code, **keyw)
+    new = converter()
+
+    if outfile != '-' and os.path.exists(outfile):
+        outstream = open(outfile)
+        old = outstream.readlines()
+        oldname = outfile
+        newname = "<conversion of %s>"%infile
+    else:
+        old = data
+        oldname = infile
+        # back-convert the output data
+        converter = get_converter(new, not txt2code)
+        new = converter()
+        newname = "<round-conversion of %s>"%infile
+
+    # find and print the differences
+    is_different = False
+    # print type(old), old
+    # print type(new), new
+    delta = difflib.unified_diff(old, new,
+    # delta = difflib.unified_diff(["heute\n", "schon\n"], ["heute\n", "noch\n"],
+                                      fromfile=oldname, tofile=newname)
+    for line in delta:
+        is_different = True
+        print(line, end=' ')
+    if not is_different:
+        print(oldname)
+        print(newname)
+        print("no differences found")
+    return is_different
+
+
+# execute
+# ~~~~~~~
+#
+# Works only for python code.
+#
+# Does not work with `eval`, as code is not just one expression. ::
+
+def execute(infile="-", txt2code=True, **keyw):
+    """Execute the input file. Convert first, if it is a text source.
+    """
+
+    stream = open(infile)
+    if txt2code:
+        code = str(Text2Code(stream, **keyw))
+    else:
+        # exec() needs a string (or code object), not an open file
+        code = stream.read()
+    # print "executing " + options.infile
+    exec(code)
+
+
+# main
+# ----
+#
+# If this script is called from the command line, the `main` function will
+# convert the input (file or stdin) between text and code formats.
+#
+# Option default values for the conversion can be given as keyword arguments
+# to `main`_.  The option defaults will be updated by command line options and
+# extended with "intelligent guesses" by `PylitOptions`_ and passed on to
+# helper functions and the converter instantiation.
+#
+# This allows easy customisation for programmatic use -- just call `main`
+# with the appropriate keyword options, e.g. ``pylit.main(comment_string="## ")``
+#
+# ::
+
+def main(args=sys.argv[1:], **defaults):
+    """%prog [options] INFILE [OUTFILE]
+
+    Convert between (reStructured) text source with embedded code,
+    and code source with embedded documentation (comment blocks)
+
+    The special filename '-' stands for standard input and output.
+    """
+
+# Parse and complete the options::
+
+    options = PylitOptions()(args, **defaults)
+    # print "infile", repr(options.infile)
+
+# Special actions with early return::
+
+    if options.doctest:
+        return run_doctest(**options.as_dict())
+
+    if options.diff:
+        return diff(**options.as_dict())
+
+    if options.execute:
+        return execute(**options.as_dict())
+
+# Open in- and output streams::
+
+    try:
+        (data, out_stream) = open_streams(**options.as_dict())
+    except IOError as ex:
+        print("IOError: %s %s" % (ex.filename, ex.strerror))
+        sys.exit(ex.errno)
+
+# Get a converter instance::
+
+    converter = get_converter(data, **options.as_dict())
+
+# Convert and write to out_stream::
+
+    out_stream.write(str(converter))
+
+    if out_stream is not sys.stdout:
+        print("extract written to", out_stream.name)
+        out_stream.close()
+
+# If input and output are from files, set the modification time (`mtime`) of
+# the output file to that of the input file to indicate that the contained
+# information is equal. [#]_ ::
+
+        try:
+            os.utime(options.outfile, (os.path.getatime(options.outfile),
+                                       os.path.getmtime(options.infile))
+                    )
+        except OSError:
+            pass
+
+    ## print "mtime", os.path.getmtime(options.infile),  options.infile
+    ## print "mtime", os.path.getmtime(options.outfile), options.outfile
+
+
+# .. [#] Make sure the corresponding file object (here `out_stream`) is
+#        closed, as otherwise the change will be overwritten when `close` is
+#        called afterwards (either explicitly or at program exit).
+#
+#
+# Rename the infile to a backup copy if ``--replace`` is set::
+
+    if options.replace:
+        os.rename(options.infile, options.infile + "~")
+
+
+# Run main, if called from the command line::
+
+if __name__ == '__main__':
+    main()
+
+
+# Open questions
+# ==============
+#
+# Open questions and ideas for further development
+#
+# Clean code
+# ----------
+#
+# * can we gain from using "shutils" over "os.path" and "os"?
+# * use pylint or pyChecker to enforce a consistent style?
+#
+# Options
+# -------
+#
+# * Use templates for the "intelligent guesses" (with Python syntax for string
+#   replacement with dicts: ``"hello %(what)s" % {'what': 'world'}``)
+#
+# * Is it sensible to offer the `header_string` option also as command line
+#   option?
+#
+# treatment of blank lines
+# ------------------------
+#
+# Alternatives: Keep blank lines blank
+#
+# - "never" (current setting) -> "visually merges" all documentation
+#    if there is no interjacent code
+#
+# - "always" -> disrupts documentation blocks,
+#
+# - "if empty" (no whitespace). Comment if there is whitespace.
+#
+#   This would allow non-obstructing markup but unfortunately this is (in
+#   most editors) also non-visible markup.
+#
+# + "if double" (if there is more than one consecutive blank line)
+#
+#   With this handling, the "visual gap" remains in both, text and code
+#   source.
+#
+#
+# Parsing Problems
+# ----------------
+#
+# * Ignore "matching comments" in literal strings?
+#
+#   Too complicated: Would need a specific detection algorithm for every
+#   language that supports multi-line literal strings (C++, PHP, Python)
+#
+# * Warn if a comment in code will become documentation after round-trip?
+#
+#
+# docstrings in code blocks
+# -------------------------
+#
+# * How to handle docstrings in code blocks? (it would be nice to convert them
+#   to rst-text if ``__docformat__ == restructuredtext``)
+#
+# TODO: Ask at Docutils users|developers
+#
+# Plug-ins
+# --------
+#
+# Specify a path for user additions and plug-ins. This would require
+# converting Pylit from a pure module to a package...
+#
+#   6.4.3 Packages in Multiple Directories
+#
+#   Packages support one more special attribute, __path__. This is initialized
+#   to be a list containing the name of the directory holding the package's
+#   __init__.py before the code in that file is executed. This
+#   variable can be modified; doing so affects future searches for modules and
+#   subpackages contained in the package.
+#
+#   While this feature is not often needed, it can be used to extend the set
+#   of modules found in a package.
+#
+#
+# .. References
+#
+# .. _Docutils: http://docutils.sourceforge.net/
+# .. _Sphinx: http://sphinx.pocoo.org
+# .. _Pygments: http://pygments.org/
+# .. _code-block directive:
+#     http://docutils.sourceforge.net/sandbox/code-block-directive/
+# .. _literal block:
+# .. _literal blocks:
+#     http://docutils.sf.net/docs/ref/rst/restructuredtext.html#literal-blocks
+# .. _indented literal block:
+# .. _indented literal blocks:
+#     http://docutils.sf.net/docs/ref/rst/restructuredtext.html#indented-literal-blocks
+# .. _quoted literal block:
+# .. _quoted literal blocks:
+#     http://docutils.sf.net/docs/ref/rst/restructuredtext.html#quoted-literal-blocks
+# .. _parsed-literal blocks:
+#     http://docutils.sf.net/docs/ref/rst/directives.html#parsed-literal-block
+# .. _doctest block:
+# .. _doctest blocks:
+#     http://docutils.sf.net/docs/ref/rst/restructuredtext.html#doctest-blocks
+#
+# .. _feature request and patch by jrioux:
+#     http://developer.berlios.de/feature/?func=detailfeature&feature_id=4890&group_id=7974
diff --git a/doc/rst_prolog b/doc/rst_prolog
new file mode 100644
index 0000000..1b799ca
--- /dev/null
+++ b/doc/rst_prolog
@@ -0,0 +1,15 @@
+.. |Ang| unicode:: U+212B
+.. |pm| unicode:: U+00B1
+.. |cdot| unicode:: U+00B7
+.. |deg| unicode:: U+00B0
+.. |cm^2| replace:: cm\ :sup:`2`
+.. |cm^3| replace:: cm\ :sup:`3`
+.. |1/cm| replace:: cm\ :sup:`-1`
+.. |Ang^2| replace:: |Ang|\ :sup:`2`
+.. |Ang^3| replace:: |Ang|\ :sup:`3`
+.. |1/Ang| replace:: |Ang|\ :sup:`-1`
+.. |1/Ang^2| replace:: |Ang|\ :sup:`-2`
+.. |1e-6/Ang^2| replace:: 10\ :sup:`-6`\ |Ang|\ :sup:`-2`
+.. |g/cm^3| replace:: g\ |cdot|\ cm\ :sup:`-3`
+.. |fm^2| replace:: fm\ :sup:`2`
+.. |Js| replace:: J\ |cdot|\ s
diff --git a/doc/sitedoc.py b/doc/sitedoc.py
new file mode 100644
index 0000000..caa012a
--- /dev/null
+++ b/doc/sitedoc.py
@@ -0,0 +1,64 @@
+import os
+
+import numpy as np
+
+import bumps.fitters as fit
+from bumps.cli import load_model
+
+SEED = 1
+
+def example_dir():
+    """
+    Return the directory containing the rst file source for the current plot.
+    """
+    # Search through the call stack for the rstdir variable.
+    #
+    # This is an ugly hack which relies on internal structures of the python
+    # interpreter and particular variables used in internal functions from
+    # the matplotlib plot directive, and so is likely to break.
+    #
+    # If this code breaks, you could probably get away with searching up
+    # the stack for the variable 'state_machine', which is a sphinx defined
+    # variable, and use:
+    #
+    #  rstdir, _ = os.path.split(state_machine.document.attributes['source'])
+    #
+    # Even better would be to modify the plot directive to make rstdir
+    # available to the inline plot directive, e.g., by adding it to the
+    # locals context.  It is already implicitly available to the plot
+    # file context because there is an explicit chdir to the directory
+    # containing the plot.
+    import inspect
+    frame = inspect.currentframe()
+    RSTDIR='rst_dir'
+    while frame and RSTDIR not in frame.f_locals:
+        frame = frame.f_back
+        #print "checking",frame.f_code.co_name
+    if not frame:
+        raise RuntimeError('plot directive changed: %r no longer defined'%RSTDIR)
+    return frame.f_locals[RSTDIR] if frame else ""
+
+def plot_model(filename):
+    from matplotlib import pyplot as plt
+    #import sys; print >>sys.stderr, "in plot with",filename, example_dir()
+    np.random.seed(SEED)
+    p = load_model(os.path.join(example_dir(), filename))
+    p.plot()
+    plt.show()
+
+def fit_model(filename):
+    from matplotlib import pyplot as plt
+    #import sys; print >>sys.stderr, "in plot with",filename, example_dir()
+    np.random.seed(SEED)
+    p = load_model(os.path.join(example_dir(), filename))
+    #x.fx = fit.RLFit(p).solve(steps=1000, burn=99)
+    #x,fx = fit.DEFit(p).solve(steps=200, pop=10)
+    #x,fx = fit.PTFit(p).solve(steps=100,burn=400)
+    #x.fx = fit.BFGSFit(p).solve(steps=200)
+    x, fx = fit.SimplexFit(p).solve(steps=1000)
+    chisq = p(x)
+    print("chisq=%g" % chisq)
+    if chisq > 2:
+        raise RuntimeError("Fit did not converge")
+    p.plot()
+    plt.show()
diff --git a/extra/amqp_map/USECASE.txt b/extra/amqp_map/USECASE.txt
new file mode 100644
index 0000000..29e4203
--- /dev/null
+++ b/extra/amqp_map/USECASE.txt
@@ -0,0 +1,60 @@
+SERVICE/WORKER ARCHITECTURE
+* pause/resume job
+* cancel job
+* cancel map
+* slow node
+* one data point more expensive than the others
+* infinite loop for some values
+* exceptions for some values
+* worker not stateless: one calculation interferes with the next
+* worker memory leak
+* worker requires initial state
+* worker state changes between map calls
+
+* client requests logging on service, any worker, or all workers
+* logging based on condition
+* rerun value with logging when worker fails
+
+* parallel random number generator, with seed control
+
+* worker which needs workers
+
+CLUSTER MANAGEMENT
+* priority jobs
+* start/stop workers when job starts
+* add client to processing pool, with preference for client jobs
+* add new node
+* machine reboots: exchange, service, worker, monitor
+* unreliable processor/memory on some nodes
+* workers which use local disk but don't clean up after themselves
+* service monitoring
+* identify all processes for a given user
+* identify idle machines
+
+JOB MANAGER
+* job manager machine reboots
+* job manager upgrades
+* progress thumbnail
+* client detach/attach
+* retrieve results
+* notify user when job starts (message queue or email)
+
+DEPLOYMENT
+* egg basket and trusted users
+* different users having conflicting dependencies
+* worker implemented in java, IDL, Matlab, R, ...
+* client implemented java, IDL, Matlab, R, ...
+* code movement while developing worker
+
+DATA MANAGEMENT
+* big file, independent disks
+* storing run results
+
+SCALABILITY
+* workstation, dedicated cluster, batch queue, NoW, EC2, teragrid
+
+SECURITY (SNS, Teragrid, shared clusters)
+* service/workers run as user
+* exchange runs as user
+* job manager behind firewall
+* cluster behind firewall
diff --git a/extra/amqp_map/__init__.py b/extra/amqp_map/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/extra/amqp_map/config.py b/extra/amqp_map/config.py
new file mode 100644
index 0000000..020ca11
--- /dev/null
+++ b/extra/amqp_map/config.py
@@ -0,0 +1,28 @@
+"""
+Exchange information.
+
+
+This information should be filled in when connecting to a service.
+
+Some of this should be filled from
+Note that the host used for job management and status updates is
+going to be different from that used for mapping operations within
+the job.
+
+*CLIENT_HOST* | "user:password@host:port/virtual_host"
+    Host for messages to and from the job manager and computation monitor.
+    The user supplies this when they make a connection.
+*SERVICE_HOST* | "user:password@host:port/virtual_host"
+    Host for messages within the computation.  The administrator supplies
+    this when they configure the compute cluster.
+*EXCHANGE* | string
+    Exchange name to use for the system.
+*MAX_QUEUE* | int
+    The maximum number of messages any process should have outstanding.  This
+    should be somewhat greater than the number of computers in the cluster,
+    but not so large that the computation saturates the exchange.
+"""
+CLIENT_HOST = "guest:guest@sparkle.ncnr.nist.gov:5672/"
+SERVICE_HOST = "guest:guest@sparkle.ncnr.nist.gov:5672/"
+EXCHANGE = "park"
+MAX_QUEUE = 1000
diff --git a/extra/amqp_map/core.py b/extra/amqp_map/core.py
new file mode 100644
index 0000000..6484036
--- /dev/null
+++ b/extra/amqp_map/core.py
@@ -0,0 +1,207 @@
+
+# Mechanisms for throttling the mapper when the function is expensive:
+#
+# 1) Do nothing.
+#    PRO: no computation overhead
+#    PRO: AMQP will use flow control when it runs low on memory
+#    PRO: most maps are small
+#    CON: may use excess memory on exchange
+# 2) Use locks.
+#    PRO: threading.Condition() makes this easy
+#    PRO: good lock implementation means master consumes no resources
+#    CON: may not handle keyboard interrupt correctly on some platforms
+# 3) Use sleep.
+#    PRO: simple implementation will work across platforms
+#    CON: master stays in memory because it is restarted every 10 min.
+#
+# The current implementation uses locks to throttle.
+
+## USE_LOCKS_TO_THROTTLE
+import threading
+
+## USE_SLEEP_TO_THROTTLE
+#import time
+#import os
+
+#from dill import loads, dumps
+from pickle import loads, dumps
+import sys
+
+from amqplib import client_0_8 as amqp #@UnresolvedImport if amqp isn't available
+
+from . import config
+from .url import URL
+from .threaded import daemon
+
+def connect(url, insist=False):
+    url = URL(url, host="localhost", port=5672,
+              user="guest", password="guest", path="/")
+    host = ":".join( (url.host, str(url.port)) )
+    userid,password = url.user,url.password
+    virtual_host = "/" + url.path
+    server = amqp.Connection(host=host, userid=userid, password=password,
+                             virtual_host=virtual_host, insist=insist)
+    return server
+
+def start_worker(server, mapid, work):
+    """
+    Client side driver of the map work.
+
+    The model should already be loaded before calling this.
+    """
+    # Create the exchange and the worker queue
+    channel = server.channel()
+    exchange = "park.map"
+    map_queue = ".".join(("map",mapid))
+    channel.exchange_declare(exchange=exchange, type="direct",
+                             durable=False, auto_delete=True)
+    channel.queue_declare(queue=map_queue, durable=False,
+                          exclusive=False, auto_delete=True)
+
+    #me = os.getpid()
+    #os.system("echo '%s' > /home/pkienzle/map.%d"%('starting',me))
+
+    # Prefetch requires basic_ack, basic_qos and consume with ack
+    def _process_work(msg):
+        # Check for sentinel
+        if msg.reply_to == "":
+            channel.basic_cancel(consumer)
+        body = loads(msg.body)
+        # Acknowledge delivery of message
+        #print "processing...",body['index'],body['value']; sys.stdout.flush()
+        #os.system("echo 'processing %s' >> /home/pkienzle/map.%d"%(body['value'],me))
+        try:
+            result = work(body['value'])
+        except Exception as _exc:
+            #os.system("echo 'error %s' >> /home/pkienzle/map.%d"%(_exc,me))
+            result = None
+        #os.system("echo 'returning %s' >> /home/pkienzle/map.%d"%(result,me))
+        #print "done"
+        channel.basic_ack(msg.delivery_tag)
+        reply = amqp.Message(dumps(dict(index=body['index'],result=result)))
+        channel.basic_publish(reply, exchange=exchange,
+                              routing_key=msg.reply_to)
+    #channel.basic_qos(prefetch_size=0, prefetch_count=1, a_global=False)
+    consumer = channel.basic_consume(queue=map_queue, callback=_process_work,
+                                     no_ack=False)
+    while True:
+        channel.wait()
+
+class Mapper(object):
+    def __init__(self, server, mapid):
+        # Create the exchange and the worker and reply queues
+        channel = server.channel()
+        exchange = "park.map"
+        channel.exchange_declare(exchange=exchange, type="direct",
+                                 durable=False, auto_delete=True)
+
+        map_channel = channel
+        map_queue = ".".join(("map",mapid))
+        map_channel.queue_declare(queue=map_queue, durable=False,
+                                  exclusive=False, auto_delete=True)
+        map_channel.queue_bind(queue=map_queue, exchange="park.map",
+                               routing_key = map_queue)
+
+        reply_channel = server.channel()
+        #reply_queue = ".".join(("reply",mapid)) # Fixed Queue name
+        reply_queue = "" # Let amqp create a temporary queue for us
+        reply_queue,_,_ = reply_channel.queue_declare(queue=reply_queue,
+                                                      durable=False,
+                                                      exclusive=True,
+                                                      auto_delete=True)
+        reply_channel.queue_bind(queue=reply_queue, exchange="park.map",
+                                 routing_key = reply_queue)
+        reply_channel.basic_consume(queue=reply_queue,
+                                    callback=self._process_result,
+                                    no_ack=True)
+        self.exchange = exchange
+        self.map_queue = map_queue
+        self.map_channel = map_channel
+        self.reply_queue = reply_queue
+        self.reply_channel = reply_channel
+
+        ## USE_LOCKS_TO_THROTTLE
+        self._throttle = threading.Condition()
+
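+    # Typical use, as sketched in extra/amqp_map/example/square.py (assumes a
+    # worker is already serving the same mapid):
+    #
+    #     server = connect(SERVICE_HOST)
+    #     square = Mapper(server, "square")
+    #     results = square([1, 2, 3])
+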
+    def close(self):
+        # close both channels opened in __init__ (there is no self.channel)
+        self.map_channel.close()
+        self.reply_channel.close()
+
+    def _process_result(self, msg):
+        self._reply = loads(msg.body)
+        #print "received result",self._reply['index'],self._reply['result']
+    @daemon
+    def _send_map(self, items):
+        for i,v in enumerate(items):
+            self.num_queued = i
+            #print "queuing %d %s"%(i,v)
+
+            ## USE_LOCKS_TO_THROTTLE
+            if  self.num_queued - self.num_processed > config.MAX_QUEUE:
+                #print "sleeping at %d in %d out"%(i,self.num_processed)
+                self._throttle.acquire()
+                self._throttle.wait()
+                self._throttle.release()
+                #print "waking at %d in %d out"%(i,self.num_processed)
+
+            # USE_SLEEP_TO_THROTTLE
+            #sleep_time = 0.2
+            #while i - self.num_processed > config.MAX_QUEUE:
+            #    #print "sleeping %g with in=%d out=%d"%(sleep_time,self.num_queued,self.num_processed)
+            #    time.sleep(sleep_time)
+            #    sleep_time = min(2*sleep_time, 600)
+
+            body = dumps(dict(index=i,value=v))
+            msg = amqp.Message(body, reply_to=self.reply_queue, delivery_mode=1)
+            self.map_channel.basic_publish(msg, exchange=self.exchange,
+                                           routing_key=self.map_queue)
+
+    def cancel(self):
+        """
+        Stop a running map.
+        """
+        raise NotImplementedError()
+        # Need to clear the queued items and notify async that no more results.
+        # Messages in transit need to be ignored, which probably means tagging
+        # each map header with a call number so that previous calls don't
+        # get confused with current calls.
+        msg = amqp.Message("", reply_to="", delivery_mode=1)
+        self.map_channel.basic_publish(msg, exchange=self.exchange,
+                                       routing_key=self.map_queue)
+
+    def async(self, items):
+        # TODO: we should be able to flag completion somehow so that the
+        # whole list does not need to be formed.
+        items = list(items) # make it indexable
+        self.num_items = len(items)
+        # Queue items in separate thread so we can start receiving results
+        # before all items are even queued
+        self.num_processed = 0
+        publisher = self._send_map(items)
+        recvd = set()
+        while self.num_processed < self.num_items:
+            try: del self._reply
+            except AttributeError: pass
+            self.reply_channel.wait()
+            try:
+                idx = self._reply['index']
+            except (AttributeError, KeyError):
+                sys.stdout.flush()
+                raise RuntimeError("Reply not received")
+            if idx in recvd: continue
+            recvd.add(idx)
+            result = self._reply['result']
+            #print "received %d %g"%(idx,result)
+            self.num_processed += 1
+
+            ## USE_LOCKS_TO_THROTTLE
+            if self.num_queued - self.num_processed < config.MAX_QUEUE - 10:
+                # Ten at a time go through for slow processes
+                self._throttle.acquire()
+                self._throttle.notify()
+                self._throttle.release()
+
+            yield idx,result
+        publisher.join()
+    def __call__(self, items):
+        result = list(self.async(items))
+        result.sort(key=lambda pair: pair[0])
+        return list(zip(*result))[1]
diff --git a/extra/amqp_map/example/echo.py b/extra/amqp_map/example/echo.py
new file mode 100755
index 0000000..f62dd65
--- /dev/null
+++ b/extra/amqp_map/example/echo.py
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+import sys
+
+from amqp_map.config import SERVICE_HOST
+from amqp_map.core import connect
+from amqp_map.rpc import RPC
+
+rpc = RPC(connect(SERVICE_HOST))
+for item in sys.argv[1:]:
+    print(rpc.echo.echo(item))
diff --git a/extra/amqp_map/example/rpc_echo.py b/extra/amqp_map/example/rpc_echo.py
new file mode 100644
index 0000000..3ef484b
--- /dev/null
+++ b/extra/amqp_map/example/rpc_echo.py
@@ -0,0 +1,16 @@
+import time
+
+from amqp_map.core import connect
+from amqp_map.config import SERVICE_HOST
+from amqp_map.rpc import RPCMixin
+
+server = connect(SERVICE_HOST)
+
+class Echo(object, RPCMixin):
+    def __init__(self, server):
+        self.rpc_init(server, service="echo")
+    def echo(self, msg):
+        return msg
+
+service = Echo(server)
+service.rpc_serve()
diff --git a/extra/amqp_map/example/square.py b/extra/amqp_map/example/square.py
new file mode 100644
index 0000000..301649b
--- /dev/null
+++ b/extra/amqp_map/example/square.py
@@ -0,0 +1,26 @@
+import time
+
+import numpy as np
+
+from amqp_map.config import SERVICE_HOST
+from amqp_map.core import connect, Mapper
+
+server = connect(SERVICE_HOST)
+square = Mapper(server, "square")
+
+#print square(xrange(5,10))
+
+#for i,v in square.async(xrange(-20,-15)): print i,v
+
+t0 = time.time()
+n=10000
+print("start direct %d" % n)
+[x*x for x in range(n)]
+print("direct time %g ms/call" % (1000*(time.time()-t0)/n))
+
+
+t0 = time.time()
+n=100
+print("start big %d" % n)
+square([x*np.ones(3) for x in range(n)])
+print("remote time %g ms/call" % (1000*(time.time()-t0)/n))
diff --git a/extra/amqp_map/example/square_worker.py b/extra/amqp_map/example/square_worker.py
new file mode 100644
index 0000000..1e147e9
--- /dev/null
+++ b/extra/amqp_map/example/square_worker.py
@@ -0,0 +1,17 @@
+
+from amqp_map.config import SERVICE_HOST
+from amqp_map.core import connect, start_worker
+
+#BUSY = 30000000
+#BUSY = 1000000
+BUSY = 0
+def square(x):
+    print("recv'd %s" % (x,))
+    #x = float(x)
+    for i in range(BUSY):
+        x = x+i
+    for i in range(BUSY):
+        x = x-i
+    return x*x
+server = connect(SERVICE_HOST)
+start_worker(server, "square", square)
diff --git a/extra/amqp_map/jsonrpc.txt b/extra/amqp_map/jsonrpc.txt
new file mode 100644
index 0000000..d1e4158
--- /dev/null
+++ b/extra/amqp_map/jsonrpc.txt
@@ -0,0 +1,197 @@
+http://groups.google.com/group/json-rpc/web/json-rpc-2-0
+
+JSON-RPC 2.0 Specification
+
+Date:   2010-03-26 (based on the 2009-05-24 version)
+Author: JSON-RPC Working Group <json-rpc(at)googlegroups.com>
+Table of Contents
+Overview
+Conventions
+Compatibility
+Request Object
+Notification
+Parameter Structures
+Response Object
+Error Object
+Batch
+Examples
+Extensions
+1 Overview
+
+JSON-RPC is a stateless, light-weight remote procedure call (RPC) protocol. Primarily this specification defines several data structures and the rules around their processing. It is transport agnostic in that the concepts can be used within the same process, over sockets, over http, or in many various message passing environments. It uses JSON (RFC 4627) as data format.
+It is designed to be simple!
+2 Conventions
+
+The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in RFC 2119.
+
+Since JSON-RPC utilizes JSON, it has the same type system (see http://www.json.org or RFC 4627). JSON can represent four primitive types (Strings, Numbers, Booleans, and Null) and two structured types (Objects and Arrays). The term "Primitive" in this specification references any of those four primitive JSON types. The term "Structured" references either of the structured JSON types. Whenever this document refers to any JSON type, the first letter is always capitalized: Object, Array, String, Number, Boolean, Null.
+
+All member names exchanged between the Client and the Server that are considered for matching of any kind should be considered to be case-sensitive. The terms function, method, and procedure can be assumed to be interchangeable.
+
+The Client is defined as the origin of Request objects and the handler of Response objects. 
+The Server is defined as the origin of Response objects and the handler of Request objects. 
+
+One implementation of this specification could easily fill both of those roles, even at the same time, to other different clients or the same client. This specification does not address that layer of complexity.
+3 Compatibility
+
+JSON-RPC 2.0 Request objects and Response objects may not work with existing JSON-RPC 1.0 clients or servers. However, it is easy to distinguish between the two versions as 2.0 always has a member named "jsonrpc" with a String value of "2.0" whereas 1.0 does not. Most 2.0 implementations should consider trying to handle 1.0 objects, even if not the peer-to-peer and class hinting aspects of 1.0.
+4 Request object
+
+A rpc call is represented by sending a Request object to a Server. The Request object has the following members:
+jsonrpc
+A String specifying the version of the JSON-RPC protocol. MUST be exactly "2.0".
+method
+A String containing the name of the method to be invoked. Method names that begin with the word rpc followed by a period character (U+002E or ASCII 46) are reserved for rpc-internal methods and extensions and MUST NOT be used for anything else.
+params
+A Structured value that holds the parameter values to be used during the invocation of the method. This member MAY be omitted.
+id
+An identifier established by the Client that MUST contain a String, Number, or NULL value if included. If it is not included it is assumed to be a notification. The value SHOULD normally not be Null [1] and Numbers SHOULD NOT contain fractional parts [2]
+
+The Server MUST reply with the same value in the Response object if included. This member is used to correlate the context between the two objects.
+
+[1] The use of Null as a value for the id member in a Request object is discouraged, because this specification uses a value of Null for Responses with an unknown id. Also, because JSON-RPC 1.0 uses an id value of Null for Notifications this could cause confusion in handling.
+
+[2] Fractional parts may be problematic, since many decimal fractions cannot be represented exactly as binary fractions.
+4.1 Notification
+
+A Notification is a Request object without an "id" member. A Request object that is a Notification signifies the Client's lack of interest in the corresponding Response object, and as such no Response object needs to be returned to the client. The Server MUST NOT reply to a Notification, including those that are within a batch request.
+Notifications are not confirmable by definition, since they do not have a Response object to be returned. As such, the Client would not be aware of any errors (like e.g. "Invalid params.", "Internal error.").
+4.2 Parameter Structures
+
+If present, parameters for the rpc call MUST be provided as a Structured value. Either by-position through an Array or by-name through an Object.
+by-position: params MUST be an Array, containing the values in the Server expected order.
+by-name: params MUST be an Object, with member names that match the Server expected parameter names. The absence of expected names MAY result in an error being generated. The names MUST match exactly, including case, to the method's expected parameters.
+5 Response object
+
+When a rpc call is made, the Server MUST reply with a Response, except for in the case of Notifications. The Response is expressed as a single JSON Object, with the following members:
+jsonrpc
+A String specifying the version of the JSON-RPC protocol. MUST be exactly "2.0".
+result
+This member is REQUIRED on success.
+This member MUST NOT exist if there was an error invoking the method.
+The value of this member is determined by the method invoked on the Server.
+error
+This member is REQUIRED on error.
+This member MUST NOT exist if there was no error triggered during invocation.
+The value for this member MUST be an Object as defined in section 5.1.
+id
+This member is REQUIRED.
+It MUST be the same as the value of the id member in the Request Object.
+If there was an error in detecting the id in the Request object (e.g. Parse error/Invalid Request), it MUST be Null.
+Either the result member or error member MUST be included, but both members MUST NOT be included.
+5.1 Error object
+
+When a rpc call encounters an error, the Response Object MUST contain the error member with a value that is a Object with the following members:
+code
+A Number that indicates the error type that occurred.
+This MUST be an integer.
+message
+A String providing a short description of the error.
+The message SHOULD be limited to a concise single sentence.
+data
+A Primitive or Structured value that contains additional information about the error.
+This may be omitted.
+The value of this member is defined by the Server (e.g. detailed error information, nested errors etc.).
+The error codes from and including -32768 to -32000 are reserved for pre-defined errors. Any code within this range, but not defined explicitly below is reserved for future use. The error codes are nearly the same as those suggested for XML-RPC at the following url: http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php
+code              message           meaning
+-32700            Parse error       Invalid JSON was received by the server.
+                                    An error occurred on the server while parsing the JSON text.
+-32600            Invalid Request   The JSON sent is not a valid Request object.
+-32601            Method not found  The method does not exist / is not available.
+-32602            Invalid params    Invalid method parameter(s).
+-32603            Internal error    Internal JSON-RPC error.
+-32099 to -32000  Server error      Reserved for implementation-defined server-errors.
+The remainder of the space is available for application defined errors.
+6 Batch
+
+To send several Request objects at the same time, the Client MAY send an Array filled with Request objects.
+
+The Server should respond with an Array containing the corresponding Response objects, after all of the batch Request objects have been processed. A Response object SHOULD exist for each Request object, except that there SHOULD NOT be any Response objects for notifications. The Server MAY process a batch rpc call as a set of concurrent tasks, processing them in any order and with any width of parallelism.
+
+The Response objects being returned from a batch call MAY be returned in any order within the Array. The Client SHOULD match contexts between the set of Request objects and the resulting set of Response objects based on the id member within each Object.
+
+If the batch rpc call itself fails to be recognized as an valid JSON or as an Array with at least one value, the response from the Server MUST be a single Response object. If there are no Response objects contained within the Response array as it is to be sent to the client, the server MUST NOT return an empty Array and should return nothing at all.
+
+7 Examples
+
+Syntax:
+--> data sent to Server
+<-- data sent to Client
+rpc call with positional parameters:
+--> {"jsonrpc": "2.0", "method": "subtract", "params": [42, 23], "id": 1}
+<-- {"jsonrpc": "2.0", "result": 19, "id": 1}
+
+--> {"jsonrpc": "2.0", "method": "subtract", "params": [23, 42], "id": 2}
+
+<-- {"jsonrpc": "2.0", "result": -19, "id": 2}
+rpc call with named parameters:
+--> {"jsonrpc": "2.0", "method": "subtract", "params": {"subtrahend": 23, "minuend": 42}, "id": 3}
+<-- {"jsonrpc": "2.0", "result": 19, "id": 3}
+--> {"jsonrpc": "2.0", "method": "subtract", "params": {"minuend": 42, "subtrahend": 23}, "id": 4}
+
+<-- {"jsonrpc": "2.0", "result": 19, "id": 4}
+a Notification:
+--> {"jsonrpc": "2.0", "method": "update", "params": [1,2,3,4,5]}
+--> {"jsonrpc": "2.0", "method": "foobar"}
+rpc call of non-existent method:
+--> {"jsonrpc": "2.0", "method": "foobar", "id": "1"}
+<-- {"jsonrpc": "2.0", "error": {"code": -32601, "message": "Procedure not found."}, "id": "1"}
+rpc call with invalid JSON:
+--> {"jsonrpc": "2.0", "method": "foobar, "params": "bar", "baz]
+<-- {"jsonrpc": "2.0", "error": {"code": -32700, "message": "Parse error."}, "id": null}
+rpc call with invalid Request object:
+--> {"jsonrpc": "2.0", "method": 1, "params": "bar"}
+<-- {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request."}, "id": null}
+rpc call Batch, invalid JSON:
+--> [ {"jsonrpc": "2.0", "method": "sum", "params": [1,2,4], "id": "1"},{"jsonrpc": "2.0", "method" ]
+<-- {"jsonrpc": "2.0", "error": {"code": -32700, "message": "Parse error."}, "id": null}
+rpc call with an empty Array:
+
+--> []
+<-- {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request."}, "id": null}
+
+rpc call with an invalid Batch (but not empty):
+--> [1]
+<-- [
+        {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request."}, "id": null}
+    ]
+
+rpc call with invalid Batch:
+--> [1,2,3]
+<-- [
+        {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request."}, "id": null},
+        {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request."}, "id": null},
+        {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request."}, "id": null}
+    ]
+rpc call Batch:
+--> [
+        {"jsonrpc": "2.0", "method": "sum", "params": [1,2,4], "id": "1"},
+        {"jsonrpc": "2.0", "method": "notify_hello", "params": [7]},
+        {"jsonrpc": "2.0", "method": "subtract", "params": [42,23], "id": "2"},
+        {"foo": "boo"},
+        {"jsonrpc": "2.0", "method": "foo.get", "params": {"name": "myself"}, "id": "5"},
+        {"jsonrpc": "2.0", "method": "get_data", "id": "9"} 
+    ]
+<-- [
+        {"jsonrpc": "2.0", "result": 7, "id": "1"},
+        {"jsonrpc": "2.0", "result": 19, "id": "2"},
+        {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request."}, "id": null},
+        {"jsonrpc": "2.0", "error": {"code": -32601, "message": "Method not found."}, id: "5"},
+        {"jsonrpc": "2.0", "result": ["hello", 5], "id": "9"}
+    ]
+rpc call Batch (all notifications):
+--> [
+        {"jsonrpc": "2.0", "method": "notify_sum", "params": [1,2,4]},
+        {"jsonrpc": "2.0", "method": "notify_hello", "params": [7]},
+    ]
+<-- //Nothing is returned for all notification batches
+8 Extensions
+
+Method names that begin with rpc. are reserved for system extensions, and MUST NOT be used for anything else. Each system extension is defined in a related specification. All system extensions are OPTIONAL.
+Copyright (C) 2007-2010 by the JSON-RPC Working Group
+
+This document and translations of it may be used to implement JSON-RPC, it may be copied and furnished to others, and derivative works that comment on or otherwise explain it or assist in its implementation may be prepared, copied, published and distributed, in whole or in part, without restriction of any kind, provided that the above copyright notice and this paragraph are included on all such copies and derivative works. However, this document itself may not be modified in any way.
+
+The limited permissions granted above are perpetual and will not be revoked.
+
+This document and the information contained herein is provided "AS IS" and ALL WARRANTIES, EXPRESS OR IMPLIED are DISCLAIMED, INCLUDING BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
diff --git a/extra/amqp_map/pmap.py b/extra/amqp_map/pmap.py
new file mode 100644
index 0000000..5f08bcd
--- /dev/null
+++ b/extra/amqp_map/pmap.py
@@ -0,0 +1,217 @@
+raise NotImplementedError("This code is still a work in progress")
+
+
+import threading
+import dill as pickle
+from dill import dumps, loads           # used below as bare dumps()/loads()
+from amqplib import client_0_8 as amqp  # amqp.Message is used below
+from .rpc import RPCMixin
+
+def pickle_worker(server):
+    """
+    Client side driver of the map work.
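+
+    A sketch of the message flow, as implemented below: work arrives on the
+    "map.pickle" queue as a pickled dict with keys index, value and mapid;
+    the result is published back to the caller's reply queue as a pickled
+    dict with keys index, result and mapid.  If the worker has not seen a
+    mapid before, it first asks the caller for the pickled function by
+    publishing a dict with keys mapid and sendfunction.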
+    """
+    # Create the exchange and the worker queue
+    channel = server.channel()
+    rpc_channel = server.channel()
+    exchange = "park.map"
+    map_queue = "map.pickle"
+    channel.exchange_declare(exchange=exchange, type="direct",
+                             durable=False, auto_delete=True)
+    channel.queue_declare(queue=map_queue, durable=False,
+                          exclusive=False, auto_delete=True)
+
+    _rpc_queue,_,_ = channel.queue_declare(queue=service,
+                                           durable=False,
+                                           exclusive=True,
+                                           auto_delete=True)
+    channel.queue_bind(queue=_rpc_queue,
+                       exchange="amq.direct",
+                       routing_key=queue)
+
+
+    _cache = {}
+    def _fetch_function(queue, mapid):
+        reply = amqp.Message(dumps(dict(mapid=mapid,
+                                        sendfunction=rpc_queue)))
+        channel.basic_publish(reply, exchange=exchange,
+                              routing_key=queue)
+        def _receive_function(msg):
+            rpc_channel.basic_cancel(tag)
+            body = pickle.loads(msg.body)
+            _cache[body['mapid']] = pickle.loads(body['function'])
+
+        tag = channel.basic_consume(queue=queue,
+                                    callback=_receive_function,
+                                    no_ack=False)
+        rpc_channel.wait() # Wait for function id
+
+    def _process_work(msg):
+        # Check for sentinel
+        if msg.reply_to == "": channel.basic_cancel(consumer)
+
+        body = pickle.loads(msg.body)
+        mapid = body['mapid']
+        if mapid not in _cache:
+            _fetch_function(msg.reply_to, mapid)
+        function = _cache[mapid]
+        if function is None:
+            channel.basic_ack(msg.delivery_tag)
+            return
+
+        # Acknowledge delivery of message
+        #print "processing...",body['index'],body['value']
+        try:
+            result = function(body['value'])
+        except:
+            result = None
+        #print "done"
+        channel.basic_ack(msg.delivery_tag)
+        reply = dict(index=body['index'], result=result, mapid=mapid)
+        replymsg = amqp.Message(pickle.dumps(reply))
+        channel.basic_publish(replymsg, exchange=exchange,
+                              routing_key=msg.reply_to)
+    #channel.basic_qos(prefetch_size=0, prefetch_count=1, a_global=False)
+    consumer = channel.basic_consume(queue=map_queue, callback=_process_work,
+                                     no_ack=False)
+    while True:
+        channel.wait()
+
+
+
+class PickleMapper(RPCMixin, object):
+    def server(self, server):
+        # Create the exchange and the worker and reply queues
+        channel = server.channel()
+        exchange = "park.map"
+        channel.exchange_declare(exchange=exchange, type="direct",
+                                 durable=False, auto_delete=True)
+
+        map_channel = channel
+        map_queue = "map.pickle"
+        map_channel.queue_declare(queue=map_queue, durable=False,
+                                  exclusive=False, auto_delete=True)
+        map_channel.queue_bind(queue=map_queue, exchange="park.map",
+                               routing_key = map_queue)
+
+        reply_channel = server.channel()
+        #reply_queue = ".".join(("reply",mapid)) # Fixed Queue name
+        reply_queue = "" # Let amqp create a temporary queue for us
+        reply_queue,_,_ = reply_channel.queue_declare(queue=reply_queue,
+                                                      durable=False,
+                                                      exclusive=True,
+                                                      auto_delete=True)
+        reply_channel.queue_bind(queue=reply_queue, exchange="park.map",
+                                 routing_key = reply_queue)
+        reply_channel.basic_consume(queue=reply_queue,
+                                    callback=self._process_result,
+                                    no_ack=True)
+        self.exchange = exchange
+        self.map_queue = map_queue
+        self.map_channel = map_channel
+        self.reply_queue = reply_queue
+        self.reply_channel = reply_channel
+
+        ## USE_LOCKS_TO_THROTTLE
+        self._throttle = threading.Condition()
+
+        ## Start the rpc server
+        self.rpc_init(server, service, provides=("map_function",))
+        self.rpc_daemon()
+
+    def close(self):
+        self.channel.close()
+
+    def _process_result(self, msg):
+        self._reply = loads(msg.body)
+        #print "received result",self._reply['index'],self._reply['result']
+    @daemon
+    def _send_map(self, items, mapid):
+        for i,v in enumerate(items):
+            self.num_queued = i
+            #print "queuing %d %s"%(i,v)
+
+            ## USE_LOCKS_TO_THROTTLE
+            if  self.num_queued - self.num_processed > config.MAX_QUEUE:
+                #print "sleeping at %d in %d out"%(i,self.num_processed)
+                self._throttle.acquire()
+                self._throttle.wait()
+                self._throttle.release()
+                #print "waking at %d in %d out"%(i,self.num_processed)
+
+            # USE_SLEEP_TO_THROTTLE
+            #sleep_time = 0.2
+            #while i - self.num_processed > config.MAX_QUEUE:
+            #    #print "sleeping %g with in=%d out=%d"%(sleep_time,self.num_queued,self.num_processed)
+            #    time.sleep(sleep_time)
+            #    sleep_time = min(2*sleep_time, 600)
+
+            body = dumps(dict(index=i,value=v,mapid=mapid))
+            msg = amqp.Message(body, reply_to=self.reply_queue, delivery_mode=1)
+            self.map_channel.basic_publish(msg, exchange=self.exchange,
+                                           routing_key=self.map_queue)
+
+    def _send_function(self, function_str, destination):
+        msg = amqp.Message(function_str, delivery_mode=1)
+        self.map_channel.basic_publish(msg,
+                                       exchange=self.exchange,
+                                       routing_key=destination)
+    def cancel(self):
+        """
+        Stop a running map.
+        """
+        raise NotImplementedError()
+        # Need to clear the queued items and notify async that no more results.
+        # Messages in transit need to be ignored, which probably means tagging
+        # each map header with a call number so that previous calls don't
+        # get confused with current calls.
+        self.reply_channel.basic_publish(msg)
+
+    def async(self, fn, items):
+        function_str = dumps(fn)
+        current_map = md5sum(function_str)
+        items = list(items) # make it indexable
+        self.num_items = len(items)
+        # Queue items in separate thread so we can start receiving results
+        # before all items are even queued
+        self.num_processed = 0
+        publisher = self._send_map(items, mapid = current_map)
+        received = set()
+        for i in items:
+            while True:
+                self.reply_channel.wait()
+                mapid = self._reply['mapid']
+                if 'sendfunction' in self._reply:
+                    destination = self._reply['sendfunction']
+                    if mapid == current_map:
+                        content = function_str
+                    else:
+                        content = ""
+                    self._send_function(content, destination)
+                elif 'result' in self._reply:
+                    idx = self._reply['index']
+                    if mapid == current_map:
+                        if idx not in received:
+                            received.add(idx) # Track responses
+                            break
+                        else:
+                            pass # Ignore duplicates
+                    else:
+                        pass # ignore late responders
+                else:
+                    print("ignoring unexpected message")
+            result = self._reply['result']
+            #print "received %d %g"%(idx,result)
+            self.num_processed = i
+
+            ## USE_LOCKS_TO_THROTTLE
+            if self.num_queued - self.num_processed < config.MAX_QUEUE - 10:
+                # Ten at a time go through for slow processes
+                self._throttle.acquire()
+                self._throttle.notify()
+                self._throttle.release()
+
+            yield idx,result
+        publisher.join()
+    def __call__(self, fn, items):
+        result = list(self.async(fn, items))
+        result.sort(key=lambda pair: pair[0])  # order results by item index
+        return list(zip(*result))[1]
diff --git a/extra/amqp_map/rpc.py b/extra/amqp_map/rpc.py
new file mode 100644
index 0000000..9fe19d7
--- /dev/null
+++ b/extra/amqp_map/rpc.py
@@ -0,0 +1,163 @@
+import threading
+from pickle import loads, dumps
+
+from amqplib import client_0_8 as amqp
+
+from .threaded import daemon
+
+# TODO: create rpc exchange to avoid accidental collisions from wrong names
+
+class RPCMixin:
+    """
+    Add RPC capabilities to a class
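+
+    A minimal usage sketch (the class, service name, and connection setup
+    are illustrative, not part of this module)::
+
+        class Worker(RPCMixin):
+            def echo(self, value):
+                return value
+
+        worker = Worker()
+        worker.rpc_init(server, service="worker.echo")  # server: amqplib connection
+        worker.rpc_daemon()   # answer requests from a background thread
+
+        # A client with its own RPCMixin (or the RPC helper below) can then call
+        #   result = client.rpc("worker.echo", "echo", 42)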
+    """
+    class RemoteException(Exception): pass
+    def rpc_init(self, server, service="", provides=None):
+        channel = server.channel()
+        queue,_,_ = channel.queue_declare(queue=service,
+                                          durable=False,
+                                          exclusive=True,
+                                          auto_delete=True)
+        channel.queue_bind(queue=queue,
+                           exchange="amq.direct",
+                           routing_key=queue)
+        channel.basic_consume(queue=queue,
+                              callback=self._rpc_process,
+                              no_ack=False)
+        self._rpc_queue = queue
+        self._rpc_channel = channel
+        self._rpc_id = 0
+        self._rpc_provides = provides
+        self._rpc_sync = threading.Condition()
+        self._rpc_results = {}
+    @daemon
+    def rpc_daemon(self):
+        self.rpc_serve()
+    def rpc_serve(self):
+        while True:
+            #print "waiting on channel"
+            self._rpc_channel.wait()
+        self._rpc_channel.close()
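+
+    # Client-side entry points: rpc() blocks until the result arrives,
+    # rpc_async() returns a callable that blocks when invoked, and
+    # rpc_send() is fire-and-forget, returning the message id of the request.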
+    def rpc(self, service, method, *args, **kw):
+        self._rpc_send_call(service, ("call", method, args, kw))
+        return self.rpc_wait(str(self._rpc_id))
+
+    def rpc_async(self, service, method, *args, **kw):
+        self._rpc_send_call(service, ("call", method, args, kw))
+        return lambda: self.rpc_wait(str(self._rpc_id))
+
+    def rpc_send(self, service, method, *args, **kw):
+        self._rpc_send_call(service, ("send", method, args, kw))
+        return str(self._rpc_id)
+
+    def rpc_wait(self, rpc_id):
+        # TODO: add timeout
+        while rpc_id not in self._rpc_results:
+            #print "wait results",self._rpc_results
+            self._rpc_sync.acquire()
+            self._rpc_sync.wait()
+            self._rpc_sync.release()
+        result = self._rpc_results.pop(rpc_id)
+        if isinstance(result,Exception):
+            raise result
+        return result
+
+    # Send messages
+    def _rpc_send_call(self, service, parts):
+        self._rpc_id += 1
+        msg = amqp.Message(body=dumps(parts),
+                           reply_to=self._rpc_queue,
+                           message_id = str(self._rpc_id))
+        self._rpc_channel.basic_publish(msg,
+                                        exchange="amq.direct",
+                                        routing_key=service)
+    def _rpc_send_response(self, msg, result):
+        #print "responding to",msg.reply_to,msg.message_id,"with",result
+        resp = amqp.Message(body=dumps(("response",result)),
+                           message_id=msg.message_id)
+        self._rpc_channel.basic_publish(resp,
+                                        exchange="amq.direct",
+                                        routing_key=msg.reply_to)
+    def _rpc_send_error(self, msg, str):
+        resp = amqp.Message(body=dumps(("error",str)),
+                           message_id=msg.message_id)
+        self._rpc_channel.basic_publish(resp,
+                                        exchange="amq.direct",
+                                        routing_key=msg.reply_to)
+
+    # Receive messages
+    def _rpc_process(self, msg):
+        try:
+            parts = loads(msg.body)
+            #print "process",parts
+            # TODO: how do you use message headers properly?
+            if parts[0] == "send":
+                self._rpc_recv_send(msg, *parts[1:])
+            elif parts[0] == "call":
+                self._rpc_recv_call(msg, *parts[1:])
+            elif parts[0] == "response":
+                self._rpc_recv_response(msg, *parts[1:])
+            elif parts[0] == "error":
+                self._rpc_recv_error(msg, *parts[1:])
+        except:
+            raise
+            return self._rpc_send_error(msg, "Invalid message")
+
+    def _rpc_recv_call(self, msg, method, args, kw):
+        if not self._rpc_valid_method(method):
+            return self._rpc_send_error(msg, "Invalid method")
+        fn = getattr(self, method)
+        try:
+            result = fn(*args, **kw)
+        except:
+            return self._rpc_error(msg, "Invalid arguments")
+        return self._rpc_send_response(msg, result)
+
+    def _rpc_recv_send(self, msg, method, args, kw):
+        # TODO: silently ignore errors?
+        if not self._rpc_valid_method(method):
+            return
+        fn = getattr(self, method)
+        try:
+            fn(*args, **kw)
+        except:
+            pass
+
+    def _rpc_recv_response(self, msg, result):
+        self._rpc_results[msg.message_id] = result
+        self._rpc_sync.acquire()
+        self._rpc_sync.notify()
+        self._rpc_sync.release()
+
+    def _rpc_recv_error(self, msg, str):
+        self._rpc_results[msg.message_id] = self.RemoteException(str)
+        self._rpc_sync.acquire()
+        self._rpc_sync.notify()
+        self._rpc_sync.release()
+
+    def _rpc_valid_method(self, method):
+        if self._rpc_provides: return method in self._rpc_provides
+        if method.startswith('_') or method.startswith('rpc'): return False
+        if not hasattr(self, method): return False
+        return callable(getattr(self,method))
+
+class RPC(object):
+    """
+    Connection to an rpc server as AMQP exchange.  Attributes are the
+    names of rpc service queues.  Accessing an attribute creates a proxy.
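+
+    A usage sketch (the connection parameters and service/method names are
+    illustrative)::
+
+        server = amqp.Connection(host="localhost:5672",
+                                 userid="guest", password="guest")
+        rpc = RPC(server)
+        result = rpc.some_service.some_method(1, 2)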
+    """
+    def __init__(self, server):
+        self._rpc = RPCMixin()
+        self._rpc.rpc_init(server)
+        self._rpc.rpc_daemon()
+    def __getattr__(self, service):
+        return RPCProxy(self._rpc, service)
+class RPCProxy(object):
+    """
+    Proxy to an AMQP exchange rpc service.
+    """
+    def __init__(self, connection, service):
+        self._rpc = connection
+        self._service = service
+    def __getattr__(self, method):
+        return lambda *args, **kw: self._rpc.rpc(self._service, method, *args, **kw)
diff --git a/extra/amqp_map/threaded.py b/extra/amqp_map/threaded.py
new file mode 100755
index 0000000..1df6189
--- /dev/null
+++ b/extra/amqp_map/threaded.py
@@ -0,0 +1,165 @@
+# This program is public domain
+# Author: Paul Kienzle
+"""
+Thread and daemon decorators.
+
+See :func:`threaded` and :func:`daemon` for details.
+"""
+
+from functools import wraps
+import itertools
+import threading
+
+#TODO: fix race conditions
+# notify may be called twice in after()
+# 1. main program calls fn() which starts the processing and returns job
+# 2. main program calls job.after(notify)
+# 3. after() suspends when __after is set but before __stopped is checked
+# 4. thread ends, setting __stopped and calling __after(result)
+# 5. main resumes, calling __after(result) since __stopped is now set
+# solution is to use thread locks when testing/setting __after.
+_after_lock = threading.Lock()
+class AfterThread(threading.Thread):
+    """
+    Thread class with additional 'after' capability which runs a function
+    after the thread is complete.  This allows us to separate the notification
+    from the computation.
+
+    Unlike Thread.join, the wait() method returns the value of the computation.
+    """
+    name = property(threading.Thread.getName,
+                    threading.Thread.setName,
+                    doc="Thread name")
+    def __init__(self, *args, **kwargs):
+        self.__result = None
+        self.__after = kwargs.pop('after',None)
+        threading.Thread.__init__(self, *args, **kwargs)
+
+    def after(self, notify=None):
+        """
+        Calls notify after the thread is complete.  Notify should
+        take a single argument which is the result of the function.
+
+        Note that notify will be called from the main thread if the
+        thread is already complete when thread.after(notify) is called,
+        otherwise it will be called from thread.
+        """
+        _after_lock.acquire()
+        self.__after = notify
+        # Run immediately if thread is already complete
+        if self._Thread__started and self._Thread__stopped:
+            post = notify
+        else:
+            post = lambda x: x
+        _after_lock.release()
+        post(self.__result)
+
+    def run(self):
+        """
+        Run the thread followed by the after function if any.
+        """
+        if self._Thread__target:
+            self.__result = self._Thread__target(*self._Thread__args,
+                                                 **self._Thread__kwargs)
+            _after_lock.acquire()
+            if self.__after is not None:
+                post = self.__after
+            else:
+                post = lambda x: x
+            _after_lock.release()
+            post(self.__result)
+
+    def wait(self, timeout=None):
+        """
+        Wait for the thread to complete.
+
+        Returns the result of the computation.
+
+        Example::
+
+            result = thread.wait()
+
+        If timeout is used, then wait() may return before the result is
+        available.  In this case, wait() will return None.  This can be
+        used as follows::
+
+            while True:
+                result = thread.wait(timeout=0)
+                if result is not None: break
+                ... do something else while waiting ...
+
+        Timeout should not be used with functions that may return None.
+        This is due to the race condition in which the thread completes
+        between the timeout triggering in wait() and the main thread
+        calling thread.isAlive().
+        """
+        self.join(timeout)
+        return self.__result
+
+def threaded(fn):
+    """
+    @threaded decorator for functions to be run in a thread.
+
+    Returns the running thread.
+
+    The returned thread supports the following methods::
+
+        wait(timeout=None)
+            Waits for the function to complete.
+            Returns the result of the function if the thread is joined,
+            or None if timeout.  Use thread.isAlive() to test for timeout.
+        after(notify)
+            Calls notify after the thread is complete.  Notify should
+            take a single argument which is the result of the function.
+        isAlive()
+            Returns True if thread is still running.
+        name
+            Thread name property.  By default the name is 'fn-#' where fn
+            is the function name and # is the number of times the thread
+            has been invoked.
+
+    For example::
+
+        @threaded
+        def compute(self,input):
+            ...
+        def onComputeButton(self,evt):
+            thread = self.compute(self.input.GetValue())
+            thread.after(lambda result: wx.Post(self.win,wx.EVT_PAINT))
+
+    A threaded function can also be invoked directly in the current thread::
+
+        result = self.compute.main(self.input.GetValue())
+
+    All threads must complete before the program can exit.  For queue
+    processing threads which stay alive, continuously waiting for
+    new input, use the @daemon decorator instead.
+    """
+    instance = itertools.count(1)
+    @wraps(fn)
+    def wrapper(*args, **kw):
+        name = "%s-%d"%(fn.__name__,next(instance))
+        thread = AfterThread(target=fn,args=args,kwargs=kw,name=name)
+        thread.start()
+        return thread
+    wrapper.main = fn
+    return wrapper
+
+def daemon(fn):
+    """
+    @daemon decorator for functions to be run in a thread.
+
+    Returns the running thread.
+
+    Unlike threaded functions, daemon functions are not expected to complete.
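+
+    A minimal sketch of typical use (the worker body and queue are
+    illustrative)::
+
+        @daemon
+        def listen(queue):
+            while True:
+                handle(queue.get())
+
+        thread = listen(work_queue)  # returns immediately; thread dies with the program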
+    """
+    instance_counter = itertools.count(1)
+    @wraps(fn)
+    def wrapper(*args, **kw):
+        name = "%s-%d"%(fn.__name__,next(instance_counter))
+        thread = threading.Thread(target=fn,args=args,kwargs=kw,name=name)
+        thread.setDaemon(True)
+        thread.start()
+        return thread
+    wrapper.main = fn
+    return wrapper
diff --git a/extra/amqp_map/url.py b/extra/amqp_map/url.py
new file mode 100644
index 0000000..0692d2a
--- /dev/null
+++ b/extra/amqp_map/url.py
@@ -0,0 +1,139 @@
+# This program is public domain
+"""
+Parse URLs
+"""
+
+from six.moves.urllib import parse
+
+class URL(object):
+    """
+    Parse a universal resource locator
+
+        protocol://user:password@host:port/path?p1=v1&p2=v2
+
+    :Parameters:
+        *url*  : string
+            URL to be parsed
+        *host* = 'localhost' : string
+            Default host
+        *user*, *password*, *protocol*, *path* = '' : string
+            Default values for user, password, protocol and path
+        *parameters* = []: [ (string,string ), ... ]
+            Default key,value pairs for POST queries.
+        *port* = 0 : integer
+            Default port
+
+    :Returns:
+        *url* : URL
+            The return URL, with attributes for the pieces
+
+    :Raises:
+        *ValueError* : Not a valid protocol
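+
+    :Example:
+        >>> u = URL('amqp://guest:guest@localhost:5672/park')
+        >>> u.host, u.port, u.path
+        ('localhost', 5672, 'park')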
+    """
+    def __init__(self, url, user='', password='', host='localhost',
+                 protocol='', path='', port=0, parameters=[]):
+        errmsg = "".join( ("Invalid url <",url,">") )
+
+        # chop protocol
+        pieces = url.split('://')
+        if len(pieces) == 1:
+            url = pieces[0]
+        elif len(pieces) == 2:
+            protocol = pieces[0]
+            url = pieces[1]
+        else:
+            raise ValueError(errmsg)
+
+        pos = url.find('/')
+        if pos < 0:
+            server = url
+        else:
+            server = url[:pos]
+            path = url[pos+1:]
+
+        if '@' in server:
+            user,server = server.split('@')
+            if ':' in user:
+                user,password = user.split(':')
+        if ':' in server:
+            server,port = server.split(':')
+            port = int(port)
+        if server != '': host = server
+
+        if '?' in path:
+            path, pars = path.split('?')
+            parameters = [pair.split('=') for pair in pars.split('&')]
+            if any(len(pair) > 2 for pair in parameters):
+                raise ValueError(errmsg)
+            parameters = [[parse.unquote_plus(p) for p in pair]
+                          for pair in parameters]
+        self.protocol = protocol
+        self.user = user
+        self.password = password
+        self.host = host
+        self.port = port
+        self.path = parse.unquote_plus(path)
+        self.parameters = parameters[:]
+
+    def __str__(self):
+        result = []
+        if self.protocol:
+            result.extend( (self.protocol,'://') )
+        if self.password:
+            result.extend( (self.user,':',self.password,'@') )
+        elif self.user:
+            result.extend( (self.user,'@') )
+        if self.host:
+            result.extend( (self.host,) )
+        if self.port:
+            result.extend( (':', str(self.port)) )
+        if self.path or len(self.parameters) > 0:
+            result.extend( ('/', parse.quote_plus(self.path)) )
+        if len(self.parameters) > 0:
+            pars = '&'.join('='.join(parse.quote_plus(p) for p in parts)
+                            for parts in self.parameters)
+            result.extend( ('?', pars) )
+        return ''.join(result)
+
+
+def test():
+    h = URL('a')
+    assert h.user=='' and h.password=='' and h.host=='a' and h.port==0
+    h = URL('a:4')
+    assert h.user=='' and h.password=='' and h.host=='a' and h.port==4
+    h = URL('u@a')
+    assert h.user=='u' and h.password=='' and h.host=='a' and h.port==0
+    h = URL('u:p@a')
+    assert h.user=='u' and h.password=='p' and h.host=='a' and h.port==0
+    h = URL('u@a:4')
+    assert h.user=='u' and h.password=='' and h.host=='a' and h.port==4
+    h = URL('u:p@a:4')
+    assert h.user=='u' and h.password=='p' and h.host=='a' and h.port==4
+    h = URL('')
+    assert h.user=='' and h.password=='' and h.host=='localhost' and h.port==0
+    h = URL('u@')
+    assert h.user=='u' and h.password=='' and h.host=='localhost' and h.port==0
+    h = URL('u@:4')
+    assert h.user=='u' and h.password=='' and h.host=='localhost' and h.port==4
+    h = URL('u:p@')
+    assert h.user=='u' and h.password=='p' and h.host=='localhost' and h.port==0
+    h = URL('u:p@:4')
+    assert h.user=='u' and h.password=='p' and h.host=='localhost' and h.port==4
+    h = URL('proto://u:p@:4')
+    assert (h.protocol=='proto' and h.user=='u' and h.password=='p'
+            and h.host=='localhost' and h.port==4)
+    h = URL('proto://u:p@:4/')
+    assert (h.protocol=='proto' and h.user=='u' and h.password=='p'
+            and h.host=='localhost' and h.port==4 and h.path == '')
+    h = URL('proto://u:p@:4/%7econnolly')
+    assert (h.protocol=='proto' and h.user=='u' and h.password=='p'
+            and h.host=='localhost' and h.port==4 and h.path == '~connolly')
+    h = URL('proto://u:p@:4/%7econnolly?this&that=other')
+    assert (h.protocol=='proto' and h.user=='u' and h.password=='p'
+            and h.host=='localhost' and h.port==4 and h.path == '~connolly')
+    assert (h.parameters[0][0] == 'this'
+            and h.parameters[1][0] == 'that'
+            and h.parameters[1][1] == 'other')
+    assert str(h) == 'proto://u:p@localhost:4/%7Econnolly?this&that=other'
+
+if __name__ == "__main__": test()
diff --git a/extra/appbin/bumps b/extra/appbin/bumps
new file mode 100755
index 0000000..4792916
--- /dev/null
+++ b/extra/appbin/bumps
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+# Find directory of the original file, following symlinks.
+# The ${parameter%/*} expansion strips the filename portion.
+LINK=`readlink "${0}"`
+if [ -z "${LINK}" ]; then BASE="${0%/*}"; else BASE="${LINK%/*}"; fi
+ROOT=`(cd "${BASE}" 2>/dev/null && pwd)`
+echo LINK: ${LINK}
+echo BASE: ${BASE}
+echo ROOT: ${ROOT}
+
+PYTHON="$ROOT/Contents/MacOS/python"
+PYTHONHOME="$ROOT/Contents/Resources"
+PYTHONPATH="$ROOT/Contents/Resources"
+DYLD_FRAMEWORK_PATH="$ROOT/Contents/Frameworks"
+DYLD_LIBRARY_PATH="$ROOT/Contents/Frameworks"
+export PYTHONHOME PYTHONPATH DYLD_FRAMEWORK_PATH DYLD_LIBRARY_PATH
+DYLD_INSERT_LIBRARIES="$ROOT/Contents/Frameworks/libpython2.7.dylib" \
+exec "$PYTHON" -m bumps.cli "$@"
diff --git a/extra/appbin/ipython b/extra/appbin/ipython
new file mode 100755
index 0000000..d9b1186
--- /dev/null
+++ b/extra/appbin/ipython
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+# Find directory of the original file, following symlinks.
+# The ${parameter%/*} expansion strips the filename portion.
+LINK=`readlink "${0}"`
+if [ -z "${LINK}" ]; then BASE="${0%/*}"; else BASE="${LINK%/*}"; fi
+ROOT=`(cd "${BASE}" 2>/dev/null && pwd)`
+#echo LINK: ${LINK}
+#echo BASE: ${BASE}
+#echo ROOT: ${ROOT}
+
+PYTHON="$ROOT/Contents/MacOS/python"
+PYTHONHOME="$ROOT/Contents/Resources"
+PYTHONPATH="$ROOT/Contents/Resources"
+DYLD_FRAMEWORK_PATH="$ROOT/Contents/Frameworks"
+DYLD_LIBRARY_PATH="$ROOT/Contents/Frameworks"
+export PYTHONHOME PYTHONPATH DYLD_FRAMEWORK_PATH DYLD_LIBRARY_PATH
+
+cmd="from IPython.frontend.terminal.ipapp import launch_new_instance as _; _()"
+#cmd="import IPython.Shell; IPython.Shell.start().mainloop()"
+
+exec "$PYTHON" -c "$cmd"
diff --git a/extra/bumps.icns b/extra/bumps.icns
new file mode 100644
index 0000000..57d5a8f
Binary files /dev/null and b/extra/bumps.icns differ
diff --git a/extra/dmgpack.sh b/extra/dmgpack.sh
new file mode 100755
index 0000000..5ae874c
--- /dev/null
+++ b/extra/dmgpack.sh
@@ -0,0 +1,65 @@
+#! /bin/bash
+
+# dmgpack volume [file|dir]+
+#
+#    Copy a group of files/directories to a compressed disk image.
+#    The resulting image is stored in volume.dmg.
+#
+#    Files are copied with 'ditto' to preserve resource forks.  For
+#    convenience we also call FixupResourceForks after copying.  This
+#    allows you to use /Developer/Tools/SplitFork on your tree and 
+#    manipulate it with CVS, tar, etc.  Don't forget the -kb option 
+#    when adding or committing app and ._app files to CVS!
+#
+#    This command will fail if a volume of the given name is already
+#    mounted.  It could also fail if the size of the resource forks
+#    is large compared to the size of the data forks. Change the
+#    scale factor internally from 11/10 to a more appropriate number
+#    if it complains it is running out of space.
+#
+#    It is possible to add a license agreement to a dmg file.  See
+#    the "Software License Agreements for UDIFs" sdk available at
+#    http://developer.apple.com/sdk/index.html
+
+test $# -lt 2 && echo "usage: $0 diskname [file|dir]+" && exit 1
+#set -x
+NAME="${1%.dmg}" ; shift
+DISK=/tmp/dmgpack$$.dmg
+COMPRESSED="$NAME.dmg"
+VOLUME="$NAME"
+
+# compute needed image size; scale it by 10%
+SIZE=$(du -ck "$@" | tail -1 | sed -e 's/ *total//')
+SIZE=$(echo $SIZE*11/10 | bc)
+test $SIZE -lt 4200 && SIZE=4200
+
+# create the disk
+rm -f $DISK
+hdiutil create -size ${SIZE}k $DISK -layout NONE
+
+# create a file system on the disk; last line of output is
+# the device on which the disk was attached.
+DEVICE=$(hdiutil attach $DISK -nomount | tail -1)
+newfs_hfs -v "$VOLUME" $DEVICE
+
+# mount the file system
+mkdir $DISK-mount
+mount -t hfs $DEVICE $DISK-mount || (echo "mount $DISK-mount failed" && exit 1)
+
+# copy stuff to the disk and fixup resource forks
+for f in "$@"; do 
+    f=${f%/}		;# strip trailing /
+    dest="$DISK-mount/${f##*/}"
+    ditto -rsrc "$f" "$dest" 
+    test -d "$f" && /System/Library/CoreServices/FixupResourceForks "$dest"
+done
+
+# eject the disk
+umount $DISK-mount
+rmdir $DISK-mount
+hdiutil eject $DEVICE
+
+# compress the disk and make it read only
+rm -f "$COMPRESSED"
+hdiutil convert -format UDZO $DISK -imagekey zlib-level=9 -o "$COMPRESSED"
+rm -f $DISK
diff --git a/extra/dream_examples/anticor.py b/extra/dream_examples/anticor.py
new file mode 100755
index 0000000..4415a73
--- /dev/null
+++ b/extra/dream_examples/anticor.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+"""
+Example model with strong correlations between the fitted parameters.
+
+We use a*x = y + N(0,1) made complicated by defining a=p1+p2.
+
+The expected distribution for p1 and p2 will be uniform, with p2 = a-p1 in
+each sample.  Because this distribution is inherently unbounded, artificial
+bounds are required on at least one of the parameters for finite duration
+simulations.
+
+The expected distribution for p1+p2 can be determined from the linear model
+y = a*x.  This is reported along with the values estimated from MCMC.
+"""
+from __future__ import print_function
+
+from pylab import *  # Numeric functions and plotting
+from dream import *  # sampler functions
+
+# Create the correlation function and generate some fake data
+x = linspace(-1., 1, 40)
+fn = lambda p: sum(p)*x
+bounds=(-20,-inf),(40,inf)
+sigma = 1
+data = fn((1,1)) + randn(*x.shape)*sigma  # Fake data
+
+
+# Sample from the posterior density function
+n=2
+model = Simulation(f=fn, data=data, sigma=sigma, bounds=bounds,
+                   labels=["x","y"])
+sampler = Dream(model=model,
+                population=randn(5*n,4,n),
+                thinning=1,
+                draws=20000,
+                )
+mc = sampler.sample()
+mc.title = 'Strong anti-correlation'
+
+# Create a derived parameter without the correlation
+mc.derive_vars(lambda p: (p[0]+p[1]), labels=['x+y'])
+
+# Compare the MCMC estimate for the derived parameter to a least squares fit
+from bumps.wsolve import wpolyfit
+poly = wpolyfit(x,data,degree=1,origin=True)
+print("x+y from linear fit", poly.coeff[0], poly.std[0])
+points,logp = mc.sample(portion=0.5)
+print("x+y from MCMC",mean(points[:,2]), std(points[:,2],ddof=1))
+
+# Plot the samples
+plot_all(mc, portion=0.5)
+show()
diff --git a/extra/dream_examples/banana.m b/extra/dream_examples/banana.m
new file mode 100644
index 0000000..bd9f21d
--- /dev/null
+++ b/extra/dream_examples/banana.m
@@ -0,0 +1,51 @@
+MCMCPar = struct()
+Extra = struct()
+Measurement = struct()
+ParRange = struct()
+
+MCMCPar.n = 10;                         # Dimension of the problem (Nr. parameters to be optimized in the model)
+MCMCPar.seq = MCMCPar.n;                # Number of Markov Chains / sequences
+MCMCPar.DEpairs = 3;                    # Number of chain pairs to generate candidate points
+MCMCPar.Gamma = 0;                      # Kurtosis parameter Bayesian Inference Scheme
+MCMCPar.nCR = 3;                        # Crossover values used to generate proposals (geometric series)
+MCMCPar.ndraw = 10000;                  # Maximum number of function evaluations
+MCMCPar.steps = 10;                     # Number of steps
+MCMCPar.eps = 5e-2;                     # Random error for ergodicity
+MCMCPar.outlierTest = 'IQR_test';       # What kind of test to detect outlier chains?
+
+# -----------------------------------------------------------------------------------------------------------------------
+Extra.pCR = 'Update';                   # Adaptive tuning of crossover values
+# -----------------------------------------------------------------------------------------------------------------------
+
+# --------------------------------------- Added for reduced sample storage ----------------------------------------------
+Extra.reduced_sample_collection = 'Yes';# Thinned sample collection?
+Extra.T = 10;                           # Every Tth sample is collected
+# -----------------------------------------------------------------------------------------------------------------------
+
+# Define the specific properties of the banana function
+Extra.mu   = zeros([1,MCMCPar.n]);                      # Center of the banana function
+Extra.cmat = eye(MCMCPar.n); Extra.cmat[0,0] = 100;     # Target covariance
+Extra.imat = inv(Extra.cmat);                           # Inverse of target covariance
+Extra.bpar = 0.1;                                       # "bananity" of the target, see bananafun.m
+
+# What type of initial sampling
+Extra.InitPopulation = 'COV_BASED';
+# Provide information to do alternative sampling
+Extra.muX = Extra.mu;                                   # Provide mean of initial sample
+Extra.qcov = eye(MCMCPar.n) * 5;                        # Initial covariance
+# Save all information in memory or not?
+Extra.save_in_memory = 'No';
+
+# Give the parameter ranges (minimum and maximum values)
+ParRange.minn = -Inf * ones([1,MCMCPar.n]); ParRange.maxn = Inf * ones([1,MCMCPar.n]);
+
+# Define the boundary handling
+Extra.BoundHandling = 'None';
+# Define data structures for use in computation of posterior density
+Measurement.MeasData = []; Measurement.Sigma = []; Measurement.N = 0;
+# Define modelName
+ModelName = 'Banshp';
+# Define likelihood function
+option = 4;
+
+[Sequences,Reduced_Seq,X,output,hist_logp] = dream(MCMCPar,ParRange,Measurement,ModelName,Extra,option)
diff --git a/extra/dream_examples/banana.py b/extra/dream_examples/banana.py
new file mode 100755
index 0000000..e8730dc
--- /dev/null
+++ b/extra/dream_examples/banana.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+"""
+The Rosenbrock banana function
+"""
+from dream import *
+from pylab import *
+
+def rosen(x):
+    x = asarray(x)
+    return sum(100.0*(x[1:]-x[:-1]**2)**2 + (1-x[:-1])**2)
+
+n=6
+sampler = Dream(model=LogDensity(rosen),
+                population=randn(2*n,5,n),
+                thinning=1,
+                draws=25000,
+                burn=10000,
+                #DE_snooker_rate=0,
+                #cycles=3,
+                )
+
+state = sampler.sample()
+state.mark_outliers()
+state.title = "Banana function example"
+#plot_corr(state); show()
+plot_all(state)
+show()
diff --git a/extra/dream_examples/mixture.py b/extra/dream_examples/mixture.py
new file mode 100755
index 0000000..0f7a151
--- /dev/null
+++ b/extra/dream_examples/mixture.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+"""
+Multivariate gaussian mixture model.
+
+Demonstrates bimodal sampling from a multidimensional space.
+
+Change the relative weights of the modes to see how effectively DREAM samples
+from lower probability regions.  In particular, with low *w2*, long chains
+(thinning*generations*cycles) and small population, the second mode can
+get lost.
+"""
+from pylab import *
+from dream import *
+
+n = 4
+pop = 10
+w1,w2 = 5,3
+mu1 = -5 * ones(n)
+mu2 = 5 * ones(n)
+#mu2[0] = -3  # Watch marginal distribution for p1 overlap between modes
+sigma = eye(n)
+model = Mixture(MVNormal(mu1,sigma), w1, MVNormal(mu2,sigma), w2)
+#model = MVNormal(zeros(n),sigma)
+
+# TODO: with large number of samples, the 1/6 weight peak is lost
+sampler = Dream(model=model, population=randn(pop,n,n),
+                #use_delayed_rejection=False,
+                #outlier_test='IQR',
+                thinning=1, draws=20000)
+state = sampler.sample()
+save_state(filename='mixture',state=state)
+state = load_state('mixture')
+plot_all(state, portion=1)
+show()
diff --git a/extra/dream_examples/mixture2.py b/extra/dream_examples/mixture2.py
new file mode 100755
index 0000000..7058c81
--- /dev/null
+++ b/extra/dream_examples/mixture2.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+"""
+Multimodal demonstration using gaussian mixture model.
+
+The model is a mixture model representing the probability density from a
+product of gaussians.
+
+This example show performance of the algorithm on multimodal densities,
+with adjustable number of densities and degree of separation.
+
+The peaks are distributed about the x-y plane so that the marginal densities
+in x and y are spaced every 2 units using latin hypercube sampling.  For small
+peak widths, this means that the densities will not overlap, and the marginal
+maximum likelihood for a given x or y value should match the estimated density.
+With overlap, the marginal density will over estimate the marginal maximum
+likelihood.
+
+Adjust the width of the peaks, *S*, to see the effect of relative diameter of
+the modes on sampling.  Adjust the height of the peaks, *I*, to see the
+effects of the relative height of the modes.  Adjust the count *n* to see
+the effects of the number of modes.
+
+Note that dream.diffev.de_step adds jitter to the parameters at the 1e-6 level,
+so *S* < 1e-4 cannot be modeled reliably.
+
+*draws* is set to 1000 samples per mode.  *burn* is set to 100 samples per mode.
+Population size *h* is set to 20 per mode.  A good choice for number of
+sequences *k* is not yet determined.
+"""
+from pylab import *
+from bumps.dream import *
+
+if 1: # Fixed layout of 5 minima
+    n = 5
+    S = [0.1]*5
+    x = [-4, -2, 0, 2, 4]
+    y = [2, -2, -4, 0, 4]
+    z = [-2, -1, 0, 1, 3]
+    I = [5, 2.5, 1, 4, 1]
+else: # Semirandom layout of n minima
+    d = 3
+    n = 40
+    S = [0.1]*n
+    x = linspace(-n+1,n-1,n)
+    y = permutation(x)
+    z = permutation(x)
+    I = 2*linspace(-1,1,n)**2 + 1
+
+args = [] # Sequence of density, weight, density, weight, ...
+for xi,yi,zi,Si,Ii in zip(x,y,z,S,I):
+    args.extend( (MVNormal([xi,yi,zi],Si*eye(3)), Ii) )
+    #args.extend( (MVNormal([xi,yi],Si*eye(2)), Ii) )
+model = Mixture(*args)
+
+k = 20*n
+h = int(20*n/k)
+sampler = Dream(model=model,
+                population=randn(h,k,3),  # last axis: one entry per model dimension (x,y,z)
+                #use_delayed_rejection=False,
+                DE_snooker_rate=0.5,
+                outlier_test='none',
+                draws=4000*n,burn=500*k,
+                thinning=1)
+mc = sampler.sample()
+mc.show()
+show()
diff --git a/extra/dream_examples/noisybanana.py b/extra/dream_examples/noisybanana.py
new file mode 100644
index 0000000..1d1965b
--- /dev/null
+++ b/extra/dream_examples/noisybanana.py
@@ -0,0 +1,26 @@
+"""
+The Rosenbrock banana function
+
+Demonstration that sampling works even when the density is unstable.
+"""
+from dream import *
+from pylab import *
+from numpy.random import lognormal
+
+def rosen(x):
+    x = asarray(x)
+    s = sum(100.0*(x[1:]-x[:-1]**2)**2 + (1-x[:-1])**2)
+    return -lognormal(s,sqrt(s)) # Poisson style: variance = # counts
+
+
+n=3
+sampler = Dream(model=LogDensity(rosen),
+                population=randn(20,n,n),
+                thinning=1,
+                burn=20000,
+                draws=20000,
+                )
+
+mc = sampler.sample()
+#plot_corr(mc); show()
+mc.show()
diff --git a/extra/dream_examples/quadfit.py b/extra/dream_examples/quadfit.py
new file mode 100755
index 0000000..642bc02
--- /dev/null
+++ b/extra/dream_examples/quadfit.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+
+from pylab import *
+from dream import *
+
+x = linspace(-4., 4, 40)
+fn = lambda p: polyval(p,x)
+bounds=(-20,-20,-20),(40,40,40)
+sigma = 1
+data = fn((2,-1,3)) + randn(*x.shape)*sigma  # Fake data
+
+
+n=len(bounds[0])
+model = Simulation(f=fn, data=data, sigma=sigma, bounds=bounds)
+sampler = Dream(model=model,
+                population=randn(5*n,4,n),
+                thinning=1,
+                draws=20000,
+                )
+
+mc = sampler.sample()
+figure(6); model.plot(mc.best()[0])
+plot_all(mc)
diff --git a/extra/fit_functions/__init__.py b/extra/fit_functions/__init__.py
new file mode 100644
index 0000000..4a42f02
--- /dev/null
+++ b/extra/fit_functions/__init__.py
@@ -0,0 +1,55 @@
+"""
+models: sample models and functions prepared for use in mystic
+
+
+Functions
+=========
+
+Standard test functions for minimizers:
+
+    rosenbrock         -- Rosenbrock's function
+    step               -- De Jong's step function
+    quartic            -- De Jong's quartic function
+    shekel             -- Shekel's function
+    corana1d,2d,3d,4d  -- Corana's function
+    fosc3d             -- the fOsc3D Mathematica function
+    griewangk          -- Griewangk's function
+    zimmermann         -- Zimmermann's function
+    wavy1              -- a simple sine-based multi-minima function
+    wavy2              -- another simple sine-based multi-minima function
+
+
+Models
+======
+
+Curve fitting tests:
+
+    disk_coverage  -- minimal disk for covering a set of points
+    lorentzian     -- Lorentzian peak model
+    decay          -- Bevington & Robinson's model of dual exponential decay
+    mogi           -- Mogi's model of surface displacements from a point spherical
+                      source in an elastic half space
+
+For each model s there will be a sample data set:
+
+   s_data = {'x':x, 'y':y, 'dy':dy}
+
+and generating parameters if they are available:
+
+   s_pars = { ... }
+
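+For example, a weighted residual for the decay model can be built from the
+bundled data (a sketch: it assumes this directory is importable as
+fit_functions, and the parameter values are illustrative, not fitted):
+
+   from fit_functions import decay, decay_data
+   x, y, dy = decay_data['x'], decay_data['y'], decay_data['dy']
+   resid = (decay(x, A=10., B=900., C=10., tauA=200., tauB=30.) - y) / dy
+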
+"""
+
+# models
+from .mogi import mogi
+from .br8 import dual_exponential as decay, data as decay_data
+from .lorentzian import lorentzian, data as lorentzian_data, coeff as lorentzian_pars
+from .circle import disk_coverage, simulate_circle, simulate_disk
+
+# functions
+from .dejong import rosenbrock, step, quartic, shekel
+from .corana import corana1d, corana2d, corana3d, corana4d
+from .fosc3d import fOsc3D
+from .griewangk import griewangk
+from .zimmermann import zimmermann
+from .wavy import wavy1, wavy2
diff --git a/extra/fit_functions/br8.py b/extra/fit_functions/br8.py
new file mode 100755
index 0000000..ddb2972
--- /dev/null
+++ b/extra/fit_functions/br8.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+"""
+Bevington & Robinson's model of dual exponential decay
+
+References::
+    [5] Bevington & Robinson (1992).
+    Data Reduction and Error Analysis for the Physical Sciences,
+    Second Edition, McGraw-Hill, Inc., New York.
+"""
+
+from numpy import exp, sqrt, vstack, array, asarray
+
+def dual_exponential(t, A, B, C, tauA, tauB):
+    """
+    Computes dual exponential decay.
+
+        y = A exp(-t/tauA) + B exp(-t/tauB) + C
+    """
+    t = asarray(t)
+    return C + A*exp(-t/tauA) + B*exp(-t/tauB)
+
+# data from Chapter 8 of [5].
+data = array([[15, 775], [30, 479], [45, 380], [60, 302],
+[75, 185], [90, 157], [105,137], [120, 119], [135, 110],
+[150, 89], [165, 74], [180, 61], [195, 66], [210, 68],
+[225, 48], [240, 54], [255, 51], [270, 46], [285, 55],
+[300, 29], [315, 28], [330, 37], [345, 49], [360, 26],
+[375, 35], [390, 29], [405, 31], [420, 24], [435, 25],
+[450, 35], [465, 24], [480, 30], [495, 26], [510, 28],
+[525, 21], [540, 18], [555, 20], [570, 27], [585, 17],
+[600, 17], [615, 14], [630, 17], [645, 24], [660, 11],
+[675, 22], [690, 17], [705, 12], [720, 10], [735, 13],
+[750, 16], [765, 9], [780, 9], [795, 14], [810, 21],
+[825, 17], [840, 13], [855, 12], [870, 18], [885, 10]])
+
+# Set uncertainty to sqrt(counts)
+data = { 'x': data[:,0], 'y': data[:,1], 'dy': sqrt(data[:,1]) }  # columns: time, counts
+
+#coeff = {'A': 1, 'B': 1, 'C': 1, 'tauA': 1, 'tauB': 1}
diff --git a/extra/fit_functions/circle.py b/extra/fit_functions/circle.py
new file mode 100755
index 0000000..328b71e
--- /dev/null
+++ b/extra/fit_functions/circle.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+
+"""
+Minimal disk
+
+References::
+    None
+"""
+
+from numpy import array, pi, inf, vstack, linspace
+from numpy import random, sin, cos, sqrt
+
+random.seed(123)
+
+
+def disk_coverage(data, cx, cy, r,
+                  area_penalty=1, visibility_penalty=1, distance_penalty=1):
+    """
+    cost function for minimum enclosing circle for a 2D set of points
+
+    There are three penalty terms:
+
+    - *area_penalty* is the cost per unit area of the disk
+    - *visibility_penalty* is the cost per point not covered by the disk
+    - *distance_penalty* is the weight on the sum squared costs of
+      each point to the disk
+    """
+    if r<0: return inf
+    x,y = data
+    d = sqrt((x-cx)**2 + (y-cy)**2)
+    return area_penalty*pi*r**2 + distance_penalty*sum((d[d>r]-r)**2) + visibility_penalty*sum(d>r)
+
+
+def outline(N=200, cx=0, cy=0, r=1):
+    """
+    generate the outline of a circle using N steps.
+    """
+    theta = linspace(0, 2*pi, N)
+    return vstack( (r*cos(theta)+cx, r*sin(theta)+cy) )
+
+def simulate_disk(N, cx=0, cy=0, r=1):
+    """
+    Generate N random points in a disk
+    """
+    data = array(list(_disk_generator(N)))
+    return vstack( (r*data[:,0]+cx, r*data[:,1]+cy) )
+
+def simulate_circle(N, cx=0, cy=0, r=1):
+    """
+    generate N random points on a circle
+    """
+
+    theta = random.uniform(0,2*pi,size=N)
+    return vstack((r*cos(theta)+cx,r*sin(theta)+cy))
+
+def _disk_generator(N):
+    for _ in range(N):
+        while True:
+            x = random.random()*2.-1.
+            y = random.random()*2.-1.
+            if x*x + y*y <= 1:
+                break
+        yield x,y
+
+
+# End of file
diff --git a/extra/fit_functions/corana.py b/extra/fit_functions/corana.py
new file mode 100644
index 0000000..a1f8d23
--- /dev/null
+++ b/extra/fit_functions/corana.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+"""
+Corana's function
+
+References::
+    [1] Storn, R. and Price, K. Differential Evolution - A Simple and Efficient
+    Heuristic for Global Optimization over Continuous Spaces. Journal of Global
+    Optimization 11: 341-359, 1997.
+
+    [2] Storn, R. and Price, K.
+    (Same title as above, but as a technical report.)
+    http://www.icsi.berkeley.edu/~storn/deshort1.ps
+"""
+from math import pow, floor, copysign
+
+
+def corana1d(x):
+    """Corana in 1D; coeffs = (x,0,0,0)"""
+    return corana4d([x[0], 0, 0, 0])
+
+def corana2d(x):
+    """Corana in 2D; coeffs = (x,0,y,0)"""
+    return corana4d([x[0], 0, x[1], 0])
+
+def corana3d(x):
+    """Corana in 3D; coeffs = (x,0,y,z)"""
+    return corana4d([x[0], 0, x[1], x[2]])
+
+def corana4d(x):
+    """
+    evaluates the Corana function on [x0,x1,x2,x3]
+
+    minimum is f(x)=0.0 at xi=0.0
+    """
+    d = [1., 1000., 10., 100.]
+    r = 0
+    for xj,dj in zip(x,d):
+        zj =  floor( abs(xj/0.2) + 0.49999 ) * copysign(0.2,xj)
+        if abs(xj-zj) < 0.05:
+            r += 0.15 * pow(zj - copysign(0.05,zj), 2) * dj
+        else:
+            r += dj * xj * xj
+    return r
+
+# End of file
diff --git a/extra/fit_functions/dejong.py b/extra/fit_functions/dejong.py
new file mode 100755
index 0000000..71d7603
--- /dev/null
+++ b/extra/fit_functions/dejong.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+
+"""
+Rosenbrock's function, De Jong's step function, De Jong's quartic function,
+and Shekel's function
+
+References::
+    [1] Storn, R. and Price, K. Differential Evolution - A Simple and Efficient
+    Heuristic for Global Optimization over Continuous Spaces. Journal of Global
+    Optimization 11: 341-359, 1997.
+
+    [2] Storn, R. and Price, K.
+    (Same title as above, but as a technical report.)
+    http://www.icsi.berkeley.edu/~storn/deshort1.ps
+"""
+from six.moves import reduce
+
+from numpy import sum as numpysum
+from numpy import asarray
+from math import floor
+import random
+from math import pow
+
+def rosenbrock(x):
+    """
+    Rosenbrock function:
+
+    A modified second De Jong function, Equation (18) of [2]
+
+    minimum is f(x)=0.0 at xi=1.0
+    """
+    #ensure that there are 2 coefficients
+    assert len(x) >= 2
+    x = asarray(x)
+    return numpysum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0)
+
+def step(x):
+    """
+    De Jong's step function:
+
+    The third De Jong function, Equation (19) of [2]
+
+    minimum is f(x)=0.0 at xi=-5-n where n=[0.0,0.12]
+    """
+    f = 30.
+    for c in x:
+        if abs(c) <= 5.12:
+            f += floor(c)
+        elif c > 5.12:
+            f += 30 * (c - 5.12)
+        else:
+            f += 30 * (5.12 - c)
+    return f
+
+def quartic(x):
+    """
+    De Jong's quartic function:
+    The modified fourth De Jong function, Equation (20) of [2]
+
+    minimum is f(x)=random, but statistically at xi=0
+    """
+    f = 0.
+    for j, c in enumerate(x):
+        f += pow(c,4) * (j+1.0) + random.random()
+    return f
+
+
+def shekel(x):
+    """
+    Shekel: The modified fifth De Jong function, Equation (21) of [2]
+
+    minimum is f(x)=0.0 at x(-32,-32)
+    """
+
+    A = [-32., -16., 0., 16., 32.]
+    a1 = A * 5
+    a2 = reduce(lambda x1,x2: x1+x2, [[c] * 5 for c in A])
+
+    x1,x2 = x
+    r = 0.0
+    for i in range(25):
+        r += 1.0/ (1.0*i + pow(x1-a1[i],6) + pow(x2-a2[i],6) + 1e-15)
+    return 1.0/(0.002 + r)
diff --git a/extra/fit_functions/fosc3d.py b/extra/fit_functions/fosc3d.py
new file mode 100644
index 0000000..d52e6cd
--- /dev/null
+++ b/extra/fit_functions/fosc3d.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+"""
+the fOsc3D Mathematica function
+
+References::
+    [4] Mathematica guidebook
+"""
+from math import sin, exp
+
+def fOsc3D(x,y):
+    """
+    fOsc3D Mathematica function:
+
+    fOsc3D[x_,y_] := -4 Exp[(-x^2 - y^2)] + Sin[6 x] Sin[5 y]
+
+    minimum?
+    """
+
+    func =  -4. * exp( -x*x - y*y ) + sin(6. * x) * sin(5. *y)
+    penalty = 100.*y*y if y<0 else 0
+    return func + penalty
diff --git a/extra/fit_functions/griewangk.py b/extra/fit_functions/griewangk.py
new file mode 100644
index 0000000..8ace723
--- /dev/null
+++ b/extra/fit_functions/griewangk.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+"""
+Griewangk's function
+
+References::
+    [1] Storn, R. and Price, K. Differential Evolution - A Simple and Efficient
+    Heuristic for Global Optimization over Continuous Spaces. Journal of Global
+    Optimization 11: 341-359, 1997.
+
+    [2] Storn, R. and Price, K.
+    (Same title as above, but as a technical report.)
+    http://www.icsi.berkeley.edu/~storn/deshort1.ps
+"""
+from six.moves import reduce
+
+from math import cos, sqrt
+
+def griewangk(coeffs):
+    """
+    Griewangk function: a multi-minima function, Equation (23) of [2]
+
+    minimum is f(x)=0.0 at xi=0.0
+    """
+
+    # ensure that there are 10 coefficients
+    x = [0]*10
+    x[:len(coeffs)]=coeffs
+
+    term1 = sum([xi*xi for xi in x])/4000
+    term2 = prod([cos(xi/sqrt(i+1.)) for i,xi in enumerate(x)])
+    return term1 - term2 + 1
+
+prod = lambda x: reduce(lambda a,b: a*b, x, 1.)
\ No newline at end of file
diff --git a/extra/fit_functions/lorentzian.py b/extra/fit_functions/lorentzian.py
new file mode 100755
index 0000000..f867224
--- /dev/null
+++ b/extra/fit_functions/lorentzian.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+
+"""
+Lorentzian peak model
+
+References::
+    None
+"""
+
+from numpy import array, pi, asarray, arange, sqrt
+from numpy import random
+
+
+def lorentzian(E, Io, Eo, Gamma, A=0, B=0, C=0):
+    """
+    lorentzian with quadratic background::
+
+        I = Io (Gamma/(2 pi)) / ( (E-Eo)^2 + (Gamma/2)^2 ) + (A + B E + C E^2)
+    """
+    E = asarray(E) # force E to be a numpy array
+    return (A + (B + C*E)*E + Io * (Gamma/2/pi) / ( (E-Eo)**2 + (Gamma/2)**2 ))
+
+
+def simulate_events(params,xmin,xmax,npts=4000):
+    """Generate a lorentzian dataset of npts between [min,max] from given params"""
+    def gensample(F, xmin, xmax):
+        a = arange(xmin, xmax, (xmax-xmin)/200.)
+        ymin = 0
+        ymax = F(a).max()
+        while 1:
+            t1 = random.random() * (xmax-xmin) + xmin
+            t2 = random.random() * (ymax-ymin) + ymin
+            t3 = F(t1)
+            if t2 < t3:
+                return t1
+    fwd = lambda x: lorentzian(x, **params)
+    return array([gensample(fwd, xmin,xmax) for _ in range(npts)])
+
+def simulate_histogram(pars, Emin, Emax, dE, npts=4000):
+    events = simulate_events(pars, Emin, Emax, npts)
+    E,I = histogram(events, dE, Emin, Emax)
+    #print min(events),max(events)
+    dI = sqrt(I)
+    data = { 'x': E, 'y': I, 'dy': dI }
+    return data
+
+def demo_data():
+    # simulate N = 4000 events
+    # center Eo = 6500
+    # width Gamma = 180
+    # background = 35 - (E-6340)^2/10000
+    bgC = 6340.
+    bgW = 10000.
+    A = 35 - bgC**2/bgW
+    B = 2.*bgC/bgW
+    C = -1./bgW
+    #A,B,C = 0,0,0
+    N = 4000
+    pars = { 'Eo': 6500.0, 'Gamma': 180.0, 'Io': 20*N, 'A': A, 'B': B, 'C': C }
+    Emin,Emax = 6000, 6700
+    dE = 20
+
+    return pars, simulate_histogram(pars, Emin, Emax, dE, N)
+
+
+# probably shouldn't be in here...
+from numpy import histogram as numpyhisto
+def histogram(data,binwidth,xmin,xmax):
+    """
+    generate bin-centered histogram of provided data
+
+    return bins of given binwidth (and histogram) generated between [xmin,xmax]
+    """
+    edges = arange(xmin,xmax+binwidth*0.9999999, binwidth)
+    centers = edges[:-1] + (0.5 * binwidth)  # one center per bin, matching histo
+    histo,_ = numpyhisto(data, bins=edges)
+    #print data.size, sum(histo), edges[0], edges[-1], min(data),max(data)
+    return centers, histo
+
+coeff, data = demo_data()
+
+def demo():
+    import pylab
+    x,y,dy = data['x'],data['y'],data['dy']
+    A,B,C = coeff['A'],coeff['B'],coeff['C']
+    Io,Eo,Gamma = coeff['Io'],coeff['Eo'],coeff['Gamma']
+    pylab.errorbar(x, y, yerr=dy, label="data")
+    pylab.plot(x, pylab.polyval([C,B,A], x), label="background")
+    pylab.plot(x, lorentzian(x,Eo=Eo,Io=Io,Gamma=Gamma), label="peak")
+    pylab.plot(x, lorentzian(x,**coeff), label="peak+bkg")
+    pylab.legend()
+    pylab.show()
+
+if __name__ == "__main__": demo()
diff --git a/extra/fit_functions/mogi.py b/extra/fit_functions/mogi.py
new file mode 100755
index 0000000..4aa059a
--- /dev/null
+++ b/extra/fit_functions/mogi.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+"""
+Mogi's model of surface displacements from a point spherical source in an
+elastic half space
+
+References::
+    [3] Mogi, K. Relations between the eruptions of various
+    volcanoes and the deformations of the ground surfaces around them,
+    Bull. Earthquake. Res. Inst., 36, 99-134, 1958.
+"""
+
+from numpy import array, pi
+
+def mogi(data, x0, y0, z0, dV):
+    """
+    Computes surface displacements Ux, Uy, Uz in meters from a point spherical
+    pressure source in an elastic half space [3].
+
+    evaluate a single Mogi peak over a 2D (2 by N) numpy array of evalpts,
+    where coeffs = (x0,y0,z0,dV)
+    """
+    dx = data[0,:] - x0
+    dy = data[1,:] - y0
+    dz = 0 - z0
+    c = dV * 3. / (4. * pi)
+    # or equivalently c= (3/4) a^3 dP / rigidity
+    # where a = sphere radius, dP = delta Pressure
+    r2 = dx*dx + dy*dy + dz*dz
+    C = c / pow(r2, 1.5)
+    return array((C*dx,C*dy,C*dz))
+
diff --git a/extra/fit_functions/poly.py b/extra/fit_functions/poly.py
new file mode 100755
index 0000000..a9c7eb8
--- /dev/null
+++ b/extra/fit_functions/poly.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+
+"""
+1d model representation for polynomials
+
+References::
+    [1] Storn, R. and Price, K. Differential Evolution - A Simple and Efficient
+    Heuristic for Global Optimization over Continuous Spaces. Journal of Global
+    Optimization 11: 341-359, 1997.
+
+    [2] Storn, R. and Price, K.
+    (Same title as above, but as a technical report.)
+    http://www.icsi.berkeley.edu/~storn/deshort1.ps
+"""
+
+from numpy import sum as numpysum
+from numpy import polyval
+
+def poly(x, c):
+    return polyval(c,x)
+
+# coefficients for specific Chebyshev polynomials
+chebyshev8coeffs = [128., 0., -256., 0., 160., 0., -32., 0., 1.]
+chebyshev16coeffs = [32768., 0., -131072., 0., 212992., 0., -180224., 0., 84480., 0., -21504., 0., 2688., 0., -128., 0., 1]
+
+class Polynomial(object):
+    """Minimal polynomial model base class assumed by Chebyshev below:
+it records the model name, the residual metric and sigma."""
+    def __init__(self, name='poly', metric=lambda x: numpysum(x*x), sigma=1.0):
+        self.name = name
+        self.metric = metric
+        self.sigma = sigma
+
+
+class Chebyshev(Polynomial):
+    """Chebyshev polynomial models and functions,
+including specific methods for T8(z) & T16(z), Equation (27-33) of [2]
+
+NOTE: default is T8(z)"""
+
+    def __init__(self,order=8,name='poly',metric=lambda x: numpysum(x*x),sigma=1.0):
+        Polynomial.__init__(self,name,metric,sigma)
+        if order == 8:  self.coeffs = chebyshev8coeffs
+        elif order == 16:  self.coeffs = chebyshev16coeffs
+        else: raise NotImplementedError("provide self.coeffs 'by hand'")
+        return
+
+    def cost(self,trial,M=61):
+        """The costfunction for order-n Chebyshev fitting.
+M evaluation points between [-1, 1], and two end points"""# % (len(self.coeffs)-1)
+        #XXX: throw error when len(trial) != len(self.coeffs) ?
+        myCost = chebyshevcostfactory(self.coeffs)
+        return myCost(trial,M)
+
+    pass
+
+# faster implementation
+def chebyshevcostfactory(target):
+    def chebyshevcost(trial,M=61):
+        """The costfunction for order-n Chebyshev fitting.
+M evaluation points between [-1, 1], and two end points"""
+
+        result=0.0
+        x=-1.0
+        dx = 2.0 / (M-1)
+        for i in range(M):
+            px = polyval(trial, x)
+            if px<-1 or px>1:
+                result += (1 - px) * (1 - px)
+            x += dx
+
+        px = polyval(trial, 1.2) - polyval(target, 1.2)
+        if px<0: result += px*px
+
+        px = polyval(trial, -1.2) - polyval(target, -1.2)
+        if px<0: result += px*px
+
+        return result
+    return chebyshevcost
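
The cost factory can be exercised directly; at the target coefficients the
cost is (near) zero, while a poor trial is heavily penalized::

    cost = chebyshevcostfactory(chebyshev8coeffs)
    print(cost(chebyshev8coeffs))   # ~0: the target polynomial fits itself
    print(cost([0.0] * 9))          # large: a flat polynomial misses T8 at +/-1.2
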
diff --git a/extra/fit_functions/wavy.py b/extra/fit_functions/wavy.py
new file mode 100644
index 0000000..80fe080
--- /dev/null
+++ b/extra/fit_functions/wavy.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+"""
+simple sine-based multi-minima functions
+
+References::
+    None.
+"""
+
+from numpy import absolute as abs
+from numpy import asarray
+from numpy import sin, pi
+
+
+def wavy1(x):
+    """
+    Wave function #1: a simple multi-minima function
+
+    minimum is f(x)=0.0 at xi=0.0
+    """
+    x = asarray(x)
+    return abs(x+3.*sin(x+pi)+pi)
+
+def wavy2(x):
+    """
+    Wave function #2: a simple multi-minima function
+
+    minimum is f(x)=0.0 at xi=0.0
+    """
+
+    x = asarray(x)
+    return 4 *sin(x)+sin(4*x) + sin(8*x)+sin(16*x)+sin(32*x)+sin(64*x)
diff --git a/extra/fit_functions/zimmermann.py b/extra/fit_functions/zimmermann.py
new file mode 100644
index 0000000..97b4b32
--- /dev/null
+++ b/extra/fit_functions/zimmermann.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+"""
+Zimmermann's function
+
+References::
+    [1] Storn, R. and Price, K. Differential Evolution - A Simple and Efficient
+    Heuristic for Global Optimization over Continuous Spaces. Journal of Global
+    Optimization 11: 341-359, 1997.
+
+    [2] Storn, R. and Price, K.
+    (Same title as above, but as a technical report.)
+    http://www.icsi.berkeley.edu/~storn/deshort1.ps
+"""
+
+def zimmermann(x):
+    """
+    Zimmermann function: a non-continuous function, Equation (24-26) of [2]
+
+    minimum is f(x)=0.0 at x=(7.0,2.0)
+    """
+
+    x0, x1 = x #must provide 2 values (x0,y0)
+    f8 = 9 - x0 - x1
+    c0,c1,c2,c3 = 0,0,0,0
+    if x0 < 0: c0 = -100 * x0
+    if x1 < 0: c1 = -100 * x1
+    xx =  (x0-3.)*(x0-3) + (x1-2.)*(x1-2)
+    if xx > 16: c2 = 100 * (xx-16)
+    if x0 * x1 > 14: c3 = 100 * (x0*x1-14.)
+    return max(f8,c0,c1,c2,c3)
diff --git a/extra/installer-hooks/hook-bumps.py b/extra/installer-hooks/hook-bumps.py
new file mode 100644
index 0000000..c1c200f
--- /dev/null
+++ b/extra/installer-hooks/hook-bumps.py
@@ -0,0 +1,11 @@
+from bumps.gui.resources import resources as gui_resources
+
+# The exported API names are not used within the bumps package, so they
+# need to be listed explicitly.
+hiddenimports = ['names']
+
+# Convert [(dir, [f1,f2, ...]), ...] returned by gui_resource.data_files()
+# into [(f1, dir), (f2, dir), ...] expected for datas.
+datas = [ (fname,dirname)
+          for dirname, filelist in gui_resources.data_files()
+          for fname in filelist ]
diff --git a/extra/installer-hooks/hook-scipy.special._ufuncs.py b/extra/installer-hooks/hook-scipy.special._ufuncs.py
new file mode 100644
index 0000000..9957f93
--- /dev/null
+++ b/extra/installer-hooks/hook-scipy.special._ufuncs.py
@@ -0,0 +1 @@
+hiddenimports = ['_ufuncs_cxx']
diff --git a/extra/jobqueue/README b/extra/jobqueue/README
new file mode 100644
index 0000000..77c035b
--- /dev/null
+++ b/extra/jobqueue/README
@@ -0,0 +1,5 @@
+Simple RESTful service api to a job queue
+
+Requires the following external packages:
+
+* flask
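
A minimal sketch of submitting work through the client in this directory,
assuming extra/jobqueue is importable as the jobqueue package and a server is
running at the URL shown; the service name and payload are placeholders, and
the request fields follow the create_job docstring in server.py::

    from jobqueue import client

    conn = client.connect("http://localhost:5000")
    job = conn.submit({
        'name': 'demo job',
        'notify': 'user@example.com',   # email or @twitter handle
        'service': 'fitter',            # hypothetical service name
        'version': '1.0',
        'data': {},                     # service-specific payload
    })
    print(conn.status(job['id']))
    results = conn.wait(job['id'], pollrate=5, timeout=600)
    print(results['status'])
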
diff --git a/extra/jobqueue/__init__.py b/extra/jobqueue/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/extra/jobqueue/client.py b/extra/jobqueue/client.py
new file mode 100644
index 0000000..b8c886b
--- /dev/null
+++ b/extra/jobqueue/client.py
@@ -0,0 +1,147 @@
+import time
+import json
+from . import rest
+
+json_content = 'application/json'
+
+class Connection(object):
+    def __init__(self, url):
+        self.rest = rest.Connection(url)
+
+    def jobs(self, status=None):
+        """
+        List jobs on the server according to status.
+        """
+        if status is None:
+            response = self.rest.get('/jobs.json')
+        else:
+            response = self.rest.get('/jobs/%s.json'%status.lower())
+        return _process_response(response)['jobs']
+
+    def submit(self, job):
+        """
+        Submit a job to the server.
+        """
+        body = json.dumps(job)
+        response = self.rest.post('/jobs.json',
+                                  mimetype=json_content,
+                                  body=body)
+        return _process_response(response)
+
+    def info(self, id):
+        """
+        Return the job structure associated with id.
+
+        Raises ValueError if job not found.
+        Raises IOError if communication error.
+        """
+        response = self.rest.get('/jobs/%s.json'%id)
+        return _process_response(response)
+
+    def status(self, id):
+        """
+        Return the status of the job associated with id.
+
+        Raises ValueError if job not found.
+        Raises IOError if communication error.
+        """
+        response = self.rest.get('/jobs/%s/status.json'%id)
+        return _process_response(response)
+
+    def output(self, id):
+        """
+        Return the result from processing the job.
+
+        Raises ValueError if job not found.
+        Raises IOError if communication error.
+
+        Check response['status'] for 'COMPLETE','CANCEL','ERROR', etc.
+        """
+        response = self.rest.get('/jobs/%s/results.json'%id)
+        return _process_response(response)
+
+    def wait(self, id, pollrate=300, timeout=60*60*24):
+        """
+        Wait for job to complete, returning output.
+
+        *pollrate* is the number of seconds to sleep between checks
+        *timeout* is the maximum number of seconds to wait
+
+        Raises IOError if the timeout is exceeded.
+        Raises ValueError if job not found.
+        Raises IOError if communication error.
+        """
+        start = time.time()  # wall clock, since the loop mostly sleeps
+        while True:
+            results = self.output(id)
+            #print "waiting: result is",results
+            if results['status'] in ('PENDING', 'ACTIVE'):
+                #print "waiting for job %s"%id
+                if time.time() - start > timeout:
+                    raise IOError('job %s is still pending'%id)
+                time.sleep(pollrate)
+            else:
+                #print "status for %s is"%id,results['status'],'- wait complete'
+                return results
+
+    def stop(self, id):
+        """
+        Stop the job.
+
+        Raises ValueError if job not found.
+        Raises IOError if communication error.
+        """
+        response = self.rest.post('/jobs/%s?action=stop'%id)
+        return _process_response(response)
+
+    def delete(self, id):
+        """
+        Delete the job and all associated files.
+
+        Raises ValueError if job not found.
+        Raises IOError if communication error.
+        """
+        response = self.rest.delete('/jobs/%s.json'%id)
+        return _process_response(response)
+
+    def nextjob(self, queue):
+        """
+        Fetch the next job to process from the queue.
+        """
+        # TODO: combine status check and prefetch to reduce traffic
+        # TODO: worker sends active and pending jobs so we can load balance
+        body = json.dumps({'queue': queue})
+        response = self.rest.post('/jobs/nextjob.json',
+                                  mimetype=json_content,
+                                  body=body)
+        return _process_response(response)
+
+    def postjob(self, queue, id, results, files):
+        """
+        Return results from a processed job.
+        """
+        # TODO: sign request
+        fields = {'queue': queue, 'results': json.dumps(results)}
+        response = self.rest.postfiles('/jobs/%s/postjob'%id,
+                                       files=files,
+                                       fields=fields)
+        return _process_response(response)
+
+    def putfiles(self, id, files):
+        # TODO: sign request
+        response = self.rest.putfiles('/jobs/%s/data/'%id,
+                                      files=files)
+        return _process_response(response)
+
+def _process_response(response):
+    headers, body = response
+    #print "response",response[body]
+    if headers['status'] == '200':
+        return json.loads(body)
+    else:
+        err = headers['status']
+        msg = rest.RESPONSE.get(err,("Unknown","Unknown code"))[1]
+        raise IOError("server response %s %s"%(err,msg))
+
+def connect(url):
+    return Connection(url)
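
The same Connection also carries the worker half of the protocol
(nextjob/postjob).  A rough polling-worker sketch; the queue name is a
placeholder and the results dictionary stands in for real work::

    import time
    from jobqueue import client

    conn = client.connect("http://localhost:5000")   # assumed server URL
    while True:
        work = conn.nextjob(queue='local')
        if work.get('request') is None:
            time.sleep(10)            # nothing pending; poll again
            continue
        id, request = work['id'], work['request']
        results = {'status': 'COMPLETE', 'result': None}   # stand-in for real work
        conn.postjob('local', id, results, files=[])
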
diff --git a/extra/jobqueue/daemon.py b/extra/jobqueue/daemon.py
new file mode 100644
index 0000000..9861861
--- /dev/null
+++ b/extra/jobqueue/daemon.py
@@ -0,0 +1,310 @@
+#!/usr/bin/env python
+# -*- coding: iso-8859-15 -*-
+"""
+Disk And Execution MONitor (Daemon)
+
+daemonize()
+  Detach the current process from the calling terminal so it can
+  run as a service.
+startstop()
+  Check the first argument to the program.
+
+     ========  ======
+     argument  action
+     ========  ======
+     start     start the daemon
+     stop      stop the daemon
+     restart   stop the daemon if running, then start it again
+     status    display running status
+     run       run but not in daemon mode
+     ========  ======
+
+Configurable daemon behaviors:
+
+1. The current working directory set to the "/" directory.
+2. The current file creation mode mask set to 0.
+3. Close all open files (1024).
+4. Redirect standard I/O streams to "/dev/null".
+
+Almost none of this is necessary (or advisable) if your daemon
+is being started by inetd. In that case, stdin, stdout and stderr are
+all set up for you to refer to the network connection, and the fork()s
+and session manipulation should not be done (to avoid confusing inetd).
+Only the chdir() and umask() steps remain as useful.
+
+References
+==========
+
+UNIX Programming FAQ 1.7 How do I get my program to act like a daemon?
+http://www.steve.org.uk/Reference/Unix/faq_2.html
+
+Advanced Programming in the Unix Environment
+W. Richard Stevens, 1992, Addison-Wesley, ISBN 0-201-56317-7.
+
+History
+=======
+
+* 2001/07/10 by Jürgen Hermann
+* 2002/08/28 by Noah Spurrier
+* 2003/02/24 by Clark Evans
+* 2005/10/03 by Chad J. Schroeder
+* 2008/11/05 by Paul Kienzle
+* 2009/03/15 by Paul Kienzle (restructured text; fix links)
+
+Based on http://code.activestate.com/recipes/66012/
+"""
+
+# TODO: generalize for windows and os x
+#
+# Need to extend daemon to handle Windows and OS X.
+#
+# startstop should be renamed control
+# control should take additional commands for install/remove, presumably
+# by querying the filename of the caller, assuming we can track that.
+#
+# Should support control from xinetd as well as init.d.  The run command
+# may do so already.
+#
+# Mac OS 10.4 and above uses launchd.  The service will need an info.plist
+# to describe the interactions and the script to run.
+#
+# Windows services can be created and manipulated from python, as shown in::
+#    http://essiene.blogspot.com/2005/04/python-windows-services.html
+#    http://code.activestate.com/recipes/59872/
+#    http://code.activestate.com/recipes/551780/
+#
+# Need to isolate other system dependencies such as /var/log and /var/run
+# so the caller doesn't care where the system normally puts the services.
+# Should be able to run the service as a user, so first try putting files
+# in the traditional place, and if that doesn't work, put them in ~/.service
+
+import sys, os, time, errno
+from signal import SIGTERM
+
+UMASK = 0     # Default to completely private files
+WORKDIR = "/" # Default to running in '/'
+MAXFD = 1024  # Maximum number of file descriptors
+
+if hasattr(os, "devnull"):
+    REDIRECT_TO = os.devnull
+else:
+    REDIRECT_TO = "/dev/null"
+
+# sys.exit() or os._exit()?
+# _exit is like exit(), but it doesn't call any functions registered
+# with atexit (and on_exit) or any registered signal handlers.  It also
+# closes any open file descriptors.  Using exit() may cause all stdio
+# streams to be flushed twice and any temporary files may be unexpectedly
+# removed.  It's therefore recommended that child branches of a fork()
+# and the parent branch(es) of a daemon use _exit().
+exit = os._exit
+
+def _close_all():
+    """
+    Close all open file descriptors.  This prevents the child from keeping
+    open any file descriptors inherited from the parent.
+    """
+
+    # There is a variety of methods to accomplish this task.
+    #
+    # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
+    # number of open file descriptors to close.  If it doesn't exists, use
+    # the default value (configurable).
+    #
+    # try:
+    #    maxfd = os.sysconf("SC_OPEN_MAX")
+    # except (AttributeError, ValueError):
+    #    maxfd = MAXFD
+    #
+    # OR
+    #
+    # if (os.sysconf_names.has_key("SC_OPEN_MAX")):
+    #    maxfd = os.sysconf("SC_OPEN_MAX")
+    # else:
+    #    maxfd = MAXFD
+    #
+    # OR
+    #
+    # Use the getrlimit method to retrieve the maximum file descriptor number
+    # that can be opened by this process.  If there is not limit on the
+    # resource, use the default value.
+
+    import resource              # Resource usage information.
+    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
+    if maxfd == resource.RLIM_INFINITY:
+        maxfd = MAXFD
+
+    # Iterate through and close all the file descriptors
+    for fd in range(0, maxfd):
+        try: os.close(fd)
+        except OSError: pass # (ignored) fd wasn't open to begin with
+
+
+def daemonize(stdout=REDIRECT_TO, stderr=None, stdin=REDIRECT_TO,
+              pidfile=None, startmsg = 'started with pid %s' ):
+    """
+    This forks the current process into a daemon.
+
+    The stdin, stdout, and stderr arguments are file names that will
+    be opened and be used to replace the standard file descriptors
+    in sys.stdin, sys.stdout, and sys.stderr.
+    These arguments are optional and default to /dev/null.
+
+    Note that stderr is opened unbuffered, so if it shares a file with
+    stdout then interleaved output may not appear in the order you expect.
+    """
+
+    # Fork a child process so the parent can exit.  This returns control to
+    # the command-line or shell.  It also guarantees that the child will not
+    # be a process group leader, since the child receives a new process ID
+    # and inherits the parent's process group ID.  This step is required
+    # to insure that the next call to os.setsid is successful.
+    try:
+        pid = os.fork()
+        if pid > 0: exit(0) # Exit first parent.
+    except OSError as e:
+        raise Exception("[%d] %s" % (e.errno, e.strerror))
+
+    # Decouple from parent environment.
+    os.chdir(WORKDIR)  # Make sure we are not holding a directory open
+    os.umask(UMASK)    # Clear the inherited mask
+    os.setsid()        # Make this the session leader
+
+    # Fork a second child and exit immediately to prevent zombies.  This
+    # causes the second child process to be orphaned, making the init
+    # process responsible for its cleanup.  And, since the first child is
+    # a session leader without a controlling terminal, it's possible for
+    # it to acquire one by opening a terminal in the future (System V-
+    # based systems).  This second fork guarantees that the child is no
+    # longer a session leader, preventing the daemon from ever acquiring
+    # a controlling terminal.
+    try:
+        pid = os.fork()
+        if pid > 0: exit(0) # Exit second parent.
+    except OSError as e:
+        raise Exception("[%d] %s" % (e.errno, e.strerror))
+
+    # Save pid
+    pid = str(os.getpid())
+    if pidfile:  # Make sure pidfile is written cleanly before close_all
+        fd = open(pidfile,'w+')
+        fd.write("%s\n" % pid)
+        fd.flush()
+        fd.close()
+
+    # Print start message and flush output
+    if startmsg: sys.stderr.write("\n%s\n" % startmsg % pid)
+    sys.stdout.flush()
+    sys.stderr.flush()
+
+    # Close all but the standard file descriptors.
+    #_close_all()  # hmmm...interferes with file output selection
+
+    # Redirect standard file descriptors.
+    if not stderr: stderr = stdout
+    fin = open(stdin, "r")
+    fout = open(stdout, "a+")
+    ferr = open(stderr, "a+")
+    os.dup2(fin.fileno(), 0)
+    os.dup2(fout.fileno(), 1)
+    os.dup2(ferr.fileno(), 2)
+
+def readpid(pidfile):
+    try:
+        pf  = open(pidfile,'r')
+        pid = int(pf.read().strip())
+        pf.close()
+    except IOError:
+        pid = None
+    return pid
+
+def process_is_running(pid):
+    """
+    Check if the given process is running.
+
+    Note that this just checks that the pid is in use; since process
+    ids can be reused, this isn't a reliable test.
+    """
+    # Signal the process with 0.  If successful or if fails because
+    # you don't have permission, then the process is alive otherwise
+    # the process is dead.
+    try:
+        os.kill(pid, 0)
+        return 1
+    except OSError as err:
+        return err.errno == errno.EPERM
+
+def startstop(stdout=REDIRECT_TO, stderr=None, stdin=REDIRECT_TO,
+              pidfile='pid.txt', startmsg = 'started with pid %s' ):
+    """
+    Process start/stop/restart/status/run commands.
+
+    Start/stop/restart allow the process to be used as an init.d service.
+    Run runs the process without daemonizing, e.g., from inittab.
+    """
+    if len(sys.argv) > 1:
+        action = sys.argv[1]
+        pid = readpid(pidfile)
+        if 'stop' == action or ('restart' == action and pid):
+            if not pid:
+                msg = "Could not stop, pid file '%s' missing."%pidfile
+                sys.stderr.write('%s\n'%msg)
+                sys.exit(1)
+
+            try:
+                while 1:
+                    os.kill(pid,SIGTERM)
+                    time.sleep(1)
+            except OSError as err:
+                err = str(err)
+                if err.find("No such process") > 0:
+                    os.remove(pidfile)
+                    if 'stop' == action:
+                        sys.exit(0)
+                    # Fall through to the start action
+                    pid = None
+                else:
+                    raise  # Reraise if it is not a "No such process" error
+
+        if action in ['start', 'restart', 'run']:
+            if pid:
+                msg = "Start aborted since pid file '%s' exists."%pidfile
+                sys.stderr.write('%s\n'%msg)
+                sys.exit(1)
+            # Only return when we are ready to run the main program
+            # Otherwise use sys.exit to end.
+            if action != 'run':
+                daemonize(stdout=stdout,stderr=stderr,stdin=stdin,
+                          pidfile=pidfile,startmsg=startmsg)
+            return
+
+        if action  == 'status':
+            if not pid:
+                status='stopped'
+            elif not process_is_running(pid):
+                status="failed, but pid file '%s' still exists."%pidfile
+            else:
+                status='running with pid %d'%pid
+            sys.stderr.write('Status: %s\n'%status)
+            sys.exit(0)
+
+    print("usage: %s start|stop|restart|status|run" % sys.argv[0])
+    sys.exit(2)
+
+def test():
+    """
+    This is an example main function run by the daemon.
+    This prints a count and timestamp once per second.
+    """
+    sys.stdout.write ('Message to stdout...\n')
+    sys.stderr.write ('Message to stderr...\n')
+    c = 0
+    while 1:
+        sys.stdout.write ('%d: %s\n' % (c, time.ctime(time.time())) )
+        sys.stdout.flush()
+        c = c + 1
+        time.sleep(1)
+
+if __name__ == "__main__":
+    startstop(stdout='/tmp/daemon.log', pidfile='/tmp/daemon.pid')
+    test()
diff --git a/extra/jobqueue/db.py b/extra/jobqueue/db.py
new file mode 100644
index 0000000..5fa992e
--- /dev/null
+++ b/extra/jobqueue/db.py
@@ -0,0 +1,112 @@
+import os.path
+
+from datetime import datetime
+
+import sqlalchemy as db
+from sqlalchemy import Column, ForeignKey, Sequence
+from sqlalchemy import String, Integer, DateTime, Float, Enum
+from sqlalchemy.orm import relationship, sessionmaker
+from sqlalchemy.ext.declarative import declarative_base
+
+DB_URI = 'sqlite:///'+os.path.expanduser('~/.jobqueue.db')
+DEBUG = False
+
+Record = declarative_base()
+Session = sessionmaker(autocommit=False)
+def connect():
+    engine = db.create_engine(DB_URI, echo=DEBUG)
+    Record.metadata.create_all(engine)
+    Session.configure(bind=engine)
+
+# Job status enum
+STATUS = ['PENDING','ACTIVE','CANCEL','COMPLETE','ERROR','DELETE']
+
+class Job(Record):
+    """
+    *id* : Integer
+        Unique id for the job
+    *name* : String(80)
+        Job name as specified by the user.  This need not be unique.
+    *origin* : String(45)
+        IP address originating the request
+    *date* : DateTime utc
+        Request submission time
+    *start* : DateTime utc
+        Time the request was processed
+    *stop* : DateTime utc
+        Time the request was completed
+    *priority* : Float
+        Priority level for the job
+    *notify* : String(254)
+        Email/twitter notification address
+    *status* : PENDING|ACTIVE|CANCEL|COMPLETE|ERROR
+        Job status
+
+    The job request, result and any supplementary information are
+    stored in the directory indicated by jobid.
+    """
+
+    __tablename__ = 'jobs'
+
+    id = Column(Integer, Sequence('jobid_seq'), primary_key=True)
+    name = Column(String(80))
+    origin = Column(String(45)) # <netinet/in.h> #define INET6_ADDRSTRLEN 46
+    date = Column(DateTime, default=datetime.utcnow, index=True)
+    start = Column(DateTime)
+    stop = Column(DateTime)
+    priority = Column(Float, index=True)
+    notify = Column(String(254)) # RFC 3696 errata 1690: max email=254
+    status = Column(Enum(*STATUS, name="status_enum"), index=True)
+
+    def __init__(self, name, origin, notify, priority):
+        self.status = 'PENDING'
+        self.name = name
+        self.origin = origin
+        self.notify = notify
+        self.priority = priority
+
+    def __repr__(self):
+        return "<Job('%s')>" % (self.name)
+
+class ActiveJob(Record):
+    """
+    *id* : Integer
+        Unique id for the job
+    *jobid* : job.id
+        Active job
+    *queue* : String(256)
+        Queue on which the job is running
+    *date* : DateTime utc
+        Date the job was queued
+    """
+    # TODO: split queue into its own table, and create an additional table
+    # TODO: to track how much work is done by each queue
+    __tablename__ = "active_jobs"
+    id = Column(Integer, Sequence('activeid_seq'), primary_key=True)
+    jobid = Column(Integer, ForeignKey(Job.id), unique=True)
+    queue = Column(String(256))
+    date = Column(DateTime, default=datetime.utcnow)
+
+    job = relationship(Job, uselist=False)
+
+    def __init__(self, jobid, queue):
+        self.jobid = jobid
+        self.queue = queue
+
+    def __repr__(self):
+        return "<ActiveJob('%s','%s')>" % (self.job_id, self.queue)
+
+class RemoteQueue(Record):
+    """
+    *id* : Integer
+        Unique id for the remote server
+    *name* : String(80)
+        Name of the remote server
+    *public_key* : String(80)
+        Public key for the remote server
+    """
+    __tablename__ = "remote_queues"
+    id = Column(Integer, Sequence('remotequeueid_seq'),
+                   primary_key=True)
+    name = Column(String(80))
+    public_key = Column(String(80))
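
A small sketch of how these models are used together (the dispatcher does the
same thing); connect() must be called once before opening sessions::

    from jobqueue import db

    db.connect()                      # creates ~/.jobqueue.db if needed
    session = db.Session()
    job = db.Job(name='demo', origin='127.0.0.1',
                 notify='user@example.com', priority=0.0)
    session.add(job)
    session.commit()
    print(job.id, job.status)         # auto-assigned id, 'PENDING'
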
diff --git a/extra/jobqueue/dispatcher.py b/extra/jobqueue/dispatcher.py
new file mode 100644
index 0000000..a9f043e
--- /dev/null
+++ b/extra/jobqueue/dispatcher.py
@@ -0,0 +1,189 @@
+
+from datetime import datetime, timedelta
+import logging
+
+from sqlalchemy import and_, or_, func, select
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy.orm.exc import NoResultFound
+
+from . import runjob, store, db, notify
+from .db import Job, ActiveJob
+
+class Scheduler(object):
+    def __init__(self):
+        db.connect()
+
+    def jobs(self, status=None):
+        session = db.Session()
+        if status:
+            jobs = (session.query(Job)
+                .filter(Job.status==status)
+                .order_by(Job.priority)
+                )
+        else:
+            jobs = (session.query(Job)
+                .order_by(Job.priority)
+                )
+        return [j.id for j in jobs]
+    def submit(self, request, origin):
+        session = db.Session()
+        # Find number of jobs for the user in the last 30 days
+        n = (session.query(Job)
+            .filter(or_(Job.notify==request['notify'],Job.origin==origin))
+            .filter(Job.date >= datetime.utcnow() - timedelta(30))
+            .count()
+            )
+        #print "N",n
+        job = Job(name=request['name'],
+                  notify=request['notify'],
+                  origin=origin,
+                  priority=n)
+        session.add(job)
+        session.commit()
+        store.create(job.id)
+        store.put(job.id,'request',request)
+        return job.id
+
+    def _getjob(self, id):
+        session = db.Session()
+        return session.query(Job).filter(Job.id==id).first()
+
+    def results(self, id):
+        job = self._getjob(id)
+        try:
+            return runjob.results(id)
+        except KeyError:
+            if job:
+                return { 'status': job.status }
+            else:
+                return { 'status': 'UNKNOWN' }
+
+    def status(self, id):
+        job = self._getjob(id)
+        return job.status if job else 'UNKNOWN'
+
+    def info(self,id):
+        request = store.get(id,'request')
+        return request
+
+    def cancel(self, id):
+        session = db.Session()
+        (session.query(Job)
+             .filter(Job.id==id)
+             .filter(Job.status.in_(('ACTIVE', 'PENDING')))
+             .update({ 'status': 'CANCEL' })
+             )
+        session.commit()
+
+    def delete(self, id):
+        """
+        Delete any external storage associated with the job id.  Mark the
+        job as deleted.
+        """
+        session = db.Session()
+        (session.query(Job)
+             .filter(Job.id == id)
+             .update({'status': 'DELETE'})
+             )
+        session.commit()
+        store.destroy(id)
+
+    def nextjob(self, queue):
+        """
+        Make the next PENDING job active, where pending jobs are sorted
+        by priority.  Priority is assigned on the basis of usage and the
+        order of submissions.
+        """
+        session = db.Session()
+
+        # Define a query which returns the lowest job id of the pending jobs
+        # with the minimum priority
+        _priority = select([func.min(Job.priority)],
+                           Job.status=='PENDING')
+        min_id = select([func.min(Job.id)],
+                        and_(Job.priority == _priority,
+                             Job.status == 'PENDING'))
+
+        for _ in range(10): # Repeat if conflict over next job
+            # Get the next job, if there is one
+            try:
+                job = session.query(Job).filter(Job.id==min_id).one()
+                #print job.id, job.name, job.status, job.date, job.start, job.priority
+            except NoResultFound:
+                return {'request': None}
+
+            # Mark the job as active and record it in the active queue
+            (session.query(Job)
+             .filter(Job.id == job.id)
+             .update({'status': 'ACTIVE',
+                      'start': datetime.utcnow(),
+                      }))
+            activejob = db.ActiveJob(jobid=job.id, queue=queue)
+            session.add(activejob)
+
+            # If the job was already taken, roll back and try again.  The
+            # first process to record the job in the active list wins, and
+            # will change the job status from PENDING to ACTIVE.  Since the
+            # job is no longer pending, the retry will pick a different job, so this
+            # should not be an infinite loop.  Hopefully if the process
+            # that is doing the transaction gets killed in the middle then
+            # the database will be clever enough to roll back, otherwise
+            # we will never get out of this loop.
+            try:
+                session.commit()
+            except IntegrityError:
+                session.rollback()
+                continue
+            break
+        else:
+            logging.critical('dispatch could not assign job %s'%job.id)
+            raise IOError('dispatch could not assign job %s'%job.id)
+
+        request = store.get(job.id,'request')
+        # No reason to include time; email or twitter does that better than
+        # we can without client locale information.
+        notify.notify(user=job.notify,
+                      msg=job.name+" started",
+                      level=1)
+        return { 'id': job.id, 'request': request }
+
+    def postjob(self, id, results):
+        # TODO: redundancy check, confirm queue, check sig, etc.
+
+        # Update db
+        session = db.Session()
+        (session.query(Job)
+            .filter(Job.id == id)
+            .update({'status': results.get('status','ERROR'),
+                     'stop': datetime.utcnow(),
+                     })
+            )
+        (session.query(ActiveJob)
+            .filter(ActiveJob.jobid == id)
+            .delete())
+        try:
+            session.commit()
+        except:
+            session.rollback()
+
+        # Save results
+        store.put(id,'results',results)
+
+        # Post notification
+        job = self._getjob(id)
+        if job.status == 'COMPLETE':
+            if 'value' in results:
+                status_msg = " ended with %s"%results['value']
+            else:
+                status_msg = " complete"
+        elif job.status == 'ERROR':
+            status_msg = " failed"
+        elif job.status == 'CANCEL':
+            status_msg = " cancelled"
+        else:
+            status_msg = " with status "+job.status
+        # Note: no reason to include time; twitter or email will give it
+        # Plus, doing time correctly requires knowing the locale of the
+        # receiving client.
+        notify.notify(user=job.notify,
+                      msg=job.name+status_msg,
+                      level=2)
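
The claim/return cycle can be exercised without the web layer; a sketch using
the Scheduler directly, with a placeholder service name and an empty results
dictionary standing in for real work::

    from jobqueue.dispatcher import Scheduler

    sched = Scheduler()
    id = sched.submit({'name': 'demo', 'notify': '', 'service': 'fitter',
                       'data': {}}, origin='127.0.0.1')
    work = sched.nextjob(queue='local')     # marks the job ACTIVE
    # ... run the work described by work['request'] ...
    sched.postjob(work['id'], {'status': 'COMPLETE', 'value': 0.0})
    print(sched.status(id))                 # 'COMPLETE'
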
diff --git a/extra/jobqueue/jobid.py b/extra/jobqueue/jobid.py
new file mode 100644
index 0000000..cd68474
--- /dev/null
+++ b/extra/jobqueue/jobid.py
@@ -0,0 +1,25 @@
+import os
+
+#from park import config
+JOBID_PATH = os.path.expanduser('~/.jobqueue.job')
+
+FID = None
+CURRENT = None
+def get_jobid():
+    global FID, CURRENT
+    if FID is None:
+        path = JOBID_PATH
+        if not os.path.exists(path):
+            dir = os.path.dirname(path)
+            if not os.path.exists(dir):
+                os.makedirs(dir)
+            FID = open(path,'w+')
+            CURRENT = 0
+        else:
+            FID = open(path,'r+')
+            CURRENT = int(FID.read())
+    CURRENT += 1
+    FID.seek(0)
+    FID.write(str(CURRENT))
+    FID.flush()
+    return str(CURRENT)
diff --git a/extra/jobqueue/mimedict.py b/extra/jobqueue/mimedict.py
new file mode 100644
index 0000000..6f93345
--- /dev/null
+++ b/extra/jobqueue/mimedict.py
@@ -0,0 +1,57 @@
+"""
+    Copyright (C) 2008 Benjamin O'Steen
+
+    This file is part of python-fedoracommons.
+
+    python-fedoracommons is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    python-fedoracommons is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with python-fedoracommons.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+__license__ = 'GPL http://www.gnu.org/licenses/gpl.txt'
+__author__ = "Benjamin O'Steen <bosteen at gmail.com>"
+__version__ = '0.1'
+
+class mimeTypes(object):
+    def getDictionary(self):
+        mimetype_to_extension = {}
+        extension_to_mimetype = {}
+        mimetype_to_extension['text/plain'] = 'txt'
+        mimetype_to_extension['text/xml'] = 'xml'
+        mimetype_to_extension['text/css'] = 'css'
+        mimetype_to_extension['text/javascript'] = 'js'
+        mimetype_to_extension['text/rtf'] = 'rtf'
+        mimetype_to_extension['text/calendar'] = 'ics'
+        mimetype_to_extension['application/msword'] = 'doc'
+        mimetype_to_extension['application/msexcel'] = 'xls'
+        mimetype_to_extension['application/x-msword'] = 'doc'
+        mimetype_to_extension['application/vnd.ms-excel'] = 'xls'
+        mimetype_to_extension['application/vnd.ms-powerpoint'] = 'ppt'
+        mimetype_to_extension['application/pdf'] = 'pdf'
+        mimetype_to_extension['text/comma-separated-values'] = 'csv'
+
+
+        mimetype_to_extension['image/jpeg'] = 'jpg'
+        mimetype_to_extension['image/gif'] = 'gif'
+        mimetype_to_extension['image/jpg'] = 'jpg'
+        mimetype_to_extension['image/tiff'] = 'tiff'
+        mimetype_to_extension['image/png'] = 'png'
+
+        # And hacky reverse lookups
+        for mimetype in mimetype_to_extension:
+            extension_to_mimetype[mimetype_to_extension[mimetype]] = mimetype
+
+        mimetype_extension_mapping = {}
+        mimetype_extension_mapping.update(mimetype_to_extension)
+        mimetype_extension_mapping.update(extension_to_mimetype)
+
+        return mimetype_extension_mapping
diff --git a/extra/jobqueue/notify.py b/extra/jobqueue/notify.py
new file mode 100644
index 0000000..105bc37
--- /dev/null
+++ b/extra/jobqueue/notify.py
@@ -0,0 +1,64 @@
+
+import os
+import logging
+
+TWITTER_KEYS = '~/.ssh/twitter'
+EMAIL_SERVER = 'localhost'
+EMAIL_SENDER = 'bumps@reflectometry.org'
+
+def notify(user, msg, body="No body", level=1):
+    """
+    Send a notification message to the user regarding the job status.
+    """
+
+    if not user:
+        pass
+    elif user.startswith('@'):
+        tweet(user, msg)
+    elif '@' in user:
+        email(EMAIL_SENDER, [user], body, subject=msg, server=EMAIL_SERVER)
+    else:
+        logging.debug("%s : %s"%(user, msg))
+
+twitter = None
+def tweet(user, msg):
+    global twitter
+    if twitter is None: twitter = open_twitter(TWITTER_KEYS)
+    twitter.direct_messages.new(user=user, text=msg)
+
+def open_twitter(authfile):
+    from twitter import Twitter, OAuth #@UnresolvedImport twitter is optional
+    # Load credentials from authfile
+    for line in open(os.path.expanduser(authfile)).readlines():
+        exec(line)
+    auth = OAuth(access_token, access_secret, consumer_key, consumer_secret) #@UndefinedVariable comes from authfile
+    return Twitter(auth=auth)
+
+
+def email(sender, receivers, message, subject='no subject', server='localhost'):
+    """
+    Send an email message to a group of receivers
+    """
+    import smtplib
+
+    if ':' in server:
+        host,port = server.split(':')
+        port = int(port)
+    else:
+        host,port = server,25
+    header="From: %s\r\nTo: %s\r\nSubject: %s\r\n\r\n"
+    header %= sender,", ".join(receivers),subject
+    #print "Sending the following mail message:\n"+header+message
+    #print "Trying to connect to",host,port
+    smtp = smtplib.SMTP(host,port)
+    #print "Connection established"
+    smtp.sendmail(sender,receivers,header+message)
+    #print "Mail sent from",sender,"to",", ".join(receivers)
+    smtp.quit()
+
+
+if __name__ == "__main__":
+    msg = 'test notification'
+    body = 'See http://reflectometry.org for details.'
+    #notify('@pkienzle', msg, body)
+    notify('paknist@gmail.com', msg, body)
diff --git a/extra/jobqueue/resourcelimits.py b/extra/jobqueue/resourcelimits.py
new file mode 100644
index 0000000..6222cb6
--- /dev/null
+++ b/extra/jobqueue/resourcelimits.py
@@ -0,0 +1,43 @@
+"""
+Resource limit handling.
+"""
+
+try:
+    from signal import signal, SIGXCPU, SIG_IGN
+    from resource import setrlimit, RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA
+except:
+    print("limits not supported")
+    RLIMIT_CPU=RLIMIT_FSIZE=RLIMIT_DATA=0
+    SIGXCPU = SIG_IGN = 0
+    def setrlimit(resource,limits): pass
+    def signal(value, handler): pass
+
+class CPULimit(Exception): pass
+
+def _xcpuhandler(signum, frame):
+    signal(SIGXCPU, SIG_IGN)
+    raise CPULimit("CPU time exceeded.")
+
+def cpu_limit(limit=3600*2, recovery_time=120):
+    """
+    Set limit on the amount of CPU time available to the process.
+
+    Raises resourcelimits.CPULimit when the cpu limit is exceeded, after
+    which the program has the number of seconds of recovery time left to
+    clean up.
+    """
+    setrlimit(RLIMIT_CPU, (limit,limit+recovery_time))
+    signal(SIGXCPU, _xcpuhandler)
+
+def disk_limit(limit=1e9):
+    """
+    Sets a maximum file size that can be written for the program.
+    """
+    setrlimit(RLIMIT_FSIZE, (int(limit), int(limit+2e6)))
+    # error caught by normal IO handlers
+
+def memory_limit(limit=2e9):
+    """
+    Sets a maximum memory limit for the program.
+    """
+    setrlimit(RLIMIT_DATA, (int(limit), int(limit+2e6)))
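
Typical use is to set the limits at the start of a job and catch the CPU
signal near the top level so partial results can be saved; run_the_job and
save_partial_results below are placeholders::

    from jobqueue import resourcelimits

    resourcelimits.cpu_limit(limit=3600, recovery_time=60)
    resourcelimits.memory_limit(1e9)
    try:
        run_the_job()
    except resourcelimits.CPULimit:
        save_partial_results()   # must finish within recovery_time seconds
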
diff --git a/extra/jobqueue/rest.py b/extra/jobqueue/rest.py
new file mode 100644
index 0000000..9e5b77c
--- /dev/null
+++ b/extra/jobqueue/rest.py
@@ -0,0 +1,176 @@
+# This code is in the public domain
+# Author: Paul Kienzle
+
+# TODO: better cache control; some requests shouldn't be cached
+# TODO: compression
+# TODO: streaming file transfer
+# TODO: single/multifile response objects?
+
+# Inspirations:
+#   Upload files in python:
+#     http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
+#   urllib2_file:
+#     Fabien Seisen: <fabien at seisen.org>
+#   MultipartPostHandler:
+#     Will Holcomb <wholcomb at gmail.com>
+#   python-rest-client
+#     Benjamin O'Steen
+from io import BytesIO
+from six.moves.urllib import parse
+
+import email
+import httplib2
+import mimetypes
+import uuid
+
+
+
+DEFAULT_CONTENT="application/octet-stream"
+class Connection(object):
+    def __init__(self, url, username=None, password=None):
+        self.url = url
+        http = httplib2.Http() #".cache")
+        http.follow_all_redirects = True
+        if username and password:
+            http.add_credentials(username, password)
+        self.url  = url
+        self.http = http
+
+    def get(self, resource, fields={}):
+        return _request(self.http, 'GET', self.url+resource, fields=fields)
+    def head(self, resource, fields={}):
+        return _request(self.http, 'HEAD', self.url+resource, fields=fields)
+    def post(self, resource, fields={}, body=None, mimetype=DEFAULT_CONTENT):
+        return _request(self.http, 'POST', self.url+resource, fields=fields,
+                        body=body, mimetype=mimetype)
+    def put(self, resource, fields={}, body=None, mimetype=DEFAULT_CONTENT):
+        return _request(self.http, 'PUT', self.url+resource, fields=fields,
+                        body=body, mimetype=mimetype)
+    def postfiles(self, resource, fields={}, files=None):
+        return _request(self.http, 'POST', self.url+resource,
+                        fields=fields, files=files)
+    def putfiles(self, resource, fields={}, files=None):
+        return _request(self.http, 'PUT', self.url+resource,
+                        fields=fields, files=files)
+    def delete(self, resource, fields={}):
+        return _request(self.http, 'DELETE', self.url+resource, fields=fields)
+
+def _request(http, verb, location, fields=None,
+             body=None, mimetype=None, files=None):
+
+    headers = {'User-Agent': 'Basic Agent'}
+    if files:
+        if body:
+            raise TypeError("Use fields instead of body with file upload")
+        # Note: this section is public domain; the old code wasn't working
+        # Build the multipart body as bytes so that binary file content can
+        # be included alongside the text fields.
+        boundary = uuid.uuid4().hex
+        buf = BytesIO()
+        for key,value in fields.items():
+            buf.write(('--%s\r\n'%boundary).encode('utf-8'))
+            buf.write(('Content-Disposition: form-data; name="%s"' % key).encode('utf-8'))
+            buf.write(('\r\n\r\n%s\r\n'%value).encode('utf-8'))
+        for filename in files:
+            content_type = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
+            buf.write(('--%s\r\n'%boundary).encode('utf-8'))
+            buf.write(('Content-Disposition: form-data; name="file"; filename="%s"\r\n' % filename).encode('utf-8'))
+            buf.write(('Content-Type: %s\r\n\r\n' % content_type).encode('utf-8'))
+            buf.write(open(filename,'rb').read())
+            buf.write(b'\r\n')
+        buf.write(('--%s--\r\n'%boundary).encode('utf-8'))
+        body = buf.getvalue()
+        headers['Content-Type'] = 'multipart/form-data; boundary='+boundary
+        headers['Content-Length'] = str(len(body))
+        #print "===== body =====\n",body
+    elif body:
+        if fields:
+            raise TypeError("Body, if included, should encode fields directly.")
+        headers['Content-Type']=mimetype
+        headers['Content-Length'] = str(len(body))
+    elif fields:
+        if verb == "GET":
+            location += u'?' + parse.urlencode(fields)
+            body = u''
+        else:
+            headers['Content-Type'] = 'application/x-www-form-urlencoded'
+            body = parse.urlencode(fields)
+
+    #print "uri",location
+    #print "body",body
+    #print "headers",headers
+    #print "method",verb
+    try:
+        response, content = http.request(location, verb,
+                                         body=body, headers=headers)
+    except AttributeError:
+        raise IOError("Could not open "+location)
+
+    return response, content.decode('UTF-8')
+
+
+# Table mapping response codes to messages; entries have the
+# form {code: (shortmessage, longmessage)}.
+RESPONSE = {
+    100: ('Continue', 'Request received, please continue'),
+    101: ('Switching Protocols',
+          'Switching to new protocol; obey Upgrade header'),
+
+    200: ('OK', 'Request fulfilled, document follows'),
+    201: ('Created', 'Document created, URL follows'),
+    202: ('Accepted',
+          'Request accepted, processing continues off-line'),
+    203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
+    204: ('No Content', 'Request fulfilled, nothing follows'),
+    205: ('Reset Content', 'Clear input form for further input.'),
+    206: ('Partial Content', 'Partial content follows.'),
+
+    300: ('Multiple Choices',
+          'Object has several resources -- see URI list'),
+    301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
+    302: ('Found', 'Object moved temporarily -- see URI list'),
+    303: ('See Other', 'Object moved -- see Method and URL list'),
+    304: ('Not Modified',
+          'Document has not changed since given time'),
+    305: ('Use Proxy',
+          'You must use proxy specified in Location to access this '
+          'resource.'),
+    307: ('Temporary Redirect',
+          'Object moved temporarily -- see URI list'),
+
+    400: ('Bad Request',
+          'Bad request syntax or unsupported method'),
+    401: ('Unauthorized',
+          'No permission -- see authorization schemes'),
+    402: ('Payment Required',
+          'No payment -- see charging schemes'),
+    403: ('Forbidden',
+          'Request forbidden -- authorization will not help'),
+    404: ('Not Found', 'Nothing matches the given URI'),
+    405: ('Method Not Allowed',
+          'Specified method is invalid for this server.'),
+    406: ('Not Acceptable', 'URI not available in preferred format.'),
+    407: ('Proxy Authentication Required', 'You must authenticate with '
+          'this proxy before proceeding.'),
+    408: ('Request Timeout', 'Request timed out; try again later.'),
+    409: ('Conflict', 'Request conflict.'),
+    410: ('Gone',
+          'URI no longer exists and has been permanently removed.'),
+    411: ('Length Required', 'Client must specify Content-Length.'),
+    412: ('Precondition Failed', 'Precondition in headers is false.'),
+    413: ('Request Entity Too Large', 'Entity is too large.'),
+    414: ('Request-URI Too Long', 'URI is too long.'),
+    415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
+    416: ('Requested Range Not Satisfiable',
+          'Cannot satisfy request range.'),
+    417: ('Expectation Failed',
+          'Expect condition could not be satisfied.'),
+
+    500: ('Internal Server Error', 'Server got itself in trouble'),
+    501: ('Not Implemented',
+          'Server does not support this operation'),
+    502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
+    503: ('Service Unavailable',
+          'The server cannot process the request due to a high load'),
+    504: ('Gateway Timeout',
+          'The gateway server did not receive a timely response'),
+    505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
+    }
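
For GET requests the fields are encoded into the query string, while POST and
PUT send either an explicit body or urlencoded fields.  A short sketch against
an assumed local server; the query parameter is only an illustration::

    from jobqueue import rest

    conn = rest.Connection("http://localhost:5000")
    headers, body = conn.get('/jobs.json')
    headers, body = conn.get('/jobs.json', fields={'page': 1})   # -> /jobs.json?page=1
    headers, body = conn.post('/jobs.json', body='{"name": "demo"}',
                              mimetype='application/json')
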
diff --git a/extra/jobqueue/runjob.py b/extra/jobqueue/runjob.py
new file mode 100644
index 0000000..3523864
--- /dev/null
+++ b/extra/jobqueue/runjob.py
@@ -0,0 +1,87 @@
+import os
+import sys
+import traceback
+
+from . import store
+from . import services
+
+try:
+    from . import resourcelimits
+    def setlimits():
+        resourcelimits.cpu_limit(2*3600,120)
+        resourcelimits.disk_limit(1e9)
+        resourcelimits.memory_limit(2e9)
+except:
+    def setlimits(): pass
+
+
+def build_command(id, request):
+    """
+    Build a script file to run the service and return the command
+    needed to start it.
+
+    The script will be in the job execution path.
+
+    The command includes the python interpreter name of the caller.
+
+    The resulting command can be used from "srun" within the slurm queue.
+    """
+    path = store.path(id)
+    script = """
+import os
+from jobqueue import runjob, store
+#import sys; print "\\n".join(sys.path)
+id = "%s"
+request = store.get(id,"request")
+runjob.run(id, request)
+"""%id
+    scriptfile = os.path.join(path, 'runner.py')
+    open(scriptfile,'w').write(script)
+    return sys.executable+" "+scriptfile
+
+def run(id, request):
+    """
+    Load a service and run the request.
+
+
+    """
+    try:
+        result = {
+              'status': 'COMPLETE',
+              'result': _run(id, request),
+              }
+    except:
+        # Trim the traceback to exclude run and _run.
+        exc_type,exc_value,exc_trace = sys.exc_info()
+        relevant_list = traceback.extract_tb(exc_trace)[2:]
+        message = traceback.format_exception_only(exc_type,exc_value)
+        trace = traceback.format_list(relevant_list)
+        result = {
+              'status': 'ERROR',
+              'error': "".join(message).rstrip(),
+              'trace': "".join(trace).rstrip(),
+            }
+    store.put(id,'results',result)
+
+def _run(id, request):
+    # Prepare environment
+    #print "\n".join(sys.path)
+    path = store.path(id)
+    store.create(id)  # Path should already exist, but just in case...
+    os.chdir(path)    # Make sure the program starts in the path
+    sys.stdout = open(os.path.join(path,'stdout.txt'),'w')
+    sys.stderr = open(os.path.join(path,'stderr.txt'),'w')
+    setlimits()
+
+    # Run service
+    service = getattr(services, request['service'], None)
+    if service is None:
+        raise ValueError("service <%s> not available"%request['service'])
+    else:
+        return service(request)
+
+def results(id):
+    results = store.get(id,'results')
+    if results is None:
+        raise RuntimeError("Results for %d cannot be empty"%id)
+    return results
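
build_command is aimed at batch schedulers: the request is staged into the
store, a runner script is written next to it, and the returned command line
can be handed to srun or a plain shell.  A sketch, assuming the jobqueue
package is on the Python path and using a placeholder job id and service::

    import subprocess
    from jobqueue import runjob, store

    id = "42"
    store.create(id)
    store.put(id, 'request', {'service': 'fitter', 'data': {}})
    cmd = runjob.build_command(id, store.get(id, 'request'))
    subprocess.call(cmd, shell=True)        # or hand cmd to "srun" under slurm
    print(runjob.results(id)['status'])
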
diff --git a/extra/jobqueue/server.py b/extra/jobqueue/server.py
new file mode 100644
index 0000000..06d0d0d
--- /dev/null
+++ b/extra/jobqueue/server.py
@@ -0,0 +1,322 @@
+# TODO: Add /jobs/<id>/data.zip to fetch all files at once in a zip file format
+# TODO: Store completed work in /path/to/store/<id>.zip
+
+import os
+import logging
+import json
+import pickle
+import flask
+from flask import redirect, url_for, flash
+from flask import send_from_directory
+from werkzeug.utils import  secure_filename
+
+from . import store
+
+app = flask.Flask(__name__)
+
+# ==== File upload specialization ===
+# By uploading files into a temporary file provided by store, we
+# can then move the files directly into place on the store rather
+# than copy them.  This gives us reduced storage, reduced memory
+# and reduced CPU.
+class Request(flask.Request):
+    # Upload directly into temporary files.
+    def _get_file_stream(self, total_content_length, content_type,
+                         filename=None, content_length=None):
+        #print "returning named temporary file for",filename
+        return store.tempfile()
+app.request_class = Request
+
+
+# ==== Format download specialization ===
+def _format_response(response, format='json', template=None):
+    """
+    Return response as a particular format.
+    """
+    #print "response",response
+    if format == 'html':
+        if template is None: flask.abort(400)
+        return flask.render_template(template, **response)
+    elif format == 'json':
+        return flask.jsonify(**dict((str(k),v) for k,v in response.items()))
+    elif format == 'pickle':
+        return pickle.dumps(response)
+    else:
+        flask.abort(400) # Bad request
+
+
+@app.route('/jobs.<format>', methods=['GET'])
+def list_jobs(format='json'):
+    """
+    GET /jobs.<format>
+
+    Return a list of all job ids.
+    """
+    response = dict(jobs=SCHEDULER.jobs())
+    return _format_response(response, format, template='list_jobs.html')
+
+@app.route('/jobs/<any(u"pending",u"active",u"error",u"complete"):status>.<format>',
+           methods=['GET'])
+def filter_jobs(status, format='json'):
+    """
+    GET /jobs/<pending|active|error|complete>.<format>
+
+    Return all jobs with a particular status.
+    """
+    response = dict(jobs=SCHEDULER.jobs(status=str(status).upper()))
+    return _format_response(response, format, template='list_jobs.html')
+
+@app.route('/jobs.<format>', methods=['POST'])
+def create_job(format='json'):
+    """
+    POST /jobs.<format>
+
+    Schedule a new job, return the job record.
+
+    The POST data should contain::
+
+        {
+        notify: "<user at email or @twitterid>",
+        service: "<name of service>",
+        version: "<service version>",
+        name: "<request name>",
+        data: "<service data>",
+        ...
+        }
+
+    The response contains::
+
+        {
+        id: <job id>,
+        job: <job details>
+        }
+
+    Job details is simply a copy of the original request.
+
+    """
+    request = flask.request.json #@UndefinedVariable in request proxy
+    if request is None: flask.abort(415) # Unsupported media
+    id = SCHEDULER.submit(request, origin=flask.request.remote_addr) #@UndefinedVariable in request proxy
+    flash('Job %s scheduled' % id)
+    response = {'id': id, 'job': SCHEDULER.info(id)}
+    #return redirect(url_for('show_job', id=id, format=format))
+    return _format_response(response, format=format, template='show_job.html')
+
+@app.route('/jobs/<int:id>.<format>', methods=['GET'])
+def show_job(id, format='json'):
+    """
+    GET /jobs/<id>.<format>
+
+    Get job record by id.
+
+    The response contains::
+
+        {
+        id: <job id>,
+        job: <job details>
+        }
+
+    Job details is simply a copy of the original request.
+    """
+    response = {'id': id, 'job': SCHEDULER.info(id)}
+    return _format_response(response, format=format, template='show_job.html')
+
+@app.route('/jobs/<int:id>/results.<format>', methods=['GET'])
+def get_results(id, format='json'):
+    """
+    GET /jobs/<id>/results.<format>
+
+    Get job results by id.
+
+    Returns::
+
+        {
+        id: <job id>
+        status: 'PENDING|ACTIVE|COMPLETE|ERROR|UNKNOWN',
+        result: <job value>     (absent if status != COMPLETE)
+        trace: <error trace>    (absent if status != ERROR)
+        }
+    """
+    response = SCHEDULER.results(id)
+    response['id'] = id
+    #print "returning response",response
+    return _format_response(response, format=format)
+
+@app.route('/jobs/<int:id>/status.<format>', methods=['GET'])
+def get_status(id, format='json'):
+    """
+    GET /jobs/<id>/status.<format>
+
+    Get job status by id.
+
+    Returns::
+
+        {
+        id: <job id>,
+        status: 'PENDING|ACTIVE|COMPLETE|ERROR|UNKNOWN'
+        }
+    """
+    response = { 'status': SCHEDULER.status(id) }
+    response['id'] = id
+    return _format_response(response, format=format)
+
+
+@app.route('/jobs/<int:id>.<format>', methods=['DELETE'])
+def delete_job(id, format='json'):
+    """
+    DELETE /jobs/<id>.<format>
+
+    Deletes a job, returning the list of remaining jobs as <format>
+    """
+    SCHEDULER.delete(id)
+    flash('Job %s deleted' % id)
+    response = dict(jobs=SCHEDULER.jobs())
+    return _format_response(response, format=format, template="list_jobs.html")
+    #return redirect(url_for('list_jobs', id=id, format=format))
+
+@app.route('/jobs/nextjob.<format>', methods=['POST'])
+def fetch_work(format='json'):
+    # TODO: verify signature
+    request = flask.request.json #@UndefinedVariable in request proxy
+    if request is None: flask.abort(415) # Unsupported media
+    job = SCHEDULER.nextjob(queue=request['queue'])
+    return _format_response(job, format=format)
+
+@app.route('/jobs/<int:id>/postjob', methods=['POST'])
+def return_work(id):
+    # TODO: verify signature corresponds to flask.request.form['queue']
+    # TODO: verify that work not already returned by another client
+    try:
+        #print "decoding <%s>"%flask.request.form['results']
+        results = json.loads(flask.request.form['results']) #@UndefinedVariable in request proxy
+    except:
+        import traceback;
+        logging.error(traceback.format_exc())
+        results = {
+            'status': 'ERROR',
+            'error': 'No results returned from the server',
+            'trace': flask.request.form['results'], #@UndefinedVariable in request proxy
+        }
+    _transfer_files(id)
+    SCHEDULER.postjob(id, results)
+    # Should be signaling code 204: No content
+    return _format_response({},format="json")
+
+@app.route('/jobs/<int:id>/data/index.<format>')
+def listfiles(id, format):
+    try:
+        path = store.path(id)
+        files = sorted(os.listdir(path))
+        finfo = [(f,os.path.getsize(os.path.join(path,f)))
+                 for f in files if os.path.isfile(os.path.join(path,f))]
+    except:
+        finfo = []
+    response = { 'files': finfo }
+    response['id'] = id
+    return _format_response(response, format=format, template="index.html")
+
+# TODO: don't allow putfiles without authentication
+#@app.route('/jobs/<int:id>/data/', methods=['GET','PUT'])
+def putfiles(id):
+    if flask.request.method=='PUT': #@UndefinedVariable in request proxy
+        # TODO: verify signature
+        _transfer_files(id)
+    return redirect(url_for('getfile',id=id,filename='index.html'))
+
+@app.route('/jobs/<int:id>/data/<filename>')
+def getfile(id, filename):
+    as_attachment = filename.endswith('.htm') or filename.endswith('.html')
+    if filename.endswith('.json'):
+        mimetype = "application/json"
+    else:
+        mimetype = None
+
+    return send_from_directory(store.path(id), filename,
+                               mimetype=mimetype, as_attachment=as_attachment)
+
+#@app.route('/jobs/<int:id>.<format>', methods=['PUT'])
+#def update_job(id, format='.json'):
+#    """
+#    PUT /job/<id>.<format>
+#
+#    Updates a job using data from the job submission form.
+#    """
+#    book = Book(id=id, name=u"I don't know") # Your query
+#    book.name = request.form['name'] # Save it
+#    flash('Book %s updated!' % book.name)
+#    return redirect(url_for('show_job', id=id, format=format))
+
+#@app.route('/jobs/new.html')
+#def new_job_form():
+#    """
+#    GET /jobs/new
+#
+#    Returns a job submission form.
+#    """
+#    return render_template('new_job.html')
+
+#@app.route('/jobss/<int:id>/edit.html')
+#def edit_job_form(id):
+#    """
+#    GET /books/<id>/edit
+#
+#    Form for editing job details
+#    """
+#    book = Book(id=id, name=u'Something crazy') # Your query
+#    return render_template('edit_book.html', book=book)
+
+def _transfer_files(jobid):
+    logging.warn("XSS attacks possible if stored file is mimetype html")
+    for file in flask.request.files.getlist('file'): #@UndefinedVariable in request proxy
+        if not file: continue
+        filename = secure_filename(os.path.split(file.filename)[1])
+        # Because we used named temporary files that aren't deleted on
+        # closing as our streaming file type, we can simply move the
+        # resulting files to the store rather than copying them.
+        file.stream.close()
+        logging.warn("moving %s -> %s"%(file.stream.name, os.path.join(store.path(jobid),filename)))
+        os.rename(file.stream.name, os.path.join(store.path(jobid),filename))
+
+
+def init_scheduler(conf):
+    if conf == 'slurm':
+        from . import slurm
+        Scheduler = slurm.Scheduler
+    elif conf == 'direct':
+        logging.warn("direct scheduler is not a good choice!")
+        try: os.nice(19)
+        except: pass
+        from . import simplequeue
+        Scheduler = simplequeue.Scheduler
+    elif conf == 'dispatch':
+        from . import dispatcher
+        Scheduler = dispatcher.Scheduler
+    else:
+        raise ValueError("unknown scheduler %s"%conf)
+    return Scheduler()
+
+def serve():
+    app.run(host='0.0.0.0')
+
+def fullpath(p): return os.path.abspath(os.path.expanduser(p))
+def configure(jobstore=None, jobkey=None, jobdb=None, scheduler=None):
+    global SCHEDULER, app
+
+    if jobstore:
+        store.ROOT = fullpath(jobstore)
+    if jobkey:
+        app.config['SECRET_KEY'] = open(fullpath(jobkey)).read()
+    if jobdb:
+        from . import db
+        db.DB_URI = jobdb
+
+    SCHEDULER = init_scheduler(scheduler)
+
+if __name__ == '__main__':
+    configure(jobstore='/tmp/server/%s',
+              jobdb='sqlite:///tmp/jobqueue.db',
+              jobkey='~/.bumps/key',
+              scheduler='dispatch',
+              )
+    app.config['DEBUG'] = True
+    serve()
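
The routes above define the whole job protocol, so any HTTP client can drive the queue.
A minimal sketch, assuming the server is running locally on Flask's default port and that
the third-party requests package is installed (the bundled jobqueue.client module is the
usual way to talk to the queue)::

    import time
    import requests   # assumption: not shipped with jobqueue

    BASE = 'http://localhost:5000'

    # Submit a job; the keys mirror the create_job docstring.
    job = requests.post(BASE + '/jobs.json',
                        json={'service': 'count', 'data': 200,
                              'name': 'short count', 'notify': 'me'}).json()
    jobid = job['id']

    # Poll the status route until the scheduler reports a terminal state.
    while True:
        status = requests.get('%s/jobs/%d/status.json' % (BASE, jobid)).json()['status']
        if status not in ('PENDING', 'ACTIVE'):
            break
        time.sleep(1)

    # Fetch the result record: status plus result, or an error trace.
    print(requests.get('%s/jobs/%d/results.json' % (BASE, jobid)).json())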
diff --git a/extra/jobqueue/services.py b/extra/jobqueue/services.py
new file mode 100644
index 0000000..bc498fd
--- /dev/null
+++ b/extra/jobqueue/services.py
@@ -0,0 +1,29 @@
+"""
+Available services.
+
+During configuration of your worker you can add and remove services from
+the system using, for example::
+
+    from jobqueue import services
+
+    del services.fitter
+
+    import integration
+    services.integrate = integration.service.service
+"""
+from __future__ import print_function
+
+# TODO: modify runjob so that services can be downloaded
+# TODO: support over the wire transport for privileged users
+
+def fitter(request):
+    from bumps.fitservice import fitservice
+    return fitservice(request)
+
+def count(request):
+    print("counting")
+    total = 0
+    for _ in range(request['data']):
+        total += 1
+    print("done")
+    return total
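
A service receives the job record that was submitted and returns a value that the worker
stores as the job result, as count() above shows with request['data'].  A minimal sketch
of adding a new one (the echo service name is hypothetical)::

    from jobqueue import services

    def echo(request):
        # request is the submitted job record; 'data' carries the caller's payload
        return {'echo': request['data']}

    # register it for workers, just as the module docstring registers integrate
    services.echo = echo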
diff --git a/extra/jobqueue/simplequeue.py b/extra/jobqueue/simplequeue.py
new file mode 100644
index 0000000..4cc7045
--- /dev/null
+++ b/extra/jobqueue/simplequeue.py
@@ -0,0 +1,83 @@
+import threading
+from multiprocessing import Process
+
+from . import runjob, jobid, store
+
+class Scheduler(object):
+    def __init__(self):
+        self._lock = threading.Lock()
+        self._nextjob = threading.Event()
+        self._jobs = []
+        self._pending = []
+        self._info = {}
+        self._status = {}
+        self._results = {}
+        self._jobmonitor = threading.Thread(target=self._run_queue)
+        self._jobmonitor.start()
+        self._current_id = None
+    def _run_queue(self):
+        while True:
+            self._nextjob.wait()
+            with self._lock:
+                if not self._pending:
+                    self._nextjob.clear()
+                    continue
+                self._current_id = self._pending.pop(0)
+                self._status[self._current_id] = 'ACTIVE'
+                request = self._info[self._current_id]
+                self._stopping = None
+                self._current_process = Process(target=runjob.run,
+                                                args=(self._current_id,request))
+            self._current_process.start()
+            self._current_process.join()
+            results = runjob.results(self._current_id)
+            with self._lock:
+                self._results[self._current_id] = results
+                self._status[self._current_id] = results['status']
+
+    def jobs(self, status=None):
+        with self._lock:
+            if status is None:
+                response = self._jobs[:]
+            else:
+                response = [j for j in self._jobs if self._status[j] == status]
+        return response
+    def submit(self, request, origin):
+        with self._lock:
+            id = int(jobid.get_jobid())
+            store.create(id)
+            store.put(id,'request',request)
+            request['id'] = id
+            self._jobs.append(id)
+            self._info[id] = request
+            self._status[id] = 'PENDING'
+            self._results[id] = {'status':'PENDING'}
+            self._pending.append(id)
+            self._nextjob.set()
+        return id
+    def results(self, id):
+        with self._lock:
+            return self._results.get(id,{'status':'UNKNOWN'})
+    def status(self, id):
+        with self._lock:
+            return self._status.get(id,'UNKNOWN')
+    def info(self, id):
+        with self._lock:
+            return self._info[id]
+    def cancel(self, id):
+        with self._lock:
+            try: self._pending.remove(id)
+            except ValueError: pass
+            if self._current_id == id and not self._stopping == id:
+                self._stopping = id
+                self._current_process.terminate()
+            self._status[id] = 'CANCEL'
+    def delete(self, id):
+        self.cancel(id)
+        with self._lock:
+            try: self._jobs.remove(id)
+            except ValueError: pass
+            self._info.pop(id, None)
+            self._results.pop(id, None)
+            self._status.pop(id, None)
+        store.destroy(id)
diff --git a/extra/jobqueue/slurm.py b/extra/jobqueue/slurm.py
new file mode 100644
index 0000000..7dc76ba
--- /dev/null
+++ b/extra/jobqueue/slurm.py
@@ -0,0 +1,184 @@
+"""
+Note: slurm configuration for debian::
+
+    sudo apt-get install slurm-llnl
+    sudo vi /etc/slurm-llnl/slurm.conf
+    sudo mkdir /var/run/slurm-llnl
+    sudo chown slurm:slurm /var/run/slurm-llnl
+
+Now put it in rc.d::
+
+    sudo update-rc.d munge defaults
+    sudo update-rc.d slurm-llnl defaults
+
+Or run the following each time::
+
+    sudo service munge start
+    sudo service slurm-llnl start
+
+"""
+
+import os
+import subprocess
+import multiprocessing
+
+from .jobid import get_jobid
+from . import store, runjob
+
+#from park import config
+#from park import environment
+
+# Queue status words
+_ACTIVE = ["RUNNING", "COMPLETING"]
+_INACTIVE = ["PENDING", "SUSPENDED"]
+_ERROR = ["CANCELLED", "FAILED", "TIMEOUT", "NODE_FAIL"]
+_COMPLETE = ["COMPLETED"]
+
+
+class Scheduler(object):
+    def jobs(self, status=None):
+        """
+        Return a list of jobs on the queue.
+        """
+        #TODO: list completed but not deactivated as well as completed
+        #print "queue"
+        output,_ = _slurm_exec('squeue','-o', '%i %M %j')
+        #print "output",output
+        return output
+
+    def deactivate(self, jobid):
+        #TODO: remove the job from the active list
+        pass
+
+    # Job specific commands
+    def submit(self, request, origin):
+        """
+        Put a command on batch queue, returning its job id.
+        """
+        #print "submitting job",jobid
+        jobid = get_jobid()
+        store.create(jobid)
+        store.put(jobid,'request',request)
+
+        service = runjob.build_command(jobid,request)
+
+        num_workers = multiprocessing.cpu_count()
+        jobdir = store.path(jobid)
+        script = os.path.join(jobdir,"J%s.sh"%jobid)
+        #commands = ['export %s="%s"'%(k,v) for k,v in config.env().items()]
+        commands = ["srun -n 1 -K -o slurm-out.txt nice -n 19 %s &"%service]
+#                     "srun -n %d -K -o kernel.out nice -n 19 %s"%(num_workers,kernel)]
+        create_batchfile(script,commands)
+
+        _out,err = _slurm_exec('sbatch',
+                            '-n',str(num_workers), # Number of tasks
+                            #'-K', # Kill if any process returns error
+                            #'-o', 'job%j.out',  # output file
+                            '-D',jobdir,  # Start directory
+                            script)
+        if not err.startswith('sbatch: Submitted batch job '):
+            raise RuntimeError(err)
+        slurmid = err[28:].strip()
+        store.put(jobid,'slurmid',slurmid)
+        return jobid
+
+    def results(self, id):
+        try:
+            return runjob.results(id)
+        except KeyError:
+            pass
+
+        return { 'status': self.status(id) }
+
+    def info(self,id):
+        request = store.get(id,'request')
+        request['id'] = id
+        return request
+
+    def status(self, id):
+        """
+        Returns the following states:
+        PENDING   --  Job is waiting to be processed
+        ACTIVE    --  Job is busy being processed through the queue
+        COMPLETE  --  Job has completed successfully
+        ERROR     --  Job has either been canceled by the user or an
+                      error has been raised
+        """
+
+        # Simple checks first
+        jobdir = store.path(id)
+        if not os.path.exists(jobdir):
+            return "UNKNOWN"
+        elif store.contains(id, 'results'):
+            return "COMPLETE"
+
+        # Translate job id to slurm id
+        slurmid = store.get(id,'slurmid')
+
+        # Check slurm queue for job id
+        out,_ = _slurm_exec('squeue', '-h', '--format=%i %T')
+        out = out.strip()
+
+        state = ''
+        inqueue = False
+        if out != "":
+            for line in out.split('\n'):
+                line = line.split()
+                if slurmid == line[0]:
+                    state = line[1]
+                    inqueue = True
+                    break
+
+        if inqueue:
+            if state in _ACTIVE:
+                return "ACTIVE"
+            elif state in _INACTIVE:
+                return "PENDING"
+            elif state in _COMPLETE:
+                return "COMPLETE"
+            elif state in _ERROR:
+                return "ERROR"
+            else:
+                raise RuntimeError("unexpected state from squeue: %s"%state)
+        else:
+            return "ERROR"
+
+    def cancel(self, jobid):
+        #print "canceling",jobid
+        slurmid = store.get(jobid,'slurmid')
+        _slurm_exec('scancel',slurmid)
+
+    def delete(self, jobid):
+        #jobdir = store.path(jobid)
+        self.cancel(jobid)
+        store.destroy(jobid)
+
+    def nextjob(self, queue):
+        raise NotImplementedError("SLURM queues do not support work sharing")
+
+    def postjob(self, id, results):
+        raise NotImplementedError("SLURM queues do not support work sharing")
+
+def create_batchfile(script, commands):
+    """
+    Create the batchfile to run the job.
+    """
+    fid = open(script,'w')
+    fid.write("#!/bin/sh\n")
+    fid.write("\n".join(commands))
+    fid.write("\nwait\n")
+    fid.close()
+    return script
+
+def _slurm_exec(cmd, *args):
+    """
+    Run a slurm command, capturing any errors.
+    """
+    #print "cmd",cmd,"args",args
+    process = subprocess.Popen([cmd]+list(args),
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.PIPE)
+    out,err = process.communicate()
+    if err.startswith(cmd+': error: '):
+        raise RuntimeError(cmd+': '+err[len(cmd+': error: '):].strip())
+    return out,err
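
Hooking this scheduler into the web front end only requires naming it when configuring
the server (a sketch; the job store pattern and secret-key path shown here are the same
defaults used elsewhere in the package, and the key file must already exist)::

    import jobqueue.server

    jobqueue.server.configure(jobstore='/var/lib/jobqueue/server/%s',
                              jobkey='~/.bumps/key',   # secret key file, must exist
                              scheduler='slurm')
    jobqueue.server.serve()                            # listens on 0.0.0.0:5000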
diff --git a/extra/jobqueue/store.py b/extra/jobqueue/store.py
new file mode 100644
index 0000000..e50b098
--- /dev/null
+++ b/extra/jobqueue/store.py
@@ -0,0 +1,53 @@
+import os
+import json
+import shutil
+from tempfile import NamedTemporaryFile
+
+ROOT = '/var/lib/jobqueue/server/%s'
+
+def tempfile():
+    create('temp')
+    return NamedTemporaryFile(delete=False, dir=path('temp'))
+
+def path(id):
+    return ROOT%id
+
+def create(id):
+    #print "making %s"%path(id)
+    if not os.path.exists(path(id)):
+        os.makedirs(path(id))
+
+def destroy(id):
+    shutil.rmtree(path(id))
+
+def put(id, key, value):
+    value = json.dumps(value)
+    datapath = path(id)
+    datafile = os.path.join(datapath,"K-%s.json"%(key))
+    try:
+        open(datafile,'w').write(value)
+    except:
+        raise KeyError("Could not store key %s-%s in %s"%(id,key,datafile))
+
+def get(id, key):
+    datapath = path(id)
+    datafile = os.path.join(datapath,"K-%s.json"%(key))
+    try:
+        value = open(datafile,'r').read()
+    except:
+        raise KeyError("Could not retrieve key %s-%s"%(id,key))
+    #if value == "": print "key %s-%s is empty"%(id,key)
+    return json.loads(value) if value != "" else None
+
+def contains(id, key):
+    datapath = path(id)
+    datafile = os.path.join(datapath,"K-%s.json"%(key))
+    return os.path.exists(datafile)
+
+def delete(id, key):
+    datapath = path(id)
+    datafile = os.path.join(datapath,"K-%s.json"%(key))
+    try:
+        os.unlink(datafile)
+    except:
+        raise KeyError("Could not delete key %s-%s"%(id,key))
diff --git a/extra/jobqueue/templates/index.html b/extra/jobqueue/templates/index.html
new file mode 100644
index 0000000..782574f
--- /dev/null
+++ b/extra/jobqueue/templates/index.html
@@ -0,0 +1,8 @@
+<title>{% block title %}{% endblock %}</title>
+<table>
+<caption>Available files for <a href="/jobs/{{ id }}">job {{ id }}</a></caption>
+<tr><th>Name</th><th>Size</th></tr>
+{% for f,size in files %}
+  <tr><td><a href="{{ f }}">{{f}} </a></td><td>{{size}} bytes</td></tr>
+{% endfor %}
+</table>
diff --git a/extra/jobqueue/test/__init__.py b/extra/jobqueue/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/extra/jobqueue/test/test_crud.py b/extra/jobqueue/test/test_crud.py
new file mode 100644
index 0000000..1de5a79
--- /dev/null
+++ b/extra/jobqueue/test/test_crud.py
@@ -0,0 +1,49 @@
+from __future__ import print_function
+
+from jobqueue.client import connect
+
+DEBUG = True
+
+#server = connect('http://reflectometry.org/queue')
+server = connect('http://localhost:5000')
+
+def checkqueue(pending=[], active=[], complete=[]):
+    qpending = server.jobs('PENDING')
+    qactive = server.jobs('ACTIVE')
+    qcomplete = server.jobs('COMPLETE')
+    if DEBUG: print("pending",qpending,"active",qactive,"complete",qcomplete)
+    #assert pending == qpending
+    #assert active == qactive
+    #assert complete == qcomplete
+
+long = {'service':'count','data':1000000,
+        'name':'long count','notify':'me'}
+short = {'service':'count','data':200,
+        'name':'short count','notify':'me'}
+fail1 = {'service':'count','data':'string',
+         'name':'short count','notify':'me'}
+fail2 = {'service':'noservice','data':'string',
+         'name':'short count','notify':'me'}
+
+job = server.submit(long)
+print("submit",job)
+#import sys; sys.exit()
+checkqueue()
+job2 = server.submit(short)
+print("submit",job2)
+result = server.wait(job['id'], pollrate=10, timeout=120)
+print("result",result)
+checkqueue()
+print("fetch",server.info(job['id']))
+print("delete",server.delete(job['id']))
+checkqueue()
+job3 = server.submit(fail1)
+job4 = server.submit(fail2)
+print("===incorrect service options")
+result = server.wait(job3['id'], pollrate=1, timeout=120)
+print(result['error'])
+print(result['trace'])
+print("===incorrect service")
+result = server.wait(job4['id'], pollrate=1, timeout=120)
+print(result['error'])
+print(result['trace'])
diff --git a/extra/jobqueue/test/test_db.py b/extra/jobqueue/test/test_db.py
new file mode 100755
index 0000000..8751270
--- /dev/null
+++ b/extra/jobqueue/test/test_db.py
@@ -0,0 +1,102 @@
+from __future__ import print_function
+
+import os
+import time
+
+from jobqueue import dispatcher, db, store, notify
+
+db.DEBUG = False
+DEBUG = False
+
+path = os.path.abspath(os.path.dirname(__file__))
+URI = "sqlite:///%s/test.db"%path
+
+def setupdb(uri):
+    db.DB_URI = uri
+    store.ROOT = "/tmp/test_store/%s"
+    if uri.startswith("sqlite"):
+        try: os.unlink(uri[10:])
+        except:
+            print("could not unlink",uri[10:])
+            pass
+    queue = dispatcher.Scheduler()
+    return queue
+
+def checkspeed(uri=URI):
+    # Isolate the cost of database access
+    store.create = lambda *args: {}
+    store.get = lambda *args: {}
+    store.put = lambda *args: {}
+    notify.notify = lambda *args, **kw: None
+    queue = setupdb(uri)
+    testj = { 'name' : 'test1', 'notify' : 'me' }
+    t = time.time()
+    for i in range(80):
+        for j in range(10):
+            testj['notify'] = 'me%d'%j
+            queue.submit(testj, origin="here%d"%j)
+        for j in range(10):
+            request = queue.nextjob(queue='cue')
+            queue.postjob(1, {'status': 'COMPLETE', 'result': 0})
+        print(10*(i+1), time.time()-t)
+        t = time.time()
+
+def test(uri=URI):
+
+    queue = setupdb(uri)
+    def checkqueue(pending=[], active=[], complete=[]):
+        qpending = queue.jobs('PENDING')
+        qactive = queue.jobs('ACTIVE')
+        qcomplete = queue.jobs('COMPLETE')
+        if DEBUG: print("pending",qpending,"active",qactive,"complete",qcomplete)
+        assert pending == qpending
+        assert active == qactive
+        assert complete == qcomplete
+
+
+    test1 = { 'name' : 'test1', 'notify' : 'me' }
+    test2 = { 'name' : 'test2', 'notify' : 'me' }
+    test3 = { 'name' : 'test3', 'notify' : 'you' }
+
+    # No jobs available for running initially
+    checkqueue([],[],[])
+    request = queue.nextjob(queue='cue')
+    if DEBUG: print("nextjob",request)
+    assert request['request'] is None
+
+    #jobs = queue.jobs()
+    #print "initially empty job list", jobs
+    #assert jobs == []
+    job1 = queue.submit(test1, origin="here")
+    if DEBUG: print("first job id",job1)
+    assert job1 == 1
+    job2 = queue.submit(test2, origin="here")
+    assert job2 == 2
+    checkqueue([1,2],[],[])
+
+    if DEBUG: print("status(0)",queue.status(1))
+    assert queue.status(1) == 'PENDING'
+
+    if DEBUG: print("status(3)",queue.status(3))
+    assert queue.status(3) == 'UNKNOWN'
+
+    if DEBUG: print("info(0)", queue.info(1))
+    assert queue.info(1)['name'] == test1['name']
+
+    request = queue.nextjob(queue='cue')
+    if DEBUG: print("nextjob",request)
+    assert request['request']['name'] == test1['name']
+    checkqueue([2],[1],[])
+
+    job2 = queue.submit(test3, origin="there")
+    request = queue.nextjob(queue='cue')
+    if DEBUG: print("nextjob",request)
+    assert request['request']['name'] == test3['name']
+    checkqueue([2],[1,3],[])
+
+    queue.postjob(1, {'status': 'COMPLETE', 'result': 0})
+    checkqueue([2],[3],[1])
+
+if __name__ == "__main__":
+    test()
+    #checkspeed()
diff --git a/extra/jobqueue/worker.py b/extra/jobqueue/worker.py
new file mode 100644
index 0000000..f539ea3
--- /dev/null
+++ b/extra/jobqueue/worker.py
@@ -0,0 +1,129 @@
+try: from _thread import start_new_thread
+except: from thread import start_new_thread
+
+import os, sys
+import logging
+import traceback
+import time
+from multiprocessing import Process
+
+from jobqueue import runjob, store
+from jobqueue.client import connect
+
+store.ROOT = '/tmp/worker/%s'
+DEFAULT_DISPATCHER = 'http://reflectometry.org/queue'
+POLLRATE = 10
+
+def log_errors(f):
+    def wrapped(*args, **kw):
+        try:
+            return f(*args, **kw)
+        except:
+            exc_type,exc_value,exc_trace = sys.exc_info()
+            trace = traceback.format_tb(exc_trace)
+            message = traceback.format_exception_only(exc_type,exc_value)
+            logging.error("".join(message + trace))
+    return wrapped
+
+def wait_for_result(remote, id, process, queue):
+    """
+    Wait for job processing to finish.  Meanwhile, prefetch the next
+    request.
+    """
+    next_request = { 'request': None }
+    canceling = False
+    while True:
+        # Check if process is complete
+        process.join(POLLRATE)
+        if not process.is_alive(): break
+
+        # Check that the job is still active, and that it hasn't been
+        # canceled, or results reported back from a second worker.
+        # If remote server is down, assume the job is still active.
+        try: response = remote.status(id)
+        except: response = None
+        if response and response['status'] != 'ACTIVE':
+            #print "canceling process"
+            process.terminate()
+            canceling = True
+            break
+
+        # Prefetch the next job; this strategy works well if there is
+        # only one worker.  If there are many, we may want to leave it
+        # for another worker to process.
+        if not next_request['request']:
+            # Ignore remote server down errors
+            try: next_request = remote.nextjob(queue=queue)
+            except: pass
+
+    # Grab results from the store
+    try:
+        results = runjob.results(id)
+    except KeyError:
+        if canceling:
+            results = { 'status': 'CANCEL', 'message': 'Job canceled' }
+        else:
+            results = { 'status': 'ERROR', 'message': 'Results not found' }
+
+    #print "returning results",results
+    return results, next_request
+
+@log_errors
+def update_remote(dispatcher, id, queue, results):
+    """
+    Update remote server with results.
+    """
+    #print "updating remote"
+    path= store.path(id)
+    # Remove results key, if it is there
+    try: store.delete(id, 'results')
+    except KeyError: pass
+    files = [os.path.join(path,f) for f in os.listdir(path)]
+    #print "sending results",results
+    # This is done with a separate connection to the server so that it can
+    # run inside a thread.  That way the server can start the next job
+    # while the megabytes of results are being transferred in the background.
+    private_remote = connect(dispatcher)
+    private_remote.postjob(id=id, results=results, queue=queue, files=files)
+    # Clean up files
+    for f in files: os.unlink(f)
+    os.rmdir(path)
+
+def serve(dispatcher, queue):
+    """
+    Run the work server.
+    """
+    assert queue is not None
+    next_request = { 'request': None }
+    remote = connect(dispatcher)
+    while True:
+        if not next_request['request']:
+            try: next_request = remote.nextjob(queue=queue)
+            except: logging.error(traceback.format_exc())
+        if next_request['request']:
+            jobid = next_request['id']
+            if jobid is None:
+                logging.error('request has no job id')
+                next_request = {'request': None}
+                continue
+            logging.info('processing job %s'%jobid)
+            process = Process(target=runjob.run,
+                              args=(jobid,next_request['request']))
+            process.start()
+            results, next_request = wait_for_result(remote, jobid, process, queue)
+            start_new_thread(update_remote,
+                             (dispatcher, jobid, queue, results))
+        else:
+            time.sleep(POLLRATE)
+
+def main():
+    try: os.nice(19)
+    except: pass
+    if len(sys.argv) <= 1:
+        print("Requires queue name")
+        sys.exit(1)
+    queue = sys.argv[1]
+    dispatcher = sys.argv[2] if len(sys.argv) > 2 else DEFAULT_DISPATCHER
+    serve(queue=queue, dispatcher=dispatcher)
+
+if __name__ == "__main__":
+    main()
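
The worker can also be started from Python instead of the command line by calling serve()
directly (a sketch; the queue name and dispatcher URL here are hypothetical)::

    from jobqueue import worker

    # poll the dispatcher for work on the 'demo' queue and run each job locally
    worker.serve(dispatcher='http://localhost:5000', queue='demo')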
diff --git a/extra/jobqueue/www/hello.html b/extra/jobqueue/www/hello.html
new file mode 100644
index 0000000..76c3ac3
--- /dev/null
+++ b/extra/jobqueue/www/hello.html
@@ -0,0 +1 @@
+<html><body>hello</body></html>
diff --git a/extra/jobqueue/www/jobqueue.wsgi b/extra/jobqueue/www/jobqueue.wsgi
new file mode 100644
index 0000000..442dd99
--- /dev/null
+++ b/extra/jobqueue/www/jobqueue.wsgi
@@ -0,0 +1,50 @@
+"""
+Install application into a mod_wsgi stack.
+
+To be sure there is no cross-talk between installed packages, you will
+want to create a blank virtualenv to run mod_wsgi and a virtualenv for
+each application you are running on the server.
+
+To create the blank environment::
+
+    virtualenv --no-site-packages /usr/local/pythonenv/clean
+
+To configure WSGI, edit /etc/apache2/mods-available/wsgi.conf, setting::
+
+    WSGIPythonHome /usr/local/pythonenv/clean
+
+Then create a bumps user account with its private virtualenv::
+
+    virtualenv --no-site-packages ~/bumps
+
+You can now populate the virtualenv with the required packages::
+
+    cd ~/bumps
+    # Use system install for binary packages
+    ln -s /usr/share/pyshared/numpy lib/python2.6/site-packages
+    ln -s /usr/share/pyshared/matplotlib lib/python2.6/site-packages
+    ln -s /usr/share/pyshared/pytz lib/python2.6/site-packages
+    ln -s /usr/share/pyshared/scipy lib/python2.6/site-packages
+    # Install the less common packages by hand
+    bin/pip install flask
+    bin/pip install ...
+"""
+
+import os
+import sys
+
+# === Configure virtual environment ===
+import site
+PYROOT = os.path.abspath(os.path.expanduser('~/bumps'))
+sitepackages = "lib/python%d.%d/site-packages" % sys.version_info[:2]
+site.addsitedir(os.path.join(PYROOT, sitepackages))
+
+# === Configure resource locations ===
+import jobqueue.server
+jobqueue.server.configure(
+    jobstore='~/.bumps/server/%s',
+    jobkey='~/.bumps/key',
+    jobdb='sqlite://'+os.path.abspath(os.path.expanduser('~/.bumps/db')),
+    scheduler='dispatch',
+    )
+
+application = jobqueue.server.app
diff --git a/extra/jobqueue/www/test.wsgi b/extra/jobqueue/www/test.wsgi
new file mode 100644
index 0000000..f00600d
--- /dev/null
+++ b/extra/jobqueue/www/test.wsgi
@@ -0,0 +1,8 @@
+def application(environ, start_response):
+  status = '200 OK'
+  output = 'Hello World@'
+  response_headers = [('Content-type', 'text/plain'),
+    ('Content-Length',str(len(output)))]
+  start_response(status, response_headers)
+  return [output]
+
diff --git a/extra/sasview/FitPage2.fitv b/extra/sasview/FitPage2.fitv
new file mode 100644
index 0000000..501e2c5
--- /dev/null
+++ b/extra/sasview/FitPage2.fitv
@@ -0,0 +1,393 @@
+<?xml version="1.0" ?>
+<SASroot version="1.0" xmlns="cansas1d/1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="cansas1d/1.0 http://svn.smallangles.net/svn/canSAS/1dwg/trunk/cansas1d.xsd">
+	<SASentry>
+		<Title>
+			cyl_400_20.txt
+		</Title>
+		<Run>
+			cyl_400_20.txt
+		</Run>
+		<SASdata>
+			<Idata>
+				<Q unit="1/A">
+					0.025
+				</Q>
+				<I unit="1/cm">
+					125.852
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.05
+				</Q>
+				<I unit="1/cm">
+					53.6662
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.075
+				</Q>
+				<I unit="1/cm">
+					26.0733
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.1
+				</Q>
+				<I unit="1/cm">
+					11.8935
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.125
+				</Q>
+				<I unit="1/cm">
+					4.61714
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.15
+				</Q>
+				<I unit="1/cm">
+					1.29983
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.175
+				</Q>
+				<I unit="1/cm">
+					0.171347
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.2
+				</Q>
+				<I unit="1/cm">
+					0.0417614
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.225
+				</Q>
+				<I unit="1/cm">
+					0.172719
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.25
+				</Q>
+				<I unit="1/cm">
+					0.247876
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.275
+				</Q>
+				<I unit="1/cm">
+					0.20301
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.3
+				</Q>
+				<I unit="1/cm">
+					0.104599
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.325
+				</Q>
+				<I unit="1/cm">
+					0.0285595
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.35
+				</Q>
+				<I unit="1/cm">
+					0.00213344
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.375
+				</Q>
+				<I unit="1/cm">
+					0.0137511
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.4
+				</Q>
+				<I unit="1/cm">
+					0.0312374
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.425
+				</Q>
+				<I unit="1/cm">
+					0.0350328
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.45
+				</Q>
+				<I unit="1/cm">
+					0.0243172
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.475
+				</Q>
+				<I unit="1/cm">
+					0.00923067
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+			<Idata>
+				<Q unit="1/A">
+					0.5
+				</Q>
+				<I unit="1/cm">
+					0.00121297
+				</I>
+				<Idev unit="1/cm">
+					0.0
+				</Idev>
+				<Qdev unit="1/A">
+					0.0
+				</Qdev>
+			</Idata>
+		</SASdata>
+		<SASsample name="">
+			<ID>
+				
+			</ID>
+		</SASsample>
+		<SASinstrument>
+			<name>
+				
+			</name>
+			<SASsource/>
+		</SASinstrument>
+		<SASnote/>
+		<fitting_plug_in version="1.0">
+			<filename>
+				cyl_400_20.txt
+			</filename>
+			<timestamp epoch="1389997943.92">
+				Fri Jan 17 17:32:23 2014
+			</timestamp>
+			<Attributes>
+				<is_data is_data="True"/>
+				<group_id group_id="287"/>
+				<data_name data_name="cyl_400_20.txt"/>
+				<data_id data_id="cyl_400_20.txt1389997565.29"/>
+				<name name=""/>
+				<data_name data_name="cyl_400_20.txt"/>
+				<engine_type engine_type="scipy"/>
+				<qmin qmin="0.025"/>
+				<qmax qmax="0.5"/>
+				<npts npts="50"/>
+				<categorycombobox categorycombobox="Shapes"/>
+				<formfactorcombobox formfactorcombobox="CylinderModel"/>
+				<structurecombobox structurecombobox="None"/>
+				<multi_factor multi_factor="None"/>
+				<magnetic_on magnetic_on="False"/>
+				<enable_smearer enable_smearer="False"/>
+				<disable_smearer disable_smearer="True"/>
+				<pinhole_smearer pinhole_smearer="False"/>
+				<slit_smearer slit_smearer="False"/>
+				<enable_disp enable_disp="True"/>
+				<disable_disp disable_disp="False"/>
+				<dI_noweight dI_noweight="True"/>
+				<dI_didata dI_didata="False"/>
+				<dI_sqrdata dI_sqrdata="False"/>
+				<dI_idata dI_idata="False"/>
+				<enable2D enable2D="False"/>
+				<cb1 cb1="False"/>
+				<tcChi tcChi="2.4299e-11"/>
+				<smearer smearer="None"/>
+				<smear_type smear_type="None"/>
+				<dq_l dq_l="None"/>
+				<dq_r dq_r="None"/>
+				<dx_max dx_max="0.0"/>
+				<dx_min dx_min="0.0"/>
+				<dxl dxl="0.0"/>
+				<dxw dxw=""/>
+				<values/>
+				<weights/>
+				<disp_obj_dict/>
+				<parameters>
+					<parameter error_displayed="True" error_value="" maximum_displayed="True" maximum_value="" minimum_displayed="True" minimum_value="" name="background" selected_to_fit="False" unit="[1/cm]" value="0"/>
+					<parameter error_displayed="True" error_value="91.191" maximum_displayed="True" maximum_value="" minimum_displayed="True" minimum_value="" name="length" selected_to_fit="True" unit="[A]" value="399.99"/>
+					<parameter error_displayed="True" error_value="0.62637" maximum_displayed="True" maximum_value="" minimum_displayed="True" minimum_value="" name="radius" selected_to_fit="True" unit="[A]" value="20"/>
+					<parameter error_displayed="True" error_value="" maximum_displayed="True" maximum_value="" minimum_displayed="True" minimum_value="" name="scale" selected_to_fit="False" unit="" value="1"/>
+					<parameter error_displayed="True" error_value="6.3206e-08" maximum_displayed="True" maximum_value="" minimum_displayed="True" minimum_value="" name="sldCyl" selected_to_fit="True" unit="[1/A^(2)]" value="4e-06"/>
+					<parameter error_displayed="True" error_value="" maximum_displayed="True" maximum_value="" minimum_displayed="True" minimum_value="" name="sldSolv" selected_to_fit="False" unit="[1/A^(2)]" value="1e-06"/>
+					<parameter error_displayed="False" error_value="" maximum_displayed="False" maximum_value="" minimum_displayed="False" minimum_value="" name="cyl_phi" selected_to_fit="True" unit="[deg]" value="60"/>
+					<parameter error_displayed="False" error_value="" maximum_displayed="False" maximum_value="" minimum_displayed="False" minimum_value="" name="cyl_theta" selected_to_fit="True" unit="[deg]" value="60"/>
+				</parameters>
+				<str_parameters/>
+				<orientation_parameters>
+					<parameter error_displayed="False" error_value="" maximum_displayed="False" maximum_value="" minimum_displayed="False" minimum_value="" name="cyl_phi" selected_to_fit="True" unit="[deg]" value="60"/>
+					<parameter error_displayed="False" error_value="" maximum_displayed="False" maximum_value="" minimum_displayed="False" minimum_value="" name="cyl_theta" selected_to_fit="True" unit="[deg]" value="60"/>
+				</orientation_parameters>
+				<dispersity_parameters>
+					<parameter error_displayed="None" error_value="None" maximum_displayed="None" maximum_value="None" minimum_displayed="None" minimum_value="None" name="cyl_theta.npts" selected_to_fit="None" unit="None" value="35"/>
+					<parameter error_displayed="None" error_value="None" maximum_displayed="None" maximum_value="None" minimum_displayed="None" minimum_value="None" name="cyl_theta.nsigmas" selected_to_fit="None" unit="None" value="3"/>
+					<parameter error_displayed="False" error_value="" maximum_displayed="False" maximum_value="" minimum_displayed="False" minimum_value="" name="cyl_theta.width" selected_to_fit="False" unit="" value="0"/>
+					<parameter error_displayed="None" error_value="None" maximum_displayed="None" maximum_value="None" minimum_displayed="None" minimum_value="None" name="cyl_phi.npts" selected_to_fit="None" unit="None" value="35"/>
+					<parameter error_displayed="None" error_value="None" maximum_displayed="None" maximum_value="None" minimum_displayed="None" minimum_value="None" name="cyl_phi.nsigmas" selected_to_fit="None" unit="None" value="3"/>
+					<parameter error_displayed="False" error_value="" maximum_displayed="False" maximum_value="" minimum_displayed="False" minimum_value="" name="cyl_phi.width" selected_to_fit="False" unit="" value="0"/>
+				</dispersity_parameters>
+				<fixed_param>
+					<parameter error_displayed="None" error_value="None" maximum_displayed="None" maximum_value="None" minimum_displayed="None" minimum_value="None" name="length.npts" selected_to_fit="None" unit="None" value="35"/>
+					<parameter error_displayed="None" error_value="None" maximum_displayed="None" maximum_value="None" minimum_displayed="None" minimum_value="None" name="length.nsigmas" selected_to_fit="None" unit="None" value="3"/>
+					<parameter error_displayed="None" error_value="None" maximum_displayed="None" maximum_value="None" minimum_displayed="None" minimum_value="None" name="radius.npts" selected_to_fit="None" unit="None" value="35"/>
+					<parameter error_displayed="None" error_value="None" maximum_displayed="None" maximum_value="None" minimum_displayed="None" minimum_value="None" name="radius.nsigmas" selected_to_fit="None" unit="None" value="3"/>
+					<parameter error_displayed="None" error_value="None" maximum_displayed="None" maximum_value="None" minimum_displayed="None" minimum_value="None" name="cyl_theta.npts" selected_to_fit="None" unit="None" value="35"/>
+					<parameter error_displayed="None" error_value="None" maximum_displayed="None" maximum_value="None" minimum_displayed="None" minimum_value="None" name="cyl_theta.nsigmas" selected_to_fit="None" unit="None" value="3"/>
+					<parameter error_displayed="None" error_value="None" maximum_displayed="None" maximum_value="None" minimum_displayed="None" minimum_value="None" name="cyl_phi.npts" selected_to_fit="None" unit="None" value="35"/>
+					<parameter error_displayed="None" error_value="None" maximum_displayed="None" maximum_value="None" minimum_displayed="None" minimum_value="None" name="cyl_phi.nsigmas" selected_to_fit="None" unit="None" value="3"/>
+				</fixed_param>
+				<fittable_param>
+					<parameter error_displayed="True" error_value="" maximum_displayed="True" maximum_value="" minimum_displayed="True" minimum_value="" name="length.width" selected_to_fit="False" unit="" value="0"/>
+					<parameter error_displayed="True" error_value="1.6546e+06" maximum_displayed="True" maximum_value="" minimum_displayed="True" minimum_value="" name="radius.width" selected_to_fit="True" unit="" value="0"/>
+					<parameter error_displayed="False" error_value="" maximum_displayed="False" maximum_value="" minimum_displayed="False" minimum_value="" name="cyl_theta.width" selected_to_fit="False" unit="" value="0"/>
+					<parameter error_displayed="False" error_value="" maximum_displayed="False" maximum_value="" minimum_displayed="False" minimum_value="" name="cyl_phi.width" selected_to_fit="False" unit="" value="0"/>
+				</fittable_param>
+			</Attributes>
+		</fitting_plug_in>
+	</SASentry>
+</SASroot>
diff --git a/extra/sasview/cyl_400_40.txt b/extra/sasview/cyl_400_40.txt
new file mode 100644
index 0000000..b533fa1
--- /dev/null
+++ b/extra/sasview/cyl_400_40.txt
@@ -0,0 +1,56 @@
+<X>   <Y>
+0  -1.#IND
+0.00925926  1246.59
+0.0185185  612.143
+0.0277778  361.142
+0.037037  211.601
+0.0462963  122.127
+0.0555556  65.2385
+0.0648148  30.8914
+0.0740741  12.4737
+0.0833333  3.51371
+0.0925926  0.721835
+0.101852  0.583607
+0.111111  1.31084
+0.12037  1.9432
+0.12963  1.94286
+0.138889  1.58912
+0.148148  0.987076
+0.157407  0.456678
+0.166667  0.147595
+0.175926  0.027441
+0.185185  0.0999575
+0.194444  0.198717
+0.203704  0.277667
+0.212963  0.288172
+0.222222  0.220056
+0.231481  0.139378
+0.240741  0.0541106
+0.25  0.0140158
+0.259259  0.0132187
+0.268519  0.0336301
+0.277778  0.0672911
+0.287037  0.0788983
+0.296296  0.0764438
+0.305556  0.0555445
+0.314815  0.0280548
+0.324074  0.0111798
+0.333333  0.00156156
+0.342593  0.00830883
+0.351852  0.0186266
+0.361111  0.0275426
+0.37037  0.03192
+0.37963  0.0255329
+0.388889  0.0175216
+0.398148  0.0073075
+0.407407  0.0016631
+0.416667  0.00224153
+0.425926  0.0051335
+0.435185  0.0112914
+0.444444  0.0138209
+0.453704  0.0137453
+0.462963  0.0106682
+0.472222  0.00532472
+0.481481  0.00230646
+0.490741  0.000335344
+0.5  0.00177224
diff --git a/extra/sasview/model.py b/extra/sasview/model.py
new file mode 100644
index 0000000..1cf0247
--- /dev/null
+++ b/extra/sasview/model.py
@@ -0,0 +1,51 @@
+from __future__ import print_function
+
+# ======
+# Put current directory and sasview directory on path.
+# This won't be necessary once bumps is in sasview
+import os, sys
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+try: 
+    import sans
+except ImportError:
+    from distutils.util import get_platform
+    platform = '.%s-%s'%(get_platform(),sys.version[:3])
+    sasview = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','..','sasview','build','lib'+platform))
+    sys.path.insert(0,sasview)
+    #print("\n".join(sys.path))
+# ======
+
+from sasbumps import *
+
+# Set up the target model
+sldSamp = Parameter(2.07, name='sample sld')
+sldSolv = Parameter(1.0, name='solvent sld')
+sphere = load_model('SphereModel', radius=60, radius_width=0.1,
+                    sldSph=1e-6*sldSamp, sldSolv=1e-6*sldSolv,
+                    background=0, scale=1.0)
+ellipsoid = load_model('EllipsoidModel', radius_a=60, radius_b=160,
+                       sldEll=1e-6*sldSamp, sldSolv=1e-6*sldSolv,
+                       background=0, scale=1.0)
+
+# Simulate data
+# Use seed(n) for reproducible data, or seed() for new data each time.
+try: k = int(sys.argv[2])
+except: k = 1
+with seed(k): data = sim_data(ellipsoid, noise=15)
+
+# Fit to sphere or ellipse, depending on command line
+if "sphere" in sys.argv[1:]:
+    M = Experiment(model=sphere, data=data)
+    M['radius'].range(0,200)
+    M['radius.width'].range(0,0.7)
+else: # ellipse
+    M = Experiment(model=ellipsoid, data=data)
+    M['radius_a'].range(0,1e3)
+    M['radius_b'].range(0,1e3)
+    #M['scale'].range(0,100)
+sldSamp.range(0,1e2)
+sldSolv.range(1,7)
+
+problem = FitProblem([M])
+problem.randomize()
+
diff --git a/extra/sasview/modelcyl.py b/extra/sasview/modelcyl.py
new file mode 100644
index 0000000..c19cce1
--- /dev/null
+++ b/extra/sasview/modelcyl.py
@@ -0,0 +1,36 @@
+from __future__ import print_function
+
+# Look for the peak fitter in the same file as the modeller
+import os, sys
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+try: 
+    import sans
+except ImportError:
+    from distutils.util import get_platform
+    platform = '.%s-%s'%(get_platform(),sys.version[:3])
+    sasview = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','..','..','sasview','build','lib'+platform))
+    sys.path.insert(0,sasview)
+    #raise Exception("\n".join(sys.path))
+
+from sasbumps import *
+
+# Load data
+data = load_data('cyl_400_40.txt')
+
+# Set up the target model
+sample_sld = Parameter(2.07, name='sample sld')
+solvent_sld = Parameter(1.0, name='solvent sld')
+model = load_model('CylinderModel', radius=60, radius_width=0,
+                   sldCyl=1e-6*sample_sld, sldSolv=1e-6*solvent_sld,
+                   background=0, scale=1.0)
+
+M = Experiment(model=model, data=data)
+M['length'].range(0,1000)
+M['radius'].range(0,200)
+#M['length.width'].range(0,0.7)
+#M['radius.width'].range(0,0.7)
+sample_sld.range(solvent_sld.value,solvent_sld.value+7)
+
+problem = FitProblem([M])
+problem.randomize()
+
diff --git a/extra/sasview/sasbumps.py b/extra/sasview/sasbumps.py
new file mode 100644
index 0000000..8b5bf24
--- /dev/null
+++ b/extra/sasview/sasbumps.py
@@ -0,0 +1,247 @@
+from __future__ import division, print_function
+__all__ = ['Experiment', 'load_data', 'load_model', 'load_fit', 'sim_data',
+           'Parameter', 'FitProblem', 'FreeVariables',
+           'pmath', 'preview', 'fit', 'np', 'sys', 'sans', 'seed']
+
+# symbols loaded for export
+import sys
+import numpy as np
+from bumps.names import Parameter, FitProblem, FreeVariables, pmath, preview, fit
+from bumps.parameter import BaseParameter
+from bumps.util import push_seed as seed
+import sans
+import sans.models
+
+# symbols needed internally
+from sans.dataloader.data_info import Data1D, Data2D
+from sans.fit.AbstractFitEngine import FitData1D, FitData2D
+from sans.perspectives.fitting.pagestate import Reader as FitReader
+from sans.dataloader.loader import Loader as DataLoader
+
+def load_data(filename):
+    return DataLoader().load(filename)
+
+def sim_data(model, noise=5, qmin=0.005, qmax=0.5, nq=100, dq=0):
+    for pid, p in getattr(model,'_bumps_pars', {}).items():
+        model.setParam(pid,p.value)
+    q = np.logspace(np.log10(qmin), np.log10(qmax), nq)
+    # if dq != 0 then need smearing
+    I = model.evalDistribution(q)
+    dI = I*noise/100.
+    I += np.random.randn(*q.shape)*dI
+    return Data1D(x=q, dx=dq, y=I, dy=dI)
+
+def load_model(model, name=None, **kw):
+    sans = __import__('sans.models.'+model)
+    ModelClass = getattr(getattr(sans.models,model,None),model,None)
+    if ModelClass is None:
+        raise ValueError("could not find model %r in sans.models"%model)
+    M = ModelClass()
+    prefix = (name if name else _model_name(M)) + " "
+    M._bumps_pars = {}
+    valid_pars = M.getParamList()
+    for k,v in kw.items():
+        # dispersion parameters initialized with _field instead of .field
+        if k.endswith('_width'): k = k[:-6]+'.width'
+        elif k.endswith('_npts'): k = k[:-5]+'.npts'
+        elif k.endswith('_nsigmas'): k = k[:-7]+'.nsigmas'
+        elif k.endswith('_type'): k = k[:-5]+'.type'
+        if k not in valid_pars:
+            formatted_pars = ", ".join(valid_pars)
+            raise KeyError("invalid parameter %r for %s--use one of: %s"
+                           %(k, model, formatted_pars))
+        if '.' in k and not k.endswith('.width'):
+            M.setParam(k, v)
+        elif isinstance(v, BaseParameter):
+            M._bumps_pars[k] = v
+        elif isinstance(v, (tuple,list)):
+            low, high = v
+            P = Parameter((low+high)/2, bounds=v, name=prefix+k)
+            M._bumps_pars[k] = P
+        else:
+            P = Parameter(v, name=prefix+k)
+            M._bumps_pars[k] = P
+    return M
+
+def load_fit(filename):
+    data = FitReader(call_back=lambda **kw:None).read('FitPage2.fitv')
+    data = data[0] # no support for multiset files
+    fit = data.meta_data['fitstate']
+    model_name = fit.formfactorcombobox
+    pars = dict((p[1],float(p[2])) for p in fit.parameters) 
+    for k,v in pars.items():
+        if abs(v) < 1e-5 and v != 0:
+            pars[k] = 1e-6*Parameter(v*1e6, name=model_name+" "+k)
+    model = load_model(model_name, **pars)
+    experiment = Experiment(model=model, data=data)
+    for p in fit.parameters:
+        if p[0]:
+            low = float(p[5][1]) if p[5][1] else -np.inf
+            high= float(p[6][1]) if p[6][1] else np.inf
+            try:
+                experiment[p[1]].range(low, high)
+            except KeyError:
+                print("%s not in experiment"%p[1])
+    return experiment
+
+
+def _model_name(model):
+    name = model.__class__.__name__
+    if name.endswith('Model'): name = name[:-5]
+    return name.lower()
+
+def _sas_parameter(model, pid, prefix):
+    par = getattr(model, '_bumps_pars', {}).get(pid, None)
+    if par is None:
+        ## Don't have bounds on dispersion parameters with model details
+        #bounds = model.details.get(pid,[None,None,None])[1:3]
+        value = model.getParam(pid)
+        par = Parameter(value, name=prefix+pid)
+    return par
+
+def _build_parameters(model, prefix, oriented, magnetic):
+    # Gather the list of parameters, stripping out the distribution attributes
+    pars = set(pid for pid in model.getParamList()
+               if '.' not in pid or pid.endswith('.width'))
+    if not oriented: pars -= set(model.orientation_params)
+    if not magnetic: pars -= set(model.magnetic_params)
+    return dict((pid,_sas_parameter(model, pid, prefix)) for pid in pars)
+
+def _set_parameters(model, pars):
+    for pid,p in pars.items():
+        #print("setting %r to %g"%(pid,p.value))
+        model.setParam(pid,p.value)
+
+class Experiment(object):
+    def __init__(self, model, data, smearer=None, qmin=None, qmax=None, name=''):
+        self.name = name if name else model.__class__.__name__
+        self.model = model
+
+        self.oriented = isinstance(data, Data2D)
+        self.magnetic = False
+
+        # Convert data to fitdata
+        if self.oriented:
+            self.fitdata = FitData2D(sans_data2d=data, data=data.data,
+                                     err_data=data.err_data)
+        else:
+            self.fitdata = FitData1D(x=data.x, y=data.y,
+                                     dx=data.dx, dy=data.dy,
+                                     data=data)
+        self.fitdata.sans_data = data
+        self.fitdata.set_fit_range(qmin, qmax)
+        #self.fitdata.set_smearer(smearer)
+
+        # save some bits of info
+        self._saved_y = self.fitdata.y
+        prefix = (name if name else _model_name(model)) + " "
+        self._pars = _build_parameters(model, prefix, self.oriented, self.magnetic)
+        self._cache = {}
+
+    def __getitem__(self, key):
+        return self._pars[key]
+
+    def __setitem__(self, key, value):
+        self._pars[key] = value
+
+    def theory(self):
+        key = 'theory'
+        if key not in self._cache:
+            _set_parameters(self.model, self._pars)
+            resid,fx = self.fitdata.residuals(self.model.evalDistribution)
+            self._cache[key] = fx
+            self._cache['residuals'] = resid
+        return self._cache[key]
+
+
+    def parameters(self):
+        """
+        Return the set of parameters in the model.
+        """
+        return self._pars
+
+    def update(self):
+        """
+        Called when parameters have been updated.  Any cached values will need to
+        be cleared and the model reevaluated.
+        """
+        self._cache = {}
+
+    def numpoints(self):
+        """
+        Return the number of data points.
+        """
+        return len(self.fitdata.x)
+
+    def nllf(self):
+        """
+        Return the negative log likelihood value of the current parameter set.
+        """
+        return 0.5*np.sum(self.residuals()**2)
+
+    def resynth_data(self):
+        """
+        Generate fake data based on uncertainties in the real data.  For Monte Carlo
+        resynth-refit uncertainty analysis.  Bootstrapping?
+        """
+        y,dy = self._saved_y,self.fitdata.dy
+        self.fitdata.y = y + np.random.randn(len(y))*dy
+
+    def restore_data(self):
+        """
+        Restore the original data in the model (after resynth).
+        """
+        self.fitdata.y = self._saved_y
+
+    def residuals(self):
+        """
+        Return residuals for current theory minus data.  For Levenberg-Marquardt.
+        """
+        self.theory() # automatically calculates residuals
+        return self._cache['residuals']
+
+    def save(self, basename):
+        """
+        Save the model to a file based on basename+extension.  The basename will
+        point to a directory on a remote machine; don't make any assumptions about
+        information stored on the server.  Return the set of files saved so that
+        the monitor software can make a pretty web page.
+        """
+        pass
+
+    def plot(self, view=None):
+        """
+        Plot the model to the current figure.  You only get one figure, but you
+        can make it as complex as you want.  This will be saved as a png on
+        the server, and composed onto a results webpage.
+        """
+        #print("view",view)
+        import pylab
+        if self.oriented:
+            qx,qy,Iqxy = self.fitdata.qx_data, self.fitdata.qy_data, self.fitdata.data
+            #xlabel,ylabel = self.fitdata.sans_data  # sans_data is a Data2D object, not a label pair; labels are unused below
+            pylab.subplot(311)
+            pylab.pcolormesh(qx,qy,Iqxy)
+            pylab.title('Data')
+            pylab.subplot(312)
+            pylab.pcolormesh(qx,qy,self.theory())
+            pylab.title('Theory')
+            pylab.subplot(313)
+            pylab.pcolormesh(qx,qy,self.residuals(), vmin=-3, vmax=3)
+            pylab.title('Residuals +/- 3 sigma')
+        elif view=='residual':
+            pylab.plot(self.fitdata.x, self.residuals(), '.')
+            pylab.axhline(1, color='black', ls='--',lw=1)
+            pylab.axhline(0, color='black', lw=1)
+            pylab.axhline(-1, color='black', ls='--',lw=1)
+            pylab.xlabel('Q (inv A)')
+            pylab.ylabel('(theory-data)/error')
+            pylab.legend(numpoints=1)
+        else:
+            pylab.errorbar(self.fitdata.x, self.fitdata.y,
+                           xerr=self.fitdata.dx, yerr=self.fitdata.dy,
+                           fmt='o',label="data "+self.name)
+            pylab.plot(self.fitdata.x, self.theory(), '-',
+                       label="fit "+self.name, hold=True)
+            pylab.xscale('log')
+            pylab.yscale('log')
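
The Experiment wrapper above is meant to be driven through a bumps FitProblem.
A minimal sketch, assuming the sasview parameters are wrapped as bumps
Parameter objects and that sasbumps exports Experiment and FitProblem as
smodel.py below suggests ('model', 'data' and the 'radius' parameter id are
placeholders, not taken from the source):

    # minimal sketch -- 'model', 'data' and 'radius' are placeholders
    from sasbumps import Experiment, FitProblem

    M = Experiment(model, data, name="sphere")
    M['radius'].range(10.0, 200.0)    # parameter ids follow model.getParamList()
    problem = FitProblem([M])
    print(2 * M.nllf())               # chi-square equivalent; nllf() is 0.5*sum(residuals**2)
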
diff --git a/extra/sasview/smodel.py b/extra/sasview/smodel.py
new file mode 100644
index 0000000..47f7e5d
--- /dev/null
+++ b/extra/sasview/smodel.py
@@ -0,0 +1,27 @@
+# ======
+# Put current directory and sasview directory on path.
+# This won't be necessary once bumps is in sasview
+import os, sys
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+try:
+    import periodictable
+except ImportError:
+    from distutils.util import get_platform
+    platform = '.%s-%s'%(get_platform(),sys.version[:3])
+    pt = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','..','..','periodictable'))
+    sys.path.insert(0,pt)
+try: 
+    import sans
+except ImportError:
+    from distutils.util import get_platform
+    platform = '.%s-%s'%(get_platform(),sys.version[:3])
+    sasview = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','..','..','sasview','build','lib'+platform))
+    sys.path.insert(0,sasview)
+    #raise Exception("\n".join(sys.path))
+# ======
+
+from sasbumps import *
+
+M = load_fit('FitPage2.fitv')
+problem = FitProblem([M])
+
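
A driver script like this is normally handed straight to the bumps command
line; roughly (the fitter choice and store directory are illustrative):

    $ bumps smodel.py --fit=dream --store=T1
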
diff --git a/installer.spec b/installer.spec
new file mode 100644
index 0000000..5b0dac1
--- /dev/null
+++ b/installer.spec
@@ -0,0 +1,31 @@
+# -*- mode: python -*-
+import sys
+import os
+
+import bumps
+print("found bumps in %r" % bumps.__file__)
+version = str(bumps.__version__)
+
+excludes = ['IPython.html','IPython.nbconvert','IPython.qt','IPython.testing',
+            'sphinx','docutils','jinja2',
+            ]
+a = Analysis(['bin/bumps_gui'],
+             pathex=[],
+             hookspath=['extra/installer-hooks'],
+             excludes=excludes,
+             runtime_hooks=None)
+#print "\n".join("%s: %s"%(f[-1],", ".join(f[:-1])) for f in a.datas)
+pyz = PYZ(a.pure)
+exe = EXE(pyz,
+          a.scripts,
+          a.binaries,
+          a.zipfiles,
+          a.datas,
+          name='Bumps %s'%version,
+          debug=False,
+          strip=None,
+          upx=True,
+          console=False , icon='extra/bumps.icns')
+app = BUNDLE(exe,
+             name='Bumps %s.app'%version,
+             icon='extra/bumps.icns')
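
This spec file is consumed by PyInstaller rather than run directly; the usual
invocation would be something like:

    $ pyinstaller installer.spec
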
diff --git a/master_builder.py b/master_builder.py
new file mode 100755
index 0000000..fc76d49
--- /dev/null
+++ b/master_builder.py
@@ -0,0 +1,543 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2006-2011, University of Maryland
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+# Author: James Krycka
+
+"""
+This script builds the Bumps application and documentation from source and
+runs unit tests and doc tests.  It supports building on Windows and Linux.
+
+Usually you download this script into a top-level directory (the root)
+and run it from there; it then downloads the files from the application
+repository into a subdirectory (the package directory).  For example, if
+test1 is the root directory, we might have:
+  E:/work/test1/master_builder.py
+               /bumps/master_builder.py
+               /bumps/...
+
+Alternatively, you can download the whole application repository and run
+this script from the application's package directory where it is stored.
+The script determines whether it is executing from the root or the package
+directory and makes the necessary adjustments.  In this case, the root
+directory is defined as one level up and the repository is not downloaded
+(as it is assumed to be fully present).  In the example below, test1 is the
+implicit root (i.e. top-level) directory.
+  E:/work/test1/bumps/master_builder.py
+               /bumps/...
+
+
+Note: this script still needs to be updated for the bumps standalone GUI
+distribution rather than refl1d.
+"""
+from six.moves import input
+
+import os
+import sys
+import shutil
+import subprocess
+
+sys.dont_write_bytecode = True
+
+# Windows commands to run utilities
+GIT = r"C:\Program Files (x86)\Git\bin\git.exe"
+REPO_NEW = '"%s" clone git@github.com:reflectometry/bumps.git' % GIT
+REPO_UPDATE = '"%s" pull origin master' % GIT
+
+INNO = r"C:\Program Files (x86)\Inno Setup 5\ISCC.exe"  # command line operation
+
+# Name of the package
+PKG_NAME = "bumps"
+# Name of the application we're building
+APP_NAME = "Bumps"
+
+
+# Required versions of Python packages and utilities to build the application.
+MIN_PYTHON = "2.5"
+MAX_PYTHON = "3.0"
+MIN_MATPLOTLIB = "1.0.0"
+MIN_NUMPY = "1.3.0"
+MIN_SCIPY = "0.7.0"
+MIN_WXPYTHON = "2.8.10.1"
+MIN_SETUPTOOLS = "0.6c9"
+MIN_GCC = "3.4.4"
+MIN_PYPARSING = "1.5.2"
+MIN_PERIODICTABLE = "1.3"
+# Required versions of Python packages to run tests.
+MIN_NOSE = "0.11"
+# Required versions of Python packages and utilities to build documentation.
+MIN_SPHINX = "1.0.3"
+MIN_DOCUTILS = "0.5"
+MIN_PYGMENTS = "1.0"
+MIN_JINJA2 = "2.5.2"
+#MIN_MATHJAX = "1.0.1"
+# Required versions of Python packages and utilities to build Windows frozen
+# image and Windows installer.
+MIN_PY2EXE = "0.6.9"
+MIN_INNO = "5.3.10"
+
+# Create a line separator string for printing
+SEPARATOR = "\n" + "/" * 79
+
+# Relative path for local install under our build tree; this is used in place
+# of the default installation path on Windows of C:\PythonNN\Lib\site-packages
+LOCAL_INSTALL = "local-site-packages"
+
+# Determine the full directory paths of the top-level, source, and installation
+# directories based on the directory where the script is running.  Here the
+# top-level directory refers to the parent directory of the package.
+RUN_DIR = os.path.dirname(os.path.abspath(sys.argv[0]))
+head, tail = os.path.split(RUN_DIR)
+if tail == PKG_NAME:
+    TOP_DIR = head
+else:
+    TOP_DIR = RUN_DIR
+SRC_DIR = os.path.join(TOP_DIR, PKG_NAME)
+INS_DIR = os.path.join(TOP_DIR, LOCAL_INSTALL)
+
+# Put PYTHON in the environment and add the python directory and its
+# corresponding script directory (for nose, sphinx, pip, etc) to the path.
+PYTHON = sys.executable
+PYTHONDIR = os.path.dirname(os.path.abspath(PYTHON))
+SCRIPTDIR = os.path.join(PYTHONDIR, 'Scripts')
+os.environ['PATH'] = ";".join((PYTHONDIR, SCRIPTDIR, os.environ['PATH']))
+os.environ['PYTHON'] = "/".join(PYTHON.split("\\"))
+
+
+def get_version():
+    # Get the version string of the application for use later.
+    # This has to be done after we have checked out the repository.
+    for line in open(os.path.join(SRC_DIR, PKG_NAME, '__init__.py')).readlines():
+        if (line.startswith('__version__')):
+            exec(line.strip())
+            break
+    else:
+        raise RuntimeError("Could not find package version")
+
+    global PKG_VERSION, EGG_NAME, PKG_DIR
+    PKG_VERSION = __version__
+    EGG_NAME = "%s-%s-py%d.%d-%s.egg" % (PKG_NAME, PKG_VERSION,
+                                         sys.version_info[0],
+                                         sys.version_info[1],
+                                         sys.platform)
+    PKG_DIR = os.path.join(INS_DIR, EGG_NAME)
+    # Add the local site packages to the python path
+    os.environ['PYTHONPATH'] = PKG_DIR
+
+#==============================================================================
+
+
+def build_it():
+    # If no arguments, start at the first step
+    start_with = sys.argv[1] if len(sys.argv) > 1 else 'deps'
+    started = False
+    only = len(sys.argv) > 2 and sys.argv[2] == "only"
+
+    # Clean the install tree
+    started = started or start_with == 'clean'
+    if started:
+        clean()
+    if started and only:
+        return
+
+    # Check the system for all required dependent packages.
+    started = started or start_with == 'deps'
+    if started:
+        check_dependencies()
+    if started and only:
+        return
+
+    # Checkout code from repository.
+    started = started or start_with in ('co', 'checkout', 'update')
+    if started:
+        checkout_code()
+    if started and only:
+        return
+
+    # Version may have been updated on another repository
+    get_version()
+
+    # Install the application in a local directory tree.
+    started = started or start_with == 'build'
+    if started:
+        install_package()
+    if started and only:
+        return
+
+    # Run unittests and doctests using a test script.
+    started = started or start_with == 'test'
+    if started:
+        run_tests()
+    if started and only:
+        return
+
+    # Build HTML and PDF documentation using Sphinx.
+    # This step is done before building the Windows installer so that PDF
+    # documentation can be included in the installable product.
+    started = started or start_with == 'docs'
+    if started:
+        build_documentation()
+    if started and only:
+        return
+
+    # Create an archive of the source code.
+    started = started or start_with == 'zip'
+    if started:
+        create_archive(PKG_VERSION)
+    if started and only:
+        return
+
+    # Create a Windows executable file using py2exe.
+    started = started or start_with == 'exe'
+    if started and os.name == 'nt':
+        create_windows_exe()
+    if started and only:
+        return
+
+    # Create a Windows installer/uninstaller exe using the Inno Setup Compiler.
+    started = started or start_with == 'installer'
+    if started and os.name == 'nt':
+        create_windows_installer(PKG_VERSION)
+    if started and only:
+        return
+
+
+def checkout_code():
+    # Checkout the application code from the repository into a directory tree
+    # under the top level directory.
+    print(SEPARATOR)
+    print("\nStep 1 - Checking out application code from the repository ...\n")
+
+    if RUN_DIR == TOP_DIR:
+        os.chdir(TOP_DIR)
+        exec_cmd(REPO_NEW)
+    else:
+        os.chdir(SRC_DIR)
+        exec_cmd(REPO_UPDATE)
+
+
+def create_archive(version=None):
+    # Create zip and tar archives of the source code and a manifest file
+    # containing the names of all files.
+    print(SEPARATOR)
+    print("\nStep 2 - Creating an archive of the source code ...\n")
+    os.chdir(SRC_DIR)
+
+    try:
+        # Create zip and tar archives in the dist subdirectory.
+        exec_cmd("%s setup.py sdist --formats=zip,gztar" % (PYTHON))
+    except:
+        print("*** Failed to create source archive ***")
+    else:
+        # Copy the archives and its source listing to the top-level directory.
+        # The location of the file that contains the source listing and the
+        # name of the file varies depending on what package is used to import
+        # setup, so its copy is made optional while we are making setup
+        # changes.
+        shutil.move(os.path.join("dist", PKG_NAME + "-" + str(version) + ".zip"),
+                    os.path.join(TOP_DIR, PKG_NAME + "-" + str(version) + "-source.zip"))
+        shutil.move(os.path.join("dist", PKG_NAME + "-" + str(version) + ".tar.gz"),
+                    os.path.join(TOP_DIR, PKG_NAME + "-" + str(version) + "-source.tar.gz"))
+        listing = os.path.join(SRC_DIR, PKG_NAME + ".egg-info", "SOURCES.txt")
+        if os.path.isfile(listing):
+            shutil.copy(listing,
+                        os.path.join(TOP_DIR, PKG_NAME + "-" + str(version) + "-source-list.txt"))
+
+def clean():
+    if os.path.isdir(INS_DIR):
+        shutil.rmtree(INS_DIR, ignore_errors=True)
+
+def install_package():
+    # Install the application package in a private directory tree.
+    # If the INS_DIR directory already exists, warn the user.
+    # Intermediate work files are stored in the <SRC_DIR>/build directory tree.
+    print(SEPARATOR)
+    print("\nStep 3 - Installing the %s package in %s...\n" %
+          (PKG_NAME, INS_DIR))
+    os.chdir(SRC_DIR)
+
+    # Perform the installation to a private directory tree and create the
+    # PYTHONPATH environment variable to pass this info to the py2exe build
+    # script later on.
+    os.environ["PYTHONPATH"] = INS_DIR
+    if not os.path.exists(INS_DIR):
+        os.makedirs(INS_DIR)
+    exec_cmd("%s setup.py -q install --install-lib=%s" % (PYTHON, INS_DIR))
+
+
+def build_documentation():
+    # Run the Sphinx utility to build the application's documentation.
+    print(SEPARATOR)
+    print("\nStep 4 - Running the Sphinx utility to build documentation ...\n")
+    os.chdir(os.path.join(SRC_DIR, "doc"))
+
+    # Run pylit on the examples directory, creating the tutorial directory
+    exec_cmd("%s gentut.py"%(PYTHON, ))
+
+    if False:
+        # Delete any left over files from a previous build.
+        # Create documentation in HTML and PDF format.
+        sphinx_cmd = '"%s" -m sphinx.__init__ -b %%s -d _build/doctrees -D latex_paper_size=letter .'
+        exec_cmd(sphinx_cmd%"html")
+        exec_cmd(sphinx_cmd%"pdf")
+        # Copy PDF to the doc directory where the py2exe script will look for it.
+        pdf = os.path.join("_build", "latex", APP_NAME + ".pdf")
+        if os.path.isfile(pdf):
+            shutil.copy(pdf, ".")
+
+
+def create_windows_exe():
+    # Use py2exe to create a Win32 executable along with auxiliary files in the
+    # <SRC_DIR>/dist directory tree.
+    print(SEPARATOR)
+    print("\nStep 5 - Using py2exe to create a Win32 executable ...\n")
+    os.chdir(SRC_DIR)
+
+    exec_cmd("%s setup_py2exe.py" % PYTHON)
+
+
+def create_windows_installer(version=None):
+    # Run the Inno Setup Compiler to create a Win32 installer/uninstaller for
+    # the application.
+    print(SEPARATOR)
+    print("\nStep 6 - Running Inno Setup Compiler to create Win32 "
+          "installer/uninstaller ...\n")
+    os.chdir(SRC_DIR)
+
+    # First create an include file to convey the application's version
+    # information to the Inno Setup compiler.
+    f = open("iss-version", "w")
+    f.write('#define MyAppVersion "%s"\n' % version)  # version must be quoted
+    f.close()
+
+    # Run the Inno Setup Compiler to create a Win32 installer/uninstaller.
+    # Override the output specification in <PKG_NAME>.iss to put the executable
+    # and the manifest file in the top-level directory.
+    exec_cmd('"%s" /Q /O%s %s.iss' % (INNO, TOP_DIR, PKG_NAME))
+
+
+def run_tests():
+    # Run unittests and doctests using a test script.
+    # Running from a test script allows customization of the system path.
+    print(SEPARATOR)
+    print("\nStep 7 - Running tests from test.py (using Nose) ...\n")
+    #os.chdir(os.path.join(INS_DIR, PKG_NAME))
+    os.chdir(SRC_DIR)
+
+    exec_cmd("%s test.py" % PYTHON)
+
+
+def check_dependencies():
+    """
+    Checks that the system has the necessary Python packages installed.
+    """
+
+    import platform
+    from pkg_resources import parse_version as PV
+
+    # ------------------------------------------------------
+    python_ver = platform.python_version()
+    print("Using Python " + python_ver)
+    print("")
+    if PV(python_ver) < PV(MIN_PYTHON) or PV(python_ver) >= PV(MAX_PYTHON):
+        print("ERROR - build requires Python >= %s, but < %s"
+              % (MIN_PYTHON, MAX_PYTHON))
+        sys.exit()
+
+    req_pkg = {}
+
+    # ------------------------------------------------------
+    try:
+        from matplotlib import __version__ as mpl_ver
+    except:
+        mpl_ver = "0"
+    finally:
+        req_pkg["matplotlib"] = (mpl_ver, MIN_MATPLOTLIB)
+
+    # ------------------------------------------------------
+    try:
+        from numpy import __version__ as numpy_ver
+    except:
+        numpy_ver = "0"
+    finally:
+        req_pkg["numpy"] = (numpy_ver, MIN_NUMPY)
+
+    # ------------------------------------------------------
+    try:
+        from scipy import __version__ as scipy_ver
+    except:
+        scipy_ver = "0"
+    finally:
+        req_pkg["scipy"] = (scipy_ver, MIN_SCIPY)
+
+    # ------------------------------------------------------
+    try:
+        from wx import __version__ as wx_ver
+    except:
+        wx_ver = "0"
+    finally:
+        req_pkg["wxpython"] = (wx_ver, MIN_WXPYTHON)
+
+    # ------------------------------------------------------
+    try:
+        from setuptools import __version__ as setup_ver
+    except:
+        setup_ver = "0"
+    finally:
+        req_pkg["setuptools"] = (setup_ver, MIN_SETUPTOOLS)
+
+    # ------------------------------------------------------
+    try:
+        flag = (os.name != 'nt')
+        p = subprocess.Popen("gcc -dumpversion", stdout=subprocess.PIPE,
+                             shell=flag)
+        gcc_ver = p.stdout.read().strip()
+    except:
+        gcc_ver = "0"
+    finally:
+        req_pkg["gcc"] = (gcc_ver, MIN_GCC)
+
+    # ------------------------------------------------------
+    try:
+        from pyparsing import __version__ as parse_ver
+    except:
+        parse_ver = "0"
+    finally:
+        req_pkg["pyparsing"] = (parse_ver, MIN_PYPARSING)
+
+    # ------------------------------------------------------
+    try:
+        from nose import __version__ as nose_ver
+    except:
+        nose_ver = "0"
+    finally:
+        req_pkg["nose"] = (nose_ver, MIN_NOSE)
+
+    # ------------------------------------------------------
+    try:
+        from sphinx import __version__ as sphinx_ver
+    except:
+        sphinx_ver = "0"
+    finally:
+        req_pkg["sphinx"] = (sphinx_ver, MIN_SPHINX)
+
+    # ------------------------------------------------------
+    try:
+        from docutils import __version__ as docutils_ver
+    except:
+        docutils_ver = "0"
+    finally:
+        req_pkg["docutils"] = (docutils_ver, MIN_DOCUTILS)
+
+    # ------------------------------------------------------
+    try:
+        from pygments import __version__ as pygments_ver
+    except:
+        pygments_ver = "0"
+    finally:
+        req_pkg["pygments"] = (pygments_ver, MIN_PYGMENTS)
+
+    # ------------------------------------------------------
+    try:
+        from jinja2 import __version__ as jinja2_ver
+    except:
+        jinja2_ver = "0"
+    finally:
+        req_pkg["jinja2"] = (jinja2_ver, MIN_JINJA2)
+
+    # ------------------------------------------------------
+    if os.name == 'nt':
+        try:
+            from py2exe import __version__ as py2exe_ver
+        except:
+            py2exe_ver = "0"
+        finally:
+            req_pkg["py2exe"] = (py2exe_ver, MIN_PY2EXE)
+
+        if os.path.isfile(INNO):
+            req_pkg["Inno Setup Compiler"] = ("?", MIN_INNO)
+        else:
+            req_pkg["Inno Setup Compiler"] = ("0", MIN_INNO)
+
+    # ------------------------------------------------------
+    error = False
+    for key, values in req_pkg.items():
+        if req_pkg[key][0] == "0":
+            print("====> %s not found; version %s or later is required - ERROR"
+                  % (key, req_pkg[key][1]))
+            error = True
+        elif req_pkg[key][0] == "?":
+            print("Found %s" % (key))  # version is unknown
+        elif PV(req_pkg[key][0]) >= PV(req_pkg[key][1]):
+            print("Found %s %s" % (key, req_pkg[key][0]))
+        else:
+            print("Found %s %s but minimum tested version is %s - WARNING"
+                  % (key, req_pkg[key][0], req_pkg[key][1]))
+            error = True
+
+    if error:
+        ans = input("\nDo you want to continue (Y|N)? [N]: ")
+        if ans.upper() != "Y":
+            sys.exit()
+    else:
+        print("\nSoftware dependencies have been satisfied")
+
+
+def exec_cmd(command):
+    """Runs the specified command in a subprocess."""
+
+    flag = (os.name != 'nt')
+
+    print("$ " + command)
+    subprocess.call(command, shell=flag)
+
+
+if __name__ == "__main__":
+    START_POINTS = (
+        'clean', 'deps', 'co', 'checkout', 'update', 'build', 'test',
+        'docs', 'zip', 'exe', 'installer',
+    )
+
+    if len(sys.argv) > 1:
+        # Display help if requested.
+        if len(sys.argv) > 1 and sys.argv[1] not in START_POINTS:
+            print("\nUsage: python master_builder.py [<start>] [only]\n")
+            print("Build start points:")
+            print("  clean      clean old build")
+            print("  deps       check dependencies")
+            print("  update     update code from the repository")
+            print("  build      build package")
+            print("  test       test package")
+            print("  docs       build docs")
+            print("  zip        build source archive")
+            print("  exe        build executable")
+            print("  installer  build installer")
+            print("Add 'only' to the command to only perform a single step")
+            sys.exit()
+
+    print("\nBuilding the %s application from the %s repository ...\n"
+          % (APP_NAME, PKG_NAME))
+    print("Current working directory  = " + RUN_DIR)
+    print("Top-level (root) directory = " + TOP_DIR)
+    print("Package (source) directory = " + SRC_DIR)
+    print("Installation directory     = " + INS_DIR)
+    print("")
+
+    build_it()
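
Putting the start points together, typical invocations (per the usage text
above) look like:

    $ python master_builder.py              # run every step, starting from 'deps'
    $ python master_builder.py docs only    # rebuild only the Sphinx documentation
    $ python master_builder.py test         # start at the test step and continue on
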
diff --git a/rtd-requirements b/rtd-requirements
new file mode 100644
index 0000000..c7740fc
--- /dev/null
+++ b/rtd-requirements
@@ -0,0 +1,3 @@
+numpy>=1.0
+scipy>=0.7.0
+matplotlib>=1.0
diff --git a/run.py b/run.py
new file mode 100755
index 0000000..5d48f72
--- /dev/null
+++ b/run.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+"""
+Build and run bumps.
+
+Usage:
+
+./run.py [bumps cli args]
+"""
+
+import os
+import sys
+
+
+def addpath(path):
+    """
+    Add a directory to the python path environment, and to the PYTHONPATH
+    environment variable for subprocesses.
+    """
+    path = os.path.abspath(path)
+    if 'PYTHONPATH' in os.environ:
+        PYTHONPATH = path + os.pathsep + os.environ['PYTHONPATH']
+    else:
+        PYTHONPATH = path
+    os.environ['PYTHONPATH'] = PYTHONPATH
+    sys.path.insert(0, path)
+
+from contextlib import contextmanager
+
+
+@contextmanager
+def cd(path):
+    old_dir = os.getcwd()
+    os.chdir(path)
+    yield
+    os.chdir(old_dir)
+
+
+def prepare():
+    # Make sure that we have a private version of mplconfig
+    #mplconfig = os.path.join(os.getcwd(), '.mplconfig')
+    #os.environ['MPLCONFIGDIR'] = mplconfig
+    #if not os.path.exists(mplconfig):
+    #    os.mkdir(mplconfig)
+
+    # To avoid cluttering the source tree with .pyc or __pycache__ files, you
+    # can suppress the bytecode generation when running in place. Unfortunately
+    # this is a pretty big performance hit on Windows, so we leave bytecode
+    # generation enabled and rely on .gitignore instead.
+    #sys.dont_write_bytecode = True
+
+    #import numpy as np; np.seterr(all='raise')
+    root = os.path.abspath(os.path.dirname(__file__))
+
+    # Add the root to the system path
+    addpath(root)
+
+    # Make sample data and models available
+    os.environ['BUMPS_DATA'] = os.path.join(root, 'bumps', 'gui', 'resources')
+
+if __name__ == "__main__":
+    import multiprocessing
+    multiprocessing.freeze_support()
+    prepare()
+    import bumps.cli
+    bumps.cli.main()
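
run.py only adjusts the path and environment before handing control to
bumps.cli.main(), so the usual bumps arguments apply; for example (the model
file name is a placeholder):

    $ ./run.py mymodel.py --fit=lm --store=T1
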
diff --git a/setup.py b/setup.py
new file mode 100755
index 0000000..ab400e8
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+import sys
+import os
+
+if len(sys.argv) == 1:
+    sys.argv.append('install')
+
+# Use our own nose-based test harness
+if sys.argv[1] == 'test':
+    from subprocess import call
+    sys.exit(call([sys.executable, 'test.py'] + sys.argv[2:]))
+
+#sys.dont_write_bytecode = True
+
+from setuptools import setup, find_packages
+
+sys.path.insert(0, os.path.dirname(__file__))
+import bumps
+from bumps.gui.resources import resources as gui_resources
+
+packages = find_packages(exclude=['amqp_map', 'fit_functions', 'jobqueue'])
+
+
+# TODO: write a proper dependency checker for packages which cannot be
+# installed by easy_install
+#dependency_check('numpy>=1.0', 'scipy>=0.6', 'matplotlib>=1.0', 'wx>=2.8.9')
+# print bumps.package_data()
+
+#sys.dont_write_bytecode = False
+dist = setup(
+    name='bumps',
+    version=bumps.__version__,
+    author='Paul Kienzle',
+    author_email='paul.kienzle@nist.gov',
+    url='http://www.reflectometry.org/danse/software.html',
+    description='Data fitting with Bayesian uncertainty analysis',
+    long_description=open('README.rst').read(),
+    classifiers=[
+        'Development Status :: 4 - Beta',
+        'Environment :: Console',
+        'Intended Audience :: Science/Research',
+        'License :: Public Domain',
+        'Operating System :: OS Independent',
+        'Programming Language :: Python',
+        'Programming Language :: Python :: 3',
+        'Topic :: Scientific/Engineering',
+        'Topic :: Scientific/Engineering :: Chemistry',
+        'Topic :: Scientific/Engineering :: Physics',
+    ],
+    packages=packages,
+    package_data=gui_resources.package_data(),
+    scripts=['bin/bumps'],
+    install_requires=['six'],
+    #install_requires = ['httplib2'],
+)
+
+# End of file
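
Because the 'test' command is intercepted at the top of setup.py, both of the
following end up running the nose-based harness in test.py:

    $ python setup.py test
    $ python test.py --with-coverage
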
diff --git a/setup_py2app.py b/setup_py2app.py
new file mode 100644
index 0000000..cc69c0f
--- /dev/null
+++ b/setup_py2app.py
@@ -0,0 +1,113 @@
+# This program is in the public domain.
+"""
+Setup file for constructing OS X applications.
+
+Run using::
+
+    % python setup_py2app.py
+"""
+
+import os
+import sys
+import shutil
+
+sys.dont_write_bytecode = True
+
+# Force build before continuing
+os.system('"%s" setup.py build' % sys.executable)
+
+# Remove the current directory from the python path
+here = os.path.abspath(os.path.dirname(__file__))
+sys.path = [p for p in sys.path if os.path.abspath(p) != here]
+
+import py2app
+from distutils.core import setup
+from distutils.util import get_platform
+
+if len(sys.argv) == 1:
+    sys.argv.append('py2app')
+
+# Put the build lib on the start of the path.
+# For packages with binary extensions, need platform.  If it is a pure
+# script library, use an empty platform string.
+platform = '.%s-%s' % (get_platform(), sys.version[:3])
+#platform = ''
+build_lib = os.path.abspath('build/lib' + platform)
+sys.path.insert(0, build_lib)
+
+# print "\n".join(sys.path)
+
+
+# TODO: Combine with setup-py2exe so that consistency is easier.
+packages = ['numpy', 'scipy', 'matplotlib', 'pytz']
+includes = []
+excludes = ['Tkinter', 'PyQt4', '_ssl', '_tkagg', 'numpy.distutils.test',
+            'sphinx', 'docutils', 'jinja2', ]
+PACKAGE_DATA = {}
+
+import bumps
+from bumps.gui.resources import resources as gui_resources
+
+NAME = 'Bumps'
+# Until we figure out why packages=... doesn't work reliably,
+# use py2app_main with explicit imports of everything we
+# might need.
+#SCRIPT = 'py2app_main.py'
+SCRIPT = 'bin/bumps_gui'
+VERSION = bumps.__version__
+ICON = 'extra/bumps.icns'
+ID = 'Bumps'
+COPYRIGHT = 'This program is public domain'
+DATA_FILES = gui_resources.data_files()
+
+plist = dict(
+    CFBundleIconFile=ICON,
+    CFBundleName=NAME,
+    CFBundleShortVersionString=' '.join([NAME, VERSION]),
+    CFBundleGetInfoString=NAME,
+    CFBundleExecutable=NAME,
+    CFBundleIdentifier='org.reflectometry.%s' % ID,
+    NSHumanReadableCopyright=COPYRIGHT
+)
+
+
+app_data = dict(script=SCRIPT, plist=plist)
+py2app_opt = dict(argv_emulation=True,
+                  packages=packages,
+                  includes=includes,
+                  excludes=excludes,
+                  iconfile=ICON,
+                  optimize=2)
+options = dict(py2app=py2app_opt,)
+
+
+def build_app():
+    setup(
+        data_files=DATA_FILES,
+        package_data=PACKAGE_DATA,
+        app=[app_data],
+        options=options,
+    )
+    # Add cli interface to the app directory
+    os.system('cp -p extra/appbin/* "dist/%s.app"' % NAME)
+
+
+def build_dmg():
+    """DMG builder; should include docs"""
+    PRODUCT = NAME + " " + VERSION
+    PRODUCTDASH = NAME + "-" + VERSION
+    APP = "dist/%s.app" % PRODUCT
+    DMG = "dist/%s.dmg" % PRODUCTDASH
+    # Remove previous build if it is still sitting there
+    if os.path.exists(APP):
+        shutil.rmtree(APP)
+    if os.path.exists(DMG):
+        os.unlink(DMG)
+    os.rename("dist/%s.app" % NAME, APP)
+    os.system('cd dist && ../extra/dmgpack.sh "%s" "%s.app" ../doc/_build/html ../doc/examples' %
+              (PRODUCTDASH, PRODUCT))
+    os.system('chmod a+r "%s"' % DMG)
+
+if __name__ == "__main__":
+    build_app()
+    build_dmg()
diff --git a/setup_py2exe.py b/setup_py2exe.py
new file mode 100755
index 0000000..42fb0ab
--- /dev/null
+++ b/setup_py2exe.py
@@ -0,0 +1,315 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2006-2010, University of Maryland
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+# Author: James Krycka
+
+"""
+This script uses py2exe to create dist\bumps.exe and dist\bumps_gui.exe for
+running the Bumps application in either CLI or GUI mode.
+
+These executables start the application and import the rest of the application
+code stored in library.zip.  The python interpreter and other required python
+packages and dlls are also placed in the zip file.  Additional resource files
+that are needed when Bumps is run are copied to the dist directory tree.  On
+completion, the contents of the dist directory tree can be used by the Inno
+Setup Compiler (via a separate script) to build a Windows installer/uninstaller
+for deployment of the Bumps application.  For testing purposes, bumps.exe or
+bumps_gui.exe can be run from the dist directory.
+"""
+
+import os
+import sys
+
+#sys.dont_write_bytecode = True
+
+# Force build before continuing
+os.system('"%s" setup.py build' % sys.executable)
+
+# Remove the current directory from the python path
+here = os.path.abspath(os.path.dirname(__file__))
+sys.path = [p for p in sys.path if os.path.abspath(p) != here]
+
+import glob
+
+from distutils.core import setup
+from distutils.util import get_platform
+
+# Augment the setup interface with the py2exe command and make sure the py2exe
+# option is passed to setup.
+import py2exe
+
+if len(sys.argv) == 1:
+    sys.argv.append('py2exe')
+
+# Put the build lib on the start of the path.
+# For packages with binary extensions, need platform.  If it is a pure
+# script library, use an empty platform string.
+platform = '.%s-%s' % (get_platform(), sys.version[:3])
+#platform = ''
+build_lib = os.path.abspath('build/lib' + platform)
+sys.path.insert(0, build_lib)
+
+# print "\n".join(sys.path)
+
+import wx
+import matplotlib
+matplotlib.use('WXAgg')
+
+# Retrieve the application version string.
+import bumps
+version = bumps.__version__
+from bumps.gui.resources import resources as gui_resources
+
+# A manifest is required to be included in a py2exe image (or accessible as a
+# file in the image directory) when wxPython is included so that the Windows XP
+# theme is used when rendering wx widgets.  The manifest must be matched to the
+# version of Python that is being used.
+#
+# Create a manifest for use with Python 2.5 on Windows XP or Vista.  It is
+# adapted from the Python manifest file (C:\Python25\pythonw.exe.manifest).
+
+manifest_for_python25 = """
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+<assemblyIdentity
+    version="1.0.0.0"
+    processorArchitecture="x86"
+    name="%(prog)s"
+    type="win32"
+/>
+<description>%(prog)s</description>
+<dependency>
+    <dependentAssembly>
+        <assemblyIdentity
+            type="win32"
+            name="Microsoft.Windows.Common-Controls"
+            version="6.0.0.0"
+            processorArchitecture="X86"
+            publicKeyToken="6595b64144ccf1df"
+            language="*"
+        />
+    </dependentAssembly>
+</dependency>
+</assembly>
+"""
+
+# Create a manifest for use with Python 2.6 or 2.7 on Windows XP or Vista.
+
+manifest_for_python26 = """
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+  <assemblyIdentity
+    version="5.0.0.0"
+    processorArchitecture="x86"
+    name="%(prog)s"
+    type="win32">
+  </assemblyIdentity>
+  <description>%(prog)s</description>
+  <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
+    <security>
+      <requestedPrivileges>
+        <requestedExecutionLevel
+          level="asInvoker"
+          uiAccess="false">
+        </requestedExecutionLevel>
+      </requestedPrivileges>
+    </security>
+  </trustInfo>
+  <dependency>
+    <dependentAssembly>
+      <assemblyIdentity
+        type="win32"
+        name="Microsoft.VC90.CRT"
+        version="9.0.21022.8"
+        processorArchitecture="x86"
+        publicKeyToken="1fc8b3b9a1e18e3b">
+      </assemblyIdentity>
+    </dependentAssembly>
+  </dependency>
+  <dependency>
+    <dependentAssembly>
+      <assemblyIdentity
+        type="win32"
+        name="Microsoft.Windows.Common-Controls"
+        version="6.0.0.0"
+        processorArchitecture="x86"
+        publicKeyToken="6595b64144ccf1df"
+        language="*">
+      </assemblyIdentity>
+    </dependentAssembly>
+  </dependency>
+</assembly>
+"""
+
+# Select the appropriate manifest to use.
+manifest = manifest_for_python26 if sys.version_info >= (2, 6) else manifest_for_python25
+
+# Create a list of all files to include along side the executable being built
+# in the dist directory tree.  Each element of the data_files list is a tuple
+# consisting of a path (relative to dist\) and a list of files in that path.
+data_files = []
+
+# Add resource files that need to reside in the same directory as the image.
+data_files.append(('.', [os.path.join('.', 'LICENSE.txt')]))
+data_files.append(('.', [os.path.join('.', 'README.rst')]))
+data_files.append(('.', [os.path.join('.', 'bin', 'launch.bat')]))
+
+# Add application specific data files from the bumps\bumps-data folder.
+data_files += gui_resources.data_files()
+
+# Add data files from the matplotlib\mpl-data folder and its subfolders.
+# For matplotlib prior to version 0.99, see the examples at the end of the file.
+data_files += matplotlib.get_py2exe_datafiles()
+
+# Add example directories and their files.  An empty directory is ignored.
+# Note that Inno Setup will determine where these files will be placed such as
+# C:\My Documents\... instead of the installation folder.
+for path in glob.glob(os.path.join('doc', 'tutorial', '*')):
+    if os.path.isdir(path):
+        for file in glob.glob(os.path.join(path, '*.*')):
+            data_files.append((path, [file]))
+    else:
+        data_files.append(('doc', [path]))
+
+# Add PDF documentation to the dist staging directory.
+#pdf = os.path.join('doc', 'Bumps.pdf')
+#if os.path.isfile(pdf):
+#    data_files.append(('doc', [pdf]))
+#else:
+#    print("*** %s not found - building frozen image without it ***" % pdf)
+
+# Add the Microsoft Visual C++ 2008 redistributable kit if we are building with
+# Python 2.6 or 2.7.  This kit will be installed on the target system as part
+# of the installation process for the frozen image.  Note that the Python 2.5
+# interpreter requires msvcr71.dll, which is included in the Python25 package;
+# Python 2.6 and 2.7 require msvcr90.dll but do not bundle it
+# with the Python26 or Python27 package.  Thus, for Python 2.6 and later, the
+# appropriate dll must be present on the target system at runtime.
+if sys.version_info >= (2, 6):
+    pypath = os.path.dirname(sys.executable)
+    data_files.append(('.', [os.path.join(pypath, 'vcredist_x86.exe')]))
+
+# Specify required packages to bundle in the executable image.
+packages = [
+    'numpy', 'scipy', 'matplotlib', 'pytz', 'pyparsing', 'bumps',
+]
+
+# Specify files to include in the executable image.
+includes = []
+
+# Specify files to exclude from the executable image.
+# - We can safely exclude Tk/Tcl and Qt modules because our app uses wxPython.
+# - We do not use ssl services so they are omitted.
+# - We can safely exclude the TkAgg matplotlib backend because our app uses
+#   "matplotlib.use('WXAgg')" to override the default matplotlib configuration.
+# - On the web it is widely recommended to exclude certain lib*.dll modules
+#   but this does not seem necessary any more (but adding them does not hurt).
+# - Python 2.5 requires msvcr71.dll; however, Win XP includes this file.
+# - Since we do not support Win 9x systems, w9xpopen.dll is not needed.
+# - For some reason cygwin1.dll gets included by default, but it is not needed.
+
+excludes = [
+    'Tkinter', 'PyQt4', '_ssl', '_tkagg', 'numpy.distutils.test',
+    # don't know why sympy, etc, are being included...
+    'OpenGL', 'PIL', 'Pythonwin',
+    'colorama', 'docutils', 'jinja2', 'markupsafe', 'pygments',
+    'scipy.ndimage', 'scipy.signal', 'scipy.sparse',
+    'scipy.spatial', 'scipy.weave',
+    'simplejson', 'sklearn', 'sphinx',
+    'sympy', 'tornado', 'zmq',
+]
+
+dll_excludes = ['libgdk_pixbuf-2.0-0.dll',
+                'libgobject-2.0-0.dll',
+                'libgdk-win32-2.0-0.dll',
+                'tcl85.dll',
+                'tk85.dll',
+                'QtGui4.dll',
+                'QtCore4.dll',
+                'msvcr71.dll',
+                'msvcp90.dll',
+                'w9xpopen.exe',
+                'cygwin1.dll',
+]
+
+
+class Target(object):
+
+    """This class stores metadata about the distribution in a dictionary."""
+
+    def __init__(self, **kw):
+        self.__dict__.update(kw)
+        self.version = version
+
+clientCLI = Target(
+    name='Bumps',
+    description='Bumps CLI application',
+    # module to run on application start
+    script=os.path.join('bin', 'bumps'),
+    dest_base='bumps',  # file name part of the exe file to create
+    # also need to specify in data_files
+    icon_resources=[
+        (1, os.path.join('bumps', 'gui', 'resources', 'bumps.ico'))],
+    bitmap_resources=[],
+    other_resources=[(24, 1, manifest % dict(prog='Bumps'))])
+
+clientGUI = Target(
+    name='Bumps',
+    description='Bumps GUI application',
+    # module to run on application start
+    script=os.path.join('bin', 'bumps_gui'),
+    dest_base='bumps_gui',  # file name part of the exe file to create
+    # also need to specify in data_files
+    icon_resources=[
+        (1, os.path.join('bumps', 'gui', 'resources', 'bumps.ico'))],
+    bitmap_resources=[],
+    other_resources=[(24, 1, manifest % dict(prog='Bumps'))])
+
+# Now we do the work to create a standalone distribution using py2exe.
+#
+# When the application is run in console mode, a console window will be created
+# to receive any logging or error messages and the application will then create
+# a separate GUI application window.
+#
+# When the application is run in windows mode, it will create a GUI application
+# window and no console window will be provided.  Output to stderr will be
+# written to <app-image-name>.log.
+setup(
+    console=[clientCLI],
+    windows=[clientGUI],
+    options={'py2exe': {
+        'packages': packages,
+        'includes': includes,
+        'excludes': excludes,
+        'dll_excludes': dll_excludes,
+        'compressed': 1,   # standard compression
+        'optimize': 0,     # no byte-code optimization
+        'dist_dir': "dist",  # where to put py2exe results
+        'xref': False,     # display cross reference (as html doc)
+        'bundle_files': 1,  # bundle python25.dll in library.zip
+    }
+    },
+    # Since we are building two exe's, do not put the shared library in each
+    # of them.  Instead create a single, separate library.zip file.
+    # zipfile=None,               # bundle library.zip in exe
+    data_files=data_files           # list of files to copy to dist directory
+)
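
The resulting dist/ tree is then fed to the Inno Setup step; roughly, following
the commands used in master_builder.py above:

    > python setup_py2exe.py
    > "C:\Program Files (x86)\Inno Setup 5\ISCC.exe" /Q /O<output-dir> bumps.iss
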
diff --git a/test.py b/test.py
new file mode 100755
index 0000000..d17b777
--- /dev/null
+++ b/test.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+
+"""
+Run tests for bumps.
+
+Usage:
+
+./test.py
+    - run all tests
+
+./test.py --with-coverage
+    - run all tests with coverage report
+"""
+
+import os
+import sys
+import subprocess
+from glob import glob
+import nose
+
+from distutils.util import get_platform
+platform = '.%s-%s' % (get_platform(), sys.version[:3])
+
+# Make sure that we have a private version of mplconfig
+mplconfig = os.path.join(os.getcwd(), '.mplconfig')
+os.environ['MPLCONFIGDIR'] = mplconfig
+if not os.path.exists(mplconfig):
+    os.mkdir(mplconfig)
+import matplotlib
+matplotlib.use('Agg')
+# print(matplotlib.__file__)
+import pylab
+pylab.hold(False)
+
+
+def addpath(path):
+    """
+    Add a directory to the python path environment, and to the PYTHONPATH
+    environment variable for subprocesses.
+    """
+    path = os.path.abspath(path)
+    if 'PYTHONPATH' in os.environ:
+        PYTHONPATH = path + os.pathsep + os.environ['PYTHONPATH']
+    else:
+        PYTHONPATH = path
+    os.environ['PYTHONPATH'] = PYTHONPATH
+    sys.path.insert(0, path)
+
+sys.dont_write_bytecode = True
+
+sys.stderr = sys.stdout  # Doctest doesn't see sys.stderr
+#import numpy as np; np.seterr(all='raise')
+
+# Check that we are running from the root.
+root = os.path.abspath(os.getcwd())
+assert os.path.exists(
+    os.path.join(root, 'bumps', 'cli.py')), "Not in bumps root"
+addpath(root)
+
+# Set the nosetest args
+nose_args = ['-v', '--all-modules',
+             '-m(^_?test_|_test$|^test$)',
+             '--with-doctest', '--doctest-extension=.rst',
+             '--doctest-options=+ELLIPSIS,+NORMALIZE_WHITESPACE',
+             '--cover-package=bumps',
+             '-e.*amqp_map.*',
+             ]
+
+# exclude gui subdirectory if wx is not available
+try:
+    import wx
+except ImportError:
+    nose_args.append('-egui')
+
+nose_args += sys.argv[1:]  # allow coverage arguments
+
+# Add targets
+nose_args += [os.path.join(root,'bumps')]
+nose_args += glob('doc/g*/*.rst')
+nose_args += glob('doc/_examples/*/*.rst')
+
+print("nosetests " + " ".join(nose_args))
+if not nose.run(argv=nose_args):
+    sys.exit(1)
+
+# Run the command line version of bumps which should display help text.
+# for p in ['bin/bumps']:
+#    ret = subprocess.call((sys.executable, p), shell=False)
+#    if ret != 0: sys.exit()
diff --git a/unix_build.sh b/unix_build.sh
new file mode 100755
index 0000000..da5f091
--- /dev/null
+++ b/unix_build.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+set -x
+
+python setup.py build
+python test.py
+(cd doc && make html pdf)
+# make sure the pdf got built by copying it to the current directory
+cp doc/_build/latex/Bumps.pdf .

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/python-bumps.git


